Deep learning - Wikipedia
[o]" accesskey="o"><span class="vector-icon mw-ui-icon-logIn mw-ui-icon-wikimedia-logIn"></span> <span>Log in</span></a></li> </ul> </div> </div> <div id="p-user-menu-anon-editor" class="vector-menu mw-portlet mw-portlet-user-menu-anon-editor" > <div class="vector-menu-heading"> Pages for logged out editors <a href="/wiki/Help:Introduction" aria-label="Learn more about editing"><span>learn more</span></a> </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-anoncontribs" class="mw-list-item"><a href="/wiki/Special:MyContributions" title="A list of edits made from this IP address [y]" accesskey="y"><span>Contributions</span></a></li><li id="pt-anontalk" class="mw-list-item"><a href="/wiki/Special:MyTalk" title="Discussion about edits from this IP address [n]" accesskey="n"><span>Talk</span></a></li> </ul> </div> </div> </div> </div> </nav> </div> </header> </div> <div class="mw-page-container"> <div class="mw-page-container-inner"> <div class="vector-sitenotice-container"> <div id="siteNotice"><!-- CentralNotice --></div> </div> <div class="vector-column-start"> <div class="vector-main-menu-container"> <div id="mw-navigation"> <nav id="mw-panel" class="vector-main-menu-landmark" aria-label="Site"> <div id="vector-main-menu-pinned-container" class="vector-pinned-container"> </div> </nav> </div> </div> <div class="vector-sticky-pinned-container"> <nav id="mw-panel-toc" aria-label="Contents" data-event-name="ui.sidebar-toc" class="mw-table-of-contents-container vector-toc-landmark"> <div id="vector-toc-pinned-container" class="vector-pinned-container"> <div id="vector-toc" class="vector-toc vector-pinnable-element"> <div class="vector-pinnable-header vector-toc-pinnable-header vector-pinnable-header-pinned" data-feature-name="toc-pinned" data-pinnable-element-id="vector-toc" > <h2 class="vector-pinnable-header-label">Contents</h2> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-toc.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-toc.unpin">hide</button> </div> <ul class="vector-toc-contents" id="mw-panel-toc-list"> <li id="toc-mw-content-text" class="vector-toc-list-item vector-toc-level-1"> <a href="#" class="vector-toc-link"> <div class="vector-toc-text">(Top)</div> </a> </li> <li id="toc-Overview" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Overview"> <div class="vector-toc-text"> <span class="vector-toc-numb">1</span> <span>Overview</span> </div> </a> <ul id="toc-Overview-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Interpretations" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Interpretations"> <div class="vector-toc-text"> <span class="vector-toc-numb">2</span> <span>Interpretations</span> </div> </a> <ul id="toc-Interpretations-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-History" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#History"> <div class="vector-toc-text"> <span class="vector-toc-numb">3</span> <span>History</span> </div> </a> <button aria-controls="toc-History-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle History subsection</span> </button> <ul id="toc-History-sublist" class="vector-toc-list"> <li 
id="toc-Before_1980" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Before_1980"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.1</span> <span>Before 1980</span> </div> </a> <ul id="toc-Before_1980-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-1980s-2000s" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#1980s-2000s"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.2</span> <span>1980s-2000s</span> </div> </a> <ul id="toc-1980s-2000s-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-2000s" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#2000s"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.3</span> <span>2000s</span> </div> </a> <ul id="toc-2000s-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Deep_learning_revolution" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Deep_learning_revolution"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.4</span> <span>Deep learning revolution</span> </div> </a> <ul id="toc-Deep_learning_revolution-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Neural_networks" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Neural_networks"> <div class="vector-toc-text"> <span class="vector-toc-numb">4</span> <span>Neural networks</span> </div> </a> <button aria-controls="toc-Neural_networks-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Neural networks subsection</span> </button> <ul id="toc-Neural_networks-sublist" class="vector-toc-list"> <li id="toc-Deep_neural_networks" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Deep_neural_networks"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.1</span> <span>Deep neural networks</span> </div> </a> <ul id="toc-Deep_neural_networks-sublist" class="vector-toc-list"> <li id="toc-Challenges" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Challenges"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.1.1</span> <span>Challenges</span> </div> </a> <ul id="toc-Challenges-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> </ul> </li> <li id="toc-Hardware" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Hardware"> <div class="vector-toc-text"> <span class="vector-toc-numb">5</span> <span>Hardware</span> </div> </a> <ul id="toc-Hardware-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Applications" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Applications"> <div class="vector-toc-text"> <span class="vector-toc-numb">6</span> <span>Applications</span> </div> </a> <button aria-controls="toc-Applications-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Applications subsection</span> </button> <ul id="toc-Applications-sublist" class="vector-toc-list"> <li id="toc-Automatic_speech_recognition" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Automatic_speech_recognition"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.1</span> <span>Automatic speech recognition</span> </div> </a> <ul 
id="toc-Automatic_speech_recognition-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Image_recognition" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Image_recognition"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.2</span> <span>Image recognition</span> </div> </a> <ul id="toc-Image_recognition-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Visual_art_processing" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Visual_art_processing"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.3</span> <span>Visual art processing</span> </div> </a> <ul id="toc-Visual_art_processing-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Natural_language_processing" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Natural_language_processing"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.4</span> <span>Natural language processing</span> </div> </a> <ul id="toc-Natural_language_processing-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Drug_discovery_and_toxicology" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Drug_discovery_and_toxicology"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.5</span> <span>Drug discovery and toxicology</span> </div> </a> <ul id="toc-Drug_discovery_and_toxicology-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Customer_relationship_management" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Customer_relationship_management"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.6</span> <span>Customer relationship management</span> </div> </a> <ul id="toc-Customer_relationship_management-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Recommendation_systems" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Recommendation_systems"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.7</span> <span>Recommendation systems</span> </div> </a> <ul id="toc-Recommendation_systems-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Bioinformatics" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Bioinformatics"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.8</span> <span>Bioinformatics</span> </div> </a> <ul id="toc-Bioinformatics-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Deep_Neural_Network_Estimations" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Deep_Neural_Network_Estimations"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.9</span> <span>Deep Neural Network Estimations</span> </div> </a> <ul id="toc-Deep_Neural_Network_Estimations-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Medical_image_analysis" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Medical_image_analysis"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.10</span> <span>Medical image analysis</span> </div> </a> <ul id="toc-Medical_image_analysis-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Mobile_advertising" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Mobile_advertising"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.11</span> <span>Mobile advertising</span> </div> </a> <ul id="toc-Mobile_advertising-sublist" 
class="vector-toc-list"> </ul> </li> <li id="toc-Image_restoration" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Image_restoration"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.12</span> <span>Image restoration</span> </div> </a> <ul id="toc-Image_restoration-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Financial_fraud_detection" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Financial_fraud_detection"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.13</span> <span>Financial fraud detection</span> </div> </a> <ul id="toc-Financial_fraud_detection-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Materials_science" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Materials_science"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.14</span> <span>Materials science</span> </div> </a> <ul id="toc-Materials_science-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Military" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Military"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.15</span> <span>Military</span> </div> </a> <ul id="toc-Military-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Partial_differential_equations" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Partial_differential_equations"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.16</span> <span>Partial differential equations</span> </div> </a> <ul id="toc-Partial_differential_equations-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Deep_backward_stochastic_differential_equation_method" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Deep_backward_stochastic_differential_equation_method"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.17</span> <span>Deep backward stochastic differential equation method</span> </div> </a> <ul id="toc-Deep_backward_stochastic_differential_equation_method-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Image_reconstruction" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Image_reconstruction"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.18</span> <span>Image reconstruction</span> </div> </a> <ul id="toc-Image_reconstruction-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Weather_prediction" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Weather_prediction"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.19</span> <span>Weather prediction</span> </div> </a> <ul id="toc-Weather_prediction-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Epigenetic_clock" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Epigenetic_clock"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.20</span> <span>Epigenetic clock</span> </div> </a> <ul id="toc-Epigenetic_clock-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Relation_to_human_cognitive_and_brain_development" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Relation_to_human_cognitive_and_brain_development"> <div class="vector-toc-text"> <span class="vector-toc-numb">7</span> <span>Relation to human cognitive and brain development</span> </div> </a> <ul 
id="toc-Relation_to_human_cognitive_and_brain_development-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Commercial_activity" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Commercial_activity"> <div class="vector-toc-text"> <span class="vector-toc-numb">8</span> <span>Commercial activity</span> </div> </a> <ul id="toc-Commercial_activity-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Criticism_and_comment" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Criticism_and_comment"> <div class="vector-toc-text"> <span class="vector-toc-numb">9</span> <span>Criticism and comment</span> </div> </a> <button aria-controls="toc-Criticism_and_comment-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Criticism and comment subsection</span> </button> <ul id="toc-Criticism_and_comment-sublist" class="vector-toc-list"> <li id="toc-Theory" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Theory"> <div class="vector-toc-text"> <span class="vector-toc-numb">9.1</span> <span>Theory</span> </div> </a> <ul id="toc-Theory-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Errors" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Errors"> <div class="vector-toc-text"> <span class="vector-toc-numb">9.2</span> <span>Errors</span> </div> </a> <ul id="toc-Errors-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Cyber_threat" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Cyber_threat"> <div class="vector-toc-text"> <span class="vector-toc-numb">9.3</span> <span>Cyber threat</span> </div> </a> <ul id="toc-Cyber_threat-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Data_collection_ethics" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Data_collection_ethics"> <div class="vector-toc-text"> <span class="vector-toc-numb">9.4</span> <span>Data collection ethics</span> </div> </a> <ul id="toc-Data_collection_ethics-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-See_also" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#See_also"> <div class="vector-toc-text"> <span class="vector-toc-numb">10</span> <span>See also</span> </div> </a> <ul id="toc-See_also-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-References" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#References"> <div class="vector-toc-text"> <span class="vector-toc-numb">11</span> <span>References</span> </div> </a> <ul id="toc-References-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Further_reading" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Further_reading"> <div class="vector-toc-text"> <span class="vector-toc-numb">12</span> <span>Further reading</span> </div> </a> <ul id="toc-Further_reading-sublist" class="vector-toc-list"> </ul> </li> </ul> </div> </div> </nav> </div> </div> <div class="mw-content-container"> <main id="content" class="mw-body"> <header class="mw-body-header vector-page-titlebar"> <nav aria-label="Contents" class="vector-toc-landmark"> <div id="vector-page-titlebar-toc" class="vector-dropdown vector-page-titlebar-toc vector-button-flush-left" title="Table of Contents" > <input type="checkbox" 
id="vector-page-titlebar-toc-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-titlebar-toc" class="vector-dropdown-checkbox " aria-label="Toggle the table of contents" > <label id="vector-page-titlebar-toc-label" for="vector-page-titlebar-toc-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-listBullet mw-ui-icon-wikimedia-listBullet"></span> <span class="vector-dropdown-label-text">Toggle the table of contents</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-titlebar-toc-unpinned-container" class="vector-unpinned-container"> </div> </div> </div> </nav> <h1 id="firstHeading" class="firstHeading mw-first-heading"><span class="mw-page-title-main">Deep learning</span></h1> <div id="p-lang-btn" class="vector-dropdown mw-portlet mw-portlet-lang" > <input type="checkbox" id="p-lang-btn-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-p-lang-btn" class="vector-dropdown-checkbox mw-interlanguage-selector" aria-label="Go to an article in another language. Available in 58 languages" > <label id="p-lang-btn-label" for="p-lang-btn-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--action-progressive mw-portlet-lang-heading-58" aria-hidden="true" ><span class="vector-icon mw-ui-icon-language-progressive mw-ui-icon-wikimedia-language-progressive"></span> <span class="vector-dropdown-label-text">58 languages</span> </label> <div class="vector-dropdown-content"> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li class="interlanguage-link interwiki-af mw-list-item"><a href="https://af.wikipedia.org/wiki/Diepleer" title="Diepleer – Afrikaans" lang="af" hreflang="af" data-title="Diepleer" data-language-autonym="Afrikaans" data-language-local-name="Afrikaans" class="interlanguage-link-target"><span>Afrikaans</span></a></li><li class="interlanguage-link interwiki-ar mw-list-item"><a href="https://ar.wikipedia.org/wiki/%D8%AA%D8%B9%D9%84%D9%85_%D9%85%D8%AA%D8%B9%D9%85%D9%82" title="تعلم متعمق – Arabic" lang="ar" hreflang="ar" data-title="تعلم متعمق" data-language-autonym="العربية" data-language-local-name="Arabic" class="interlanguage-link-target"><span>العربية</span></a></li><li class="interlanguage-link interwiki-az mw-list-item"><a href="https://az.wikipedia.org/wiki/D%C9%99rin_%C3%B6yr%C9%99nm%C9%99" title="Dərin öyrənmə – Azerbaijani" lang="az" hreflang="az" data-title="Dərin öyrənmə" data-language-autonym="Azərbaycanca" data-language-local-name="Azerbaijani" class="interlanguage-link-target"><span>Azərbaycanca</span></a></li><li class="interlanguage-link interwiki-bn mw-list-item"><a href="https://bn.wikipedia.org/wiki/%E0%A6%97%E0%A6%AD%E0%A7%80%E0%A6%B0_%E0%A6%B6%E0%A6%BF%E0%A6%96%E0%A6%A8" title="গভীর শিখন – Bangla" lang="bn" hreflang="bn" data-title="গভীর শিখন" data-language-autonym="বাংলা" data-language-local-name="Bangla" class="interlanguage-link-target"><span>বাংলা</span></a></li><li class="interlanguage-link interwiki-zh-min-nan mw-list-item"><a href="https://zh-min-nan.wikipedia.org/wiki/Chhim-t%C5%8D%CD%98_ha%CC%8Dk-si%CC%8Dp" title="Chhim-tō͘ ha̍k-si̍p – Minnan" lang="nan" hreflang="nan" data-title="Chhim-tō͘ ha̍k-si̍p" data-language-autonym="閩南語 / Bân-lâm-gú" data-language-local-name="Minnan" 
class="interlanguage-link-target"><span>閩南語 / Bân-lâm-gú</span></a></li><li class="interlanguage-link interwiki-bg mw-list-item"><a href="https://bg.wikipedia.org/wiki/%D0%94%D1%8A%D0%BB%D0%B1%D0%BE%D0%BA%D0%BE_%D0%BE%D0%B1%D1%83%D1%87%D0%B5%D0%BD%D0%B8%D0%B5" title="Дълбоко обучение – Bulgarian" lang="bg" hreflang="bg" data-title="Дълбоко обучение" data-language-autonym="Български" data-language-local-name="Bulgarian" class="interlanguage-link-target"><span>Български</span></a></li><li class="interlanguage-link interwiki-bs mw-list-item"><a href="https://bs.wikipedia.org/wiki/Duboko_u%C4%8Denje" title="Duboko učenje – Bosnian" lang="bs" hreflang="bs" data-title="Duboko učenje" data-language-autonym="Bosanski" data-language-local-name="Bosnian" class="interlanguage-link-target"><span>Bosanski</span></a></li><li class="interlanguage-link interwiki-ca mw-list-item"><a href="https://ca.wikipedia.org/wiki/Aprenentatge_profund" title="Aprenentatge profund – Catalan" lang="ca" hreflang="ca" data-title="Aprenentatge profund" data-language-autonym="Català" data-language-local-name="Catalan" class="interlanguage-link-target"><span>Català</span></a></li><li class="interlanguage-link interwiki-cs mw-list-item"><a href="https://cs.wikipedia.org/wiki/Hlubok%C3%A9_u%C4%8Den%C3%AD" title="Hluboké učení – Czech" lang="cs" hreflang="cs" data-title="Hluboké učení" data-language-autonym="Čeština" data-language-local-name="Czech" class="interlanguage-link-target"><span>Čeština</span></a></li><li class="interlanguage-link interwiki-da mw-list-item"><a href="https://da.wikipedia.org/wiki/Deep_learning" title="Deep learning – Danish" lang="da" hreflang="da" data-title="Deep learning" data-language-autonym="Dansk" data-language-local-name="Danish" class="interlanguage-link-target"><span>Dansk</span></a></li><li class="interlanguage-link interwiki-de mw-list-item"><a href="https://de.wikipedia.org/wiki/Deep_Learning" title="Deep Learning – German" lang="de" hreflang="de" data-title="Deep Learning" data-language-autonym="Deutsch" data-language-local-name="German" class="interlanguage-link-target"><span>Deutsch</span></a></li><li class="interlanguage-link interwiki-et mw-list-item"><a href="https://et.wikipedia.org/wiki/S%C3%BCgav%C3%B5pe" title="Sügavõpe – Estonian" lang="et" hreflang="et" data-title="Sügavõpe" data-language-autonym="Eesti" data-language-local-name="Estonian" class="interlanguage-link-target"><span>Eesti</span></a></li><li class="interlanguage-link interwiki-el mw-list-item"><a href="https://el.wikipedia.org/wiki/%CE%92%CE%B1%CE%B8%CE%B9%CE%AC_%CE%BC%CE%AC%CE%B8%CE%B7%CF%83%CE%B7" title="Βαθιά μάθηση – Greek" lang="el" hreflang="el" data-title="Βαθιά μάθηση" data-language-autonym="Ελληνικά" data-language-local-name="Greek" class="interlanguage-link-target"><span>Ελληνικά</span></a></li><li class="interlanguage-link interwiki-es mw-list-item"><a href="https://es.wikipedia.org/wiki/Aprendizaje_profundo" title="Aprendizaje profundo – Spanish" lang="es" hreflang="es" data-title="Aprendizaje profundo" data-language-autonym="Español" data-language-local-name="Spanish" class="interlanguage-link-target"><span>Español</span></a></li><li class="interlanguage-link interwiki-eo mw-list-item"><a href="https://eo.wikipedia.org/wiki/Deep_learning" title="Deep learning – Esperanto" lang="eo" hreflang="eo" data-title="Deep learning" data-language-autonym="Esperanto" data-language-local-name="Esperanto" class="interlanguage-link-target"><span>Esperanto</span></a></li><li class="interlanguage-link interwiki-eu 
mw-list-item"><a href="https://eu.wikipedia.org/wiki/Ikaskuntza_sakon" title="Ikaskuntza sakon – Basque" lang="eu" hreflang="eu" data-title="Ikaskuntza sakon" data-language-autonym="Euskara" data-language-local-name="Basque" class="interlanguage-link-target"><span>Euskara</span></a></li><li class="interlanguage-link interwiki-fa mw-list-item"><a href="https://fa.wikipedia.org/wiki/%DB%8C%D8%A7%D8%AF%DA%AF%DB%8C%D8%B1%DB%8C_%D8%B9%D9%85%DB%8C%D9%82" title="یادگیری عمیق – Persian" lang="fa" hreflang="fa" data-title="یادگیری عمیق" data-language-autonym="فارسی" data-language-local-name="Persian" class="interlanguage-link-target"><span>فارسی</span></a></li><li class="interlanguage-link interwiki-fr mw-list-item"><a href="https://fr.wikipedia.org/wiki/Apprentissage_profond" title="Apprentissage profond – French" lang="fr" hreflang="fr" data-title="Apprentissage profond" data-language-autonym="Français" data-language-local-name="French" class="interlanguage-link-target"><span>Français</span></a></li><li class="interlanguage-link interwiki-ga mw-list-item"><a href="https://ga.wikipedia.org/wiki/Domhainfhoghlaim" title="Domhainfhoghlaim – Irish" lang="ga" hreflang="ga" data-title="Domhainfhoghlaim" data-language-autonym="Gaeilge" data-language-local-name="Irish" class="interlanguage-link-target"><span>Gaeilge</span></a></li><li class="interlanguage-link interwiki-gl mw-list-item"><a href="https://gl.wikipedia.org/wiki/Deep_learning" title="Deep learning – Galician" lang="gl" hreflang="gl" data-title="Deep learning" data-language-autonym="Galego" data-language-local-name="Galician" class="interlanguage-link-target"><span>Galego</span></a></li><li class="interlanguage-link interwiki-ko mw-list-item"><a href="https://ko.wikipedia.org/wiki/%EB%94%A5_%EB%9F%AC%EB%8B%9D" title="딥 러닝 – Korean" lang="ko" hreflang="ko" data-title="딥 러닝" data-language-autonym="한국어" data-language-local-name="Korean" class="interlanguage-link-target"><span>한국어</span></a></li><li class="interlanguage-link interwiki-hy mw-list-item"><a href="https://hy.wikipedia.org/wiki/%D4%BD%D5%B8%D6%80_%D5%B8%D6%82%D5%BD%D5%B8%D6%82%D6%81%D5%B8%D6%82%D5%B4" title="Խոր ուսուցում – Armenian" lang="hy" hreflang="hy" data-title="Խոր ուսուցում" data-language-autonym="Հայերեն" data-language-local-name="Armenian" class="interlanguage-link-target"><span>Հայերեն</span></a></li><li class="interlanguage-link interwiki-hi mw-list-item"><a href="https://hi.wikipedia.org/wiki/%E0%A4%A1%E0%A5%80%E0%A4%AA_%E0%A4%B2%E0%A4%B0%E0%A5%8D%E0%A4%A8%E0%A4%BF%E0%A4%82%E0%A4%97" title="डीप लर्निंग – Hindi" lang="hi" hreflang="hi" data-title="डीप लर्निंग" data-language-autonym="हिन्दी" data-language-local-name="Hindi" class="interlanguage-link-target"><span>हिन्दी</span></a></li><li class="interlanguage-link interwiki-id mw-list-item"><a href="https://id.wikipedia.org/wiki/Pemelajaran_dalam" title="Pemelajaran dalam – Indonesian" lang="id" hreflang="id" data-title="Pemelajaran dalam" data-language-autonym="Bahasa Indonesia" data-language-local-name="Indonesian" class="interlanguage-link-target"><span>Bahasa Indonesia</span></a></li><li class="interlanguage-link interwiki-it mw-list-item"><a href="https://it.wikipedia.org/wiki/Apprendimento_profondo" title="Apprendimento profondo – Italian" lang="it" hreflang="it" data-title="Apprendimento profondo" data-language-autonym="Italiano" data-language-local-name="Italian" class="interlanguage-link-target"><span>Italiano</span></a></li><li class="interlanguage-link interwiki-he mw-list-item"><a 
href="https://he.wikipedia.org/wiki/%D7%9C%D7%9E%D7%99%D7%93%D7%94_%D7%A2%D7%9E%D7%95%D7%A7%D7%94" title="למידה עמוקה – Hebrew" lang="he" hreflang="he" data-title="למידה עמוקה" data-language-autonym="עברית" data-language-local-name="Hebrew" class="interlanguage-link-target"><span>עברית</span></a></li><li class="interlanguage-link interwiki-hu mw-list-item"><a href="https://hu.wikipedia.org/wiki/Deep_learning" title="Deep learning – Hungarian" lang="hu" hreflang="hu" data-title="Deep learning" data-language-autonym="Magyar" data-language-local-name="Hungarian" class="interlanguage-link-target"><span>Magyar</span></a></li><li class="interlanguage-link interwiki-ml mw-list-item"><a href="https://ml.wikipedia.org/wiki/%E0%B4%A1%E0%B5%80%E0%B4%AA%E0%B5%8D_%E0%B4%B2%E0%B5%87%E0%B4%A3%E0%B4%BF%E0%B4%82%E0%B4%97%E0%B5%8D" title="ഡീപ് ലേണിംഗ് – Malayalam" lang="ml" hreflang="ml" data-title="ഡീപ് ലേണിംഗ്" data-language-autonym="മലയാളം" data-language-local-name="Malayalam" class="interlanguage-link-target"><span>മലയാളം</span></a></li><li class="interlanguage-link interwiki-ms mw-list-item"><a href="https://ms.wikipedia.org/wiki/Pembelajaran_dalam" title="Pembelajaran dalam – Malay" lang="ms" hreflang="ms" data-title="Pembelajaran dalam" data-language-autonym="Bahasa Melayu" data-language-local-name="Malay" class="interlanguage-link-target"><span>Bahasa Melayu</span></a></li><li class="interlanguage-link interwiki-mn mw-list-item"><a href="https://mn.wikipedia.org/wiki/Deep_learning" title="Deep learning – Mongolian" lang="mn" hreflang="mn" data-title="Deep learning" data-language-autonym="Монгол" data-language-local-name="Mongolian" class="interlanguage-link-target"><span>Монгол</span></a></li><li class="interlanguage-link interwiki-nl mw-list-item"><a href="https://nl.wikipedia.org/wiki/Deep_learning" title="Deep learning – Dutch" lang="nl" hreflang="nl" data-title="Deep learning" data-language-autonym="Nederlands" data-language-local-name="Dutch" class="interlanguage-link-target"><span>Nederlands</span></a></li><li class="interlanguage-link interwiki-ja mw-list-item"><a href="https://ja.wikipedia.org/wiki/%E3%83%87%E3%82%A3%E3%83%BC%E3%83%97%E3%83%A9%E3%83%BC%E3%83%8B%E3%83%B3%E3%82%B0" title="ディープラーニング – Japanese" lang="ja" hreflang="ja" data-title="ディープラーニング" data-language-autonym="日本語" data-language-local-name="Japanese" class="interlanguage-link-target"><span>日本語</span></a></li><li class="interlanguage-link interwiki-no mw-list-item"><a href="https://no.wikipedia.org/wiki/Dyp_l%C3%A6ring" title="Dyp læring – Norwegian Bokmål" lang="nb" hreflang="nb" data-title="Dyp læring" data-language-autonym="Norsk bokmål" data-language-local-name="Norwegian Bokmål" class="interlanguage-link-target"><span>Norsk bokmål</span></a></li><li class="interlanguage-link interwiki-oc mw-list-item"><a href="https://oc.wikipedia.org/wiki/Aprendissatge_prigond" title="Aprendissatge prigond – Occitan" lang="oc" hreflang="oc" data-title="Aprendissatge prigond" data-language-autonym="Occitan" data-language-local-name="Occitan" class="interlanguage-link-target"><span>Occitan</span></a></li><li class="interlanguage-link interwiki-ps mw-list-item"><a href="https://ps.wikipedia.org/wiki/%DA%98%D9%88%D8%B1%D9%87_%D8%B2%D8%AF%D9%87_%DA%A9%DA%93%D9%87" title="ژوره زده کړه – Pashto" lang="ps" hreflang="ps" data-title="ژوره زده کړه" data-language-autonym="پښتو" data-language-local-name="Pashto" class="interlanguage-link-target"><span>پښتو</span></a></li><li class="interlanguage-link interwiki-pl mw-list-item"><a 
href="https://pl.wikipedia.org/wiki/Uczenie_g%C5%82%C4%99bokie" title="Uczenie głębokie – Polish" lang="pl" hreflang="pl" data-title="Uczenie głębokie" data-language-autonym="Polski" data-language-local-name="Polish" class="interlanguage-link-target"><span>Polski</span></a></li><li class="interlanguage-link interwiki-pt mw-list-item"><a href="https://pt.wikipedia.org/wiki/Aprendizagem_profunda" title="Aprendizagem profunda – Portuguese" lang="pt" hreflang="pt" data-title="Aprendizagem profunda" data-language-autonym="Português" data-language-local-name="Portuguese" class="interlanguage-link-target"><span>Português</span></a></li><li class="interlanguage-link interwiki-kaa mw-list-item"><a href="https://kaa.wikipedia.org/wiki/Tere%C5%84_oq%C4%B1t%C4%B1w" title="Tereń oqıtıw – Kara-Kalpak" lang="kaa" hreflang="kaa" data-title="Tereń oqıtıw" data-language-autonym="Qaraqalpaqsha" data-language-local-name="Kara-Kalpak" class="interlanguage-link-target"><span>Qaraqalpaqsha</span></a></li><li class="interlanguage-link interwiki-ro mw-list-item"><a href="https://ro.wikipedia.org/wiki/%C3%8Env%C4%83%C8%9Bare_profund%C4%83" title="Învățare profundă – Romanian" lang="ro" hreflang="ro" data-title="Învățare profundă" data-language-autonym="Română" data-language-local-name="Romanian" class="interlanguage-link-target"><span>Română</span></a></li><li class="interlanguage-link interwiki-qu mw-list-item"><a href="https://qu.wikipedia.org/wiki/Ukhu_yachay" title="Ukhu yachay – Quechua" lang="qu" hreflang="qu" data-title="Ukhu yachay" data-language-autonym="Runa Simi" data-language-local-name="Quechua" class="interlanguage-link-target"><span>Runa Simi</span></a></li><li class="interlanguage-link interwiki-ru mw-list-item"><a href="https://ru.wikipedia.org/wiki/%D0%93%D0%BB%D1%83%D0%B1%D0%BE%D0%BA%D0%BE%D0%B5_%D0%BE%D0%B1%D1%83%D1%87%D0%B5%D0%BD%D0%B8%D0%B5" title="Глубокое обучение – Russian" lang="ru" hreflang="ru" data-title="Глубокое обучение" data-language-autonym="Русский" data-language-local-name="Russian" class="interlanguage-link-target"><span>Русский</span></a></li><li class="interlanguage-link interwiki-sq mw-list-item"><a href="https://sq.wikipedia.org/wiki/M%C3%ABsimi_i_thell%C3%AB" title="Mësimi i thellë – Albanian" lang="sq" hreflang="sq" data-title="Mësimi i thellë" data-language-autonym="Shqip" data-language-local-name="Albanian" class="interlanguage-link-target"><span>Shqip</span></a></li><li class="interlanguage-link interwiki-si mw-list-item"><a href="https://si.wikipedia.org/wiki/%E0%B6%9C%E0%B7%90%E0%B6%B9%E0%B7%94%E0%B6%BB%E0%B7%94_%E0%B6%89%E0%B6%9C%E0%B7%99%E0%B6%B1%E0%B7%93%E0%B6%B8" title="ගැඹුරු ඉගෙනීම – Sinhala" lang="si" hreflang="si" data-title="ගැඹුරු ඉගෙනීම" data-language-autonym="සිංහල" data-language-local-name="Sinhala" class="interlanguage-link-target"><span>සිංහල</span></a></li><li class="interlanguage-link interwiki-simple mw-list-item"><a href="https://simple.wikipedia.org/wiki/Deep_learning" title="Deep learning – Simple English" lang="en-simple" hreflang="en-simple" data-title="Deep learning" data-language-autonym="Simple English" data-language-local-name="Simple English" class="interlanguage-link-target"><span>Simple English</span></a></li><li class="interlanguage-link interwiki-sl mw-list-item"><a href="https://sl.wikipedia.org/wiki/Globoko_u%C4%8Denje" title="Globoko učenje – Slovenian" lang="sl" hreflang="sl" data-title="Globoko učenje" data-language-autonym="Slovenščina" data-language-local-name="Slovenian" 
class="interlanguage-link-target"><span>Slovenščina</span></a></li><li class="interlanguage-link interwiki-ckb mw-list-item"><a href="https://ckb.wikipedia.org/wiki/%D9%81%DB%8E%D8%B1%D8%A8%D9%88%D9%88%D9%86%DB%8C_%D9%82%D9%88%D9%88%DA%B5" title="فێربوونی قووڵ – Central Kurdish" lang="ckb" hreflang="ckb" data-title="فێربوونی قووڵ" data-language-autonym="کوردی" data-language-local-name="Central Kurdish" class="interlanguage-link-target"><span>کوردی</span></a></li><li class="interlanguage-link interwiki-sr mw-list-item"><a href="https://sr.wikipedia.org/wiki/%D0%94%D1%83%D0%B1%D0%BE%D0%BA%D0%BE_%D1%83%D1%87%D0%B5%D1%9A%D0%B5" title="Дубоко учење – Serbian" lang="sr" hreflang="sr" data-title="Дубоко учење" data-language-autonym="Српски / srpski" data-language-local-name="Serbian" class="interlanguage-link-target"><span>Српски / srpski</span></a></li><li class="interlanguage-link interwiki-sh mw-list-item"><a href="https://sh.wikipedia.org/wiki/Duboko_u%C4%8Denje" title="Duboko učenje – Serbo-Croatian" lang="sh" hreflang="sh" data-title="Duboko učenje" data-language-autonym="Srpskohrvatski / српскохрватски" data-language-local-name="Serbo-Croatian" class="interlanguage-link-target"><span>Srpskohrvatski / српскохрватски</span></a></li><li class="interlanguage-link interwiki-fi mw-list-item"><a href="https://fi.wikipedia.org/wiki/Syv%C3%A4oppiminen" title="Syväoppiminen – Finnish" lang="fi" hreflang="fi" data-title="Syväoppiminen" data-language-autonym="Suomi" data-language-local-name="Finnish" class="interlanguage-link-target"><span>Suomi</span></a></li><li class="interlanguage-link interwiki-sv mw-list-item"><a href="https://sv.wikipedia.org/wiki/Djupinl%C3%A4rning" title="Djupinlärning – Swedish" lang="sv" hreflang="sv" data-title="Djupinlärning" data-language-autonym="Svenska" data-language-local-name="Swedish" class="interlanguage-link-target"><span>Svenska</span></a></li><li class="interlanguage-link interwiki-ta mw-list-item"><a href="https://ta.wikipedia.org/wiki/%E0%AE%86%E0%AE%B4%E0%AE%AE%E0%AE%BE%E0%AE%A9_%E0%AE%95%E0%AE%B1%E0%AF%8D%E0%AE%B1%E0%AE%B2%E0%AF%8D" title="ஆழமான கற்றல் – Tamil" lang="ta" hreflang="ta" data-title="ஆழமான கற்றல்" data-language-autonym="தமிழ்" data-language-local-name="Tamil" class="interlanguage-link-target"><span>தமிழ்</span></a></li><li class="interlanguage-link interwiki-th mw-list-item"><a href="https://th.wikipedia.org/wiki/%E0%B8%81%E0%B8%B2%E0%B8%A3%E0%B9%80%E0%B8%A3%E0%B8%B5%E0%B8%A2%E0%B8%99%E0%B8%A3%E0%B8%B9%E0%B9%89%E0%B9%80%E0%B8%8A%E0%B8%B4%E0%B8%87%E0%B8%A5%E0%B8%B6%E0%B8%81" title="การเรียนรู้เชิงลึก – Thai" lang="th" hreflang="th" data-title="การเรียนรู้เชิงลึก" data-language-autonym="ไทย" data-language-local-name="Thai" class="interlanguage-link-target"><span>ไทย</span></a></li><li class="interlanguage-link interwiki-tr mw-list-item"><a href="https://tr.wikipedia.org/wiki/Derin_%C3%B6%C4%9Frenme" title="Derin öğrenme – Turkish" lang="tr" hreflang="tr" data-title="Derin öğrenme" data-language-autonym="Türkçe" data-language-local-name="Turkish" class="interlanguage-link-target"><span>Türkçe</span></a></li><li class="interlanguage-link interwiki-uk mw-list-item"><a href="https://uk.wikipedia.org/wiki/%D0%93%D0%BB%D0%B8%D0%B1%D0%BE%D0%BA%D0%B5_%D0%BD%D0%B0%D0%B2%D1%87%D0%B0%D0%BD%D0%BD%D1%8F" title="Глибоке навчання – Ukrainian" lang="uk" hreflang="uk" data-title="Глибоке навчання" data-language-autonym="Українська" data-language-local-name="Ukrainian" class="interlanguage-link-target"><span>Українська</span></a></li><li class="interlanguage-link 
interwiki-ur mw-list-item"><a href="https://ur.wikipedia.org/wiki/%DA%88%DB%8C%D9%BE_%D9%84%D8%B1%D9%86%D9%86%DA%AF" title="ڈیپ لرننگ – Urdu" lang="ur" hreflang="ur" data-title="ڈیپ لرننگ" data-language-autonym="اردو" data-language-local-name="Urdu" class="interlanguage-link-target"><span>اردو</span></a></li><li class="interlanguage-link interwiki-vi mw-list-item"><a href="https://vi.wikipedia.org/wiki/H%E1%BB%8Dc_s%C3%A2u" title="Học sâu – Vietnamese" lang="vi" hreflang="vi" data-title="Học sâu" data-language-autonym="Tiếng Việt" data-language-local-name="Vietnamese" class="interlanguage-link-target"><span>Tiếng Việt</span></a></li><li class="interlanguage-link interwiki-zh-yue mw-list-item"><a href="https://zh-yue.wikipedia.org/wiki/%E6%B7%B1%E5%BA%A6%E5%AD%B8%E7%BF%92" title="深度學習 – Cantonese" lang="yue" hreflang="yue" data-title="深度學習" data-language-autonym="粵語" data-language-local-name="Cantonese" class="interlanguage-link-target"><span>粵語</span></a></li><li class="interlanguage-link interwiki-zh mw-list-item"><a href="https://zh.wikipedia.org/wiki/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0" title="深度学习 – Chinese" lang="zh" hreflang="zh" data-title="深度学习" data-language-autonym="中文" data-language-local-name="Chinese" class="interlanguage-link-target"><span>中文</span></a></li> </ul> <div class="after-portlet after-portlet-lang"><span class="wb-langlinks-edit wb-langlinks-link"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q197536#sitelinks-wikipedia" title="Edit interlanguage links" class="wbc-editpage">Edit links</a></span></div> </div> </div> </div> </header> <div class="vector-page-toolbar"> <div class="vector-page-toolbar-container"> <div id="left-navigation"> <nav aria-label="Namespaces"> <div id="p-associated-pages" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-associated-pages" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-nstab-main" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/Deep_learning" title="View the content page [c]" accesskey="c"><span>Article</span></a></li><li id="ca-talk" class="vector-tab-noicon mw-list-item"><a href="/wiki/Talk:Deep_learning" rel="discussion" title="Discuss improvements to the content page [t]" accesskey="t"><span>Talk</span></a></li> </ul> </div> </div> <div id="vector-variants-dropdown" class="vector-dropdown emptyPortlet" > <input type="checkbox" id="vector-variants-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-variants-dropdown" class="vector-dropdown-checkbox " aria-label="Change language variant" > <label id="vector-variants-dropdown-label" for="vector-variants-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">English</span> </label> <div class="vector-dropdown-content"> <div id="p-variants" class="vector-menu mw-portlet mw-portlet-variants emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> </div> </div> </nav> </div> <div id="right-navigation" class="vector-collapsible"> <nav aria-label="Views"> <div id="p-views" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-views" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-view" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/Deep_learning"><span>Read</span></a></li><li id="ca-edit" class="vector-tab-noicon 
mw-list-item"><a href="/w/index.php?title=Deep_learning&action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-history" class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=Deep_learning&action=history" title="Past revisions of this page [h]" accesskey="h"><span>View history</span></a></li> </ul> </div> </div> </nav> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-dropdown" class="vector-dropdown vector-page-tools-dropdown" > <input type="checkbox" id="vector-page-tools-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-tools-dropdown" class="vector-dropdown-checkbox " aria-label="Tools" > <label id="vector-page-tools-dropdown-label" for="vector-page-tools-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">Tools</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-tools-unpinned-container" class="vector-unpinned-container"> <div id="vector-page-tools" class="vector-page-tools vector-pinnable-element"> <div class="vector-pinnable-header vector-page-tools-pinnable-header vector-pinnable-header-unpinned" data-feature-name="page-tools-pinned" data-pinnable-element-id="vector-page-tools" data-pinned-container-id="vector-page-tools-pinned-container" data-unpinned-container-id="vector-page-tools-unpinned-container" > <div class="vector-pinnable-header-label">Tools</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-page-tools.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-page-tools.unpin">hide</button> </div> <div id="p-cactions" class="vector-menu mw-portlet mw-portlet-cactions emptyPortlet vector-has-collapsible-items" title="More options" > <div class="vector-menu-heading"> Actions </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-more-view" class="selected vector-more-collapsible-item mw-list-item"><a href="/wiki/Deep_learning"><span>Read</span></a></li><li id="ca-more-edit" class="vector-more-collapsible-item mw-list-item"><a href="/w/index.php?title=Deep_learning&action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-more-history" class="vector-more-collapsible-item mw-list-item"><a href="/w/index.php?title=Deep_learning&action=history"><span>View history</span></a></li> </ul> </div> </div> <div id="p-tb" class="vector-menu mw-portlet mw-portlet-tb" > <div class="vector-menu-heading"> General </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="t-whatlinkshere" class="mw-list-item"><a href="/wiki/Special:WhatLinksHere/Deep_learning" title="List of all English Wikipedia pages containing links to this page [j]" accesskey="j"><span>What links here</span></a></li><li id="t-recentchangeslinked" class="mw-list-item"><a href="/wiki/Special:RecentChangesLinked/Deep_learning" rel="nofollow" title="Recent changes in pages linked from this page [k]" accesskey="k"><span>Related changes</span></a></li><li id="t-upload" class="mw-list-item"><a href="//en.wikipedia.org/wiki/Wikipedia:File_Upload_Wizard" title="Upload files [u]" accesskey="u"><span>Upload file</span></a></li><li id="t-permalink" 
class="mw-list-item"><a href="/w/index.php?title=Deep_learning&oldid=1280356588" title="Permanent link to this revision of this page"><span>Permanent link</span></a></li><li id="t-info" class="mw-list-item"><a href="/w/index.php?title=Deep_learning&action=info" title="More information about this page"><span>Page information</span></a></li><li id="t-cite" class="mw-list-item"><a href="/w/index.php?title=Special:CiteThisPage&page=Deep_learning&id=1280356588&wpFormIdentifier=titleform" title="Information on how to cite this page"><span>Cite this page</span></a></li><li id="t-urlshortener" class="mw-list-item"><a href="/w/index.php?title=Special:UrlShortener&url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FDeep_learning"><span>Get shortened URL</span></a></li><li id="t-urlshortener-qrcode" class="mw-list-item"><a href="/w/index.php?title=Special:QrCode&url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FDeep_learning"><span>Download QR code</span></a></li> </ul> </div> </div> <div id="p-coll-print_export" class="vector-menu mw-portlet mw-portlet-coll-print_export" > <div class="vector-menu-heading"> Print/export </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="coll-download-as-rl" class="mw-list-item"><a href="/w/index.php?title=Special:DownloadAsPdf&page=Deep_learning&action=show-download-screen" title="Download this page as a PDF file"><span>Download as PDF</span></a></li><li id="t-print" class="mw-list-item"><a href="/w/index.php?title=Deep_learning&printable=yes" title="Printable version of this page [p]" accesskey="p"><span>Printable version</span></a></li> </ul> </div> </div> <div id="p-wikibase-otherprojects" class="vector-menu mw-portlet mw-portlet-wikibase-otherprojects" > <div class="vector-menu-heading"> In other projects </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li class="wb-otherproject-link wb-otherproject-commons mw-list-item"><a href="https://commons.wikimedia.org/wiki/Category:Deep_learning" hreflang="en"><span>Wikimedia Commons</span></a></li><li id="t-wikibase" class="wb-otherproject-link wb-otherproject-wikibase-dataitem mw-list-item"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q197536" title="Structured data on this page hosted by Wikidata [g]" accesskey="g"><span>Wikidata item</span></a></li> </ul> </div> </div> </div> </div> </div> </div> </nav> </div> </div> </div> <div class="vector-column-end"> <div class="vector-sticky-pinned-container"> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-pinned-container" class="vector-pinned-container"> </div> </nav> <nav class="vector-appearance-landmark" aria-label="Appearance"> <div id="vector-appearance-pinned-container" class="vector-pinned-container"> <div id="vector-appearance" class="vector-appearance vector-pinnable-element"> <div class="vector-pinnable-header vector-appearance-pinnable-header vector-pinnable-header-pinned" data-feature-name="appearance-pinned" data-pinnable-element-id="vector-appearance" data-pinned-container-id="vector-appearance-pinned-container" data-unpinned-container-id="vector-appearance-unpinned-container" > <div class="vector-pinnable-header-label">Appearance</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-appearance.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" 
For the TV series episode, see Deep Learning (South Park).

[Figure: Representing images on multiple layers of abstraction in deep learning[1]]
href="/wiki/Ethics_of_artificial_intelligence" title="Ethics of artificial intelligence">Ethics</a></li> <li><a href="/wiki/Existential_risk_from_artificial_general_intelligence" class="mw-redirect" title="Existential risk from artificial general intelligence">Existential risk</a></li> <li><a href="/wiki/Turing_test" title="Turing test">Turing test</a></li> <li><a href="/wiki/Uncanny_valley" title="Uncanny valley">Uncanny valley</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)"><a href="/wiki/History_of_artificial_intelligence" title="History of artificial intelligence">History</a></div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Timeline_of_artificial_intelligence" title="Timeline of artificial intelligence">Timeline</a></li> <li><a href="/wiki/Progress_in_artificial_intelligence" title="Progress in artificial intelligence">Progress</a></li> <li><a href="/wiki/AI_winter" title="AI winter">AI winter</a></li> <li><a href="/wiki/AI_boom" title="AI boom">AI boom</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)">Glossary</div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Glossary_of_artificial_intelligence" title="Glossary of artificial intelligence">Glossary</a></li></ul></div></div></td> </tr><tr><td class="sidebar-navbar"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374" /><style data-mw-deduplicate="TemplateStyles:r1239400231">.mw-parser-output .navbar{display:inline;font-size:88%;font-weight:normal}.mw-parser-output .navbar-collapse{float:left;text-align:left}.mw-parser-output .navbar-boxtext{word-spacing:0}.mw-parser-output .navbar ul{display:inline-block;white-space:nowrap;line-height:inherit}.mw-parser-output .navbar-brackets::before{margin-right:-0.125em;content:"[ "}.mw-parser-output .navbar-brackets::after{margin-left:-0.125em;content:" ]"}.mw-parser-output .navbar li{word-spacing:-0.125em}.mw-parser-output .navbar a>span,.mw-parser-output .navbar a>abbr{text-decoration:inherit}.mw-parser-output .navbar-mini abbr{font-variant:small-caps;border-bottom:none;text-decoration:none;cursor:inherit}.mw-parser-output .navbar-ct-full{font-size:114%;margin:0 7em}.mw-parser-output .navbar-ct-mini{font-size:114%;margin:0 4em}html.skin-theme-clientpref-night .mw-parser-output .navbar li a abbr{color:var(--color-base)!important}@media(prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .navbar li a abbr{color:var(--color-base)!important}}@media print{.mw-parser-output .navbar{display:none!important}}</style><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Artificial_intelligence" title="Template:Artificial intelligence"><abbr title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/wiki/Template_talk:Artificial_intelligence" title="Template talk:Artificial intelligence"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Artificial_intelligence" title="Special:EditPage/Template:Artificial intelligence"><abbr title="Edit this template">e</abbr></a></li></ul></div></td></tr></tbody></table> <p><b>Deep learning</b> is a subset of <a 
href="/wiki/Machine_learning" title="Machine learning">machine learning</a> that focuses on utilizing <a href="/wiki/Neural_network_(machine_learning)" title="Neural network (machine learning)">neural networks</a> to perform tasks such as <a href="/wiki/Statistical_classification" title="Statistical classification">classification</a>, <a href="/wiki/Regression_analysis" title="Regression analysis">regression</a>, and <a href="/wiki/Representation_learning" class="mw-redirect" title="Representation learning">representation learning</a>. The field takes inspiration from <a href="/wiki/Neuroscience" title="Neuroscience">biological neuroscience</a> and is centered around stacking <a href="/wiki/Artificial_neuron" title="Artificial neuron">artificial neurons</a> into layers and "training" them to process data. The adjective "deep" refers to the use of multiple layers (ranging from three to several hundred or thousands) in the network. Methods used can be either <a href="/wiki/Supervised_learning" title="Supervised learning">supervised</a>, <a href="/wiki/Semi-supervised_learning" class="mw-redirect" title="Semi-supervised learning">semi-supervised</a> or <a href="/wiki/Unsupervised_learning" title="Unsupervised learning">unsupervised</a>.<sup id="cite_ref-NatureBengio_2-0" class="reference"><a href="#cite_note-NatureBengio-2"><span class="cite-bracket">[</span>2<span class="cite-bracket">]</span></a></sup> </p><p>Some common deep learning network architectures include <a href="/wiki/Fully_connected_network" class="mw-redirect" title="Fully connected network">fully connected networks</a>, <a href="/wiki/Deep_belief_network" title="Deep belief network">deep belief networks</a>, <a href="/wiki/Recurrent_neural_networks" class="mw-redirect" title="Recurrent neural networks">recurrent neural networks</a>, <a href="/wiki/Convolutional_neural_networks" class="mw-redirect" title="Convolutional neural networks">convolutional neural networks</a>, <a href="/wiki/Generative_adversarial_network" title="Generative adversarial network">generative adversarial networks</a>, <a href="/wiki/Transformer_(machine_learning_model)" class="mw-redirect" title="Transformer (machine learning model)">transformers</a>, and <a href="/wiki/Neural_radiance_field" title="Neural radiance field">neural radiance fields</a>. 
These architectures have been applied to fields including computer vision, speech recognition, natural language processing, machine translation, bioinformatics, drug design, medical image analysis, climate science, material inspection and board game programs, where they have produced results comparable to and in some cases surpassing human expert performance.[3][4][5]

Early forms of neural networks were inspired by information processing and distributed communication nodes in biological systems, particularly the human brain. However, current neural networks are not intended to model the brain function of organisms, and are generally seen as low-quality models for that purpose.[6]

Overview

Most modern deep learning models are based on multi-layered neural networks such as convolutional neural networks and transformers, although they can also include propositional formulas or latent variables organized layer-wise in deep generative models such as the nodes in deep belief networks and deep Boltzmann machines.[7]

Fundamentally, deep learning refers to a class of machine learning algorithms in which a hierarchy of layers is used to transform input data into a progressively more abstract and composite representation. For example, in an image recognition model, the raw input may be an image (represented as a tensor of pixels). The first representational layer may attempt to identify basic shapes such as lines and circles, the second layer may compose and encode arrangements of edges, the third layer may encode a nose and eyes, and the fourth layer may recognize that the image contains a face.

Importantly, a deep learning process can learn which features to optimally place at which level on its own. Prior to deep learning, machine learning techniques often involved hand-crafted feature engineering to transform the data into a representation more suitable for a classification algorithm to operate on. In the deep learning approach, features are not hand-crafted: the model discovers useful feature representations from the data automatically. This does not eliminate the need for hand-tuning; for example, varying numbers of layers and layer sizes can provide different degrees of abstraction.[8][2]
The word "deep" in "deep learning" refers to the number of layers through which the data is transformed. More precisely, deep learning systems have a substantial credit assignment path (CAP) depth. The CAP is the chain of transformations from input to output; CAPs describe potentially causal connections between input and output. For a feedforward neural network, the depth of the CAPs is that of the network and is the number of hidden layers plus one (as the output layer is also parameterized). For recurrent neural networks, in which a signal may propagate through a layer more than once, the CAP depth is potentially unlimited.[9] No universally agreed-upon threshold of depth divides shallow learning from deep learning, but most researchers agree that deep learning involves CAP depth higher than two. A CAP of depth two has been shown to be a universal approximator in the sense that it can emulate any function,[10] so beyond that, additional layers do not add to the function-approximation ability of the network. Deep models (CAP > two) are nevertheless able to extract better features than shallow models, and hence extra layers help in learning features effectively.

Deep learning architectures can be constructed with a greedy layer-by-layer method.[11] Deep learning helps to disentangle these abstractions and pick out which features improve performance.[8]

Deep learning algorithms can be applied to unsupervised learning tasks. This is an important benefit because unlabeled data are more abundant than labeled data. Examples of deep structures that can be trained in an unsupervised manner are deep belief networks.[8][12]

The term deep learning was introduced to the machine learning community by Rina Dechter in 1986,[13] and to artificial neural networks by Igor Aizenberg and colleagues in 2000, in the context of Boolean threshold neurons,[14][15] although the history of the term's appearance is apparently more complicated.[16]
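The greedy layer-by-layer construction and unsupervised pre-training mentioned above can be sketched roughly as follows. This is only an illustration, assuming PyTorch, and it uses small autoencoders as a stand-in for the restricted Boltzmann machines of an actual deep belief network; the layer sizes and the random placeholder data are likewise assumptions:

```python
import torch
from torch import nn

def pretrain_layer(data, in_dim, hidden_dim, steps=200, lr=1e-2):
    """Greedily train one encoder layer to reconstruct its own input."""
    encoder = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.Sigmoid())
    decoder = nn.Linear(hidden_dim, in_dim)
    opt = torch.optim.Adam(list(encoder.parameters()) + list(decoder.parameters()), lr=lr)
    loss_fn = nn.MSELoss()
    for _ in range(steps):
        opt.zero_grad()
        loss = loss_fn(decoder(encoder(data)), data)
        loss.backward()
        opt.step()
    return encoder

x = torch.rand(256, 64)              # unlabeled placeholder data
sizes = [64, 32, 16]                 # widths of the stacked layers

layers, h = [], x
for in_dim, out_dim in zip(sizes[:-1], sizes[1:]):
    enc = pretrain_layer(h, in_dim, out_dim)   # train this layer in isolation
    layers.append(enc)
    with torch.no_grad():
        h = enc(h)                   # its codes become the next layer's input

deep_net = nn.Sequential(*layers)    # the stacked result, ready for fine-tuning
print(deep_net)
```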
href="#cite_note-horn-18"><span class="cite-bracket">[</span>18<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Haykin,_Simon_1998_19-0" class="reference"><a href="#cite_note-Haykin,_Simon_1998-19"><span class="cite-bracket">[</span>19<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Hassoun,_M._1995_p._48_20-0" class="reference"><a href="#cite_note-Hassoun,_M._1995_p._48-20"><span class="cite-bracket">[</span>20<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-ZhouLu_21-0" class="reference"><a href="#cite_note-ZhouLu-21"><span class="cite-bracket">[</span>21<span class="cite-bracket">]</span></a></sup> or <a href="/wiki/Bayesian_inference" title="Bayesian inference">probabilistic inference</a>.<sup id="cite_ref-22" class="reference"><a href="#cite_note-22"><span class="cite-bracket">[</span>22<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-BOOK2014_23-0" class="reference"><a href="#cite_note-BOOK2014-23"><span class="cite-bracket">[</span>23<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-BENGIO2012_8-3" class="reference"><a href="#cite_note-BENGIO2012-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-SCHIDHUB_9-1" class="reference"><a href="#cite_note-SCHIDHUB-9"><span class="cite-bracket">[</span>9<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-MURPHY_24-0" class="reference"><a href="#cite_note-MURPHY-24"><span class="cite-bracket">[</span>24<span class="cite-bracket">]</span></a></sup> </p><p>The classic universal approximation theorem concerns the capacity of <a href="/wiki/Feedforward_neural_networks" class="mw-redirect" title="Feedforward neural networks">feedforward neural networks</a> with a single hidden layer of finite size to approximate <a href="/wiki/Continuous_functions" class="mw-redirect" title="Continuous functions">continuous functions</a>.<sup id="cite_ref-cyb_17-1" class="reference"><a href="#cite_note-cyb-17"><span class="cite-bracket">[</span>17<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-horn_18-1" class="reference"><a href="#cite_note-horn-18"><span class="cite-bracket">[</span>18<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Haykin,_Simon_1998_19-1" class="reference"><a href="#cite_note-Haykin,_Simon_1998-19"><span class="cite-bracket">[</span>19<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Hassoun,_M._1995_p._48_20-1" class="reference"><a href="#cite_note-Hassoun,_M._1995_p._48-20"><span class="cite-bracket">[</span>20<span class="cite-bracket">]</span></a></sup> In 1989, the first proof was published by <a href="/wiki/George_Cybenko" title="George Cybenko">George Cybenko</a> for <a href="/wiki/Sigmoid_function" title="Sigmoid function">sigmoid</a> activation functions<sup id="cite_ref-cyb_17-2" class="reference"><a href="#cite_note-cyb-17"><span class="cite-bracket">[</span>17<span class="cite-bracket">]</span></a></sup> and was generalised to feed-forward multi-layer architectures in 1991 by Kurt Hornik.<sup id="cite_ref-horn_18-2" class="reference"><a href="#cite_note-horn-18"><span class="cite-bracket">[</span>18<span class="cite-bracket">]</span></a></sup> Recent work also showed that universal approximation also holds for non-bounded activation functions such as <a href="/wiki/Kunihiko_Fukushima" title="Kunihiko Fukushima">Kunihiko Fukushima</a>'s <a href="/wiki/Rectified_linear_unit" class="mw-redirect" title="Rectified linear unit">rectified linear unit</a>.<sup 
id="cite_ref-Fukushima1969_25-0" class="reference"><a href="#cite_note-Fukushima1969-25"><span class="cite-bracket">[</span>25<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-sonoda17_26-0" class="reference"><a href="#cite_note-sonoda17-26"><span class="cite-bracket">[</span>26<span class="cite-bracket">]</span></a></sup> </p><p>The universal approximation theorem for <a href="/wiki/Deep_neural_network" class="mw-redirect" title="Deep neural network">deep neural networks</a> concerns the capacity of networks with bounded width but the depth is allowed to grow. Lu et al.<sup id="cite_ref-ZhouLu_21-1" class="reference"><a href="#cite_note-ZhouLu-21"><span class="cite-bracket">[</span>21<span class="cite-bracket">]</span></a></sup> proved that if the width of a deep neural network with <a href="/wiki/ReLU" class="mw-redirect" title="ReLU">ReLU</a> activation is strictly larger than the input dimension, then the network can approximate any <a href="/wiki/Lebesgue_integration" class="mw-redirect" title="Lebesgue integration">Lebesgue integrable function</a>; if the width is smaller or equal to the input dimension, then a deep neural network is not a universal approximator. </p><p>The <a href="/wiki/Probabilistic" class="mw-redirect" title="Probabilistic">probabilistic</a> interpretation<sup id="cite_ref-MURPHY_24-1" class="reference"><a href="#cite_note-MURPHY-24"><span class="cite-bracket">[</span>24<span class="cite-bracket">]</span></a></sup> derives from the field of <a href="/wiki/Machine_learning" title="Machine learning">machine learning</a>. It features inference,<sup id="cite_ref-BOOK2014_23-1" class="reference"><a href="#cite_note-BOOK2014-23"><span class="cite-bracket">[</span>23<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-BENGIODEEP_7-1" class="reference"><a href="#cite_note-BENGIODEEP-7"><span class="cite-bracket">[</span>7<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-BENGIO2012_8-4" class="reference"><a href="#cite_note-BENGIO2012-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-SCHIDHUB_9-2" class="reference"><a href="#cite_note-SCHIDHUB-9"><span class="cite-bracket">[</span>9<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-SCHOLARDBNS_12-1" class="reference"><a href="#cite_note-SCHOLARDBNS-12"><span class="cite-bracket">[</span>12<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-MURPHY_24-2" class="reference"><a href="#cite_note-MURPHY-24"><span class="cite-bracket">[</span>24<span class="cite-bracket">]</span></a></sup> as well as the <a href="/wiki/Optimization" class="mw-redirect" title="Optimization">optimization</a> concepts of <a href="/wiki/Training" title="Training">training</a> and <a href="/wiki/Test_(assessment)" class="mw-redirect" title="Test (assessment)">testing</a>, related to fitting and <a href="/wiki/Generalization" title="Generalization">generalization</a>, respectively. 
The probabilistic interpretation[24] derives from the field of machine learning. It features inference,[23][7][8][9][12][24] as well as the optimization concepts of training and testing, related to fitting and generalization, respectively. More specifically, the probabilistic interpretation considers the activation nonlinearity as a cumulative distribution function.[24] This interpretation led to the introduction of dropout as a regularizer in neural networks. The probabilistic interpretation was introduced by researchers including Hopfield, Widrow and Narendra, and was popularized in surveys such as the one by Bishop.[27]
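Dropout, mentioned above as the regularizer motivated by this interpretation, randomly zeroes units during training and is disabled at evaluation time. A brief sketch, assuming PyTorch, with illustrative layer sizes and drop probability:

```python
import torch
from torch import nn

model = nn.Sequential(
    nn.Linear(20, 128),
    nn.ReLU(),
    nn.Dropout(p=0.5),   # each hidden unit is zeroed with probability 0.5 during training
    nn.Linear(128, 2),
)

x = torch.randn(1, 20)

model.train()            # dropout active: the same input yields different outputs
print(model(x))
print(model(x))

model.eval()             # dropout disabled: the output is deterministic
print(model(x))
```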
History

Before 1980

There are two types of artificial neural network (ANN): the feedforward neural network (FNN), or multilayer perceptron (MLP), and the recurrent neural network (RNN). RNNs have cycles in their connectivity structure; FNNs do not. In the 1920s, Wilhelm Lenz and Ernst Ising created the Ising model,[28][29] which is essentially a non-learning RNN architecture consisting of neuron-like threshold elements. In 1972, Shun'ichi Amari made this architecture adaptive.[30][31] His learning RNN was republished by John Hopfield in 1982.[32] Other early recurrent neural networks were published by Kaoru Nakano in 1971.[33][34] Already in 1948, Alan Turing produced work on "Intelligent Machinery" that was not published in his lifetime,[35] containing "ideas related to artificial evolution and learning RNNs".[31]

Frank Rosenblatt (1958)[36] proposed the perceptron, an MLP with three layers: an input layer, a hidden layer with randomized weights that did not learn, and an output layer. He later published a 1962 book that also introduced variants and computer experiments, including a version with four-layer perceptrons "with adaptive preterminal networks" where the last two layers have learned weights (here he credits H. D. Block and B. W. Knight).[37]: section 16  The book cites an earlier network by R. D. Joseph (1960)[38] "functionally equivalent to a variation of" this four-layer system (the book mentions Joseph over 30 times), which would make Joseph an early originator of adaptive multilayer perceptrons with learning hidden units; however, that learning algorithm was not a functional one and fell into oblivion.
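The perceptron learning rule for a single trainable output unit can be written in a few lines. The following is a minimal Python/NumPy sketch of the rule itself, not Rosenblatt's original machine, and the toy AND-gate data are an assumption for illustration:

```python
import numpy as np

def train_perceptron(X, y, epochs=20, lr=1.0):
    """Classic perceptron rule: update weights only on misclassified examples."""
    w, b = np.zeros(X.shape[1]), 0.0
    for _ in range(epochs):
        for xi, target in zip(X, y):
            pred = 1 if xi @ w + b > 0 else 0
            update = lr * (target - pred)     # zero when the prediction is correct
            w += update * xi
            b += update
    return w, b

# Linearly separable toy data (logical AND).
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([0, 0, 0, 1])

w, b = train_perceptron(X, y)
print(w, b, [int(xi @ w + b > 0) for xi in X])   # expected: [0, 0, 0, 1]
```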
The first working deep learning algorithm was the Group method of data handling, a method to train arbitrarily deep neural networks, published by Alexey Ivakhnenko and Lapa in 1965. They regarded it as a form of polynomial regression[39] or a generalization of Rosenblatt's perceptron.[40] A 1971 paper described a deep network with eight layers trained by this method,[41] which is based on layer-by-layer training through regression analysis; superfluous hidden units are pruned using a separate validation set. Since the activation functions of the nodes are Kolmogorov-Gabor polynomials, these were also the first deep networks with multiplicative units or "gates".[31]

The first deep learning multilayer perceptron trained by stochastic gradient descent[42] was published in 1967 by Shun'ichi Amari.[43] In computer experiments conducted by Amari's student Saito, a five-layer MLP with two modifiable layers learned internal representations to classify non-linearly separable pattern classes.[31] Subsequent developments in hardware and hyperparameter tuning have made end-to-end stochastic gradient descent the currently dominant training technique.
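The idea of training a multilayer perceptron end to end by gradient descent can be sketched on a non-linearly separable toy problem. This assumes PyTorch, uses XOR rather than Saito's original task, and the hidden width and learning rate are arbitrary; the loop below is full-batch gradient descent for brevity rather than strictly stochastic:

```python
import torch
from torch import nn

# XOR: a classic pattern class that no single-layer perceptron can separate.
X = torch.tensor([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
y = torch.tensor([[0.], [1.], [1.], [0.]])

mlp = nn.Sequential(nn.Linear(2, 8), nn.Tanh(), nn.Linear(8, 1))
opt = torch.optim.SGD(mlp.parameters(), lr=0.5)
loss_fn = nn.BCEWithLogitsLoss()

for _ in range(2000):
    opt.zero_grad()
    loss = loss_fn(mlp(X), y)
    loss.backward()
    opt.step()

print(torch.sigmoid(mlp(X)).round().flatten())   # typically converges to [0, 1, 1, 0]
```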
</p><p>In 1969, <a href="/wiki/Kunihiko_Fukushima" title="Kunihiko Fukushima">Kunihiko Fukushima</a> introduced the <a href="/wiki/Rectifier_(neural_networks)" title="Rectifier (neural networks)">ReLU</a> (rectified linear unit) <a href="/wiki/Activation_function" title="Activation function">activation function</a>.<sup id="cite_ref-Fukushima1969_25-1" class="reference"><a href="#cite_note-Fukushima1969-25"><span class="cite-bracket">[</span>25<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-DLhistory_31-4" class="reference"><a href="#cite_note-DLhistory-31"><span class="cite-bracket">[</span>31<span class="cite-bracket">]</span></a></sup> The rectifier has become the most popular activation function for deep learning.<sup id="cite_ref-44" class="reference"><a href="#cite_note-44"><span class="cite-bracket">[</span>44<span class="cite-bracket">]</span></a></sup> </p><p>Deep learning architectures for <a href="/wiki/Convolutional_neural_network" title="Convolutional neural network">convolutional neural networks</a> (CNNs) with convolutional layers and downsampling layers began with the <a href="/wiki/Neocognitron" title="Neocognitron">Neocognitron</a> introduced by <a href="/wiki/Kunihiko_Fukushima" title="Kunihiko Fukushima">Kunihiko Fukushima</a> in 1979, though not trained by backpropagation.<sup id="cite_ref-FUKU1979_45-0" class="reference"><a href="#cite_note-FUKU1979-45"><span class="cite-bracket">[</span>45<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-FUKU1980_46-0" class="reference"><a href="#cite_note-FUKU1980-46"><span class="cite-bracket">[</span>46<span class="cite-bracket">]</span></a></sup> </p><p><a href="/wiki/Backpropagation" title="Backpropagation">Backpropagation</a> is an efficient application of the <a href="/wiki/Chain_rule" title="Chain rule">chain rule</a> derived by <a href="/wiki/Gottfried_Wilhelm_Leibniz" title="Gottfried Wilhelm Leibniz">Gottfried Wilhelm Leibniz</a> in 1673<sup id="cite_ref-leibniz1676_47-0" class="reference"><a href="#cite_note-leibniz1676-47"><span class="cite-bracket">[</span>47<span class="cite-bracket">]</span></a></sup> to networks of differentiable nodes. The terminology "back-propagating errors" was actually introduced in 1962 by Rosenblatt,<sup id="cite_ref-rosenblatt1962_37-1" class="reference"><a href="#cite_note-rosenblatt1962-37"><span class="cite-bracket">[</span>37<span class="cite-bracket">]</span></a></sup> but he did not know how to implement this, although <a href="/wiki/Henry_J._Kelley" title="Henry J. Kelley">Henry J. Kelley</a> had a continuous precursor of backpropagation in 1960 in the context of <a href="/wiki/Control_theory" title="Control theory">control theory</a>.<sup id="cite_ref-kelley1960_48-0" class="reference"><a href="#cite_note-kelley1960-48"><span class="cite-bracket">[</span>48<span class="cite-bracket">]</span></a></sup> The modern form of backpropagation was first published in <a href="/wiki/Seppo_Linnainmaa" title="Seppo Linnainmaa">Seppo Linnainmaa</a>'s master thesis (1970).<sup id="cite_ref-lin19703_49-0" class="reference"><a href="#cite_note-lin19703-49"><span class="cite-bracket">[</span>49<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-lin19763_50-0" class="reference"><a href="#cite_note-lin19763-50"><span class="cite-bracket">[</span>50<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-DLhistory_31-5" class="reference"><a href="#cite_note-DLhistory-31"><span class="cite-bracket">[</span>31<span class="cite-bracket">]</span></a></sup> G.M. 
Ostrovski et al. republished it in 1971.<sup id="cite_ref-ostrowski1971_51-0" class="reference"><a href="#cite_note-ostrowski1971-51"><span class="cite-bracket">[</span>51<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-backprop_52-0" class="reference"><a href="#cite_note-backprop-52"><span class="cite-bracket">[</span>52<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Paul_Werbos" title="Paul Werbos">Paul Werbos</a> applied backpropagation to neural networks in 1982<sup id="cite_ref-werbos1982_53-0" class="reference"><a href="#cite_note-werbos1982-53"><span class="cite-bracket">[</span>53<span class="cite-bracket">]</span></a></sup> (his 1974 PhD thesis, reprinted in a 1994 book,<sup id="cite_ref-werbos1974_54-0" class="reference"><a href="#cite_note-werbos1974-54"><span class="cite-bracket">[</span>54<span class="cite-bracket">]</span></a></sup> did not yet describe the algorithm<sup id="cite_ref-backprop_52-1" class="reference"><a href="#cite_note-backprop-52"><span class="cite-bracket">[</span>52<span class="cite-bracket">]</span></a></sup>). In 1986, <a href="/wiki/David_E._Rumelhart" class="mw-redirect" title="David E. Rumelhart">David E. Rumelhart</a> et al. popularised backpropagation but did not cite the original work.<sup id="cite_ref-55" class="reference"><a href="#cite_note-55"><span class="cite-bracket">[</span>55<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-rumelhart1986_56-0" class="reference"><a href="#cite_note-rumelhart1986-56"><span class="cite-bracket">[</span>56<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="1980s-2000s">1980s-2000s</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=5" title="Edit section: 1980s-2000s"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The <a href="/wiki/Time_delay_neural_network" title="Time delay neural network">time delay neural network</a> (TDNN) was introduced in 1987 by <a href="/wiki/Alex_Waibel" title="Alex Waibel">Alex Waibel</a> to apply CNN to phoneme recognition. It used convolutions, weight sharing, and backpropagation.<sup id="cite_ref-Waibel1987_57-0" class="reference"><a href="#cite_note-Waibel1987-57"><span class="cite-bracket">[</span>57<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-speechsignal_58-0" class="reference"><a href="#cite_note-speechsignal-58"><span class="cite-bracket">[</span>58<span class="cite-bracket">]</span></a></sup> In 1988, Wei Zhang applied a backpropagation-trained CNN to alphabet recognition.<sup id="cite_ref-wz1988_59-0" class="reference"><a href="#cite_note-wz1988-59"><span class="cite-bracket">[</span>59<span class="cite-bracket">]</span></a></sup> In 1989, <a href="/wiki/Yann_LeCun" title="Yann LeCun">Yann LeCun</a> et al. created a CNN called <a href="/wiki/LeNet" title="LeNet">LeNet</a> for <a href="/wiki/Handwriting_recognition" title="Handwriting recognition">recognizing handwritten ZIP codes</a> on mail. 
Training required 3 days.<sup id="cite_ref-LECUN1989_60-0" class="reference"><a href="#cite_note-LECUN1989-60"><span class="cite-bracket">[</span>60<span class="cite-bracket">]</span></a></sup> In 1990, Wei Zhang implemented a CNN on <a href="/wiki/Optical_computing" title="Optical computing">optical computing</a> hardware.<sup id="cite_ref-wz1990_61-0" class="reference"><a href="#cite_note-wz1990-61"><span class="cite-bracket">[</span>61<span class="cite-bracket">]</span></a></sup> In 1991, a CNN was applied to medical image object segmentation<sup id="cite_ref-62" class="reference"><a href="#cite_note-62"><span class="cite-bracket">[</span>62<span class="cite-bracket">]</span></a></sup> and breast cancer detection in mammograms.<sup id="cite_ref-63" class="reference"><a href="#cite_note-63"><span class="cite-bracket">[</span>63<span class="cite-bracket">]</span></a></sup> <a href="/wiki/LeNet" title="LeNet">LeNet</a>-5 (1998), a 7-level CNN by <a href="/wiki/Yann_LeCun" title="Yann LeCun">Yann LeCun</a> et al. that classifies digits, was applied by several banks to recognize hand-written numbers on checks digitized in 32x32 pixel images.<sup id="cite_ref-lecun98_64-0" class="reference"><a href="#cite_note-lecun98-64"><span class="cite-bracket">[</span>64<span class="cite-bracket">]</span></a></sup> </p><p><a href="/wiki/Recurrent_neural_network" title="Recurrent neural network">Recurrent neural networks</a> (RNN)<sup id="cite_ref-ising1925_28-1" class="reference"><a href="#cite_note-ising1925-28"><span class="cite-bracket">[</span>28<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Amari1972_30-1" class="reference"><a href="#cite_note-Amari1972-30"><span class="cite-bracket">[</span>30<span class="cite-bracket">]</span></a></sup> were further developed in the 1980s. Recurrence is used for sequence processing, and when a recurrent network is unrolled, it mathematically resembles a deep feedforward network. Consequently, they have similar properties and issues, and their developments had mutual influences. Among RNNs, two early influential works were the <a href="/wiki/Recurrent_neural_network#Jordan_network" title="Recurrent neural network">Jordan network</a> (1986)<sup id="cite_ref-65" class="reference"><a href="#cite_note-65"><span class="cite-bracket">[</span>65<span class="cite-bracket">]</span></a></sup> and the <a href="/wiki/Recurrent_neural_network#Elman_network" title="Recurrent neural network">Elman network</a> (1990),<sup id="cite_ref-66" class="reference"><a href="#cite_note-66"><span class="cite-bracket">[</span>66<span class="cite-bracket">]</span></a></sup> which applied RNNs to study problems in <a href="/wiki/Cognitive_psychology" title="Cognitive psychology">cognitive psychology</a>. </p><p>In the 1980s, backpropagation did not work well for deep learning with long credit assignment paths. 
To overcome this problem, in 1991, <a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Jürgen Schmidhuber</a> proposed a hierarchy of RNNs pre-trained one level at a time by <a href="/wiki/Self-supervised_learning" title="Self-supervised learning">self-supervised learning</a> where each RNN tries to predict its own next input, which is the next unexpected input of the RNN below.<sup id="cite_ref-chunker1991_67-0" class="reference"><a href="#cite_note-chunker1991-67"><span class="cite-bracket">[</span>67<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-schmidhuber1992_68-0" class="reference"><a href="#cite_note-schmidhuber1992-68"><span class="cite-bracket">[</span>68<span class="cite-bracket">]</span></a></sup> This "neural history compressor" uses <a href="/wiki/Predictive_coding" title="Predictive coding">predictive coding</a> to learn <a href="/wiki/Knowledge_representation" class="mw-redirect" title="Knowledge representation">internal representations</a> at multiple self-organizing time scales. This can substantially facilitate downstream deep learning. The RNN hierarchy can be <i>collapsed</i> into a single RNN, by <a href="/wiki/Knowledge_distillation" title="Knowledge distillation">distilling</a> a higher level <i>chunker</i> network into a lower level <i>automatizer</i> network.<sup id="cite_ref-chunker1991_67-1" class="reference"><a href="#cite_note-chunker1991-67"><span class="cite-bracket">[</span>67<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-schmidhuber1992_68-1" class="reference"><a href="#cite_note-schmidhuber1992-68"><span class="cite-bracket">[</span>68<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-DLhistory_31-6" class="reference"><a href="#cite_note-DLhistory-31"><span class="cite-bracket">[</span>31<span class="cite-bracket">]</span></a></sup> In 1993, a neural history compressor solved a "Very Deep Learning" task that required more than 1000 subsequent <a href="/wiki/Layer_(deep_learning)" title="Layer (deep learning)">layers</a> in an RNN unfolded in time.<sup id="cite_ref-schmidhuber1993_69-0" class="reference"><a href="#cite_note-schmidhuber1993-69"><span class="cite-bracket">[</span>69<span class="cite-bracket">]</span></a></sup> The "P" in <a href="/wiki/ChatGPT" title="ChatGPT">ChatGPT</a> refers to such pre-training. </p><p><a href="/wiki/Sepp_Hochreiter" title="Sepp Hochreiter">Sepp Hochreiter</a>'s diploma thesis (1991)<sup id="cite_ref-HOCH1991_70-0" class="reference"><a href="#cite_note-HOCH1991-70"><span class="cite-bracket">[</span>70<span class="cite-bracket">]</span></a></sup> implemented the neural history compressor,<sup id="cite_ref-chunker1991_67-2" class="reference"><a href="#cite_note-chunker1991-67"><span class="cite-bracket">[</span>67<span class="cite-bracket">]</span></a></sup> and identified and analyzed the <a href="/wiki/Vanishing_gradient_problem" title="Vanishing gradient problem">vanishing gradient problem</a>.<sup id="cite_ref-HOCH1991_70-1" class="reference"><a href="#cite_note-HOCH1991-70"><span class="cite-bracket">[</span>70<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-HOCH2001_71-0" class="reference"><a href="#cite_note-HOCH2001-71"><span class="cite-bracket">[</span>71<span class="cite-bracket">]</span></a></sup> Hochreiter proposed recurrent <a href="/wiki/Residual_neural_network" title="Residual neural network">residual</a> connections to solve the vanishing gradient problem. 
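</p><p>A small numerical illustration of the problem (not taken from the cited thesis): backpropagation through many time steps multiplies the error signal by a per-step factor, so factors below one make the gradient vanish, while an additive residual path preserves it.</p>
<pre>
# Backpropagating an error signal through T time steps multiplies it by the
# per-step derivative of the recurrence.  A factor below 1 shrinks it
# exponentially (the vanishing gradient problem).
T, factor = 100, 0.5
grad = 1.0
for _ in range(T):
    grad *= factor
print(grad)            # about 7.9e-31: the signal has effectively vanished

# With an additive (residual) connection, h_t = h_{t-1} + f(h_{t-1}),
# the per-step factor is 1 plus a small term, so the signal survives.
grad_residual = 1.0
for _ in range(T):
    grad_residual *= 1.0   # idealized identity path
print(grad_residual)       # 1.0
</pre>
<p>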
This led to the <a href="/wiki/Long_short-term_memory" title="Long short-term memory">long short-term memory</a> (LSTM), published in 1995.<sup id="cite_ref-72" class="reference"><a href="#cite_note-72"><span class="cite-bracket">[</span>72<span class="cite-bracket">]</span></a></sup> LSTM can learn "very deep learning" tasks<sup id="cite_ref-SCHIDHUB_9-3" class="reference"><a href="#cite_note-SCHIDHUB-9"><span class="cite-bracket">[</span>9<span class="cite-bracket">]</span></a></sup> with long credit assignment paths that require memories of events that happened thousands of discrete time steps before. That LSTM was not yet the modern architecture, which required a "forget gate", introduced in 1999,<sup id="cite_ref-lstm1999_73-0" class="reference"><a href="#cite_note-lstm1999-73"><span class="cite-bracket">[</span>73<span class="cite-bracket">]</span></a></sup> which became the standard RNN architecture. </p><p>In 1991, <a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Jürgen Schmidhuber</a> also published adversarial neural networks that contest with each other in the form of a <a href="/wiki/Zero-sum_game" title="Zero-sum game">zero-sum game</a>, where one network's gain is the other network's loss.<sup id="cite_ref-curiosity1991_74-0" class="reference"><a href="#cite_note-curiosity1991-74"><span class="cite-bracket">[</span>74<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-fun2010_75-0" class="reference"><a href="#cite_note-fun2010-75"><span class="cite-bracket">[</span>75<span class="cite-bracket">]</span></a></sup> The first network is a <a href="/wiki/Generative_model" title="Generative model">generative model</a> that models a <a href="/wiki/Probability_distribution" title="Probability distribution">probability distribution</a> over output patterns. The second network learns by <a href="/wiki/Gradient_descent" title="Gradient descent">gradient descent</a> to predict the reactions of the environment to these patterns. This was called "artificial curiosity". 
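</p><p>A minimal sketch of this adversarial zero-sum idea in its later GAN form, written with PyTorch for brevity; the network sizes and toy data are hypothetical, and Schmidhuber's 1991 curiosity setup used a predictor of environmental reactions rather than a real-versus-generated classifier.</p>
<pre>
import torch
from torch import nn

# Generator G maps noise to samples; discriminator D scores "real vs. generated".
# D's gain is G's loss: a zero-sum game.
G = nn.Sequential(nn.Linear(8, 32), nn.ReLU(), nn.Linear(32, 1))
D = nn.Sequential(nn.Linear(1, 32), nn.ReLU(), nn.Linear(32, 1), nn.Sigmoid())
opt_g = torch.optim.Adam(G.parameters(), lr=1e-3)
opt_d = torch.optim.Adam(D.parameters(), lr=1e-3)
bce = nn.BCELoss()

for step in range(2000):
    real = torch.randn(64, 1) * 0.5 + 2.0           # "data": a shifted Gaussian
    fake = G(torch.randn(64, 8))

    # Discriminator step: label real samples 1, generated samples 0.
    d_loss = bce(D(real), torch.ones(64, 1)) + bce(D(fake.detach()), torch.zeros(64, 1))
    opt_d.zero_grad(); d_loss.backward(); opt_d.step()

    # Generator step: try to make D classify generated samples as real.
    g_loss = bce(D(fake), torch.ones(64, 1))
    opt_g.zero_grad(); g_loss.backward(); opt_g.step()

print(G(torch.randn(256, 8)).mean())   # should drift toward the data mean (2.0)
</pre>
<p>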
In 2014, this principle was used in <a href="/wiki/Generative_adversarial_network" title="Generative adversarial network">generative adversarial networks</a> (GANs).<sup id="cite_ref-gancurpm2020_76-0" class="reference"><a href="#cite_note-gancurpm2020-76"><span class="cite-bracket">[</span>76<span class="cite-bracket">]</span></a></sup> </p><p>During 1985–1995, inspired by statistical mechanics, several architectures and methods were developed by <a href="/wiki/Terry_Sejnowski" title="Terry Sejnowski">Terry Sejnowski</a>, <a href="/wiki/Peter_Dayan" title="Peter Dayan">Peter Dayan</a>, <a href="/wiki/Geoffrey_Hinton" title="Geoffrey Hinton">Geoffrey Hinton</a>, etc., including the <a href="/wiki/Boltzmann_machine" title="Boltzmann machine">Boltzmann machine</a>,<sup id="cite_ref-77" class="reference"><a href="#cite_note-77"><span class="cite-bracket">[</span>77<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Restricted_Boltzmann_machine" title="Restricted Boltzmann machine">restricted Boltzmann machine</a>,<sup id="cite_ref-78" class="reference"><a href="#cite_note-78"><span class="cite-bracket">[</span>78<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Helmholtz_machine" title="Helmholtz machine">Helmholtz machine</a>,<sup id="cite_ref-nc95_79-0" class="reference"><a href="#cite_note-nc95-79"><span class="cite-bracket">[</span>79<span class="cite-bracket">]</span></a></sup> and the <a href="/wiki/Wake-sleep_algorithm" title="Wake-sleep algorithm">wake-sleep algorithm</a>.<sup id="cite_ref-:1_80-0" class="reference"><a href="#cite_note-:1-80"><span class="cite-bracket">[</span>80<span class="cite-bracket">]</span></a></sup> These were designed for unsupervised learning of deep generative models. However, those were more computationally expensive compared to backpropagation. Boltzmann machine learning algorithm, published in 1985, was briefly popular before being eclipsed by the backpropagation algorithm in 1986. (p. 112 <sup id="cite_ref-81" class="reference"><a href="#cite_note-81"><span class="cite-bracket">[</span>81<span class="cite-bracket">]</span></a></sup>). 
A 1988 network became state of the art in <a href="/wiki/Protein_structure_prediction" title="Protein structure prediction">protein structure prediction</a>, an early application of deep learning to bioinformatics.<sup id="cite_ref-82" class="reference"><a href="#cite_note-82"><span class="cite-bracket">[</span>82<span class="cite-bracket">]</span></a></sup> </p><p>Both shallow and deep learning (e.g., recurrent nets) of ANNs for <a href="/wiki/Speech_recognition" title="Speech recognition">speech recognition</a> have been explored for many years.<sup id="cite_ref-83" class="reference"><a href="#cite_note-83"><span class="cite-bracket">[</span>83<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Robinson1992_84-0" class="reference"><a href="#cite_note-Robinson1992-84"><span class="cite-bracket">[</span>84<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-85" class="reference"><a href="#cite_note-85"><span class="cite-bracket">[</span>85<span class="cite-bracket">]</span></a></sup> These methods never outperformed the non-uniform, internally hand-crafted Gaussian <a href="/wiki/Mixture_model" title="Mixture model">mixture model</a>/<a href="/wiki/Hidden_Markov_model" title="Hidden Markov model">Hidden Markov model</a> (GMM-HMM) technology based on generative models of speech trained discriminatively.<sup id="cite_ref-Baker2009_86-0" class="reference"><a href="#cite_note-Baker2009-86"><span class="cite-bracket">[</span>86<span class="cite-bracket">]</span></a></sup> Key difficulties have been analyzed, including diminishing gradients<sup id="cite_ref-HOCH1991_70-2" class="reference"><a href="#cite_note-HOCH1991-70"><span class="cite-bracket">[</span>70<span class="cite-bracket">]</span></a></sup> and weak temporal correlation structure in neural predictive models.<sup id="cite_ref-Bengio1991_87-0" class="reference"><a href="#cite_note-Bengio1991-87"><span class="cite-bracket">[</span>87<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Deng1994_88-0" class="reference"><a href="#cite_note-Deng1994-88"><span class="cite-bracket">[</span>88<span class="cite-bracket">]</span></a></sup> Additional difficulties were the lack of training data and limited computing power. </p><p>Most <a href="/wiki/Speech_recognition" title="Speech recognition">speech recognition</a> researchers moved away from neural nets to pursue generative modeling. An exception was at <a href="/wiki/SRI_International" title="SRI International">SRI International</a> in the late 1990s. Funded by the US government's <a href="/wiki/National_Security_Agency" title="National Security Agency">NSA</a> and <a href="/wiki/DARPA" title="DARPA">DARPA</a>, SRI conducted research on speech and <a href="/wiki/Speaker_recognition" title="Speaker recognition">speaker recognition</a>. 
The speaker recognition team led by <a href="/wiki/Larry_Heck" title="Larry Heck">Larry Heck</a> reported significant success with deep neural networks in speech processing in the 1998 <a href="/wiki/National_Institute_of_Standards_and_Technology" title="National Institute of Standards and Technology">NIST</a> Speaker Recognition benchmark.<sup id="cite_ref-Doddington2000_89-0" class="reference"><a href="#cite_note-Doddington2000-89"><span class="cite-bracket">[</span>89<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Heck2000_90-0" class="reference"><a href="#cite_note-Heck2000-90"><span class="cite-bracket">[</span>90<span class="cite-bracket">]</span></a></sup> It was deployed in the Nuance Verifier, representing the first major industrial application of deep learning.<sup id="cite_ref-91" class="reference"><a href="#cite_note-91"><span class="cite-bracket">[</span>91<span class="cite-bracket">]</span></a></sup> </p><p>The principle of elevating "raw" features over hand-crafted optimization was first explored successfully in the architecture of a deep autoencoder on "raw" spectrogram or linear <a href="/wiki/Filter_bank" title="Filter bank">filter-bank</a> features in the late 1990s,<sup id="cite_ref-Heck2000_90-1" class="reference"><a href="#cite_note-Heck2000-90"><span class="cite-bracket">[</span>90<span class="cite-bracket">]</span></a></sup> showing its superiority over the <a href="/wiki/Mel-frequency_cepstrum" title="Mel-frequency cepstrum">Mel-Cepstral</a> features that contain stages of fixed transformation from spectrograms. The raw features of speech, <a href="/wiki/Waveform" title="Waveform">waveforms</a>, later produced excellent larger-scale results.<sup id="cite_ref-92" class="reference"><a href="#cite_note-92"><span class="cite-bracket">[</span>92<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="2000s">2000s</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=6" title="Edit section: 2000s"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Neural networks entered a lull, and simpler models that use task-specific handcrafted features such as <a href="/wiki/Gabor_filter" title="Gabor filter">Gabor filters</a> and <a href="/wiki/Support_vector_machine" title="Support vector machine">support vector machines</a> (SVMs) became the preferred choices in the 1990s and 2000s, because of artificial neural networks' computational cost and a lack of understanding of how the brain wires its biological networks.<sup class="noprint Inline-Template Template-Fact" style="white-space:nowrap;">[<i><a href="/wiki/Wikipedia:Citation_needed" title="Wikipedia:Citation needed"><span title="This claim needs references to reliable sources. 
(August 2024)">citation needed</span></a></i>]</sup> </p><p>In 2003, LSTM became competitive with traditional speech recognizers on certain tasks.<sup id="cite_ref-graves2003_93-0" class="reference"><a href="#cite_note-graves2003-93"><span class="cite-bracket">[</span>93<span class="cite-bracket">]</span></a></sup> In 2006, <a href="/wiki/Alex_Graves_(computer_scientist)" title="Alex Graves (computer scientist)">Alex Graves</a>, Santiago Fernández, Faustino Gomez, and Schmidhuber combined it with <a href="/wiki/Connectionist_temporal_classification" title="Connectionist temporal classification">connectionist temporal classification</a> (CTC)<sup id="cite_ref-graves2006_94-0" class="reference"><a href="#cite_note-graves2006-94"><span class="cite-bracket">[</span>94<span class="cite-bracket">]</span></a></sup> in stacks of LSTMs.<sup id="cite_ref-fernandez2007keyword_95-0" class="reference"><a href="#cite_note-fernandez2007keyword-95"><span class="cite-bracket">[</span>95<span class="cite-bracket">]</span></a></sup> In 2009, it became the first RNN to win a <a href="/wiki/Pattern_recognition" title="Pattern recognition">pattern recognition</a> contest, in connected <a href="/wiki/Handwriting_recognition" title="Handwriting recognition">handwriting recognition</a>.<sup id="cite_ref-96" class="reference"><a href="#cite_note-96"><span class="cite-bracket">[</span>96<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-SCHIDHUB_9-4" class="reference"><a href="#cite_note-SCHIDHUB-9"><span class="cite-bracket">[</span>9<span class="cite-bracket">]</span></a></sup> </p><p>In 2006, publications by <a href="/wiki/Geoffrey_Hinton" title="Geoffrey Hinton">Geoff Hinton</a>, <a href="/wiki/Russ_Salakhutdinov" class="mw-redirect" title="Russ Salakhutdinov">Ruslan Salakhutdinov</a>, Osindero and <a href="/wiki/Yee_Whye_Teh" title="Yee Whye Teh">Teh</a><sup id="cite_ref-97" class="reference"><a href="#cite_note-97"><span class="cite-bracket">[</span>97<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-hinton06_98-0" class="reference"><a href="#cite_note-hinton06-98"><span class="cite-bracket">[</span>98<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Deep_belief_network" title="Deep belief network">deep belief networks</a> were developed for generative modeling. 
They are trained by first training one restricted Boltzmann machine, freezing it, and training another one on top of it, and so on; the resulting stack is then optionally <a href="/wiki/Fine-tuning_(deep_learning)" title="Fine-tuning (deep learning)">fine-tuned</a> using supervised backpropagation.<sup id="cite_ref-HINTON2007_99-0" class="reference"><a href="#cite_note-HINTON2007-99"><span class="cite-bracket">[</span>99<span class="cite-bracket">]</span></a></sup> They could model high-dimensional probability distributions, such as the distribution of <a href="/wiki/MNIST_database" title="MNIST database">MNIST images</a>, but convergence was slow.<sup id="cite_ref-100" class="reference"><a href="#cite_note-100"><span class="cite-bracket">[</span>100<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-101" class="reference"><a href="#cite_note-101"><span class="cite-bracket">[</span>101<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-102" class="reference"><a href="#cite_note-102"><span class="cite-bracket">[</span>102<span class="cite-bracket">]</span></a></sup> </p><p>The impact of deep learning in industry began in the early 2000s, when CNNs already processed an estimated 10% to 20% of all the checks written in the US, according to Yann LeCun.<sup id="cite_ref-lecun2016slides_103-0" class="reference"><a href="#cite_note-lecun2016slides-103"><span class="cite-bracket">[</span>103<span class="cite-bracket">]</span></a></sup> Industrial applications of deep learning to large-scale speech recognition started around 2010. </p><p>The 2009 NIPS Workshop on Deep Learning for Speech Recognition was motivated by the limitations of deep generative models of speech, and the possibility that, given more capable hardware and large-scale data sets, deep neural nets might become practical. It was believed that pre-training DNNs using generative models of deep belief nets (DBN) would overcome the main difficulties of neural nets. 
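</p><p>The greedy layer-wise procedure described above can be sketched roughly as follows; this is a toy NumPy implementation of single-step contrastive divergence with hypothetical sizes and data, not the original training code.</p>
<pre>
import numpy as np

rng = np.random.default_rng(0)
sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))

def train_rbm(data, n_hidden, lr=0.05, epochs=30):
    """Train one restricted Boltzmann machine with CD-1 (contrastive divergence)."""
    n_visible = data.shape[1]
    W = rng.normal(0, 0.01, (n_visible, n_hidden))
    b_v, b_h = np.zeros(n_visible), np.zeros(n_hidden)
    for _ in range(epochs):
        v0 = data
        p_h0 = sigmoid(v0 @ W + b_h)                 # hidden probabilities
        h0 = rng.binomial(1, p_h0).astype(float)     # sampled hidden states
        v1 = sigmoid(h0 @ W.T + b_v)                 # reconstruction of the data
        p_h1 = sigmoid(v1 @ W + b_h)
        # Update toward data-driven statistics, away from reconstruction-driven ones.
        W += lr * (v0.T @ p_h0 - v1.T @ p_h1) / len(data)
        b_v += lr * (v0 - v1).mean(axis=0)
        b_h += lr * (p_h0 - p_h1).mean(axis=0)
    return W, b_h

# Greedy stacking: train an RBM, freeze it, feed its hidden activations
# to the next RBM, and so on; the stack can later be fine-tuned with backprop.
data = rng.binomial(1, 0.3, (200, 64)).astype(float)   # toy binary "images"
stack, x = [], data
for n_hidden in (32, 16):
    W, b_h = train_rbm(x, n_hidden)
    stack.append((W, b_h))
    x = sigmoid(x @ W + b_h)
</pre>
<p>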
However, it was discovered that replacing pre-training with large amounts of training data for straightforward backpropagation when using DNNs with large, context-dependent output layers produced error rates dramatically lower than then-state-of-the-art Gaussian mixture model (GMM)/Hidden Markov Model (HMM) and also than more-advanced generative model-based systems.<sup id="cite_ref-HintonDengYu2012_104-0" class="reference"><a href="#cite_note-HintonDengYu2012-104"><span class="cite-bracket">[</span>104<span class="cite-bracket">]</span></a></sup> The nature of the recognition errors produced by the two types of systems was characteristically different,<sup id="cite_ref-ReferenceICASSP2013_105-0" class="reference"><a href="#cite_note-ReferenceICASSP2013-105"><span class="cite-bracket">[</span>105<span class="cite-bracket">]</span></a></sup> offering technical insights into how to integrate deep learning into the existing highly efficient, run-time speech decoding system deployed by all major speech recognition systems.<sup id="cite_ref-BOOK2014_23-2" class="reference"><a href="#cite_note-BOOK2014-23"><span class="cite-bracket">[</span>23<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-ReferenceA_106-0" class="reference"><a href="#cite_note-ReferenceA-106"><span class="cite-bracket">[</span>106<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-107" class="reference"><a href="#cite_note-107"><span class="cite-bracket">[</span>107<span class="cite-bracket">]</span></a></sup> Analysis around 2009–2010, contrasting the GMM (and other generative speech models) vs. DNN models, stimulated early industrial investment in deep learning for speech recognition.<sup id="cite_ref-ReferenceICASSP2013_105-1" class="reference"><a href="#cite_note-ReferenceICASSP2013-105"><span class="cite-bracket">[</span>105<span class="cite-bracket">]</span></a></sup> That analysis was done with comparable performance (less than 1.5% in error rate) between discriminative DNNs and generative models.<sup id="cite_ref-HintonDengYu2012_104-1" class="reference"><a href="#cite_note-HintonDengYu2012-104"><span class="cite-bracket">[</span>104<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-ReferenceICASSP2013_105-2" class="reference"><a href="#cite_note-ReferenceICASSP2013-105"><span class="cite-bracket">[</span>105<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-interspeech2014Keynote_108-0" class="reference"><a href="#cite_note-interspeech2014Keynote-108"><span class="cite-bracket">[</span>108<span class="cite-bracket">]</span></a></sup> In 2010, researchers extended deep learning from <a href="/wiki/TIMIT" title="TIMIT">TIMIT</a> to large vocabulary speech recognition, by adopting large output layers of the DNN based on context-dependent HMM states constructed by <a href="/wiki/Decision_tree" title="Decision tree">decision trees</a>.<sup id="cite_ref-Roles2010_109-0" class="reference"><a href="#cite_note-Roles2010-109"><span class="cite-bracket">[</span>109<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-110" class="reference"><a href="#cite_note-110"><span class="cite-bracket">[</span>110<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-111" class="reference"><a href="#cite_note-111"><span class="cite-bracket">[</span>111<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-ReferenceA_106-1" class="reference"><a href="#cite_note-ReferenceA-106"><span class="cite-bracket">[</span>106<span class="cite-bracket">]</span></a></sup> </p> <div 
class="mw-heading mw-heading3"><h3 id="Deep_learning_revolution">Deep learning revolution</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=7" title="Edit section: Deep learning revolution"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:AI-ML-DL.svg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/b/bb/AI-ML-DL.svg/220px-AI-ML-DL.svg.png" decoding="async" width="220" height="243" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/b/bb/AI-ML-DL.svg/330px-AI-ML-DL.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/b/bb/AI-ML-DL.svg/440px-AI-ML-DL.svg.png 2x" data-file-width="748" data-file-height="826" /></a><figcaption>How deep learning is a subset of machine learning and how machine learning is a subset of artificial intelligence (AI)</figcaption></figure> <p>The deep learning revolution started around CNN- and GPU-based computer vision. </p><p>Although CNNs trained by backpropagation had been around for decades and GPU implementations of NNs for years,<sup id="cite_ref-jung2004_112-0" class="reference"><a href="#cite_note-jung2004-112"><span class="cite-bracket">[</span>112<span class="cite-bracket">]</span></a></sup> including CNNs,<sup id="cite_ref-chellapilla2006_113-0" class="reference"><a href="#cite_note-chellapilla2006-113"><span class="cite-bracket">[</span>113<span class="cite-bracket">]</span></a></sup> faster implementations of CNNs on GPUs were needed to progress on computer vision. Later, as deep learning becomes widespread, specialized hardware and algorithm optimizations were developed specifically for deep learning.<sup id="cite_ref-sze2017_114-0" class="reference"><a href="#cite_note-sze2017-114"><span class="cite-bracket">[</span>114<span class="cite-bracket">]</span></a></sup> </p><p>A key advance for the deep learning revolution was hardware advances, especially GPU. Some early work dated back to 2004.<sup id="cite_ref-jung2004_112-1" class="reference"><a href="#cite_note-jung2004-112"><span class="cite-bracket">[</span>112<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-chellapilla2006_113-1" class="reference"><a href="#cite_note-chellapilla2006-113"><span class="cite-bracket">[</span>113<span class="cite-bracket">]</span></a></sup> In 2009, Raina, Madhavan, and <a href="/wiki/Andrew_Ng" title="Andrew Ng">Andrew Ng</a> reported a 100M deep belief network trained on 30 Nvidia <a href="/wiki/GeForce_GTX_280" class="mw-redirect" title="GeForce GTX 280">GeForce GTX 280</a> GPUs, an early demonstration of GPU-based deep learning. 
They reported up to 70 times faster training.<sup id="cite_ref-115" class="reference"><a href="#cite_note-115"><span class="cite-bracket">[</span>115<span class="cite-bracket">]</span></a></sup> </p><p>In 2011, a CNN named <i>DanNet<sup id="cite_ref-:3_116-0" class="reference"><a href="#cite_note-:3-116"><span class="cite-bracket">[</span>116<span class="cite-bracket">]</span></a></sup></i><sup id="cite_ref-:6_117-0" class="reference"><a href="#cite_note-:6-117"><span class="cite-bracket">[</span>117<span class="cite-bracket">]</span></a></sup> by Dan Ciresan, Ueli Meier, Jonathan Masci, <a href="/wiki/Luca_Maria_Gambardella" title="Luca Maria Gambardella">Luca Maria Gambardella</a>, and <a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Jürgen Schmidhuber</a> achieved for the first time superhuman performance in a visual pattern recognition contest, outperforming traditional methods by a factor of 3.<sup id="cite_ref-SCHIDHUB_9-5" class="reference"><a href="#cite_note-SCHIDHUB-9"><span class="cite-bracket">[</span>9<span class="cite-bracket">]</span></a></sup> It then won more contests.<sup id="cite_ref-:8_118-0" class="reference"><a href="#cite_note-:8-118"><span class="cite-bracket">[</span>118<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-ciresan2013miccai_119-0" class="reference"><a href="#cite_note-ciresan2013miccai-119"><span class="cite-bracket">[</span>119<span class="cite-bracket">]</span></a></sup> They also showed how <a href="/wiki/Max_pooling" class="mw-redirect" title="Max pooling">max-pooling</a> CNNs on GPU improved performance significantly.<sup id="cite_ref-:9_3-1" class="reference"><a href="#cite_note-:9-3"><span class="cite-bracket">[</span>3<span class="cite-bracket">]</span></a></sup> </p><p>In 2012, <a href="/wiki/Andrew_Ng" title="Andrew Ng">Andrew Ng</a> and <a href="/wiki/Jeff_Dean_(computer_scientist)" class="mw-redirect" title="Jeff Dean (computer scientist)">Jeff Dean</a> created an FNN that learned to recognize higher-level concepts, such as cats, only from watching unlabeled images taken from <a href="/wiki/YouTube" title="YouTube">YouTube</a> videos.<sup id="cite_ref-ng2012_120-0" class="reference"><a href="#cite_note-ng2012-120"><span class="cite-bracket">[</span>120<span class="cite-bracket">]</span></a></sup> </p><p>In October 2012, <a href="/wiki/AlexNet" title="AlexNet">AlexNet</a> by <a href="/wiki/Alex_Krizhevsky" title="Alex Krizhevsky">Alex Krizhevsky</a>, <a href="/wiki/Ilya_Sutskever" title="Ilya Sutskever">Ilya Sutskever</a>, and <a href="/wiki/Geoffrey_Hinton" title="Geoffrey Hinton">Geoffrey Hinton</a><sup id="cite_ref-krizhevsky2012_4-1" class="reference"><a href="#cite_note-krizhevsky2012-4"><span class="cite-bracket">[</span>4<span class="cite-bracket">]</span></a></sup> won the large-scale <a href="/wiki/ImageNet_competition" class="mw-redirect" title="ImageNet competition">ImageNet competition</a> by a significant margin over shallow machine learning methods. 
Further incremental improvements included the <a href="/wiki/VGG-19" class="mw-redirect" title="VGG-19">VGG-16</a> network by <a href="/w/index.php?title=Karen_Simonyan_(scientist)&action=edit&redlink=1" class="new" title="Karen Simonyan (scientist) (page does not exist)">Karen Simonyan</a> and <a href="/wiki/Andrew_Zisserman" title="Andrew Zisserman">Andrew Zisserman</a><sup id="cite_ref-VGG_121-0" class="reference"><a href="#cite_note-VGG-121"><span class="cite-bracket">[</span>121<span class="cite-bracket">]</span></a></sup> and Google's <a href="/wiki/Inceptionv3" class="mw-redirect" title="Inceptionv3">Inceptionv3</a>.<sup id="cite_ref-szegedy_122-0" class="reference"><a href="#cite_note-szegedy-122"><span class="cite-bracket">[</span>122<span class="cite-bracket">]</span></a></sup> </p><p>The success in image classification was then extended to the more challenging task of <a href="/wiki/Automatic_image_annotation" title="Automatic image annotation">generating descriptions</a> (captions) for images, often as a combination of CNNs and LSTMs.<sup id="cite_ref-1411.4555_123-0" class="reference"><a href="#cite_note-1411.4555-123"><span class="cite-bracket">[</span>123<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-1411.4952_124-0" class="reference"><a href="#cite_note-1411.4952-124"><span class="cite-bracket">[</span>124<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-1411.2539_125-0" class="reference"><a href="#cite_note-1411.2539-125"><span class="cite-bracket">[</span>125<span class="cite-bracket">]</span></a></sup> </p><p>In 2014, the state of the art was training “very deep neural networks” with 20 to 30 layers.<sup id="cite_ref-126" class="reference"><a href="#cite_note-126"><span class="cite-bracket">[</span>126<span class="cite-bracket">]</span></a></sup> Stacking too many layers led to a steep reduction in <a href="/wiki/Training,_validation,_and_test_data_sets" title="Training, validation, and test data sets">training</a> accuracy,<sup id="cite_ref-prelu_127-0" class="reference"><a href="#cite_note-prelu-127"><span class="cite-bracket">[</span>127<span class="cite-bracket">]</span></a></sup> known as the "degradation" problem.<sup id="cite_ref-resnet_128-0" class="reference"><a href="#cite_note-resnet-128"><span class="cite-bracket">[</span>128<span class="cite-bracket">]</span></a></sup> In 2015, two techniques were developed to train very deep networks: the Highway Network was published in May 2015, and the <a href="/wiki/Residual_neural_network" title="Residual neural network">residual neural network</a> (ResNet)<sup id="cite_ref-resnet20152_129-0" class="reference"><a href="#cite_note-resnet20152-129"><span class="cite-bracket">[</span>129<span class="cite-bracket">]</span></a></sup> in December 2015. ResNet behaves like an open-gated Highway Net. </p><p>Around the same time, deep learning started impacting the field of art. Early examples included <a href="/wiki/DeepDream" title="DeepDream">Google DeepDream</a> (2015) and <a href="/wiki/Neural_style_transfer" title="Neural style transfer">neural style transfer</a> (2015),<sup id="cite_ref-130" class="reference"><a href="#cite_note-130"><span class="cite-bracket">[</span>130<span class="cite-bracket">]</span></a></sup> both of which were based on pretrained image classification neural networks, such as <a href="/wiki/VGG-19" class="mw-redirect" title="VGG-19">VGG-19</a>. 
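</p><p>The residual and gated "highway" connections mentioned above can be illustrated with a toy NumPy sketch (layer sizes and weights are arbitrary); the identity path added to a block's output is what lets error signals flow through very deep stacks.</p>
<pre>
import numpy as np

rng = np.random.default_rng(0)
relu = lambda z: np.maximum(z, 0.0)
sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))

def residual_block(x, W1, W2):
    # ResNet-style block: the input is added back, so the layers only have to
    # learn a residual F(x); the identity path acts like a permanently open gate.
    return relu(x + relu(x @ W1) @ W2)

def highway_block(x, W_h, W_t):
    # Highway block: a learned transform gate t blends the transformed signal
    # with the untouched input (the "carry" path).
    t = sigmoid(x @ W_t)
    return t * relu(x @ W_h) + (1.0 - t) * x

x = rng.normal(size=(4, 16))
W = [rng.normal(0, 0.1, (16, 16)) for _ in range(4)]
out = residual_block(x, W[0], W[1])
out = highway_block(out, W[2], W[3])
</pre>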
</p><p><a href="/wiki/Generative_adversarial_network" title="Generative adversarial network">Generative adversarial network</a> (GAN) by (<a href="/wiki/Ian_Goodfellow" title="Ian Goodfellow">Ian Goodfellow</a> et al., 2014)<sup id="cite_ref-GANnips_131-0" class="reference"><a href="#cite_note-GANnips-131"><span class="cite-bracket">[</span>131<span class="cite-bracket">]</span></a></sup> (based on <a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Jürgen Schmidhuber</a>'s principle of artificial curiosity<sup id="cite_ref-curiosity1991_74-1" class="reference"><a href="#cite_note-curiosity1991-74"><span class="cite-bracket">[</span>74<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-gancurpm2020_76-1" class="reference"><a href="#cite_note-gancurpm2020-76"><span class="cite-bracket">[</span>76<span class="cite-bracket">]</span></a></sup>) became state of the art in generative modeling during 2014-2018 period. Excellent image quality is achieved by <a href="/wiki/Nvidia" title="Nvidia">Nvidia</a>'s <a href="/wiki/StyleGAN" title="StyleGAN">StyleGAN</a> (2018)<sup id="cite_ref-SyncedReview2018_132-0" class="reference"><a href="#cite_note-SyncedReview2018-132"><span class="cite-bracket">[</span>132<span class="cite-bracket">]</span></a></sup> based on the Progressive GAN by Tero Karras et al.<sup id="cite_ref-progressiveGAN2017_133-0" class="reference"><a href="#cite_note-progressiveGAN2017-133"><span class="cite-bracket">[</span>133<span class="cite-bracket">]</span></a></sup> Here the GAN generator is grown from small to large scale in a pyramidal fashion. Image generation by GAN reached popular success, and provoked discussions concerning <a href="/wiki/Deepfake" title="Deepfake">deepfakes</a>.<sup id="cite_ref-134" class="reference"><a href="#cite_note-134"><span class="cite-bracket">[</span>134<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Diffusion_model" title="Diffusion model">Diffusion models</a> (2015)<sup id="cite_ref-135" class="reference"><a href="#cite_note-135"><span class="cite-bracket">[</span>135<span class="cite-bracket">]</span></a></sup> eclipsed GANs in generative modeling since then, with systems such as <a href="/wiki/DALL-E" title="DALL-E">DALL·E 2</a> (2022) and <a href="/wiki/Stable_Diffusion" title="Stable Diffusion">Stable Diffusion</a> (2022). </p><p>In 2015, Google's speech recognition improved by 49% by an LSTM-based model, which they made available through <a href="/wiki/Google_Voice_Search" title="Google Voice Search">Google Voice Search</a> on <a href="/wiki/Smartphone" title="Smartphone">smartphone</a>.<sup id="cite_ref-GoogleVoiceTranscription_136-0" class="reference"><a href="#cite_note-GoogleVoiceTranscription-136"><span class="cite-bracket">[</span>136<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-sak2015_137-0" class="reference"><a href="#cite_note-sak2015-137"><span class="cite-bracket">[</span>137<span class="cite-bracket">]</span></a></sup> </p><p>Deep learning is part of state-of-the-art systems in various disciplines, particularly computer vision and <a href="/wiki/Automatic_speech_recognition" class="mw-redirect" title="Automatic speech recognition">automatic speech recognition</a> (ASR). 
Results on commonly used evaluation sets such as <a href="/wiki/TIMIT" title="TIMIT">TIMIT</a> (ASR) and <a href="/wiki/MNIST_database" title="MNIST database">MNIST</a> (<a href="/wiki/Image_classification" class="mw-redirect" title="Image classification">image classification</a>), as well as a range of large-vocabulary speech recognition tasks have steadily improved.<sup id="cite_ref-HintonDengYu2012_104-2" class="reference"><a href="#cite_note-HintonDengYu2012-104"><span class="cite-bracket">[</span>104<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-138" class="reference"><a href="#cite_note-138"><span class="cite-bracket">[</span>138<span class="cite-bracket">]</span></a></sup> Convolutional neural networks were superseded for ASR by <a href="/wiki/LSTM" class="mw-redirect" title="LSTM">LSTM</a>.<sup id="cite_ref-sak2015_137-1" class="reference"><a href="#cite_note-sak2015-137"><span class="cite-bracket">[</span>137<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-sak2014_139-0" class="reference"><a href="#cite_note-sak2014-139"><span class="cite-bracket">[</span>139<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-liwu2015_140-0" class="reference"><a href="#cite_note-liwu2015-140"><span class="cite-bracket">[</span>140<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-zen2015_141-0" class="reference"><a href="#cite_note-zen2015-141"><span class="cite-bracket">[</span>141<span class="cite-bracket">]</span></a></sup> but are more successful in computer vision. </p><p><a href="/wiki/Yoshua_Bengio" title="Yoshua Bengio">Yoshua Bengio</a>, <a href="/wiki/Geoffrey_Hinton" title="Geoffrey Hinton">Geoffrey Hinton</a> and <a href="/wiki/Yann_LeCun" title="Yann LeCun">Yann LeCun</a> were awarded the 2018 <a href="/wiki/Turing_Award" title="Turing Award">Turing Award</a> for "conceptual and engineering breakthroughs that have made deep neural networks a critical component of computing".<sup id="cite_ref-142" class="reference"><a href="#cite_note-142"><span class="cite-bracket">[</span>142<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Neural_networks">Neural networks</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=8" title="Edit section: Neural networks"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951" /><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Artificial_neural_network" class="mw-redirect" title="Artificial neural network">Artificial neural network</a></div> <style data-mw-deduplicate="TemplateStyles:r1273380762/mw-parser-output/.tmulti">.mw-parser-output .tmulti .multiimageinner{display:flex;flex-direction:column}.mw-parser-output .tmulti .trow{display:flex;flex-direction:row;clear:left;flex-wrap:wrap;width:100%;box-sizing:border-box}.mw-parser-output .tmulti .tsingle{margin:1px;float:left}.mw-parser-output .tmulti .theader{clear:both;font-weight:bold;text-align:center;align-self:center;background-color:transparent;width:100%}.mw-parser-output .tmulti .thumbcaption{background-color:transparent}.mw-parser-output .tmulti .text-align-left{text-align:left}.mw-parser-output .tmulti .text-align-right{text-align:right}.mw-parser-output .tmulti .text-align-center{text-align:center}@media all and (max-width:720px){.mw-parser-output .tmulti 
.thumbinner{width:100%!important;box-sizing:border-box;max-width:none!important;align-items:center}.mw-parser-output .tmulti .trow{justify-content:center}.mw-parser-output .tmulti .tsingle{float:none!important;max-width:100%!important;box-sizing:border-box;text-align:center}.mw-parser-output .tmulti .tsingle .thumbcaption{text-align:left}.mw-parser-output .tmulti .trow>.thumbcaption{text-align:center}}@media screen{html.skin-theme-clientpref-night .mw-parser-output .tmulti .multiimageinner span:not(.skin-invert-image):not(.skin-invert):not(.bg-transparent) img{background-color:white}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .tmulti .multiimageinner span:not(.skin-invert-image):not(.skin-invert):not(.bg-transparent) img{background-color:white}}</style><div class="thumb tmulti tright"><div class="thumbinner multiimageinner" style="width:392px;max-width:392px"><div class="trow"><div class="tsingle" style="width:167px;max-width:167px"><div class="thumbimage" style="height:188px;overflow:hidden"><span typeof="mw:File"><a href="/wiki/File:Simplified_neural_network_training_example.svg" class="mw-file-description"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/0/0c/Simplified_neural_network_training_example.svg/165px-Simplified_neural_network_training_example.svg.png" decoding="async" width="165" height="189" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/0/0c/Simplified_neural_network_training_example.svg/248px-Simplified_neural_network_training_example.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/0/0c/Simplified_neural_network_training_example.svg/330px-Simplified_neural_network_training_example.svg.png 2x" data-file-width="773" data-file-height="884" /></a></span></div><div class="thumbcaption">Simplified example of training a neural network in object detection: The network is trained by multiple images that are known to depict <a href="/wiki/Starfish" title="Starfish">starfish</a> and <a href="/wiki/Sea_urchin" title="Sea urchin">sea urchins</a>, which are correlated with "nodes" that represent visual <a href="/wiki/Feature_(computer_vision)" title="Feature (computer vision)">features</a>. The starfish match with a ringed texture and a star outline, whereas most sea urchins match with a striped texture and oval shape. 
However, the instance of a ring textured sea urchin creates a weakly weighted association between them.</div></div><div class="tsingle" style="width:221px;max-width:221px"><div class="thumbimage" style="height:188px;overflow:hidden"><span typeof="mw:File"><a href="/wiki/File:Simplified_neural_network_example.svg" class="mw-file-description"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/b/b7/Simplified_neural_network_example.svg/219px-Simplified_neural_network_example.svg.png" decoding="async" width="219" height="188" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/b/b7/Simplified_neural_network_example.svg/329px-Simplified_neural_network_example.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/b/b7/Simplified_neural_network_example.svg/438px-Simplified_neural_network_example.svg.png 2x" data-file-width="1028" data-file-height="882" /></a></span></div><div class="thumbcaption">Subsequent run of the network on an input image (left):<sup id="cite_ref-143" class="reference"><a href="#cite_note-143"><span class="cite-bracket">[</span>143<span class="cite-bracket">]</span></a></sup> The network correctly detects the starfish. However, the weakly weighted association between ringed texture and sea urchin also confers a weak signal to the latter from one of two intermediate nodes. In addition, a <a href="/wiki/Seashell" title="Seashell">shell</a> that was not included in the training gives a weak signal for the oval shape, also resulting in a weak signal for the sea urchin output. These weak signals may result in a <a href="/wiki/False_positive" class="mw-redirect" title="False positive">false positive</a> result for sea urchin.<br />In reality, textures and outlines would not be represented by single nodes, but rather by associated weight patterns of multiple nodes.</div></div></div></div></div> <p><b>Artificial neural networks</b> (<b>ANNs</b>) or <b><a href="/wiki/Connectionism" title="Connectionism">connectionist</a> systems</b> are computing systems inspired by the <a href="/wiki/Biological_neural_network" class="mw-redirect" title="Biological neural network">biological neural networks</a> that constitute animal brains. Such systems learn (progressively improve their ability) to do tasks by considering examples, generally without task-specific programming. For example, in image recognition, they might learn to identify images that contain cats by analyzing example images that have been manually <a href="/wiki/Labeled_data" title="Labeled data">labeled</a> as "cat" or "no cat" and using the analytic results to identify cats in other images. They have found most use in applications difficult to express with a traditional computer algorithm using <a href="/wiki/Rule-based_programming" class="mw-redirect" title="Rule-based programming">rule-based programming</a>. </p><p>An ANN is based on a collection of connected units called <a href="/wiki/Artificial_neuron" title="Artificial neuron">artificial neurons</a>, (analogous to biological <a href="/wiki/Neuron" title="Neuron">neurons</a> in a <a href="/wiki/Brain" title="Brain">biological brain</a>). Each connection (<a href="/wiki/Synapse" title="Synapse">synapse</a>) between neurons can transmit a signal to another neuron. The receiving (postsynaptic) neuron can process the signal(s) and then signal downstream neurons connected to it. 
Neurons may have state, generally represented by <a href="/wiki/Real_numbers" class="mw-redirect" title="Real numbers">real numbers</a>, typically between 0 and 1. Neurons and synapses may also have a weight that varies as learning proceeds, which can increase or decrease the strength of the signal that is sent downstream. </p><p>Typically, neurons are organized in layers. Different layers may perform different kinds of transformations on their inputs. Signals travel from the first (input) layer to the last (output) layer, possibly after traversing the layers multiple times. </p><p>The original goal of the neural network approach was to solve problems in the same way that a human brain would. Over time, attention focused on matching specific mental abilities, leading to deviations from biology such as <a href="/wiki/Backpropagation" title="Backpropagation">backpropagation</a>, or passing information in the reverse direction and adjusting the network to reflect that information. </p><p>Neural networks have been used on a variety of tasks, including computer vision, <a href="/wiki/Speech_recognition" title="Speech recognition">speech recognition</a>, <a href="/wiki/Machine_translation" title="Machine translation">machine translation</a>, <a href="/wiki/Social_network" title="Social network">social network</a> filtering, <a href="/wiki/General_game_playing" title="General game playing">playing board and video games</a> and medical diagnosis. </p><p>As of 2017, neural networks typically have a few thousand to a few million units and millions of connections. Despite this number being several orders of magnitude less than the number of neurons in a human brain, these networks can perform many tasks at a level beyond that of humans (e.g., recognizing faces, or playing "Go"<sup id="cite_ref-144" class="reference"><a href="#cite_note-144"><span class="cite-bracket">[</span>144<span class="cite-bracket">]</span></a></sup>). </p> <div class="mw-heading mw-heading3"><h3 id="Deep_neural_networks">Deep neural networks</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=9" title="Edit section: Deep neural networks"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>A deep neural network (DNN) is an artificial neural network with multiple layers between the input and output layers.<sup id="cite_ref-BENGIODEEP_7-2" class="reference"><a href="#cite_note-BENGIODEEP-7"><span class="cite-bracket">[</span>7<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-SCHIDHUB_9-6" class="reference"><a href="#cite_note-SCHIDHUB-9"><span class="cite-bracket">[</span>9<span class="cite-bracket">]</span></a></sup> There are different types of neural networks but they always consist of the same components: neurons, synapses, weights, biases, and functions.<sup id="cite_ref-Nokkada_145-0" class="reference"><a href="#cite_note-Nokkada-145"><span class="cite-bracket">[</span>145<span class="cite-bracket">]</span></a></sup> These components as a whole function in a way that mimics functions of the human brain, and can be trained like any other ML algorithm.<sup class="noprint Inline-Template Template-Fact" style="white-space:nowrap;">[<i><a href="/wiki/Wikipedia:Citation_needed" title="Wikipedia:Citation needed"><span title="This claim needs references to reliable sources. 
(November 2020)">citation needed</span></a></i>]</sup> </p><p>For example, a DNN that is trained to recognize dog breeds will go over the given image and calculate the probability that the dog in the image is a certain breed. The user can review the results and select which probabilities the network should display (above a certain threshold, etc.) and return the proposed label. Each mathematical manipulation as such is considered a layer, <sup id="cite_ref-Kumar2021_146-0" class="reference"><a href="#cite_note-Kumar2021-146"><span class="cite-bracket">[</span>146<span class="cite-bracket">]</span></a></sup> and complex DNN have many layers, hence the name "deep" networks. </p><p>DNNs can model complex non-linear relationships. DNN architectures generate compositional models where the object is expressed as a layered composition of <a href="/wiki/Primitive_data_type" title="Primitive data type">primitives</a>.<sup id="cite_ref-147" class="reference"><a href="#cite_note-147"><span class="cite-bracket">[</span>147<span class="cite-bracket">]</span></a></sup> The extra layers enable composition of features from lower layers, potentially modeling complex data with fewer units than a similarly performing shallow network.<sup id="cite_ref-BENGIODEEP_7-3" class="reference"><a href="#cite_note-BENGIODEEP-7"><span class="cite-bracket">[</span>7<span class="cite-bracket">]</span></a></sup> For instance, it was proved that sparse <a href="/wiki/Multivariate_polynomial" class="mw-redirect" title="Multivariate polynomial">multivariate polynomials</a> are exponentially easier to approximate with DNNs than with shallow networks.<sup id="cite_ref-rolnickpaper_148-0" class="reference"><a href="#cite_note-rolnickpaper-148"><span class="cite-bracket">[</span>148<span class="cite-bracket">]</span></a></sup> </p><p>Deep architectures include many variants of a few basic approaches. Each architecture has found success in specific domains. It is not always possible to compare the performance of multiple architectures, unless they have been evaluated on the same data sets.<sup id="cite_ref-Kumar2021_146-1" class="reference"><a href="#cite_note-Kumar2021-146"><span class="cite-bracket">[</span>146<span class="cite-bracket">]</span></a></sup> </p><p>DNNs are typically feedforward networks in which data flows from the input layer to the output layer without looping back. At first, the DNN creates a map of virtual neurons and assigns random numerical values, or "weights", to connections between them. The weights and inputs are multiplied and return an output between 0 and 1. If the network did not accurately recognize a particular pattern, an algorithm would adjust the weights.<sup id="cite_ref-149" class="reference"><a href="#cite_note-149"><span class="cite-bracket">[</span>149<span class="cite-bracket">]</span></a></sup> That way the algorithm can make certain parameters more influential, until it determines the correct mathematical manipulation to fully process the data. 
Recurrent neural networks, in which data can flow in any direction, are used for applications such as language modeling.[150][151][152][153][154] Long short-term memory is particularly effective for this use.[155][156]

Convolutional neural networks (CNNs) are used in computer vision.[157] CNNs also have been applied to acoustic modeling for automatic speech recognition (ASR).[158]

Challenges

As with ANNs, many issues can arise with naively trained DNNs. Two common issues are overfitting and computation time.

DNNs are prone to overfitting because of the added layers of abstraction, which allow them to model rare dependencies in the training data.
<a href="/wiki/Regularization_(mathematics)" title="Regularization (mathematics)">Regularization</a> methods such as Ivakhnenko's unit pruning<sup id="cite_ref-ivak1971_41-1" class="reference"><a href="#cite_note-ivak1971-41"><span class="cite-bracket">[</span>41<span class="cite-bracket">]</span></a></sup> or <a href="/wiki/Weight_decay" class="mw-redirect" title="Weight decay">weight decay</a> (<span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle \ell _{2}}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <msub> <mi>ℓ<!-- ℓ --></mi> <mrow class="MJX-TeXAtom-ORD"> <mn>2</mn> </mrow> </msub> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle \ell _{2}}</annotation> </semantics> </math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/85a4571ee9be10bd3c9df2480ab3d280f99e801a" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.671ex; width:2.024ex; height:2.509ex;" alt="{\displaystyle \ell _{2}}" /></span>-regularization) or <a href="/wiki/Sparse_matrix" title="Sparse matrix">sparsity</a> (<span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle \ell _{1}}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <msub> <mi>ℓ<!-- ℓ --></mi> <mrow class="MJX-TeXAtom-ORD"> <mn>1</mn> </mrow> </msub> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle \ell _{1}}</annotation> </semantics> </math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/361ddd720474aa41cb05453e03424fb7999d3b02" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.671ex; width:2.024ex; height:2.509ex;" alt="{\displaystyle \ell _{1}}" /></span>-regularization) can be applied during training to combat overfitting.<sup id="cite_ref-159" class="reference"><a href="#cite_note-159"><span class="cite-bracket">[</span>159<span class="cite-bracket">]</span></a></sup> Alternatively <a href="/wiki/Dropout_(neural_networks)" class="mw-redirect" title="Dropout (neural networks)">dropout</a> regularization randomly omits units from the hidden layers during training. This helps to exclude rare dependencies.<sup id="cite_ref-DAHL2013_160-0" class="reference"><a href="#cite_note-DAHL2013-160"><span class="cite-bracket">[</span>160<span class="cite-bracket">]</span></a></sup> Another interesting recent development is research into models of just enough complexity through an estimation of the intrinsic complexity of the task being modelled. 
This complexity-estimation approach has been successfully applied to multivariate time series prediction tasks such as traffic prediction.[161] Finally, data can be augmented via methods such as cropping and rotating, so that smaller training sets can be increased in size to reduce the chances of overfitting.[162]

DNNs must consider many training parameters, such as the size (number of layers and number of units per layer), the learning rate, and the initial weights. Sweeping through the parameter space for optimal parameters may not be feasible because of the cost in time and computational resources. Various tricks, such as batching (computing the gradient on several training examples at once rather than on individual examples),[163] speed up computation. The large processing capabilities of many-core architectures (such as GPUs or the Intel Xeon Phi) have produced significant speedups in training, because of the suitability of such processing architectures for matrix and vector computations.[164][165]

Alternatively, engineers may look for other types of neural networks with more straightforward and convergent training algorithms. CMAC (cerebellar model articulation controller) is one such kind of neural network. It does not require learning rates or randomized initial weights.
The training process can be guaranteed to converge in one step with a new batch of data, and the computational complexity of the training algorithm is linear with respect to the number of neurons involved.[166][167]

Hardware

Since the 2010s, advances in both machine learning algorithms and computer hardware have led to more efficient methods for training deep neural networks that contain many layers of non-linear hidden units and a very large output layer.[168] By 2019, graphics processing units (GPUs), often with AI-specific enhancements, had displaced CPUs as the dominant method for training large-scale commercial cloud AI.[169] OpenAI estimated the hardware computation used in the largest deep learning projects from AlexNet (2012) to AlphaZero (2017) and found a 300,000-fold increase in the amount of computation required, with a doubling-time trendline of 3.4 months.[170][171]

Special electronic circuits called deep learning processors were designed to speed up deep learning algorithms.
Deep learning processors include neural processing units (NPUs) in Huawei cellphones[172] and cloud computing servers such as tensor processing units (TPUs) in the Google Cloud Platform.[173] Cerebras Systems has also built a dedicated system to handle large deep learning models, the CS-2, based on the largest processor in the industry, the second-generation Wafer Scale Engine (WSE-2).[174][175]

Atomically thin semiconductors are considered promising for energy-efficient deep learning hardware where the same basic device structure is used for both logic operations and data storage. In 2020, Marega et al. published experiments with a large-area active channel material for developing logic-in-memory devices and circuits based on floating-gate field-effect transistors (FGFETs).[176]

In 2021, J. Feldmann et al. proposed an integrated photonic hardware accelerator for parallel convolutional processing.[177] The authors identify two key advantages of integrated photonics over its electronic counterparts: (1) massively parallel data transfer through wavelength division multiplexing in conjunction with frequency combs, and (2) extremely high data modulation speeds.[177] Their system can execute trillions of multiply-accumulate operations per second, indicating the potential of integrated photonics in data-heavy AI applications.[177]

Applications

Automatic speech recognition

Main article: Speech recognition

Large-scale automatic speech recognition is the first and most convincing successful case of deep learning. LSTM RNNs can learn "Very Deep Learning" tasks[9] that involve multi-second intervals containing speech events separated by thousands of discrete time steps, where one time step corresponds to about 10 ms. LSTM with forget gates[156] is competitive with traditional speech recognizers on certain tasks.[93]

The initial success in speech recognition was based on small-scale recognition tasks based on TIMIT.
The data set contains 630 speakers from eight major dialects of American English, where each speaker reads 10 sentences.[178] Its small size lets many configurations be tried. More importantly, the TIMIT task concerns phone-sequence recognition, which, unlike word-sequence recognition, allows weak phone bigram language models. This lets the strength of the acoustic modeling aspects of speech recognition be more easily analyzed. The error rates listed below, including these early results, are measured as percent phone error rate (PER) and have been summarized since 1991.

Method                                              | PER (%)
Randomly Initialized RNN[179]                       | 26.1
Bayesian Triphone GMM-HMM                           | 25.6
Hidden Trajectory (Generative) Model                | 24.8
Monophone Randomly Initialized DNN                  | 23.4
Monophone DBN-DNN                                   | 22.4
Triphone GMM-HMM with BMMI Training                 | 21.7
Monophone DBN-DNN on fbank                          | 20.7
Convolutional DNN[180]                              | 20.0
Convolutional DNN w. Heterogeneous Pooling          | 18.7
Ensemble DNN/CNN/RNN[181]                           | 18.3
Bidirectional LSTM                                  | 17.8
Hierarchical Convolutional Deep Maxout Network[182] | 16.5

The debut of DNNs for speaker recognition in the late 1990s and speech recognition around 2009–2011, and of LSTM around 2003–2007, accelerated progress in eight major areas:[23][108][106]

- Scale-up/out and accelerated DNN training and decoding
- Sequence discriminative training
- Feature processing by deep models with solid understanding of the underlying mechanisms
- Adaptation of DNNs and related deep models
- Multi-task and transfer learning by DNNs and related deep models
- CNNs and how to design them to best exploit domain knowledge of speech
- RNN and its rich LSTM variants
- Other types of deep models including tensor-based models and integrated deep generative/discriminative models

All major commercial speech recognition systems (e.g., Microsoft Cortana, Xbox, Skype Translator, Amazon Alexa, Google Now, Apple Siri, Baidu and iFlyTek voice search, and a range of Nuance speech products, etc.) are based on deep learning.[23][183][184]

Image recognition

Main article: Computer vision

(Video: Richard Green explains how deep learning is used with a remotely operated vehicle in mussel aquaculture.)

A common evaluation set for image classification is the MNIST database data set. MNIST is composed of handwritten digits and includes 60,000 training examples and 10,000 test examples. As with TIMIT, its small size lets users test multiple configurations. A comprehensive list of results on this set is available.[185]

Deep learning-based image recognition has become "superhuman", producing more accurate results than human contestants. This first occurred in 2011 in recognition of traffic signs, and in 2014 with recognition of human faces.[186][187]

Deep learning-trained vehicles now interpret 360° camera views.[188] Another example is Facial Dysmorphology Novel Analysis (FDNA), used to analyze cases of human malformation connected to a large database of genetic syndromes.
</p> <div class="mw-heading mw-heading3"><h3 id="Visual_art_processing">Visual art processing</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=15" title="Edit section: Visual art processing"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <figure typeof="mw:File/Thumb"><a href="/wiki/File:Jimmy_Wales_in_France,_with_the_style_of_Munch%27s_%22The_Scream%22_applied_using_neural_style_transfer.jpg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/7/75/Jimmy_Wales_in_France%2C_with_the_style_of_Munch%27s_%22The_Scream%22_applied_using_neural_style_transfer.jpg/142px-Jimmy_Wales_in_France%2C_with_the_style_of_Munch%27s_%22The_Scream%22_applied_using_neural_style_transfer.jpg" decoding="async" width="142" height="164" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/7/75/Jimmy_Wales_in_France%2C_with_the_style_of_Munch%27s_%22The_Scream%22_applied_using_neural_style_transfer.jpg/212px-Jimmy_Wales_in_France%2C_with_the_style_of_Munch%27s_%22The_Scream%22_applied_using_neural_style_transfer.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/7/75/Jimmy_Wales_in_France%2C_with_the_style_of_Munch%27s_%22The_Scream%22_applied_using_neural_style_transfer.jpg/283px-Jimmy_Wales_in_France%2C_with_the_style_of_Munch%27s_%22The_Scream%22_applied_using_neural_style_transfer.jpg 2x" data-file-width="1168" data-file-height="1352" /></a><figcaption>Visual art processing of Jimmy Wales in France, with the style of Munch's "<a href="/wiki/The_Scream" title="The Scream">The Scream</a>" applied using neural style transfer</figcaption></figure> <p>Closely related to the progress that has been made in image recognition is the increasing application of deep learning techniques to various visual art tasks. 
DNNs have proven themselves capable, for example, of

- identifying the style period of a given painting[189][190]
- Neural Style Transfer – capturing the style of a given artwork and applying it in a visually pleasing manner to an arbitrary photograph or video[189][190]
- generating striking imagery based on random visual input fields.[189][190]

Natural language processing

Main article: Natural language processing

Neural networks have been used for implementing language models since the early 2000s.[150] LSTM helped to improve machine translation and language modeling.[151][152][153]

Other key techniques in this field are negative sampling[191] and word embedding.
Word embedding, such as word2vec, can be thought of as a representational layer in a deep learning architecture that transforms an atomic word into a positional representation of the word relative to other words in the dataset; the position is represented as a point in a vector space. Using word embedding as an RNN input layer allows the network to parse sentences and phrases using an effective compositional vector grammar. A compositional vector grammar can be thought of as a probabilistic context free grammar (PCFG) implemented by an RNN.[192] Recursive auto-encoders built atop word embeddings can assess sentence similarity and detect paraphrasing.[192] Deep neural architectures provide the best results for constituency parsing,[193] sentiment analysis,[194] information retrieval,[195][196] spoken language understanding,[197] machine translation,[151][198] contextual entity linking,[198] writing style recognition,[199] named-entity recognition (token classification),[200] text classification, and others.[201]

Recent developments generalize word embedding to sentence embedding.

Google Translate (GT) uses a large end-to-end long short-term memory (LSTM) network.[202][203][204][205] Google Neural Machine Translation (GNMT) uses an example-based machine translation method in which the system "learns from millions of examples".[203] It translates "whole sentences at a time, rather than pieces". Google Translate supports over one hundred languages.[203] The network encodes the "semantics of the sentence rather than simply memorizing phrase-to-phrase translations".[203][206] GT uses English as an intermediate between most language pairs.[206]

Drug discovery and toxicology

For more information, see Drug discovery and Toxicology.

A large percentage of candidate drugs fail to win regulatory approval.
These failures are caused by insufficient efficacy (on-target effect), undesired interactions (off-target effects), or unanticipated toxic effects.[207][208] Research has explored use of deep learning to predict the biomolecular targets,[209][210] off-targets, and toxic effects of environmental chemicals in nutrients, household products and drugs.[211][212][213]

AtomNet is a deep learning system for structure-based rational drug design.[214] AtomNet was used to predict novel candidate biomolecules for disease targets such as the Ebola virus[215] and multiple sclerosis.[216][215]

In 2017, graph neural networks were used for the first time to predict various properties of molecules in a large toxicology data set.[217] In 2019, generative neural networks were used to produce molecules that were validated experimentally all the way into mice.[218][219]
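Graph neural networks of the kind mentioned above treat a molecule as a graph of atoms connected by bonds and update each atom's feature vector from its neighbours. The following is a minimal, hypothetical sketch of one such message-passing step, assuming NumPy; the toy molecule, feature sizes, and normalisation are invented for illustration and are not the cited model.

```python
# Sketch: one message-passing step over a toy 3-atom "molecule".
import numpy as np

rng = np.random.default_rng(0)

# Adjacency matrix of a toy 3-atom chain A-B-C (bonds between A-B and B-C).
A = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]], dtype=float)

H = rng.random((3, 4))        # per-atom feature vectors (3 atoms, 4 features)
W = rng.normal(size=(4, 4))   # learnable weight matrix of the layer

# Aggregate neighbour (and own) features, transform, apply a nonlinearity.
A_hat = A + np.eye(3)                              # include each atom's own features
deg = A_hat.sum(axis=1, keepdims=True)
H_next = np.maximum(0.0, (A_hat @ H / deg) @ W)    # ReLU(normalised aggregation)

# A whole-molecule property could then be predicted from the pooled node states.
molecule_embedding = H_next.mean(axis=0)
```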
class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Customer_relationship_management">Customer relationship management</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=18" title="Edit section: Customer relationship management"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951" /><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Customer_relationship_management" title="Customer relationship management">Customer relationship management</a></div> <p><a href="/wiki/Deep_reinforcement_learning" title="Deep reinforcement learning">Deep reinforcement learning</a> has been used to approximate the value of possible <a href="/wiki/Direct_marketing" title="Direct marketing">direct marketing</a> actions, defined in terms of <a href="/wiki/RFM_(customer_value)" class="mw-redirect" title="RFM (customer value)">RFM</a> variables. The estimated value function was shown to have a natural interpretation as <a href="/wiki/Customer_lifetime_value" title="Customer lifetime value">customer lifetime value</a>.<sup id="cite_ref-220" class="reference"><a href="#cite_note-220"><span class="cite-bracket">[</span>220<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Recommendation_systems">Recommendation systems</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=19" title="Edit section: Recommendation systems"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951" /><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Recommender_system" title="Recommender system">Recommender system</a></div> <p>Recommendation systems have used deep learning to extract meaningful features for a latent factor model for content-based music and journal recommendations.<sup id="cite_ref-221" class="reference"><a href="#cite_note-221"><span class="cite-bracket">[</span>221<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-222" class="reference"><a href="#cite_note-222"><span class="cite-bracket">[</span>222<span class="cite-bracket">]</span></a></sup> Multi-view deep learning has been applied for learning user preferences from multiple domains.<sup id="cite_ref-223" class="reference"><a href="#cite_note-223"><span class="cite-bracket">[</span>223<span class="cite-bracket">]</span></a></sup> The model uses a hybrid collaborative and content-based approach and enhances recommendations in multiple tasks. 
</p> <div class="mw-heading mw-heading3"><h3 id="Bioinformatics">Bioinformatics</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=20" title="Edit section: Bioinformatics"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951" /><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Bioinformatics" title="Bioinformatics">Bioinformatics</a></div> <p>An <a href="/wiki/Autoencoder" title="Autoencoder">autoencoder</a> ANN was used in <a href="/wiki/Bioinformatics" title="Bioinformatics">bioinformatics</a>, to predict <a href="/wiki/Gene_Ontology" title="Gene Ontology">gene ontology</a> annotations and gene-function relationships.<sup id="cite_ref-224" class="reference"><a href="#cite_note-224"><span class="cite-bracket">[</span>224<span class="cite-bracket">]</span></a></sup> </p><p>In medical informatics, deep learning was used to predict sleep quality based on data from wearables<sup id="cite_ref-225" class="reference"><a href="#cite_note-225"><span class="cite-bracket">[</span>225<span class="cite-bracket">]</span></a></sup> and predictions of health complications from <a href="/wiki/Electronic_health_record" title="Electronic health record">electronic health record</a> data.<sup id="cite_ref-226" class="reference"><a href="#cite_note-226"><span class="cite-bracket">[</span>226<span class="cite-bracket">]</span></a></sup> </p><p>Deep neural networks have shown unparalleled performance in <a href="/wiki/Protein_structure_prediction" title="Protein structure prediction">predicting protein structure</a>, according to the sequence of the amino acids that make it up. In 2020, <a href="/wiki/AlphaFold" title="AlphaFold">AlphaFold</a>, a deep-learning based system, achieved a level of accuracy significantly higher than all previous computational methods.<sup id="cite_ref-227" class="reference"><a href="#cite_note-227"><span class="cite-bracket">[</span>227<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-228" class="reference"><a href="#cite_note-228"><span class="cite-bracket">[</span>228<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Deep_Neural_Network_Estimations">Deep Neural Network Estimations</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=21" title="Edit section: Deep Neural Network Estimations"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Deep neural networks can be used to estimate the entropy of a <a href="/wiki/Stochastic_process" title="Stochastic process">stochastic process</a> and called Neural Joint Entropy Estimator (NJEE).<sup id="cite_ref-SPB22_229-0" class="reference"><a href="#cite_note-SPB22-229"><span class="cite-bracket">[</span>229<span class="cite-bracket">]</span></a></sup> Such an estimation provides insights on the effects of input <a href="/wiki/Random_variables" class="mw-redirect" title="Random variables">random variables</a> on an independent <a href="/wiki/Random_variable" title="Random variable">random variable</a>. 
Practically, the DNN is trained as a classifier that maps an input vector or matrix X to an output probability distribution over the possible classes of a random variable Y, given input X. For example, in image classification tasks, the NJEE maps a vector of pixels' color values to probabilities over possible image classes. In practice, the probability distribution of Y is obtained by a softmax layer with a number of nodes equal to the alphabet size of Y. NJEE uses continuously differentiable activation functions, such that the conditions for the universal approximation theorem hold. It is shown that this method provides a strongly consistent estimator and outperforms other methods in the case of large alphabet sizes.[229]

Medical image analysis

Deep learning has been shown to produce competitive results in medical applications such as cancer cell classification, lesion detection, organ segmentation and image enhancement.[230][231] Modern deep learning tools demonstrate high accuracy in detecting various diseases and are helpful to specialists in improving diagnostic efficiency.[232][233]

Mobile advertising

Finding the appropriate mobile audience for mobile advertising is always challenging, since many data points must be considered and analyzed before a target segment can be created and used in ad serving by any ad server.[234] Deep learning has been used to interpret large, high-dimensional advertising datasets. Many data points are collected during the request/serve/click internet advertising cycle. This information can form the basis of machine learning to improve ad selection.

Image restoration

Deep learning has been successfully applied to inverse problems such as denoising, super-resolution, inpainting, and film colorization.[235] These applications include learning methods such as "Shrinkage Fields for Effective Image Restoration",[236] which trains on an image dataset, and Deep Image Prior, which trains on the image that needs restoration.

Financial fraud detection

Deep learning is being successfully applied to financial fraud detection, tax evasion detection,[237] and anti-money laundering.[238]

Materials science

In November 2023, researchers at Google DeepMind and Lawrence Berkeley National Laboratory announced that they had developed an AI system known as GNoME.
This system has contributed to materials science by discovering over 2 million new materials within a relatively short timeframe. GNoME employs deep learning techniques to efficiently explore potential material structures, achieving a significant increase in the identification of stable inorganic crystal structures. The system's predictions were validated through autonomous robotic experiments, demonstrating a noteworthy success rate of 71%. The data on newly discovered materials is publicly available through the Materials Project database, offering researchers the opportunity to identify materials with desired properties for various applications. This development has implications for the future of scientific discovery and the integration of AI in materials science research, potentially expediting material innovation and reducing costs in product development. The use of AI and deep learning suggests the possibility of minimizing or eliminating manual lab experiments and allowing scientists to focus more on the design and analysis of unique compounds.[239][240][241]

Military

The United States Department of Defense applied deep learning to train robots in new tasks through observation.[242]

Partial differential equations

Physics-informed neural networks have been used to solve partial differential equations in both forward and inverse problems in a data-driven manner.[243] One example is reconstructing fluid flow governed by the Navier–Stokes equations.
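The following is a schematic sketch of a physics-informed neural network on a deliberately simple one-dimensional boundary-value problem (assuming PyTorch); it is not the Navier–Stokes setting of the cited work, but it shows how the PDE residual and the boundary conditions enter the loss, with derivatives obtained by automatic differentiation at randomly sampled collocation points rather than on a mesh.

```python
# Sketch: fit u(x) so that u''(x) = -pi^2 sin(pi x) on (0, 1) with u(0) = u(1) = 0,
# whose exact solution is sin(pi x). Network size and training length are arbitrary.
import math
import torch
from torch import nn

torch.manual_seed(0)
net = nn.Sequential(nn.Linear(1, 32), nn.Tanh(),
                    nn.Linear(32, 32), nn.Tanh(),
                    nn.Linear(32, 1))
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)

for step in range(2000):
    # Collocation points inside the domain; derivatives come from autograd, no mesh.
    x = torch.rand(64, 1, requires_grad=True)
    u = net(x)
    du = torch.autograd.grad(u.sum(), x, create_graph=True)[0]
    d2u = torch.autograd.grad(du.sum(), x, create_graph=True)[0]

    residual = d2u + math.pi ** 2 * torch.sin(math.pi * x)   # PDE residual term
    boundary = net(torch.tensor([[0.0], [1.0]]))              # boundary condition u = 0

    loss = (residual ** 2).mean() + (boundary ** 2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```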
Using physics-informed neural networks does not require the often expensive mesh generation that conventional <a href="/wiki/Computational_fluid_dynamics" title="Computational fluid dynamics">CFD</a> methods rely on.<sup class="reference"><a href="#cite_note-244">[244]</a></sup><sup class="reference"><a href="#cite_note-245">[245]</a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Deep_backward_stochastic_differential_equation_method">Deep backward stochastic differential equation method</h3></div> <p>The <a href="/wiki/Deep_backward_stochastic_differential_equation_method" title="Deep backward stochastic differential equation method">deep backward stochastic differential equation method</a> is a numerical method that combines deep learning with <a href="/wiki/Backward_stochastic_differential_equation" title="Backward stochastic differential equation">backward stochastic differential equations</a> (BSDEs). It is particularly useful for solving high-dimensional problems in financial mathematics. By leveraging the powerful function approximation capabilities of <a href="/wiki/Deep_neural_networks" title="Deep neural networks">deep neural networks</a>, deep BSDE addresses the computational challenges faced by traditional numerical methods in high-dimensional settings. Specifically, traditional methods like finite difference methods or Monte Carlo simulations often struggle with the curse of dimensionality, where computational cost increases exponentially with the number of dimensions. Deep BSDE methods instead employ deep neural networks to approximate solutions of high-dimensional partial differential equations (PDEs), effectively reducing the computational burden.<sup class="reference"><a href="#cite_note-Han2018-246">[246]</a></sup> </p><p>In addition, the integration of <a href="/wiki/Physics-informed_neural_networks" title="Physics-informed neural networks">physics-informed neural networks</a> (PINNs) into the deep BSDE framework enhances its capability by embedding the underlying physical laws directly into the neural network architecture. This ensures that the solutions not only fit the data but also adhere to the governing stochastic differential equations. PINNs leverage the power of deep learning while respecting the constraints imposed by the physical models, resulting in more accurate and reliable solutions for financial mathematics problems. </p> <div class="mw-heading mw-heading3"><h3 id="Image_reconstruction">Image reconstruction</h3></div> <p>Image reconstruction is the recovery of the underlying images from image-related measurements.
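</p><p>As a minimal sketch of the supervised, learned-reconstruction setting that many of these methods share (illustrative only; PyTorch is assumed and the data below are random placeholders rather than a real imaging dataset), a small convolutional network can be trained to map simulated noisy measurements back to the underlying images:</p>
<pre>
# Illustrative learned image reconstruction: regress clean images from noisy measurements.
import torch

clean = torch.rand(32, 1, 64, 64)               # placeholder ground-truth images
noisy = clean + 0.1 * torch.randn_like(clean)   # simulated noisy measurements

net = torch.nn.Sequential(
    torch.nn.Conv2d(1, 16, 3, padding=1), torch.nn.ReLU(),
    torch.nn.Conv2d(16, 16, 3, padding=1), torch.nn.ReLU(),
    torch.nn.Conv2d(16, 1, 3, padding=1),
)
opt = torch.optim.Adam(net.parameters(), lr=1e-3)
for step in range(100):
    loss = torch.nn.functional.mse_loss(net(noisy), clean)  # pixel-wise reconstruction loss
    opt.zero_grad(); loss.backward(); opt.step()
</pre>
<p>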
Several works have shown the superior performance of deep learning methods compared to analytical methods for various applications, e.g., spectral imaging<sup class="reference"><a href="#cite_note-247">[247]</a></sup> and ultrasound imaging.<sup class="reference"><a href="#cite_note-248">[248]</a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Weather_prediction">Weather prediction</h3></div> <p>Traditional weather prediction systems solve a very complex system of partial differential equations. GraphCast is a deep-learning-based model, trained on a long history of weather data to predict how weather patterns change over time. It is able to predict weather conditions for up to 10 days globally, at a very detailed level, and in under a minute, with precision similar to state-of-the-art systems.<sup class="reference"><a href="#cite_note-249">[249]</a></sup><sup class="reference"><a href="#cite_note-250">[250]</a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Epigenetic_clock">Epigenetic clock</h3></div> <div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Epigenetic_clock" title="Epigenetic clock">Epigenetic clock</a></div> <p>An epigenetic clock is a <a href="/wiki/Biomarkers_of_aging" title="Biomarkers of aging">biochemical test</a> that can be used to measure age. Galkin et al. used deep neural networks to train an epigenetic aging clock of unprecedented accuracy using more than 6,000 blood samples.<sup class="reference"><a href="#cite_note-251">[251]</a></sup> The clock uses information from 1,000 <a href="/wiki/CpG_site" title="CpG site">CpG sites</a> and predicts people with certain conditions to be older than healthy controls: <a href="/wiki/Inflammatory_bowel_disease" title="Inflammatory bowel disease">IBD</a>, <a href="/wiki/Frontotemporal_dementia" title="Frontotemporal dementia">frontotemporal dementia</a>, <a href="/wiki/Ovarian_cancer" title="Ovarian cancer">ovarian cancer</a>, and <a href="/wiki/Obesity" title="Obesity">obesity</a>. The aging clock was planned to be released for public use in 2021 by Deep Longevity, a spinoff company of <a href="/wiki/Insilico_Medicine" title="Insilico Medicine">Insilico Medicine</a>.
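</p><p>A hypothetical sketch of the kind of model such a clock uses (not the published model; PyTorch is assumed and the arrays below are random placeholders standing in for real methylation data): a small feed-forward network regresses chronological age from methylation levels at a fixed panel of CpG sites.</p>
<pre>
# Hypothetical aging-clock regressor: age from methylation beta-values at 1,000 CpG sites.
import torch

n_samples, n_cpg_sites = 6000, 1000
methylation = torch.rand(n_samples, n_cpg_sites)   # placeholder beta-values in [0, 1]
ages = torch.rand(n_samples, 1) * 80 + 20          # placeholder chronological ages (years)

clock = torch.nn.Sequential(
    torch.nn.Linear(n_cpg_sites, 64), torch.nn.ReLU(),
    torch.nn.Linear(64, 1),
)
opt = torch.optim.Adam(clock.parameters(), lr=1e-3)
for step in range(200):
    loss = torch.nn.functional.mse_loss(clock(methylation), ages)  # mean squared error in years^2
    opt.zero_grad(); loss.backward(); opt.step()
</pre>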
</p> <div class="mw-heading mw-heading2"><h2 id="Relation_to_human_cognitive_and_brain_development">Relation to human cognitive and brain development</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=33" title="Edit section: Relation to human cognitive and brain development"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Deep learning is closely related to a class of theories of <a href="/wiki/Brain_development" class="mw-redirect" title="Brain development">brain development</a> (specifically, neocortical development) proposed by <a href="/wiki/Cognitive_neuroscientist" class="mw-redirect" title="Cognitive neuroscientist">cognitive neuroscientists</a> in the early 1990s.<sup id="cite_ref-UTGOFF_252-0" class="reference"><a href="#cite_note-UTGOFF-252"><span class="cite-bracket">[</span>252<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-ELMAN_253-0" class="reference"><a href="#cite_note-ELMAN-253"><span class="cite-bracket">[</span>253<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-SHRAGER_254-0" class="reference"><a href="#cite_note-SHRAGER-254"><span class="cite-bracket">[</span>254<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-QUARTZ_255-0" class="reference"><a href="#cite_note-QUARTZ-255"><span class="cite-bracket">[</span>255<span class="cite-bracket">]</span></a></sup> These developmental theories were instantiated in computational models, making them predecessors of deep learning systems. These developmental models share the property that various proposed learning dynamics in the brain (e.g., a wave of <a href="/wiki/Nerve_growth_factor" title="Nerve growth factor">nerve growth factor</a>) support the <a href="/wiki/Self-organization" title="Self-organization">self-organization</a> somewhat analogous to the neural networks utilized in deep learning models. Like the <a href="/wiki/Neocortex" title="Neocortex">neocortex</a>, neural networks employ a hierarchy of layered filters in which each layer considers information from a prior layer (or the operating environment), and then passes its output (and possibly the original input), to other layers. This process yields a self-organizing stack of <a href="/wiki/Transducer" title="Transducer">transducers</a>, well-tuned to their operating environment. A 1995 description stated, "...the infant's brain seems to organize itself under the influence of waves of so-called trophic-factors ... different regions of the brain become connected sequentially, with one layer of tissue maturing before another and so on until the whole brain is mature".<sup id="cite_ref-BLAKESLEE_256-0" class="reference"><a href="#cite_note-BLAKESLEE-256"><span class="cite-bracket">[</span>256<span class="cite-bracket">]</span></a></sup> </p><p>A variety of approaches have been used to investigate the plausibility of deep learning models from a neurobiological perspective. 
On the one hand, several variants of the <a href="/wiki/Backpropagation" title="Backpropagation">backpropagation</a> algorithm have been proposed in order to increase its processing realism.<sup id="cite_ref-257" class="reference"><a href="#cite_note-257"><span class="cite-bracket">[</span>257<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-258" class="reference"><a href="#cite_note-258"><span class="cite-bracket">[</span>258<span class="cite-bracket">]</span></a></sup> Other researchers have argued that unsupervised forms of deep learning, such as those based on hierarchical <a href="/wiki/Generative_model" title="Generative model">generative models</a> and <a href="/wiki/Deep_belief_network" title="Deep belief network">deep belief networks</a>, may be closer to biological reality.<sup id="cite_ref-259" class="reference"><a href="#cite_note-259"><span class="cite-bracket">[</span>259<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-260" class="reference"><a href="#cite_note-260"><span class="cite-bracket">[</span>260<span class="cite-bracket">]</span></a></sup> In this respect, generative neural network models have been related to neurobiological evidence about sampling-based processing in the cerebral cortex.<sup id="cite_ref-261" class="reference"><a href="#cite_note-261"><span class="cite-bracket">[</span>261<span class="cite-bracket">]</span></a></sup> </p><p>Although a systematic comparison between the human brain organization and the neuronal encoding in deep networks has not yet been established, several analogies have been reported. For example, the computations performed by deep learning units could be similar to those of actual neurons<sup id="cite_ref-262" class="reference"><a href="#cite_note-262"><span class="cite-bracket">[</span>262<span class="cite-bracket">]</span></a></sup> and neural populations.<sup id="cite_ref-263" class="reference"><a href="#cite_note-263"><span class="cite-bracket">[</span>263<span class="cite-bracket">]</span></a></sup> Similarly, the representations developed by deep learning models are similar to those measured in the primate visual system<sup id="cite_ref-264" class="reference"><a href="#cite_note-264"><span class="cite-bracket">[</span>264<span class="cite-bracket">]</span></a></sup> both at the single-unit<sup id="cite_ref-265" class="reference"><a href="#cite_note-265"><span class="cite-bracket">[</span>265<span class="cite-bracket">]</span></a></sup> and at the population<sup id="cite_ref-266" class="reference"><a href="#cite_note-266"><span class="cite-bracket">[</span>266<span class="cite-bracket">]</span></a></sup> levels. 
</p> <div class="mw-heading mw-heading2"><h2 id="Commercial_activity">Commercial activity</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=34" title="Edit section: Commercial activity"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p><a href="/wiki/Facebook" title="Facebook">Facebook</a>'s AI lab performs tasks such as <a href="/wiki/Automatic_image_annotation" title="Automatic image annotation">automatically tagging uploaded pictures</a> with the names of the people in them.<sup id="cite_ref-METZ2013_267-0" class="reference"><a href="#cite_note-METZ2013-267"><span class="cite-bracket">[</span>267<span class="cite-bracket">]</span></a></sup> </p><p>Google's <a href="/wiki/DeepMind_Technologies" class="mw-redirect" title="DeepMind Technologies">DeepMind Technologies</a> developed a system capable of learning how to play <a href="/wiki/Atari" title="Atari">Atari</a> video games using only pixels as data input. In 2015 they demonstrated their <a href="/wiki/AlphaGo" title="AlphaGo">AlphaGo</a> system, which learned the game of <a href="/wiki/Go_(game)" title="Go (game)">Go</a> well enough to beat a professional Go player.<sup id="cite_ref-268" class="reference"><a href="#cite_note-268"><span class="cite-bracket">[</span>268<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-269" class="reference"><a href="#cite_note-269"><span class="cite-bracket">[</span>269<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-270" class="reference"><a href="#cite_note-270"><span class="cite-bracket">[</span>270<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Google_Translate" title="Google Translate">Google Translate</a> uses a neural network to translate between more than 100 languages. </p><p>In 2017, Covariant.ai was launched, which focuses on integrating deep learning into factories.<sup id="cite_ref-271" class="reference"><a href="#cite_note-271"><span class="cite-bracket">[</span>271<span class="cite-bracket">]</span></a></sup> </p><p>As of 2008,<sup id="cite_ref-272" class="reference"><a href="#cite_note-272"><span class="cite-bracket">[</span>272<span class="cite-bracket">]</span></a></sup> researchers at <a href="/wiki/University_of_Texas_at_Austin" title="University of Texas at Austin">The University of Texas at Austin</a> (UT) developed a machine learning framework called Training an Agent Manually via Evaluative Reinforcement, or TAMER, which proposed new methods for robots or computer programs to learn how to perform tasks by interacting with a human instructor.<sup id="cite_ref-:12_242-1" class="reference"><a href="#cite_note-:12-242"><span class="cite-bracket">[</span>242<span class="cite-bracket">]</span></a></sup> First developed as TAMER, a new algorithm called Deep TAMER was later introduced in 2018 during a collaboration between <a href="/wiki/U.S._Army_Research_Laboratory" class="mw-redirect" title="U.S. Army Research Laboratory">U.S. Army Research Laboratory</a> (ARL) and UT researchers. Deep TAMER used deep learning to provide a robot with the ability to learn new tasks through observation.<sup id="cite_ref-:12_242-2" class="reference"><a href="#cite_note-:12-242"><span class="cite-bracket">[</span>242<span class="cite-bracket">]</span></a></sup> Using Deep TAMER, a robot learned a task with a human trainer, watching video streams or observing a human perform a task in-person. 
The robot later practiced the task with the help of some coaching from the trainer, who provided feedback such as "good job" and "bad job".<sup class="reference"><a href="#cite_note-273">[273]</a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Criticism_and_comment">Criticism and comment</h2></div> <p>Deep learning has attracted both criticism and comment, in some cases from outside the field of computer science. </p> <div class="mw-heading mw-heading3"><h3 id="Theory">Theory</h3></div> <div role="note" class="hatnote navigation-not-searchable">See also: <a href="/wiki/Explainable_artificial_intelligence" title="Explainable artificial intelligence">Explainable artificial intelligence</a></div> <p>A main criticism concerns the lack of theory surrounding some methods.<sup class="reference"><a href="#cite_note-274">[274]</a></sup> Learning in the most common deep architectures is implemented using well-understood gradient descent. However, the theory surrounding other algorithms, such as contrastive divergence, is less clear<sup class="noprint Inline-Template Template-Fact">[<i><a href="/wiki/Wikipedia:Citation_needed" title="Wikipedia:Citation needed">citation needed</a></i>]</sup> (e.g., does it converge? If so, how fast? What is it approximating?). Deep learning methods are often viewed as a <a href="/wiki/Black_box" title="Black box">black box</a>, with most confirmations done empirically rather than theoretically.<sup class="reference"><a href="#cite_note-Knight_2017-275">[275]</a></sup> </p><p>In further reference to the idea that artistic sensitivity might be inherent in relatively low levels of the cognitive hierarchy, a published series of graphic representations of the internal states of deep (20-30 layers) neural networks attempting to discern, within essentially random data, the images on which they were trained<sup class="reference"><a href="#cite_note-276">[276]</a></sup> demonstrates a visual appeal: the original research notice received well over 1,000 comments, and was the subject of what was, for a time, the most frequently accessed article on <i><a href="/wiki/The_Guardian" title="The Guardian">The Guardian</a></i>'s website.<sup class="reference"><a href="#cite_note-277">[277]</a></sup>
</p> <div class="mw-heading mw-heading3"><h3 id="Errors">Errors</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=37" title="Edit section: Errors"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Some deep learning architectures display problematic behaviors,<sup id="cite_ref-goertzel_278-0" class="reference"><a href="#cite_note-goertzel-278"><span class="cite-bracket">[</span>278<span class="cite-bracket">]</span></a></sup> such as confidently classifying unrecognizable images as belonging to a familiar category of ordinary images (2014)<sup id="cite_ref-279" class="reference"><a href="#cite_note-279"><span class="cite-bracket">[</span>279<span class="cite-bracket">]</span></a></sup> and misclassifying minuscule perturbations of correctly classified images (2013).<sup id="cite_ref-280" class="reference"><a href="#cite_note-280"><span class="cite-bracket">[</span>280<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Ben_Goertzel" title="Ben Goertzel">Goertzel</a> hypothesized that these behaviors are due to limitations in their internal representations and that these limitations would inhibit integration into heterogeneous multi-component <a href="/wiki/Artificial_general_intelligence" title="Artificial general intelligence">artificial general intelligence</a> (AGI) architectures.<sup id="cite_ref-goertzel_278-1" class="reference"><a href="#cite_note-goertzel-278"><span class="cite-bracket">[</span>278<span class="cite-bracket">]</span></a></sup> These issues may possibly be addressed by deep learning architectures that internally form states homologous to image-grammar<sup id="cite_ref-281" class="reference"><a href="#cite_note-281"><span class="cite-bracket">[</span>281<span class="cite-bracket">]</span></a></sup> decompositions of observed entities and events.<sup id="cite_ref-goertzel_278-2" class="reference"><a href="#cite_note-goertzel-278"><span class="cite-bracket">[</span>278<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Grammar_induction" title="Grammar induction">Learning a grammar</a> (visual or linguistic) from training data would be equivalent to restricting the system to <a href="/wiki/Commonsense_reasoning" title="Commonsense reasoning">commonsense reasoning</a> that operates on concepts in terms of grammatical <a href="/wiki/Production_(computer_science)" title="Production (computer science)">production rules</a> and is a basic goal of both human language acquisition<sup id="cite_ref-282" class="reference"><a href="#cite_note-282"><span class="cite-bracket">[</span>282<span class="cite-bracket">]</span></a></sup> and <a href="/wiki/Artificial_intelligence" title="Artificial intelligence">artificial intelligence</a> (AI).<sup id="cite_ref-283" class="reference"><a href="#cite_note-283"><span class="cite-bracket">[</span>283<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Cyber_threat">Cyber threat</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=38" title="Edit section: Cyber threat"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>As deep learning moves from the lab into the world, research and experience show that artificial neural networks are vulnerable to hacks and deception.<sup id="cite_ref-284" class="reference"><a href="#cite_note-284"><span 
class="cite-bracket">[</span>284<span class="cite-bracket">]</span></a></sup> By identifying patterns that these systems use to function, attackers can modify inputs to ANNs in such a way that the ANN finds a match that human observers would not recognize. For example, an attacker can make subtle changes to an image such that the ANN finds a match even though the image looks to a human nothing like the search target. Such manipulation is termed an "<a href="/wiki/Adversarial_machine_learning" title="Adversarial machine learning">adversarial attack</a>".<sup id="cite_ref-285" class="reference"><a href="#cite_note-285"><span class="cite-bracket">[</span>285<span class="cite-bracket">]</span></a></sup> </p><p>In 2016 researchers used one ANN to doctor images in trial and error fashion, identify another's focal points, and thereby generate images that deceived it. The modified images looked no different to human eyes. Another group showed that printouts of doctored images then photographed successfully tricked an image classification system.<sup id="cite_ref-:4_286-0" class="reference"><a href="#cite_note-:4-286"><span class="cite-bracket">[</span>286<span class="cite-bracket">]</span></a></sup> One defense is reverse image search, in which a possible fake image is submitted to a site such as <a href="/wiki/TinEye" title="TinEye">TinEye</a> that can then find other instances of it. A refinement is to search using only parts of the image, to identify images from which that piece may have been taken<b>.</b><sup id="cite_ref-287" class="reference"><a href="#cite_note-287"><span class="cite-bracket">[</span>287<span class="cite-bracket">]</span></a></sup> </p><p>Another group showed that certain <a href="/wiki/Psychedelic_art" title="Psychedelic art">psychedelic</a> spectacles could fool a <a href="/wiki/Facial_recognition_system" title="Facial recognition system">facial recognition system</a> into thinking ordinary people were celebrities, potentially allowing one person to impersonate another. In 2017 researchers added stickers to <a href="/wiki/Stop_sign" title="Stop sign">stop signs</a> and caused an ANN to misclassify them.<sup id="cite_ref-:4_286-1" class="reference"><a href="#cite_note-:4-286"><span class="cite-bracket">[</span>286<span class="cite-bracket">]</span></a></sup> </p><p>ANNs can however be further trained to detect attempts at <a href="/wiki/Deception" title="Deception">deception</a>, potentially leading attackers and defenders into an arms race similar to the kind that already defines the <a href="/wiki/Malware" title="Malware">malware</a> defense industry. 
ANNs have been trained to defeat ANN-based anti-<a href="/wiki/Malware" title="Malware">malware</a> software by repeatedly attacking a defense with malware that was continually altered by a <a href="/wiki/Genetic_algorithm" title="Genetic algorithm">genetic algorithm</a> until it tricked the anti-malware while retaining its ability to damage the target.<sup id="cite_ref-:4_286-2" class="reference"><a href="#cite_note-:4-286"><span class="cite-bracket">[</span>286<span class="cite-bracket">]</span></a></sup> </p><p>In 2016, another group demonstrated that certain sounds could make the <a href="/wiki/Google_Now" title="Google Now">Google Now</a> voice command system open a particular web address, and hypothesized that this could "serve as a stepping stone for further attacks (e.g., opening a web page hosting drive-by malware)".<sup id="cite_ref-:4_286-3" class="reference"><a href="#cite_note-:4-286"><span class="cite-bracket">[</span>286<span class="cite-bracket">]</span></a></sup> </p><p>In "<a href="/wiki/Adversarial_machine_learning#Data_poisoning" title="Adversarial machine learning">data poisoning</a>", false data is continually smuggled into a machine learning system's training set to prevent it from achieving mastery.<sup id="cite_ref-:4_286-4" class="reference"><a href="#cite_note-:4-286"><span class="cite-bracket">[</span>286<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Data_collection_ethics">Data collection ethics</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=39" title="Edit section: Data collection ethics"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The deep learning systems that are trained using supervised learning often rely on data that is created and/or annotated by humans.<sup id="cite_ref-288" class="reference"><a href="#cite_note-288"><span class="cite-bracket">[</span>288<span class="cite-bracket">]</span></a></sup> It has been argued that not only low-paid <a href="/wiki/Clickworkers" title="Clickworkers">clickwork</a> (such as on <a href="/wiki/Amazon_Mechanical_Turk" title="Amazon Mechanical Turk">Amazon Mechanical Turk</a>) is regularly deployed for this purpose, but also implicit forms of human <a href="/wiki/Microwork" title="Microwork">microwork</a> that are often not recognized as such.<sup id="cite_ref-:13_289-0" class="reference"><a href="#cite_note-:13-289"><span class="cite-bracket">[</span>289<span class="cite-bracket">]</span></a></sup> The philosopher <a href="/wiki/Rainer_M%C3%BChlhoff" title="Rainer Mühlhoff">Rainer Mühlhoff</a> distinguishes five types of "machinic capture" of human microwork to generate training data: (1) <a href="/wiki/Gamification" title="Gamification">gamification</a> (the embedding of annotation or computation tasks in the flow of a game), (2) "trapping and tracking" (e.g. <a href="/wiki/CAPTCHA" title="CAPTCHA">CAPTCHAs</a> for image recognition or click-tracking on Google <a href="/wiki/Search_engine_results_page" title="Search engine results page">search results pages</a>), (3) exploitation of social motivations (e.g. <a href="/wiki/Tag_(Facebook)" class="mw-redirect" title="Tag (Facebook)">tagging faces</a> on <a href="/wiki/Facebook" title="Facebook">Facebook</a> to obtain labeled facial images), (4) <a href="/wiki/Information_mining" class="mw-redirect" title="Information mining">information mining</a> (e.g. 
by leveraging <a href="/wiki/Quantified_self" title="Quantified self">quantified-self</a> devices such as <a href="/wiki/Activity_tracker" class="mw-redirect" title="Activity tracker">activity trackers</a>) and (5) <a href="/wiki/Clickworkers" title="Clickworkers">clickwork</a>.<sup id="cite_ref-:13_289-1" class="reference"><a href="#cite_note-:13-289"><span class="cite-bracket">[</span>289<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="See_also">See also</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=40" title="Edit section: See also"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <ul><li><a href="/wiki/Applications_of_artificial_intelligence" title="Applications of artificial intelligence">Applications of artificial intelligence</a></li> <li><a href="/wiki/Comparison_of_deep_learning_software" title="Comparison of deep learning software">Comparison of deep learning software</a></li> <li><a href="/wiki/Compressed_sensing" title="Compressed sensing">Compressed sensing</a></li> <li><a href="/wiki/Differentiable_programming" title="Differentiable programming">Differentiable programming</a></li> <li><a href="/wiki/Echo_state_network" title="Echo state network">Echo state network</a></li> <li><a href="/wiki/List_of_artificial_intelligence_projects" title="List of artificial intelligence projects">List of artificial intelligence projects</a></li> <li><a href="/wiki/Liquid_state_machine" title="Liquid state machine">Liquid state machine</a></li> <li><a href="/wiki/List_of_datasets_for_machine-learning_research" title="List of datasets for machine-learning research">List of datasets for machine-learning research</a></li> <li><a href="/wiki/Reservoir_computing" title="Reservoir computing">Reservoir computing</a></li> <li><a href="/wiki/Scale_space#Deep_learning_and_scale_space" title="Scale space">Scale space and deep learning</a></li> <li><a href="/wiki/Sparse_coding" class="mw-redirect" title="Sparse coding">Sparse coding</a></li> <li><a href="/wiki/Stochastic_parrot" title="Stochastic parrot">Stochastic parrot</a></li> <li><a href="/wiki/Topological_deep_learning" title="Topological deep learning">Topological deep learning</a></li></ul> <div class="mw-heading mw-heading2"><h2 id="References">References</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=41" title="Edit section: References"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1239543626">.mw-parser-output .reflist{margin-bottom:0.5em;list-style-type:decimal}@media screen{.mw-parser-output .reflist{font-size:90%}}.mw-parser-output .reflist .references{font-size:100%;margin-bottom:0;list-style-type:inherit}.mw-parser-output .reflist-columns-2{column-width:30em}.mw-parser-output .reflist-columns-3{column-width:25em}.mw-parser-output .reflist-columns{margin-top:0.3em}.mw-parser-output .reflist-columns ol{margin-top:0}.mw-parser-output .reflist-columns li{page-break-inside:avoid;break-inside:avoid-column}.mw-parser-output .reflist-upper-alpha{list-style-type:upper-alpha}.mw-parser-output .reflist-upper-roman{list-style-type:upper-roman}.mw-parser-output .reflist-lower-alpha{list-style-type:lower-alpha}.mw-parser-output .reflist-lower-greek{list-style-type:lower-greek}.mw-parser-output 
.reflist-lower-roman{list-style-type:lower-roman}</style><div class="reflist reflist-columns references-column-width" style="column-width: 30em;"> <ol class="references"> <li id="cite_note-1">^ Schulz, Hannes; Behnke, Sven (1 November 2012). <a href="https://www.semanticscholar.org/paper/51a80649d16a38d41dbd20472deb3bc9b61b59a0">"Deep Learning"</a>. <i>KI - Künstliche Intelligenz</i>. <b>26</b> (4): 357–363. doi:10.1007/s13218-012-0198-z. ISSN 1610-1987. S2CID 220523562.</li> <li id="cite_note-NatureBengio-2">^ LeCun, Yann; Bengio, Yoshua; Hinton, Geoffrey (2015). <a href="https://hal.science/hal-04206682/file/Lecun2015.pdf">"Deep Learning"</a> (PDF). <i>Nature</i>. <b>521</b> (7553): 436–444. Bibcode:2015Natur.521..436L. doi:10.1038/nature14539. PMID 26017442. S2CID 3074096.</li> <li id="cite_note-:9-3">^ Ciresan, D.; Meier, U.; Schmidhuber, J. (2012). "Multi-column deep neural networks for image classification". <i>2012 IEEE Conference on Computer Vision and Pattern Recognition</i>. pp. 3642–3649. arXiv:1202.2745. doi:10.1109/cvpr.2012.6248110. ISBN 978-1-4673-1228-8. S2CID 2161592.</li> <li id="cite_note-krizhevsky2012-4">^ Krizhevsky, Alex; Sutskever, Ilya; Hinton, Geoffrey (2012). <a href="https://www.cs.toronto.edu/~kriz/imagenet_classification_with_deep_convolutional.pdf">"ImageNet Classification with Deep Convolutional Neural Networks"</a> (PDF). <i>NIPS 2012: Neural Information Processing Systems, Lake Tahoe, Nevada</i>. Archived from the original on 2017-01-10. Retrieved 2017-05-24.</li> <li id="cite_note-5">^ <a href="https://techcrunch.com/2017/05/24/alphago-beats-planets-best-human-go-player-ke-jie/amp/">"Google's AlphaGo AI wins three-match series against the world's best Go player"</a>. <i>TechCrunch</i>. 25 May 2017. Archived from the original on 17 June 2018. Retrieved 17 June 2018.</li> <li id="cite_note-6">^ <a href="https://news.mit.edu/2022/neural-networks-brain-function-1102">"Study urges caution when comparing neural networks to the brain"</a>. <i>MIT News | Massachusetts Institute of Technology</i>. 2022-11-02. Retrieved 2023-12-06.</li> <li id="cite_note-BENGIODEEP-7">^ Bengio, Yoshua (2009). <a href="https://web.archive.org/web/20160304084250/http://sanghv.com/download/soft/machine%20learning,%20artificial%20intelligence,%20mathematics%20ebooks/ML/learning%20deep%20architectures%20for%20AI%20(2009).pdf">"Learning Deep Architectures for AI"</a> (PDF). <i>Foundations and Trends in Machine Learning</i>. <b>2</b> (1): 1–127. CiteSeerX 10.1.1.701.9550. doi:10.1561/2200000006. S2CID 207178999. Archived from the original (PDF) on 4 March 2016. Retrieved 3 September 2015.</li> <li id="cite_note-BENGIO2012-8">^ Bengio, Y.; Courville, A.; Vincent, P. (2013). "Representation Learning: A Review and New Perspectives". <i>IEEE Transactions on Pattern Analysis and Machine Intelligence</i>. <b>35</b> (8): 1798–1828. arXiv:1206.5538. doi:10.1109/tpami.2013.50. PMID 23787338. S2CID 393948.</li> <li id="cite_note-SCHIDHUB-9">^ Schmidhuber, J. (2015). "Deep Learning in Neural Networks: An Overview". <i>Neural Networks</i>. <b>61</b>: 85–117. arXiv:1404.7828. doi:10.1016/j.neunet.2014.09.003. PMID 25462637. S2CID 11715509.</li> <li id="cite_note-10">^ Shigeki, Sugiyama (12 April 2019). <a href="https://books.google.com/books?id=9CqQDwAAQBAJ&pg=PA15"><i>Human Behavior and Another Kind in Consciousness: Emerging Research and Opportunities</i></a>. IGI Global. ISBN 978-1-5225-8218-2.</li> <li id="cite_note-BENGIO2007-11">^ Bengio, Yoshua; Lamblin, Pascal; Popovici, Dan; Larochelle, Hugo (2007). <a href="http://papers.nips.cc/paper/3048-greedy-layer-wise-training-of-deep-networks.pdf"><i>Greedy layer-wise training of deep networks</i></a> (PDF). Advances in neural information processing systems. pp. 153–160. Archived from the original on 2019-10-20. Retrieved 2019-10-06.</li> <li id="cite_note-SCHOLARDBNS-12">^ Hinton, G.E. (2009). <a href="https://doi.org/10.4249%2Fscholarpedia.5947">"Deep belief networks"</a>. <i>Scholarpedia</i>. <b>4</b> (5): 5947. Bibcode:2009SchpJ...4.5947H. doi:10.4249/scholarpedia.5947.</li> <li id="cite_note-dechter1986-13">^ <a href="/wiki/Rina_Dechter" title="Rina Dechter">Rina Dechter</a> (1986). <i>Learning while searching in constraint-satisfaction problems</i>. University of California, Computer Science Department, Cognitive Systems Laboratory. <a href="https://www.researchgate.net/publication/221605378_Learning_While_Searching_in_Constraint-Satisfaction-Problems">Online</a>. <a href="https://web.archive.org/web/20160419054654/https://www.researchgate.net/publication/221605378_Learning_While_Searching_in_Constraint-Satisfaction-Problems">Archived</a> 2016-04-19 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>.</li> <li id="cite_note-MV_1-14">^ Aizenberg, I.N.; Aizenberg, N.N.; Vandewalle, J. (2000). <a href="https://link.springer.com/book/10.1007/978-1-4757-3115-6"><i>Multi-Valued and Universal Binary Neurons</i></a>. Science & Business Media. doi:10.1007/978-1-4757-3115-6. ISBN 978-0-7923-7824-2. Retrieved 27 December 2023.</li> <li id="cite_note-15">^ Co-evolving recurrent neurons learn deep memory POMDPs. Proc. GECCO, Washington, D. C., pp. 1795–1802, ACM Press, New York, NY, USA, 2005.</li> <li id="cite_note-16">^ <span class="reference-text"><cite id="CITEREFFradkov2020" class="citation journal cs1">Fradkov, Alexander L. (2020). <a href="https://doi.org/10.1016%2Fj.ifacol.2020.12.1888">"Early History of Machine Learning"</a>. <i>IFAC-PapersOnLine</i>. 21st IFAC World Congress. <b>53</b> (2): 1385–1390. doi:10.1016/j.ifacol.2020.12.1888. ISSN 2405-8963.
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:235081987">235081987</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IFAC-PapersOnLine&rft.atitle=Early+History+of+Machine+Learning&rft.volume=53&rft.issue=2&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1385-%3C%2Fspan%3E1390&rft.date=2020-01-01&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A235081987%23id-name%3DS2CID&rft.issn=2405-8963&rft_id=info%3Adoi%2F10.1016%2Fj.ifacol.2020.12.1888&rft.aulast=Fradkov&rft.aufirst=Alexander+L.&rft_id=https%3A%2F%2Fdoi.org%2F10.1016%252Fj.ifacol.2020.12.1888&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-cyb-17"><span class="mw-cite-backlink">^ <a href="#cite_ref-cyb_17-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-cyb_17-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-cyb_17-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFCybenko1989" class="citation journal cs1">Cybenko (1989). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20151010204407/http://deeplearning.cs.cmu.edu/pdfs/Cybenko.pdf">"Approximations by superpositions of sigmoidal functions"</a> <span class="cs1-format">(PDF)</span>. <i><a href="/wiki/Mathematics_of_Control,_Signals,_and_Systems" title="Mathematics of Control, Signals, and Systems">Mathematics of Control, Signals, and Systems</a></i>. <b>2</b> (4): <span class="nowrap">303–</span>314. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/1989MCSS....2..303C">1989MCSS....2..303C</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fbf02551274">10.1007/bf02551274</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:3958369">3958369</a>. 
Archived from <a rel="nofollow" class="external text" href="http://deeplearning.cs.cmu.edu/pdfs/Cybenko.pdf">the original</a> <span class="cs1-format">(PDF)</span> on 10 October 2015.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Mathematics+of+Control%2C+Signals%2C+and+Systems&rft.atitle=Approximations+by+superpositions+of+sigmoidal+functions&rft.volume=2&rft.issue=4&rft.pages=%3Cspan+class%3D%22nowrap%22%3E303-%3C%2Fspan%3E314&rft.date=1989&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A3958369%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1007%2Fbf02551274&rft_id=info%3Abibcode%2F1989MCSS....2..303C&rft.au=Cybenko&rft_id=http%3A%2F%2Fdeeplearning.cs.cmu.edu%2Fpdfs%2FCybenko.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-horn-18"><span class="mw-cite-backlink">^ <a href="#cite_ref-horn_18-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-horn_18-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-horn_18-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHornik1991" class="citation journal cs1">Hornik, Kurt (1991). "Approximation Capabilities of Multilayer Feedforward Networks". <i>Neural Networks</i>. <b>4</b> (2): <span class="nowrap">251–</span>257. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2F0893-6080%2891%2990009-t">10.1016/0893-6080(91)90009-t</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:7343126">7343126</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Neural+Networks&rft.atitle=Approximation+Capabilities+of+Multilayer+Feedforward+Networks&rft.volume=4&rft.issue=2&rft.pages=%3Cspan+class%3D%22nowrap%22%3E251-%3C%2Fspan%3E257&rft.date=1991&rft_id=info%3Adoi%2F10.1016%2F0893-6080%2891%2990009-t&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A7343126%23id-name%3DS2CID&rft.aulast=Hornik&rft.aufirst=Kurt&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Haykin,_Simon_1998-19"><span class="mw-cite-backlink">^ <a href="#cite_ref-Haykin,_Simon_1998_19-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Haykin,_Simon_1998_19-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHaykin1999" class="citation book cs1">Haykin, Simon S. (1999). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=bX4pAQAAMAAJ"><i>Neural Networks: A Comprehensive Foundation</i></a>. Prentice Hall. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-13-273350-2" title="Special:BookSources/978-0-13-273350-2"><bdi>978-0-13-273350-2</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Neural+Networks%3A+A+Comprehensive+Foundation&rft.pub=Prentice+Hall&rft.date=1999&rft.isbn=978-0-13-273350-2&rft.aulast=Haykin&rft.aufirst=Simon+S.&rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3DbX4pAQAAMAAJ&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Hassoun,_M._1995_p._48-20"><span class="mw-cite-backlink">^ <a href="#cite_ref-Hassoun,_M._1995_p._48_20-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Hassoun,_M._1995_p._48_20-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHassoun1995" class="citation book cs1">Hassoun, Mohamad H. (1995). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=Otk32Y3QkxQC&pg=PA48"><i>Fundamentals of Artificial Neural Networks</i></a>. MIT Press. p. 48. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-262-08239-6" title="Special:BookSources/978-0-262-08239-6"><bdi>978-0-262-08239-6</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Fundamentals+of+Artificial+Neural+Networks&rft.pages=48&rft.pub=MIT+Press&rft.date=1995&rft.isbn=978-0-262-08239-6&rft.aulast=Hassoun&rft.aufirst=Mohamad+H.&rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3DOtk32Y3QkxQC%26pg%3DPA48&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-ZhouLu-21"><span class="mw-cite-backlink">^ <a href="#cite_ref-ZhouLu_21-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-ZhouLu_21-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text">Lu, Z., Pu, H., Wang, F., Hu, Z., & Wang, L. (2017). <a rel="nofollow" class="external text" href="http://papers.nips.cc/paper/7203-the-expressive-power-of-neural-networks-a-view-from-the-width">The Expressive Power of Neural Networks: A View from the Width</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190213005539/http://papers.nips.cc/paper/7203-the-expressive-power-of-neural-networks-a-view-from-the-width">Archived</a> 2019-02-13 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>. Neural Information Processing Systems, 6231-6239.</span> </li> <li id="cite_note-22"><span class="mw-cite-backlink"><b><a href="#cite_ref-22">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFOrhanMa2017" class="citation journal cs1">Orhan, A. E.; Ma, W. J. (2017). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5527101">"Efficient probabilistic inference in generic neural networks trained with non-probabilistic feedback"</a>. <i>Nature Communications</i>. <b>8</b> (1): 138. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2017NatCo...8..138O">2017NatCo...8..138O</a>. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fs41467-017-00181-8">10.1038/s41467-017-00181-8</a></span>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5527101">5527101</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/28743932">28743932</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature+Communications&rft.atitle=Efficient+probabilistic+inference+in+generic+neural+networks+trained+with+non-probabilistic+feedback&rft.volume=8&rft.issue=1&rft.pages=138&rft.date=2017&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC5527101%23id-name%3DPMC&rft_id=info%3Apmid%2F28743932&rft_id=info%3Adoi%2F10.1038%2Fs41467-017-00181-8&rft_id=info%3Abibcode%2F2017NatCo...8..138O&rft.aulast=Orhan&rft.aufirst=A.+E.&rft.au=Ma%2C+W.+J.&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC5527101&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-BOOK2014-23"><span class="mw-cite-backlink">^ <a href="#cite_ref-BOOK2014_23-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-BOOK2014_23-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-BOOK2014_23-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-BOOK2014_23-3"><sup><i><b>d</b></i></sup></a> <a href="#cite_ref-BOOK2014_23-4"><sup><i><b>e</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFDengYu2014" class="citation journal cs1">Deng, L.; Yu, D. (2014). <a rel="nofollow" class="external text" href="http://research.microsoft.com/pubs/209355/DeepLearning-NowPublishing-Vol7-SIG-039.pdf">"Deep Learning: Methods and Applications"</a> <span class="cs1-format">(PDF)</span>. <i>Foundations and Trends in Signal Processing</i>. <b>7</b> (<span class="nowrap">3–</span>4): <span class="nowrap">1–</span>199. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1561%2F2000000039">10.1561/2000000039</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160314152112/http://research.microsoft.com/pubs/209355/DeepLearning-NowPublishing-Vol7-SIG-039.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2016-03-14<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2014-10-18</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Foundations+and+Trends+in+Signal+Processing&rft.atitle=Deep+Learning%3A+Methods+and+Applications&rft.volume=7&rft.issue=%3Cspan+class%3D%22nowrap%22%3E3%E2%80%93%3C%2Fspan%3E4&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1-%3C%2Fspan%3E199&rft.date=2014&rft_id=info%3Adoi%2F10.1561%2F2000000039&rft.aulast=Deng&rft.aufirst=L.&rft.au=Yu%2C+D.&rft_id=http%3A%2F%2Fresearch.microsoft.com%2Fpubs%2F209355%2FDeepLearning-NowPublishing-Vol7-SIG-039.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-MURPHY-24"><span class="mw-cite-backlink">^ <a href="#cite_ref-MURPHY_24-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-MURPHY_24-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-MURPHY_24-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-MURPHY_24-3"><sup><i><b>d</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFMurphy2012" class="citation book cs1">Murphy, Kevin P. (24 August 2012). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=NZP6AQAAQBAJ"><i>Machine Learning: A Probabilistic Perspective</i></a>. MIT Press. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-262-01802-9" title="Special:BookSources/978-0-262-01802-9"><bdi>978-0-262-01802-9</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Machine+Learning%3A+A+Probabilistic+Perspective&rft.pub=MIT+Press&rft.date=2012-08-24&rft.isbn=978-0-262-01802-9&rft.aulast=Murphy&rft.aufirst=Kevin+P.&rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3DNZP6AQAAQBAJ&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Fukushima1969-25"><span class="mw-cite-backlink">^ <a href="#cite_ref-Fukushima1969_25-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Fukushima1969_25-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFFukushima1969" class="citation journal cs1">Fukushima, K. (1969). "Visual feature extraction by a multilayered network of analog threshold elements". <i>IEEE Transactions on Systems Science and Cybernetics</i>. <b>5</b> (4): <span class="nowrap">322–</span>333. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FTSSC.1969.300225">10.1109/TSSC.1969.300225</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Transactions+on+Systems+Science+and+Cybernetics&rft.atitle=Visual+feature+extraction+by+a+multilayered+network+of+analog+threshold+elements&rft.volume=5&rft.issue=4&rft.pages=%3Cspan+class%3D%22nowrap%22%3E322-%3C%2Fspan%3E333&rft.date=1969&rft_id=info%3Adoi%2F10.1109%2FTSSC.1969.300225&rft.aulast=Fukushima&rft.aufirst=K.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-sonoda17-26"><span class="mw-cite-backlink"><b><a href="#cite_ref-sonoda17_26-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSonodaMurata2017" class="citation journal cs1">Sonoda, Sho; Murata, Noboru (2017). "Neural network with unbounded activation functions is universal approximator". <i>Applied and Computational Harmonic Analysis</i>. <b>43</b> (2): <span class="nowrap">233–</span>268. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1505.03654">1505.03654</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.acha.2015.12.005">10.1016/j.acha.2015.12.005</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:12149203">12149203</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Applied+and+Computational+Harmonic+Analysis&rft.atitle=Neural+network+with+unbounded+activation+functions+is+universal+approximator&rft.volume=43&rft.issue=2&rft.pages=%3Cspan+class%3D%22nowrap%22%3E233-%3C%2Fspan%3E268&rft.date=2017&rft_id=info%3Aarxiv%2F1505.03654&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A12149203%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1016%2Fj.acha.2015.12.005&rft.aulast=Sonoda&rft.aufirst=Sho&rft.au=Murata%2C+Noboru&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-prml-27"><span class="mw-cite-backlink"><b><a href="#cite_ref-prml_27-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFBishop,_Christopher_M.2006" class="citation book cs1">Bishop, Christopher M. (2006). <a rel="nofollow" class="external text" href="http://users.isr.ist.utl.pt/~wurmd/Livros/school/Bishop%20-%20Pattern%20Recognition%20And%20Machine%20Learning%20-%20Springer%20%202006.pdf"><i>Pattern Recognition and Machine Learning</i></a> <span class="cs1-format">(PDF)</span>. Springer. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-387-31073-2" title="Special:BookSources/978-0-387-31073-2"><bdi>978-0-387-31073-2</bdi></a>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20170111005101/http://users.isr.ist.utl.pt/~wurmd/Livros/school/Bishop%20-%20Pattern%20Recognition%20And%20Machine%20Learning%20-%20Springer%20%202006.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2017-01-11<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-08-06</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Pattern+Recognition+and+Machine+Learning&rft.pub=Springer&rft.date=2006&rft.isbn=978-0-387-31073-2&rft.au=Bishop%2C+Christopher+M.&rft_id=http%3A%2F%2Fusers.isr.ist.utl.pt%2F~wurmd%2FLivros%2Fschool%2FBishop%2520-%2520Pattern%2520Recognition%2520And%2520Machine%2520Learning%2520-%2520Springer%2520%25202006.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-ising1925-28"><span class="mw-cite-backlink">^ <a href="#cite_ref-ising1925_28-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-ising1925_28-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.hs-augsburg.de/~harsch/anglica/Chronology/20thC/Ising/isi_fm00.html">"bibliotheca Augustana"</a>. <i>www.hs-augsburg.de</i>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=www.hs-augsburg.de&rft.atitle=bibliotheca+Augustana&rft_id=https%3A%2F%2Fwww.hs-augsburg.de%2F~harsch%2Fanglica%2FChronology%2F20thC%2FIsing%2Fisi_fm00.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-brush67-29"><span class="mw-cite-backlink"><b><a href="#cite_ref-brush67_29-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFBrush1967" class="citation journal cs1">Brush, Stephen G. (1967). "History of the Lenz-Ising Model". <i>Reviews of Modern Physics</i>. <b>39</b> (4): <span class="nowrap">883–</span>893. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/1967RvMP...39..883B">1967RvMP...39..883B</a>. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1103%2FRevModPhys.39.883">10.1103/RevModPhys.39.883</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Reviews+of+Modern+Physics&rft.atitle=History+of+the+Lenz-Ising+Model&rft.volume=39&rft.issue=4&rft.pages=%3Cspan+class%3D%22nowrap%22%3E883-%3C%2Fspan%3E893&rft.date=1967&rft_id=info%3Adoi%2F10.1103%2FRevModPhys.39.883&rft_id=info%3Abibcode%2F1967RvMP...39..883B&rft.aulast=Brush&rft.aufirst=Stephen+G.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Amari1972-30"><span class="mw-cite-backlink">^ <a href="#cite_ref-Amari1972_30-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Amari1972_30-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFAmari1972" class="citation journal cs1">Amari, Shun-Ichi (1972). "Learning patterns and pattern sequences by self-organizing nets of threshold elements". <i>IEEE Transactions</i>. <b>C</b> (21): <span class="nowrap">1197–</span>1206.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Transactions&rft.atitle=Learning+patterns+and+pattern+sequences+by+self-organizing+nets+of+threshold+elements&rft.volume=C&rft.issue=21&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1197-%3C%2Fspan%3E1206&rft.date=1972&rft.aulast=Amari&rft.aufirst=Shun-Ichi&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-DLhistory-31"><span class="mw-cite-backlink">^ <a href="#cite_ref-DLhistory_31-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-DLhistory_31-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-DLhistory_31-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-DLhistory_31-3"><sup><i><b>d</b></i></sup></a> <a href="#cite_ref-DLhistory_31-4"><sup><i><b>e</b></i></sup></a> <a href="#cite_ref-DLhistory_31-5"><sup><i><b>f</b></i></sup></a> <a href="#cite_ref-DLhistory_31-6"><sup><i><b>g</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSchmidhuber2022" class="citation arxiv cs1"><a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Schmidhuber, Jürgen</a> (2022). "Annotated History of Modern AI and Deep Learning". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2212.11279">2212.11279</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.NE">cs.NE</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Annotated+History+of+Modern+AI+and+Deep+Learning&rft.date=2022&rft_id=info%3Aarxiv%2F2212.11279&rft.aulast=Schmidhuber&rft.aufirst=J%C3%BCrgen&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Hopfield1982-32"><span class="mw-cite-backlink"><b><a href="#cite_ref-Hopfield1982_32-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHopfield1982" class="citation journal cs1">Hopfield, J. J. (1982). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC346238">"Neural networks and physical systems with emergent collective computational abilities"</a>. <i>Proceedings of the National Academy of Sciences</i>. <b>79</b> (8): <span class="nowrap">2554–</span>2558. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/1982PNAS...79.2554H">1982PNAS...79.2554H</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1073%2Fpnas.79.8.2554">10.1073/pnas.79.8.2554</a></span>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC346238">346238</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/6953413">6953413</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Proceedings+of+the+National+Academy+of+Sciences&rft.atitle=Neural+networks+and+physical+systems+with+emergent+collective+computational+abilities&rft.volume=79&rft.issue=8&rft.pages=%3Cspan+class%3D%22nowrap%22%3E2554-%3C%2Fspan%3E2558&rft.date=1982&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC346238%23id-name%3DPMC&rft_id=info%3Apmid%2F6953413&rft_id=info%3Adoi%2F10.1073%2Fpnas.79.8.2554&rft_id=info%3Abibcode%2F1982PNAS...79.2554H&rft.aulast=Hopfield&rft.aufirst=J.+J.&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC346238&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Nakano1971-33"><span class="mw-cite-backlink"><b><a href="#cite_ref-Nakano1971_33-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFNakano1971" class="citation book cs1">Nakano, Kaoru (1971). "Learning Process in a Model of Associative Memory". <i>Pattern Recognition and Machine Learning</i>. pp. <span class="nowrap">172–</span>186. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-1-4615-7566-5_15">10.1007/978-1-4615-7566-5_15</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4615-7568-9" title="Special:BookSources/978-1-4615-7568-9"><bdi>978-1-4615-7568-9</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Learning+Process+in+a+Model+of+Associative+Memory&rft.btitle=Pattern+Recognition+and+Machine+Learning&rft.pages=%3Cspan+class%3D%22nowrap%22%3E172-%3C%2Fspan%3E186&rft.date=1971&rft_id=info%3Adoi%2F10.1007%2F978-1-4615-7566-5_15&rft.isbn=978-1-4615-7568-9&rft.aulast=Nakano&rft.aufirst=Kaoru&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Nakano1972-34"><span class="mw-cite-backlink"><b><a href="#cite_ref-Nakano1972_34-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFNakano1972" class="citation journal cs1 cs1-prop-long-vol">Nakano, Kaoru (1972). "Associatron-A Model of Associative Memory". <i>IEEE Transactions on Systems, Man, and Cybernetics</i>. SMC-2 (3): <span class="nowrap">380–</span>388. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FTSMC.1972.4309133">10.1109/TSMC.1972.4309133</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Transactions+on+Systems%2C+Man%2C+and+Cybernetics&rft.atitle=Associatron-A+Model+of+Associative+Memory&rft.volume=SMC-2&rft.issue=3&rft.pages=%3Cspan+class%3D%22nowrap%22%3E380-%3C%2Fspan%3E388&rft.date=1972&rft_id=info%3Adoi%2F10.1109%2FTSMC.1972.4309133&rft.aulast=Nakano&rft.aufirst=Kaoru&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-turing1948-35"><span class="mw-cite-backlink"><b><a href="#cite_ref-turing1948_35-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFTuring1948" class="citation journal cs1">Turing, Alan (1948). "Intelligent Machinery". <i>Unpublished (Later Published in Ince DC, Editor, Collected Works of AM Turing—Mechanical Intelligence, Elsevier Science Publishers, 1992)</i>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Unpublished+%28Later+Published+in+Ince+DC%2C+Editor%2C+Collected+Works+of+AM+Turing%E2%80%94Mechanical+Intelligence%2C+Elsevier+Science+Publishers%2C+1992%29&rft.atitle=Intelligent+Machinery&rft.date=1948&rft.aulast=Turing&rft.aufirst=Alan&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-36"><span class="mw-cite-backlink"><b><a href="#cite_ref-36">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFRosenblatt1958" class="citation journal cs1">Rosenblatt, F. (1958). <a rel="nofollow" class="external text" href="https://doi.apa.org/doi/10.1037/h0042519">"The perceptron: A probabilistic model for information storage and organization in the brain"</a>. 
<i>Psychological Review</i>. <b>65</b> (6): <span class="nowrap">386–</span>408. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1037%2Fh0042519">10.1037/h0042519</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1939-1471">1939-1471</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/13602029">13602029</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Psychological+Review&rft.atitle=The+perceptron%3A+A+probabilistic+model+for+information+storage+and+organization+in+the+brain.&rft.volume=65&rft.issue=6&rft.pages=%3Cspan+class%3D%22nowrap%22%3E386-%3C%2Fspan%3E408&rft.date=1958&rft.issn=1939-1471&rft_id=info%3Apmid%2F13602029&rft_id=info%3Adoi%2F10.1037%2Fh0042519&rft.aulast=Rosenblatt&rft.aufirst=F.&rft_id=https%3A%2F%2Fdoi.apa.org%2Fdoi%2F10.1037%2Fh0042519&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-rosenblatt1962-37"><span class="mw-cite-backlink">^ <a href="#cite_ref-rosenblatt1962_37-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-rosenblatt1962_37-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFRosenblatt1962" class="citation book cs1"><a href="/wiki/Frank_Rosenblatt" title="Frank Rosenblatt">Rosenblatt, Frank</a> (1962). <i>Principles of Neurodynamics</i>. Spartan, New York.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Principles+of+Neurodynamics&rft.pub=Spartan%2C+New+York&rft.date=1962&rft.aulast=Rosenblatt&rft.aufirst=Frank&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-joseph1960-38"><span class="mw-cite-backlink"><b><a href="#cite_ref-joseph1960_38-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFJoseph1960" class="citation book cs1">Joseph, R. D. (1960). <i>Contributions to Perceptron Theory, Cornell Aeronautical Laboratory Report No. VG-11 96--G-7, Buffalo</i>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Contributions+to+Perceptron+Theory%2C+Cornell+Aeronautical+Laboratory+Report+No.+VG-11+96--G-7%2C+Buffalo&rft.date=1960&rft.aulast=Joseph&rft.aufirst=R.+D.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-ivak1965-39"><span class="mw-cite-backlink"><b><a href="#cite_ref-ivak1965_39-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFIvakhnenkoLapa1967" class="citation book cs1">Ivakhnenko, A. G.; Lapa, V. G. (1967). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=rGFgAAAAMAAJ"><i>Cybernetics and Forecasting Techniques</i></a>. American Elsevier Publishing Co. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-444-00020-0" title="Special:BookSources/978-0-444-00020-0"><bdi>978-0-444-00020-0</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Cybernetics+and+Forecasting+Techniques&rft.pub=American+Elsevier+Publishing+Co.&rft.date=1967&rft.isbn=978-0-444-00020-0&rft.aulast=Ivakhnenko&rft.aufirst=A.+G.&rft.au=Lapa%2C+V.+G.&rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3DrGFgAAAAMAAJ&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-40"><span class="mw-cite-backlink"><b><a href="#cite_ref-40">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFIvakhnenko1970" class="citation journal cs1">Ivakhnenko, A.G. (March 1970). <a rel="nofollow" class="external text" href="https://linkinghub.elsevier.com/retrieve/pii/0005109870900920">"Heuristic self-organization in problems of engineering cybernetics"</a>. <i>Automatica</i>. <b>6</b> (2): <span class="nowrap">207–</span>219. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2F0005-1098%2870%2990092-0">10.1016/0005-1098(70)90092-0</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Automatica&rft.atitle=Heuristic+self-organization+in+problems+of+engineering+cybernetics&rft.volume=6&rft.issue=2&rft.pages=%3Cspan+class%3D%22nowrap%22%3E207-%3C%2Fspan%3E219&rft.date=1970-03&rft_id=info%3Adoi%2F10.1016%2F0005-1098%2870%2990092-0&rft.aulast=Ivakhnenko&rft.aufirst=A.G.&rft_id=https%3A%2F%2Flinkinghub.elsevier.com%2Fretrieve%2Fpii%2F0005109870900920&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-ivak1971-41"><span class="mw-cite-backlink">^ <a href="#cite_ref-ivak1971_41-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-ivak1971_41-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFIvakhnenko1971" class="citation journal cs1 cs1-prop-long-vol">Ivakhnenko, Alexey (1971). <a rel="nofollow" class="external text" href="http://gmdh.net/articles/history/polynomial.pdf">"Polynomial theory of complex systems"</a> <span class="cs1-format">(PDF)</span>. <i>IEEE Transactions on Systems, Man, and Cybernetics</i>. SMC-1 (4): <span class="nowrap">364–</span>378. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FTSMC.1971.4308320">10.1109/TSMC.1971.4308320</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170829230621/http://www.gmdh.net/articles/history/polynomial.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2017-08-29<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2019-11-05</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Transactions+on+Systems%2C+Man%2C+and+Cybernetics&rft.atitle=Polynomial+theory+of+complex+systems&rft.volume=SMC-1&rft.issue=4&rft.pages=%3Cspan+class%3D%22nowrap%22%3E364-%3C%2Fspan%3E378&rft.date=1971&rft_id=info%3Adoi%2F10.1109%2FTSMC.1971.4308320&rft.aulast=Ivakhnenko&rft.aufirst=Alexey&rft_id=http%3A%2F%2Fgmdh.net%2Farticles%2Fhistory%2Fpolynomial.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-robbins1951-42"><span class="mw-cite-backlink"><b><a href="#cite_ref-robbins1951_42-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFRobbinsMonro1951" class="citation journal cs1"><a href="/wiki/Herbert_Robbins" title="Herbert Robbins">Robbins, H.</a>; Monro, S. (1951). <a rel="nofollow" class="external text" href="https://doi.org/10.1214%2Faoms%2F1177729586">"A Stochastic Approximation Method"</a>. <i>The Annals of Mathematical Statistics</i>. <b>22</b> (3): 400. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1214%2Faoms%2F1177729586">10.1214/aoms/1177729586</a></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+Annals+of+Mathematical+Statistics&rft.atitle=A+Stochastic+Approximation+Method&rft.volume=22&rft.issue=3&rft.pages=400&rft.date=1951&rft_id=info%3Adoi%2F10.1214%2Faoms%2F1177729586&rft.aulast=Robbins&rft.aufirst=H.&rft.au=Monro%2C+S.&rft_id=https%3A%2F%2Fdoi.org%2F10.1214%252Faoms%252F1177729586&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Amari1967-43"><span class="mw-cite-backlink"><b><a href="#cite_ref-Amari1967_43-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFAmari1967" class="citation journal cs1"><a href="/wiki/Shun%27ichi_Amari" title="Shun'ichi Amari">Amari, Shun'ichi</a> (1967). "A theory of adaptive pattern classifier". <i>IEEE Transactions</i>. <b>EC</b> (16): <span class="nowrap">279–</span>307.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Transactions&rft.atitle=A+theory+of+adaptive+pattern+classifier&rft.volume=EC&rft.issue=16&rft.pages=%3Cspan+class%3D%22nowrap%22%3E279-%3C%2Fspan%3E307&rft.date=1967&rft.aulast=Amari&rft.aufirst=Shun%27ichi&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-44"><span class="mw-cite-backlink"><b><a href="#cite_ref-44">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFRamachandranBarretQuoc2017" class="citation arxiv cs1">Ramachandran, Prajit; Barret, Zoph; Quoc, V. Le (October 16, 2017). "Searching for Activation Functions". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1710.05941">1710.05941</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.NE">cs.NE</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Searching+for+Activation+Functions&rft.date=2017-10-16&rft_id=info%3Aarxiv%2F1710.05941&rft.aulast=Ramachandran&rft.aufirst=Prajit&rft.au=Barret%2C+Zoph&rft.au=Quoc%2C+V.+Le&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-FUKU1979-45"><span class="mw-cite-backlink"><b><a href="#cite_ref-FUKU1979_45-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFFukushima1979" class="citation journal cs1 cs1-prop-long-vol">Fukushima, K. (1979). "Neural network model for a mechanism of pattern recognition unaffected by shift in position—Neocognitron". <i>Trans. IECE (In Japanese)</i>. J62-A (10): <span class="nowrap">658–</span>665. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fbf00344251">10.1007/bf00344251</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/7370364">7370364</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:206775608">206775608</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Trans.+IECE+%28In+Japanese%29&rft.atitle=Neural+network+model+for+a+mechanism+of+pattern+recognition+unaffected+by+shift+in+position%E2%80%94Neocognitron&rft.volume=J62-A&rft.issue=10&rft.pages=%3Cspan+class%3D%22nowrap%22%3E658-%3C%2Fspan%3E665&rft.date=1979&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A206775608%23id-name%3DS2CID&rft_id=info%3Apmid%2F7370364&rft_id=info%3Adoi%2F10.1007%2Fbf00344251&rft.aulast=Fukushima&rft.aufirst=K.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-FUKU1980-46"><span class="mw-cite-backlink"><b><a href="#cite_ref-FUKU1980_46-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFFukushima1980" class="citation journal cs1">Fukushima, K. (1980). "Neocognitron: A self-organizing neural network model for a mechanism of pattern recognition unaffected by shift in position". <i>Biol. Cybern</i>. <b>36</b> (4): <span class="nowrap">193–</span>202. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fbf00344251">10.1007/bf00344251</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/7370364">7370364</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:206775608">206775608</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Biol.+Cybern.&rft.atitle=Neocognitron%3A+A+self-organizing+neural+network+model+for+a+mechanism+of+pattern+recognition+unaffected+by+shift+in+position&rft.volume=36&rft.issue=4&rft.pages=%3Cspan+class%3D%22nowrap%22%3E193-%3C%2Fspan%3E202&rft.date=1980&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A206775608%23id-name%3DS2CID&rft_id=info%3Apmid%2F7370364&rft_id=info%3Adoi%2F10.1007%2Fbf00344251&rft.aulast=Fukushima&rft.aufirst=K.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-leibniz1676-47"><span class="mw-cite-backlink"><b><a href="#cite_ref-leibniz1676_47-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFLeibniz1920" class="citation book cs1">Leibniz, Gottfried Wilhelm Freiherr von (1920). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=bOIGAAAAYAAJ&q=leibniz+altered+manuscripts&pg=PA90"><i>The Early Mathematical Manuscripts of Leibniz: Translated from the Latin Texts Published by Carl Immanuel Gerhardt with Critical and Historical Notes (Leibniz published the chain rule in a 1676 memoir)</i></a>. Open court publishing Company. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9780598818461" title="Special:BookSources/9780598818461"><bdi>9780598818461</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=The+Early+Mathematical+Manuscripts+of+Leibniz%3A+Translated+from+the+Latin+Texts+Published+by+Carl+Immanuel+Gerhardt+with+Critical+and+Historical+Notes+%28Leibniz+published+the+chain+rule+in+a+1676+memoir%29&rft.pub=Open+court+publishing+Company&rft.date=1920&rft.isbn=9780598818461&rft.aulast=Leibniz&rft.aufirst=Gottfried+Wilhelm+Freiherr+von&rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3DbOIGAAAAYAAJ%26q%3Dleibniz%2Baltered%2Bmanuscripts%26pg%3DPA90&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-kelley1960-48"><span class="mw-cite-backlink"><b><a href="#cite_ref-kelley1960_48-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFKelley1960" class="citation journal cs1"><a href="/wiki/Henry_J._Kelley" title="Henry J. Kelley">Kelley, Henry J.</a> (1960). "Gradient theory of optimal flight paths". <i>ARS Journal</i>. <b>30</b> (10): <span class="nowrap">947–</span>954. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.2514%2F8.5282">10.2514/8.5282</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=ARS+Journal&rft.atitle=Gradient+theory+of+optimal+flight+paths&rft.volume=30&rft.issue=10&rft.pages=%3Cspan+class%3D%22nowrap%22%3E947-%3C%2Fspan%3E954&rft.date=1960&rft_id=info%3Adoi%2F10.2514%2F8.5282&rft.aulast=Kelley&rft.aufirst=Henry+J.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-lin19703-49"><span class="mw-cite-backlink"><b><a href="#cite_ref-lin19703_49-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFLinnainmaa1970" class="citation thesis cs1 cs1-prop-foreign-lang-source"><a href="/wiki/Seppo_Linnainmaa" title="Seppo Linnainmaa">Linnainmaa, Seppo</a> (1970). <i>The representation of the cumulative rounding error of an algorithm as a Taylor expansion of the local rounding errors</i> (Masters) (in Finnish). University of Helsinki. p. 6–7.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Adissertation&rft.title=The+representation+of+the+cumulative+rounding+error+of+an+algorithm+as+a+Taylor+expansion+of+the+local+rounding+errors&rft.inst=University+of+Helsinki&rft.date=1970&rft.aulast=Linnainmaa&rft.aufirst=Seppo&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-lin19763-50"><span class="mw-cite-backlink"><b><a href="#cite_ref-lin19763_50-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFLinnainmaa1976" class="citation journal cs1"><a href="/wiki/Seppo_Linnainmaa" title="Seppo Linnainmaa">Linnainmaa, Seppo</a> (1976). "Taylor expansion of the accumulated rounding error". <i>BIT Numerical Mathematics</i>. <b>16</b> (2): <span class="nowrap">146–</span>160. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fbf01931367">10.1007/bf01931367</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:122357351">122357351</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=BIT+Numerical+Mathematics&rft.atitle=Taylor+expansion+of+the+accumulated+rounding+error&rft.volume=16&rft.issue=2&rft.pages=%3Cspan+class%3D%22nowrap%22%3E146-%3C%2Fspan%3E160&rft.date=1976&rft_id=info%3Adoi%2F10.1007%2Fbf01931367&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A122357351%23id-name%3DS2CID&rft.aulast=Linnainmaa&rft.aufirst=Seppo&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-ostrowski1971-51"><span class="mw-cite-backlink"><b><a href="#cite_ref-ostrowski1971_51-0">^</a></b></span> <span class="reference-text">Ostrovski, G.M., Volin,Y.M., and Boris, W.W. (1971). On the computation of derivatives. Wiss. Z. Tech. 
Retrieved <span class="nowrap">2011-07-20</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Neural+Computation&rft.atitle=A+Fast+Learning+Algorithm+for+Deep+Belief+Nets&rft.volume=18&rft.issue=7&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1527-%3C%2Fspan%3E1554&rft.date=2006&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A2309950%23id-name%3DS2CID&rft_id=info%3Apmid%2F16764513&rft_id=info%3Adoi%2F10.1162%2Fneco.2006.18.7.1527&rft.aulast=Hinton&rft.aufirst=G.+E.&rft.au=Osindero%2C+S.&rft.au=Teh%2C+Y.+W.&rft_id=http%3A%2F%2Fwww.cs.toronto.edu%2F~hinton%2Fabsps%2Ffastnc.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-HINTON2007-99"><span class="mw-cite-backlink"><b><a href="#cite_ref-HINTON2007_99-0">^</a></b></span> <span class="reference-text">G. E. Hinton., "<a rel="nofollow" class="external text" href="http://www.csri.utoronto.ca/~hinton/absps/ticsdraft.pdf">Learning multiple layers of representation</a>". <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180522112408/http://www.csri.utoronto.ca/~hinton/absps/ticsdraft.pdf">Archived</a> 2018-05-22 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>. <i>Trends in Cognitive Sciences</i>, 11, pp. 428–434, 2007.</span> </li> <li id="cite_note-100"><span class="mw-cite-backlink"><b><a href="#cite_ref-100">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHinton2007" class="citation journal cs1">Hinton, Geoffrey E. (October 2007). <a rel="nofollow" class="external text" href="https://linkinghub.elsevier.com/retrieve/pii/S1364661307002173">"Learning multiple layers of representation"</a>. <i>Trends in Cognitive Sciences</i>. <b>11</b> (10): <span class="nowrap">428–</span>434. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.tics.2007.09.004">10.1016/j.tics.2007.09.004</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/17921042">17921042</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Trends+in+Cognitive+Sciences&rft.atitle=Learning+multiple+layers+of+representation&rft.volume=11&rft.issue=10&rft.pages=%3Cspan+class%3D%22nowrap%22%3E428-%3C%2Fspan%3E434&rft.date=2007-10&rft_id=info%3Adoi%2F10.1016%2Fj.tics.2007.09.004&rft_id=info%3Apmid%2F17921042&rft.aulast=Hinton&rft.aufirst=Geoffrey+E.&rft_id=https%3A%2F%2Flinkinghub.elsevier.com%2Fretrieve%2Fpii%2FS1364661307002173&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-101"><span class="mw-cite-backlink"><b><a href="#cite_ref-101">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHintonOsinderoTeh2006" class="citation journal cs1">Hinton, Geoffrey E.; Osindero, Simon; Teh, Yee-Whye (July 2006). <a rel="nofollow" class="external text" href="https://direct.mit.edu/neco/article/18/7/1527-1554/7065">"A Fast Learning Algorithm for Deep Belief Nets"</a>. <i>Neural Computation</i>. <b>18</b> (7): <span class="nowrap">1527–</span>1554. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1162%2Fneco.2006.18.7.1527">10.1162/neco.2006.18.7.1527</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0899-7667">0899-7667</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/16764513">16764513</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Neural+Computation&rft.atitle=A+Fast+Learning+Algorithm+for+Deep+Belief+Nets&rft.volume=18&rft.issue=7&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1527-%3C%2Fspan%3E1554&rft.date=2006-07&rft.issn=0899-7667&rft_id=info%3Apmid%2F16764513&rft_id=info%3Adoi%2F10.1162%2Fneco.2006.18.7.1527&rft.aulast=Hinton&rft.aufirst=Geoffrey+E.&rft.au=Osindero%2C+Simon&rft.au=Teh%2C+Yee-Whye&rft_id=https%3A%2F%2Fdirect.mit.edu%2Fneco%2Farticle%2F18%2F7%2F1527-1554%2F7065&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-102"><span class="mw-cite-backlink"><b><a href="#cite_ref-102">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHinton2009" class="citation journal cs1">Hinton, Geoffrey E. (2009-05-31). <a rel="nofollow" class="external text" href="https://doi.org/10.4249%2Fscholarpedia.5947">"Deep belief networks"</a>. <i>Scholarpedia</i>. <b>4</b> (5): 5947. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2009SchpJ...4.5947H">2009SchpJ...4.5947H</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.4249%2Fscholarpedia.5947">10.4249/scholarpedia.5947</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1941-6016">1941-6016</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Scholarpedia&rft.atitle=Deep+belief+networks&rft.volume=4&rft.issue=5&rft.pages=5947&rft.date=2009-05-31&rft.issn=1941-6016&rft_id=info%3Adoi%2F10.4249%2Fscholarpedia.5947&rft_id=info%3Abibcode%2F2009SchpJ...4.5947H&rft.aulast=Hinton&rft.aufirst=Geoffrey+E.&rft_id=https%3A%2F%2Fdoi.org%2F10.4249%252Fscholarpedia.5947&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-lecun2016slides-103"><span class="mw-cite-backlink"><b><a href="#cite_ref-lecun2016slides_103-0">^</a></b></span> <span class="reference-text"><a href="/wiki/Yann_LeCun" title="Yann LeCun">Yann LeCun</a> (2016). 
Slides on Deep Learning <a rel="nofollow" class="external text" href="https://indico.cern.ch/event/510372/">Online</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160423021403/https://indico.cern.ch/event/510372/">Archived</a> 2016-04-23 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a></span> </li> <li id="cite_note-HintonDengYu2012-104"><span class="mw-cite-backlink">^ <a href="#cite_ref-HintonDengYu2012_104-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-HintonDengYu2012_104-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-HintonDengYu2012_104-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHintonDengYuDahl2012" class="citation journal cs1">Hinton, G.; Deng, L.; Yu, D.; Dahl, G.; Mohamed, A.; Jaitly, N.; Senior, A.; Vanhoucke, V.; Nguyen, P.; <a href="/wiki/Tara_Sainath" title="Tara Sainath">Sainath, T.</a>; Kingsbury, B. (2012). "Deep Neural Networks for Acoustic Modeling in Speech Recognition: The Shared Views of Four Research Groups". <i>IEEE Signal Processing Magazine</i>. <b>29</b> (6): <span class="nowrap">82–</span>97. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2012ISPM...29...82H">2012ISPM...29...82H</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2Fmsp.2012.2205597">10.1109/msp.2012.2205597</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:206485943">206485943</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Signal+Processing+Magazine&rft.atitle=Deep+Neural+Networks+for+Acoustic+Modeling+in+Speech+Recognition%3A+The+Shared+Views+of+Four+Research+Groups&rft.volume=29&rft.issue=6&rft.pages=%3Cspan+class%3D%22nowrap%22%3E82-%3C%2Fspan%3E97&rft.date=2012&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A206485943%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2Fmsp.2012.2205597&rft_id=info%3Abibcode%2F2012ISPM...29...82H&rft.aulast=Hinton&rft.aufirst=G.&rft.au=Deng%2C+L.&rft.au=Yu%2C+D.&rft.au=Dahl%2C+G.&rft.au=Mohamed%2C+A.&rft.au=Jaitly%2C+N.&rft.au=Senior%2C+A.&rft.au=Vanhoucke%2C+V.&rft.au=Nguyen%2C+P.&rft.au=Sainath%2C+T.&rft.au=Kingsbury%2C+B.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-ReferenceICASSP2013-105"><span class="mw-cite-backlink">^ <a href="#cite_ref-ReferenceICASSP2013_105-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-ReferenceICASSP2013_105-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-ReferenceICASSP2013_105-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFDengHintonKingsbury2013" class="citation web cs1">Deng, L.; Hinton, G.; Kingsbury, B. (May 2013). 
<a rel="nofollow" class="external text" href="https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/ICASSP-2013-DengHintonKingsbury-revised.pdf">"New types of deep neural network learning for speech recognition and related applications: An overview (ICASSP)"</a> <span class="cs1-format">(PDF)</span>. Microsoft. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170926190920/https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/ICASSP-2013-DengHintonKingsbury-revised.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2017-09-26<span class="reference-accessdate">. Retrieved <span class="nowrap">27 December</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=New+types+of+deep+neural+network+learning+for+speech+recognition+and+related+applications%3A+An+overview+%28ICASSP%29&rft.pub=Microsoft&rft.date=2013-05&rft.aulast=Deng&rft.aufirst=L.&rft.au=Hinton%2C+G.&rft.au=Kingsbury%2C+B.&rft_id=https%3A%2F%2Fwww.microsoft.com%2Fen-us%2Fresearch%2Fwp-content%2Fuploads%2F2016%2F02%2FICASSP-2013-DengHintonKingsbury-revised.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-ReferenceA-106"><span class="mw-cite-backlink">^ <a href="#cite_ref-ReferenceA_106-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-ReferenceA_106-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-ReferenceA_106-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFYuDeng2014" class="citation book cs1">Yu, D.; Deng, L. (2014). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=rUBTBQAAQBAJ"><i>Automatic Speech Recognition: A Deep Learning Approach (Publisher: Springer)</i></a>. Springer. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4471-5779-3" title="Special:BookSources/978-1-4471-5779-3"><bdi>978-1-4471-5779-3</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Automatic+Speech+Recognition%3A+A+Deep+Learning+Approach+%28Publisher%3A+Springer%29&rft.pub=Springer&rft.date=2014&rft.isbn=978-1-4471-5779-3&rft.aulast=Yu&rft.aufirst=D.&rft.au=Deng%2C+L.&rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3DrUBTBQAAQBAJ&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-107"><span class="mw-cite-backlink"><b><a href="#cite_ref-107">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.microsoft.com/en-us/research/blog/deng-receives-prestigious-ieee-technical-achievement-award/">"Deng receives prestigious IEEE Technical Achievement Award - Microsoft Research"</a>. <i>Microsoft Research</i>. 3 December 2015. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180316084821/https://www.microsoft.com/en-us/research/blog/deng-receives-prestigious-ieee-technical-achievement-award/">Archived</a> from the original on 16 March 2018<span class="reference-accessdate">. 
Retrieved <span class="nowrap">16 March</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Microsoft+Research&rft.atitle=Deng+receives+prestigious+IEEE+Technical+Achievement+Award+-+Microsoft+Research&rft.date=2015-12-03&rft_id=https%3A%2F%2Fwww.microsoft.com%2Fen-us%2Fresearch%2Fblog%2Fdeng-receives-prestigious-ieee-technical-achievement-award%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-interspeech2014Keynote-108"><span class="mw-cite-backlink">^ <a href="#cite_ref-interspeech2014Keynote_108-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-interspeech2014Keynote_108-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFLi2014" class="citation web cs1">Li, Deng (September 2014). <a rel="nofollow" class="external text" href="https://www.superlectures.com/interspeech2014/downloadFile?id=6&type=slides&filename=achievements-and-challenges-of-deep-learning-from-speech-analysis-and-recognition-to-language-and-multimodal-processing">"Keynote talk: 'Achievements and Challenges of Deep Learning - From Speech Analysis and Recognition To Language and Multimodal Processing'<span class="cs1-kern-right"></span>"</a>. <i>Interspeech</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170926190732/https://www.superlectures.com/interspeech2014/downloadFile?id=6&type=slides&filename=achievements-and-challenges-of-deep-learning-from-speech-analysis-and-recognition-to-language-and-multimodal-processing">Archived</a> from the original on 2017-09-26<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-06-12</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Interspeech&rft.atitle=Keynote+talk%3A+%27Achievements+and+Challenges+of+Deep+Learning+-+From+Speech+Analysis+and+Recognition+To+Language+and+Multimodal+Processing%27&rft.date=2014-09&rft.aulast=Li&rft.aufirst=Deng&rft_id=https%3A%2F%2Fwww.superlectures.com%2Finterspeech2014%2FdownloadFile%3Fid%3D6%26type%3Dslides%26filename%3Dachievements-and-challenges-of-deep-learning-from-speech-analysis-and-recognition-to-language-and-multimodal-processing&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Roles2010-109"><span class="mw-cite-backlink"><b><a href="#cite_ref-Roles2010_109-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFYuDeng2010" class="citation journal cs1">Yu, D.; Deng, L. (2010). <a rel="nofollow" class="external text" href="https://www.microsoft.com/en-us/research/publication/roles-of-pre-training-and-fine-tuning-in-context-dependent-dbn-hmms-for-real-world-speech-recognition/">"Roles of Pre-Training and Fine-Tuning in Context-Dependent DBN-HMMs for Real-World Speech Recognition"</a>. <i>NIPS Workshop on Deep Learning and Unsupervised Feature Learning</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171012095148/https://www.microsoft.com/en-us/research/publication/roles-of-pre-training-and-fine-tuning-in-context-dependent-dbn-hmms-for-real-world-speech-recognition/">Archived</a> from the original on 2017-10-12<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2017-06-14</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=NIPS+Workshop+on+Deep+Learning+and+Unsupervised+Feature+Learning&rft.atitle=Roles+of+Pre-Training+and+Fine-Tuning+in+Context-Dependent+DBN-HMMs+for+Real-World+Speech+Recognition&rft.date=2010&rft.aulast=Yu&rft.aufirst=D.&rft.au=Deng%2C+L.&rft_id=https%3A%2F%2Fwww.microsoft.com%2Fen-us%2Fresearch%2Fpublication%2Froles-of-pre-training-and-fine-tuning-in-context-dependent-dbn-hmms-for-real-world-speech-recognition%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-110"><span class="mw-cite-backlink"><b><a href="#cite_ref-110">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSeideLiYu2011" class="citation book cs1">Seide, F.; Li, G.; Yu, D. (2011). <a rel="nofollow" class="external text" href="https://www.microsoft.com/en-us/research/publication/conversational-speech-transcription-using-context-dependent-deep-neural-networks">"Conversational speech transcription using context-dependent deep neural networks"</a>. <i>Interspeech 2011</i>. pp. <span class="nowrap">437–</span>440. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.21437%2FInterspeech.2011-169">10.21437/Interspeech.2011-169</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:398770">398770</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171012095522/https://www.microsoft.com/en-us/research/publication/conversational-speech-transcription-using-context-dependent-deep-neural-networks/">Archived</a> from the original on 2017-10-12<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-06-14</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Conversational+speech+transcription+using+context-dependent+deep+neural+networks&rft.btitle=Interspeech+2011&rft.pages=%3Cspan+class%3D%22nowrap%22%3E437-%3C%2Fspan%3E440&rft.date=2011&rft_id=info%3Adoi%2F10.21437%2FInterspeech.2011-169&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A398770%23id-name%3DS2CID&rft.aulast=Seide&rft.aufirst=F.&rft.au=Li%2C+G.&rft.au=Yu%2C+D.&rft_id=https%3A%2F%2Fwww.microsoft.com%2Fen-us%2Fresearch%2Fpublication%2Fconversational-speech-transcription-using-context-dependent-deep-neural-networks&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-111"><span class="mw-cite-backlink"><b><a href="#cite_ref-111">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFDengLiHuangYao2013" class="citation journal cs1">Deng, Li; Li, Jinyu; Huang, Jui-Ting; Yao, Kaisheng; Yu, Dong; Seide, Frank; Seltzer, Mike; Zweig, Geoff; He, Xiaodong (1 May 2013). <a rel="nofollow" class="external text" href="https://www.microsoft.com/en-us/research/publication/recent-advances-in-deep-learning-for-speech-research-at-microsoft/">"Recent Advances in Deep Learning for Speech Research at Microsoft"</a>. <i>Microsoft Research</i>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20171012044053/https://www.microsoft.com/en-us/research/publication/recent-advances-in-deep-learning-for-speech-research-at-microsoft/">Archived</a> from the original on 12 October 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">14 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Microsoft+Research&rft.atitle=Recent+Advances+in+Deep+Learning+for+Speech+Research+at+Microsoft&rft.date=2013-05-01&rft.aulast=Deng&rft.aufirst=Li&rft.au=Li%2C+Jinyu&rft.au=Huang%2C+Jui-Ting&rft.au=Yao%2C+Kaisheng&rft.au=Yu%2C+Dong&rft.au=Seide%2C+Frank&rft.au=Seltzer%2C+Mike&rft.au=Zweig%2C+Geoff&rft.au=He%2C+Xiaodong&rft_id=https%3A%2F%2Fwww.microsoft.com%2Fen-us%2Fresearch%2Fpublication%2Frecent-advances-in-deep-learning-for-speech-research-at-microsoft%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-jung2004-112"><span class="mw-cite-backlink">^ <a href="#cite_ref-jung2004_112-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-jung2004_112-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFOhJung2004" class="citation journal cs1">Oh, K.-S.; Jung, K. (2004). "GPU implementation of neural networks". <i>Pattern Recognition</i>. <b>37</b> (6): <span class="nowrap">1311–</span>1314. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2004PatRe..37.1311O">2004PatRe..37.1311O</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.patcog.2004.01.013">10.1016/j.patcog.2004.01.013</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Pattern+Recognition&rft.atitle=GPU+implementation+of+neural+networks&rft.volume=37&rft.issue=6&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1311-%3C%2Fspan%3E1314&rft.date=2004&rft_id=info%3Adoi%2F10.1016%2Fj.patcog.2004.01.013&rft_id=info%3Abibcode%2F2004PatRe..37.1311O&rft.aulast=Oh&rft.aufirst=K.-S.&rft.au=Jung%2C+K.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-chellapilla2006-113"><span class="mw-cite-backlink">^ <a href="#cite_ref-chellapilla2006_113-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-chellapilla2006_113-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFChellapillaPuriSimard2006" class="citation cs2">Chellapilla, Kumar; Puri, Sidd; Simard, Patrice (2006), <a rel="nofollow" class="external text" href="https://hal.inria.fr/inria-00112631/document"><i>High performance convolutional neural networks for document processing</i></a>, <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200518193413/https://hal.inria.fr/inria-00112631/document">archived</a> from the original on 2020-05-18<span class="reference-accessdate">, retrieved <span class="nowrap">2021-02-14</span></span></cite><span 
title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=High+performance+convolutional+neural+networks+for+document+processing&rft.date=2006&rft.aulast=Chellapilla&rft.aufirst=Kumar&rft.au=Puri%2C+Sidd&rft.au=Simard%2C+Patrice&rft_id=https%3A%2F%2Fhal.inria.fr%2Finria-00112631%2Fdocument&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-sze2017-114"><span class="mw-cite-backlink"><b><a href="#cite_ref-sze2017_114-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSzeChenYangEmer2017" class="citation arxiv cs1"><a href="/wiki/Vivienne_Sze" title="Vivienne Sze">Sze, Vivienne</a>; Chen, Yu-Hsin; Yang, Tien-Ju; Emer, Joel (2017). "Efficient Processing of Deep Neural Networks: A Tutorial and Survey". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1703.09039">1703.09039</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Efficient+Processing+of+Deep+Neural+Networks%3A+A+Tutorial+and+Survey&rft.date=2017&rft_id=info%3Aarxiv%2F1703.09039&rft.aulast=Sze&rft.aufirst=Vivienne&rft.au=Chen%2C+Yu-Hsin&rft.au=Yang%2C+Tien-Ju&rft.au=Emer%2C+Joel&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-115"><span class="mw-cite-backlink"><b><a href="#cite_ref-115">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFRainaMadhavanNg2009" class="citation book cs1">Raina, Rajat; Madhavan, Anand; Ng, Andrew Y. (2009-06-14). <a rel="nofollow" class="external text" href="https://doi.org/10.1145/1553374.1553486">"Large-scale deep unsupervised learning using graphics processors"</a>. <i>Proceedings of the 26th Annual International Conference on Machine Learning</i>. ICML '09. New York, NY, USA: Association for Computing Machinery. pp. <span class="nowrap">873–</span>880. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F1553374.1553486">10.1145/1553374.1553486</a>. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-60558-516-1" title="Special:BookSources/978-1-60558-516-1"><bdi>978-1-60558-516-1</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Large-scale+deep+unsupervised+learning+using+graphics+processors&rft.btitle=Proceedings+of+the+26th+Annual+International+Conference+on+Machine+Learning&rft.place=New+York%2C+NY%2C+USA&rft.series=ICML+%2709&rft.pages=%3Cspan+class%3D%22nowrap%22%3E873-%3C%2Fspan%3E880&rft.pub=Association+for+Computing+Machinery&rft.date=2009-06-14&rft_id=info%3Adoi%2F10.1145%2F1553374.1553486&rft.isbn=978-1-60558-516-1&rft.aulast=Raina&rft.aufirst=Rajat&rft.au=Madhavan%2C+Anand&rft.au=Ng%2C+Andrew+Y.&rft_id=https%3A%2F%2Fdoi.org%2F10.1145%2F1553374.1553486&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-:3-116"><span class="mw-cite-backlink"><b><a href="#cite_ref-:3_116-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFCireşanMeierGambardellaSchmidhuber2010" class="citation journal cs1">Cireşan, Dan Claudiu; Meier, Ueli; Gambardella, Luca Maria; Schmidhuber, Jürgen (21 September 2010). "Deep, Big, Simple Neural Nets for Handwritten Digit Recognition". <i>Neural Computation</i>. <b>22</b> (12): <span class="nowrap">3207–</span>3220. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1003.0358">1003.0358</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1162%2Fneco_a_00052">10.1162/neco_a_00052</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0899-7667">0899-7667</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/20858131">20858131</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:1918673">1918673</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Neural+Computation&rft.atitle=Deep%2C+Big%2C+Simple+Neural+Nets+for+Handwritten+Digit+Recognition&rft.volume=22&rft.issue=12&rft.pages=%3Cspan+class%3D%22nowrap%22%3E3207-%3C%2Fspan%3E3220&rft.date=2010-09-21&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A1918673%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1162%2Fneco_a_00052&rft_id=info%3Aarxiv%2F1003.0358&rft.issn=0899-7667&rft_id=info%3Apmid%2F20858131&rft.aulast=Cire%C5%9Fan&rft.aufirst=Dan+Claudiu&rft.au=Meier%2C+Ueli&rft.au=Gambardella%2C+Luca+Maria&rft.au=Schmidhuber%2C+J%C3%BCrgen&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-:6-117"><span class="mw-cite-backlink"><b><a href="#cite_ref-:6_117-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFCiresanMeierMasciGambardella2011" class="citation journal cs1">Ciresan, D. C.; Meier, U.; Masci, J.; Gambardella, L.M.; Schmidhuber, J. (2011). <a rel="nofollow" class="external text" href="http://ijcai.org/papers11/Papers/IJCAI11-210.pdf">"Flexible, High Performance Convolutional Neural Networks for Image Classification"</a> <span class="cs1-format">(PDF)</span>. <i>International Joint Conference on Artificial Intelligence</i>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.5591%2F978-1-57735-516-8%2Fijcai11-210">10.5591/978-1-57735-516-8/ijcai11-210</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20140929094040/http://ijcai.org/papers11/Papers/IJCAI11-210.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2014-09-29<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-06-13</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=International+Joint+Conference+on+Artificial+Intelligence&rft.atitle=Flexible%2C+High+Performance+Convolutional+Neural+Networks+for+Image+Classification&rft.date=2011&rft_id=info%3Adoi%2F10.5591%2F978-1-57735-516-8%2Fijcai11-210&rft.aulast=Ciresan&rft.aufirst=D.+C.&rft.au=Meier%2C+U.&rft.au=Masci%2C+J.&rft.au=Gambardella%2C+L.M.&rft.au=Schmidhuber%2C+J.&rft_id=http%3A%2F%2Fijcai.org%2Fpapers11%2FPapers%2FIJCAI11-210.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-:8-118"><span class="mw-cite-backlink"><b><a href="#cite_ref-:8_118-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFCiresanGiustiGambardellaSchmidhuber2012" class="citation book cs1">Ciresan, Dan; Giusti, Alessandro; Gambardella, Luca M.; Schmidhuber, Jürgen (2012). Pereira, F.; Burges, C. J. C.; Bottou, L.; Weinberger, K. Q. (eds.). <a rel="nofollow" class="external text" href="http://papers.nips.cc/paper/4741-deep-neural-networks-segment-neuronal-membranes-in-electron-microscopy-images.pdf"><i>Advances in Neural Information Processing Systems 25</i></a> <span class="cs1-format">(PDF)</span>. 
Curran Associates, Inc. pp. <span class="nowrap">2843–</span>2851. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170809081713/http://papers.nips.cc/paper/4741-deep-neural-networks-segment-neuronal-membranes-in-electron-microscopy-images.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2017-08-09<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-06-13</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Advances+in+Neural+Information+Processing+Systems+25&rft.pages=%3Cspan+class%3D%22nowrap%22%3E2843-%3C%2Fspan%3E2851&rft.pub=Curran+Associates%2C+Inc.&rft.date=2012&rft.aulast=Ciresan&rft.aufirst=Dan&rft.au=Giusti%2C+Alessandro&rft.au=Gambardella%2C+Luca+M.&rft.au=Schmidhuber%2C+J%C3%BCrgen&rft_id=http%3A%2F%2Fpapers.nips.cc%2Fpaper%2F4741-deep-neural-networks-segment-neuronal-membranes-in-electron-microscopy-images.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-ciresan2013miccai-119"><span class="mw-cite-backlink"><b><a href="#cite_ref-ciresan2013miccai_119-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFCiresanGiustiGambardellaSchmidhuber2013" class="citation book cs1">Ciresan, D.; Giusti, A.; Gambardella, L.M.; Schmidhuber, J. (2013). "Mitosis Detection in Breast Cancer Histology Images with Deep Neural Networks". <i>Medical Image Computing and Computer-Assisted Intervention – MICCAI 2013</i>. Lecture Notes in Computer Science. Vol. 7908. pp. <span class="nowrap">411–</span>418. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-3-642-40763-5_51">10.1007/978-3-642-40763-5_51</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3-642-38708-1" title="Special:BookSources/978-3-642-38708-1"><bdi>978-3-642-38708-1</bdi></a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/24579167">24579167</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Mitosis+Detection+in+Breast+Cancer+Histology+Images+with+Deep+Neural+Networks&rft.btitle=Medical+Image+Computing+and+Computer-Assisted+Intervention+%E2%80%93+MICCAI+2013&rft.series=Lecture+Notes+in+Computer+Science&rft.pages=%3Cspan+class%3D%22nowrap%22%3E411-%3C%2Fspan%3E418&rft.date=2013&rft_id=info%3Apmid%2F24579167&rft_id=info%3Adoi%2F10.1007%2F978-3-642-40763-5_51&rft.isbn=978-3-642-38708-1&rft.aulast=Ciresan&rft.aufirst=D.&rft.au=Giusti%2C+A.&rft.au=Gambardella%2C+L.M.&rft.au=Schmidhuber%2C+J.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-ng2012-120"><span class="mw-cite-backlink"><b><a href="#cite_ref-ng2012_120-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFNgDean2012" class="citation arxiv cs1">Ng, Andrew; Dean, Jeff (2012). "Building High-level Features Using Large Scale Unsupervised Learning". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1112.6209">1112.6209</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.LG">cs.LG</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Building+High-level+Features+Using+Large+Scale+Unsupervised+Learning&rft.date=2012&rft_id=info%3Aarxiv%2F1112.6209&rft.aulast=Ng&rft.aufirst=Andrew&rft.au=Dean%2C+Jeff&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-VGG-121"><span class="mw-cite-backlink"><b><a href="#cite_ref-VGG_121-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSimonyanAndrew2014" class="citation arxiv cs1">Simonyan, Karen; Andrew, Zisserman (2014). "Very Deep Convolution Networks for Large Scale Image Recognition". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1409.1556">1409.1556</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Very+Deep+Convolution+Networks+for+Large+Scale+Image+Recognition&rft.date=2014&rft_id=info%3Aarxiv%2F1409.1556&rft.aulast=Simonyan&rft.aufirst=Karen&rft.au=Andrew%2C+Zisserman&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-szegedy-122"><span class="mw-cite-backlink"><b><a href="#cite_ref-szegedy_122-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSzegedy2015" class="citation journal cs1">Szegedy, Christian (2015). <a rel="nofollow" class="external text" href="https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43022.pdf">"Going deeper with convolutions"</a> <span class="cs1-format">(PDF)</span>. <i>Cvpr2015</i>. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1409.4842">1409.4842</a></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Cvpr2015&rft.atitle=Going+deeper+with+convolutions&rft.date=2015&rft_id=info%3Aarxiv%2F1409.4842&rft.aulast=Szegedy&rft.aufirst=Christian&rft_id=https%3A%2F%2Fstatic.googleusercontent.com%2Fmedia%2Fresearch.google.com%2Fen%2F%2Fpubs%2Farchive%2F43022.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-1411.4555-123"><span class="mw-cite-backlink"><b><a href="#cite_ref-1411.4555_123-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFVinyalsToshevBengioErhan2014" class="citation arxiv cs1">Vinyals, Oriol; Toshev, Alexander; Bengio, Samy; Erhan, Dumitru (2014). "Show and Tell: A Neural Image Caption Generator". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1411.4555">1411.4555</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Show+and+Tell%3A+A+Neural+Image+Caption+Generator&rft.date=2014&rft_id=info%3Aarxiv%2F1411.4555&rft.aulast=Vinyals&rft.aufirst=Oriol&rft.au=Toshev%2C+Alexander&rft.au=Bengio%2C+Samy&rft.au=Erhan%2C+Dumitru&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span>.</span> </li> <li id="cite_note-1411.4952-124"><span class="mw-cite-backlink"><b><a href="#cite_ref-1411.4952_124-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFFangGuptaIandolaSrivastava2014" class="citation arxiv cs1">Fang, Hao; Gupta, Saurabh; Iandola, Forrest; Srivastava, Rupesh; Deng, Li; Dollár, Piotr; Gao, Jianfeng; He, Xiaodong; Mitchell, Margaret; Platt, John C; Lawrence Zitnick, C; Zweig, Geoffrey (2014). "From Captions to Visual Concepts and Back". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1411.4952">1411.4952</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=From+Captions+to+Visual+Concepts+and+Back&rft.date=2014&rft_id=info%3Aarxiv%2F1411.4952&rft.aulast=Fang&rft.aufirst=Hao&rft.au=Gupta%2C+Saurabh&rft.au=Iandola%2C+Forrest&rft.au=Srivastava%2C+Rupesh&rft.au=Deng%2C+Li&rft.au=Doll%C3%A1r%2C+Piotr&rft.au=Gao%2C+Jianfeng&rft.au=He%2C+Xiaodong&rft.au=Mitchell%2C+Margaret&rft.au=Platt%2C+John+C&rft.au=Lawrence+Zitnick%2C+C&rft.au=Zweig%2C+Geoffrey&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span>.</span> </li> <li id="cite_note-1411.2539-125"><span class="mw-cite-backlink"><b><a href="#cite_ref-1411.2539_125-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFKirosSalakhutdinovZemel2014" class="citation arxiv cs1">Kiros, Ryan; Salakhutdinov, Ruslan; Zemel, Richard S (2014). "Unifying Visual-Semantic Embeddings with Multimodal Neural Language Models". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1411.2539">1411.2539</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.LG">cs.LG</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Unifying+Visual-Semantic+Embeddings+with+Multimodal+Neural+Language+Models&rft.date=2014&rft_id=info%3Aarxiv%2F1411.2539&rft.aulast=Kiros&rft.aufirst=Ryan&rft.au=Salakhutdinov%2C+Ruslan&rft.au=Zemel%2C+Richard+S&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span>.</span> </li> <li id="cite_note-126"><span class="mw-cite-backlink"><b><a href="#cite_ref-126">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSimonyanZisserman2015" class="citation cs2">Simonyan, Karen; Zisserman, Andrew (2015-04-10), <i>Very Deep Convolutional Networks for Large-Scale Image Recognition</i>, <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1409.1556">1409.1556</a></span></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Very+Deep+Convolutional+Networks+for+Large-Scale+Image+Recognition&rft.date=2015-04-10&rft_id=info%3Aarxiv%2F1409.1556&rft.aulast=Simonyan&rft.aufirst=Karen&rft.au=Zisserman%2C+Andrew&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-prelu-127"><span class="mw-cite-backlink"><b><a href="#cite_ref-prelu_127-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHeZhangRenSun2016" class="citation arxiv cs1">He, Kaiming; Zhang, Xiangyu; Ren, Shaoqing; Sun, Jian (2016). "Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1502.01852">1502.01852</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Delving+Deep+into+Rectifiers%3A+Surpassing+Human-Level+Performance+on+ImageNet+Classification&rft.date=2016&rft_id=info%3Aarxiv%2F1502.01852&rft.aulast=He&rft.aufirst=Kaiming&rft.au=Zhang%2C+Xiangyu&rft.au=Ren%2C+Shaoqing&rft.au=Sun%2C+Jian&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-resnet-128"><span class="mw-cite-backlink"><b><a href="#cite_ref-resnet_128-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHeZhangRenSun2015" class="citation conference cs1">He, Kaiming; Zhang, Xiangyu; Ren, Shaoqing; Sun, Jian (10 Dec 2015). <i>Deep Residual Learning for Image Recognition</i>. 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1512.03385">1512.03385</a></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=conference&rft.btitle=Deep+Residual+Learning+for+Image+Recognition&rft.date=2015-12-10&rft_id=info%3Aarxiv%2F1512.03385&rft.aulast=He&rft.aufirst=Kaiming&rft.au=Zhang%2C+Xiangyu&rft.au=Ren%2C+Shaoqing&rft.au=Sun%2C+Jian&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-resnet20152-129"><span class="mw-cite-backlink"><b><a href="#cite_ref-resnet20152_129-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHeZhangRenSun2016" class="citation conference cs1">He, Kaiming; Zhang, Xiangyu; Ren, Shaoqing; Sun, Jian (2016). <a rel="nofollow" class="external text" href="https://ieeexplore.ieee.org/document/7780459"><i>Deep Residual Learning for Image Recognition</i></a>. <i>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</i>. Las Vegas, NV, USA: IEEE. pp. <span class="nowrap">770–</span>778. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1512.03385">1512.03385</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FCVPR.2016.90">10.1109/CVPR.2016.90</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4673-8851-1" title="Special:BookSources/978-1-4673-8851-1"><bdi>978-1-4673-8851-1</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=conference&rft.jtitle=2016+IEEE+Conference+on+Computer+Vision+and+Pattern+Recognition+%28CVPR%29&rft.atitle=Deep+Residual+Learning+for+Image+Recognition&rft.pages=%3Cspan+class%3D%22nowrap%22%3E770-%3C%2Fspan%3E778&rft.date=2016&rft_id=info%3Aarxiv%2F1512.03385&rft_id=info%3Adoi%2F10.1109%2FCVPR.2016.90&rft.isbn=978-1-4673-8851-1&rft.aulast=He&rft.aufirst=Kaiming&rft.au=Zhang%2C+Xiangyu&rft.au=Ren%2C+Shaoqing&rft.au=Sun%2C+Jian&rft_id=https%3A%2F%2Fieeexplore.ieee.org%2Fdocument%2F7780459&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-130"><span class="mw-cite-backlink"><b><a href="#cite_ref-130">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGatysEckerBethge2015" class="citation arxiv cs1">Gatys, Leon A.; Ecker, Alexander S.; Bethge, Matthias (26 August 2015). "A Neural Algorithm of Artistic Style". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1508.06576">1508.06576</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=A+Neural+Algorithm+of+Artistic+Style&rft.date=2015-08-26&rft_id=info%3Aarxiv%2F1508.06576&rft.aulast=Gatys&rft.aufirst=Leon+A.&rft.au=Ecker%2C+Alexander+S.&rft.au=Bethge%2C+Matthias&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-GANnips-131"><span class="mw-cite-backlink"><b><a href="#cite_ref-GANnips_131-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGoodfellowPouget-AbadieMirzaXu2014" class="citation conference cs1">Goodfellow, Ian; Pouget-Abadie, Jean; Mirza, Mehdi; Xu, Bing; Warde-Farley, David; Ozair, Sherjil; Courville, Aaron; Bengio, Yoshua (2014). <a rel="nofollow" class="external text" href="https://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf"><i>Generative Adversarial Networks</i></a> <span class="cs1-format">(PDF)</span>. Proceedings of the International Conference on Neural Information Processing Systems (NIPS 2014). pp. <span class="nowrap">2672–</span>2680. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20191122034612/http://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 22 November 2019<span class="reference-accessdate">. Retrieved <span class="nowrap">20 August</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=conference&rft.btitle=Generative+Adversarial+Networks&rft.pages=%3Cspan+class%3D%22nowrap%22%3E2672-%3C%2Fspan%3E2680&rft.date=2014&rft.aulast=Goodfellow&rft.aufirst=Ian&rft.au=Pouget-Abadie%2C+Jean&rft.au=Mirza%2C+Mehdi&rft.au=Xu%2C+Bing&rft.au=Warde-Farley%2C+David&rft.au=Ozair%2C+Sherjil&rft.au=Courville%2C+Aaron&rft.au=Bengio%2C+Yoshua&rft_id=https%3A%2F%2Fpapers.nips.cc%2Fpaper%2F5423-generative-adversarial-nets.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-SyncedReview2018-132"><span class="mw-cite-backlink"><b><a href="#cite_ref-SyncedReview2018_132-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://syncedreview.com/2018/12/14/gan-2-0-nvidias-hyperrealistic-face-generator/">"GAN 2.0: NVIDIA's Hyperrealistic Face Generator"</a>. <i>SyncedReview.com</i>. December 14, 2018<span class="reference-accessdate">. 
Retrieved <span class="nowrap">October 3,</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=SyncedReview.com&rft.atitle=GAN+2.0%3A+NVIDIA%27s+Hyperrealistic+Face+Generator&rft.date=2018-12-14&rft_id=https%3A%2F%2Fsyncedreview.com%2F2018%2F12%2F14%2Fgan-2-0-nvidias-hyperrealistic-face-generator%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-progressiveGAN2017-133"><span class="mw-cite-backlink"><b><a href="#cite_ref-progressiveGAN2017_133-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFKarrasAilaLaineLehtinen2018" class="citation arxiv cs1">Karras, T.; Aila, T.; Laine, S.; Lehtinen, J. (26 February 2018). "Progressive Growing of GANs for Improved Quality, Stability, and Variation". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1710.10196">1710.10196</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.NE">cs.NE</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Progressive+Growing+of+GANs+for+Improved+Quality%2C+Stability%2C+and+Variation&rft.date=2018-02-26&rft_id=info%3Aarxiv%2F1710.10196&rft.aulast=Karras&rft.aufirst=T.&rft.au=Aila%2C+T.&rft.au=Laine%2C+S.&rft.au=Lehtinen%2C+J.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-134"><span class="mw-cite-backlink"><b><a href="#cite_ref-134">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://lab.witness.org/projects/synthetic-media-and-deep-fakes/">"Prepare, Don't Panic: Synthetic Media and Deepfakes"</a>. witness.org. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20201202231744/https://lab.witness.org/projects/synthetic-media-and-deep-fakes/">Archived</a> from the original on 2 December 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">25 November</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Prepare%2C+Don%27t+Panic%3A+Synthetic+Media+and+Deepfakes&rft.pub=witness.org&rft_id=https%3A%2F%2Flab.witness.org%2Fprojects%2Fsynthetic-media-and-deep-fakes%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-135"><span class="mw-cite-backlink"><b><a href="#cite_ref-135">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSohl-DicksteinWeissMaheswaranathanGanguli2015" class="citation journal cs1">Sohl-Dickstein, Jascha; Weiss, Eric; Maheswaranathan, Niru; Ganguli, Surya (2015-06-01). <a rel="nofollow" class="external text" href="http://proceedings.mlr.press/v37/sohl-dickstein15.pdf">"Deep Unsupervised Learning using Nonequilibrium Thermodynamics"</a> <span class="cs1-format">(PDF)</span>. <i>Proceedings of the 32nd International Conference on Machine Learning</i>. <b>37</b>. 
PMLR: <span class="nowrap">2256–</span>2265. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1503.03585">1503.03585</a></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Proceedings+of+the+32nd+International+Conference+on+Machine+Learning&rft.atitle=Deep+Unsupervised+Learning+using+Nonequilibrium+Thermodynamics&rft.volume=37&rft.pages=%3Cspan+class%3D%22nowrap%22%3E2256-%3C%2Fspan%3E2265&rft.date=2015-06-01&rft_id=info%3Aarxiv%2F1503.03585&rft.aulast=Sohl-Dickstein&rft.aufirst=Jascha&rft.au=Weiss%2C+Eric&rft.au=Maheswaranathan%2C+Niru&rft.au=Ganguli%2C+Surya&rft_id=http%3A%2F%2Fproceedings.mlr.press%2Fv37%2Fsohl-dickstein15.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-GoogleVoiceTranscription-136"><span class="mw-cite-backlink"><b><a href="#cite_ref-GoogleVoiceTranscription_136-0">^</a></b></span> <span class="reference-text">Google Research Blog. The neural networks behind Google Voice transcription. August 11, 2015. By Françoise Beaufays <a rel="nofollow" class="external free" href="http://googleresearch.blogspot.co.at/2015/08/the-neural-networks-behind-google-voice.html">http://googleresearch.blogspot.co.at/2015/08/the-neural-networks-behind-google-voice.html</a></span> </li> <li id="cite_note-sak2015-137"><span class="mw-cite-backlink">^ <a href="#cite_ref-sak2015_137-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-sak2015_137-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSakSeniorRaoBeaufays2015" class="citation web cs1">Sak, Haşim; Senior, Andrew; Rao, Kanishka; Beaufays, Françoise; Schalkwyk, Johan (September 2015). <a rel="nofollow" class="external text" href="http://googleresearch.blogspot.ch/2015/09/google-voice-search-faster-and-more.html">"Google voice search: faster and more accurate"</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160309191532/http://googleresearch.blogspot.ch/2015/09/google-voice-search-faster-and-more.html">Archived</a> from the original on 2016-03-09<span class="reference-accessdate">. Retrieved <span class="nowrap">2016-04-09</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Google+voice+search%3A+faster+and+more+accurate&rft.date=2015-09&rft.aulast=Sak&rft.aufirst=Ha%C5%9Fim&rft.au=Senior%2C+Andrew&rft.au=Rao%2C+Kanishka&rft.au=Beaufays%2C+Fran%C3%A7oise&rft.au=Schalkwyk%2C+Johan&rft_id=http%3A%2F%2Fgoogleresearch.blogspot.ch%2F2015%2F09%2Fgoogle-voice-search-faster-and-more.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-138"><span class="mw-cite-backlink"><b><a href="#cite_ref-138">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSinghSahaSahidullah2021" class="citation book cs1">Singh, Premjeet; Saha, Goutam; Sahidullah, Md (2021). "Non-linear frequency warping using constant-Q transformation for speech emotion recognition". <i>2021 International Conference on Computer Communication and Informatics (ICCCI)</i>. pp. <span class="nowrap">1–</span>4. 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2102.04029">2102.04029</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FICCCI50826.2021.9402569">10.1109/ICCCI50826.2021.9402569</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-7281-5875-4" title="Special:BookSources/978-1-7281-5875-4"><bdi>978-1-7281-5875-4</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:231846518">231846518</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Non-linear+frequency+warping+using+constant-Q+transformation+for+speech+emotion+recognition&rft.btitle=2021+International+Conference+on+Computer+Communication+and+Informatics+%28ICCCI%29&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1-%3C%2Fspan%3E4&rft.date=2021&rft_id=info%3Aarxiv%2F2102.04029&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A231846518%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2FICCCI50826.2021.9402569&rft.isbn=978-1-7281-5875-4&rft.aulast=Singh&rft.aufirst=Premjeet&rft.au=Saha%2C+Goutam&rft.au=Sahidullah%2C+Md&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-sak2014-139"><span class="mw-cite-backlink"><b><a href="#cite_ref-sak2014_139-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSakSeniorBeaufays2014" class="citation web cs1">Sak, Hasim; Senior, Andrew; Beaufays, Francoise (2014). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180424203806/https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43905.pdf">"Long Short-Term Memory recurrent neural network architectures for large scale acoustic modeling"</a> <span class="cs1-format">(PDF)</span>. Archived from <a rel="nofollow" class="external text" href="https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43905.pdf">the original</a> <span class="cs1-format">(PDF)</span> on 24 April 2018.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Long+Short-Term+Memory+recurrent+neural+network+architectures+for+large+scale+acoustic+modeling&rft.date=2014&rft.aulast=Sak&rft.aufirst=Hasim&rft.au=Senior%2C+Andrew&rft.au=Beaufays%2C+Francoise&rft_id=https%3A%2F%2Fstatic.googleusercontent.com%2Fmedia%2Fresearch.google.com%2Fen%2F%2Fpubs%2Farchive%2F43905.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-liwu2015-140"><span class="mw-cite-backlink"><b><a href="#cite_ref-liwu2015_140-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFLiWu2014" class="citation arxiv cs1">Li, Xiangang; Wu, Xihong (2014). "Constructing Long Short-Term Memory based Deep Recurrent Neural Networks for Large Vocabulary Speech Recognition". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1410.4281">1410.4281</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CL">cs.CL</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Constructing+Long+Short-Term+Memory+based+Deep+Recurrent+Neural+Networks+for+Large+Vocabulary+Speech+Recognition&rft.date=2014&rft_id=info%3Aarxiv%2F1410.4281&rft.aulast=Li&rft.aufirst=Xiangang&rft.au=Wu%2C+Xihong&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-zen2015-141"><span class="mw-cite-backlink"><b><a href="#cite_ref-zen2015_141-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFZenSak2015" class="citation web cs1">Zen, Heiga; Sak, Hasim (2015). <a rel="nofollow" class="external text" href="https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43266.pdf">"Unidirectional Long Short-Term Memory Recurrent Neural Network with Recurrent Output Layer for Low-Latency Speech Synthesis"</a> <span class="cs1-format">(PDF)</span>. <i>Google.com</i>. ICASSP. pp. <span class="nowrap">4470–</span>4474. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210509123113/https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43266.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2021-05-09<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-06-13</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Google.com&rft.atitle=Unidirectional+Long+Short-Term+Memory+Recurrent+Neural+Network+with+Recurrent+Output+Layer+for+Low-Latency+Speech+Synthesis&rft.pages=%3Cspan+class%3D%22nowrap%22%3E4470-%3C%2Fspan%3E4474&rft.date=2015&rft.aulast=Zen&rft.aufirst=Heiga&rft.au=Sak%2C+Hasim&rft_id=https%3A%2F%2Fstatic.googleusercontent.com%2Fmedia%2Fresearch.google.com%2Fen%2F%2Fpubs%2Farchive%2F43266.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-142"><span class="mw-cite-backlink"><b><a href="#cite_ref-142">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://awards.acm.org/about/2018-turing">"2018 ACM A.M. Turing Award Laureates"</a>. <i>awards.acm.org</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2024-08-07</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=awards.acm.org&rft.atitle=2018+ACM+A.M.+Turing+Award+Laureates&rft_id=https%3A%2F%2Fawards.acm.org%2Fabout%2F2018-turing&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-143"><span class="mw-cite-backlink"><b><a href="#cite_ref-143">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFFerrie,_C.,_&_Kaiser,_S.2019" class="citation book cs1">Ferrie, C., & Kaiser, S. 
(2019). <i>Neural Networks for Babies</i>. Sourcebooks. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1492671206" title="Special:BookSources/978-1492671206"><bdi>978-1492671206</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Neural+Networks+for+Babies&rft.pub=Sourcebooks&rft.date=2019&rft.isbn=978-1492671206&rft.au=Ferrie%2C+C.%2C+%26+Kaiser%2C+S.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span><span class="cs1-maint citation-comment"><code class="cs1-code">{{<a href="/wiki/Template:Cite_book" title="Template:Cite book">cite book</a>}}</code>: CS1 maint: multiple names: authors list (<a href="/wiki/Category:CS1_maint:_multiple_names:_authors_list" title="Category:CS1 maint: multiple names: authors list">link</a>)</span></span> </li> <li id="cite_note-144"><span class="mw-cite-backlink"><b><a href="#cite_ref-144">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSilverHuangMaddisonGuez2016" class="citation journal cs1">Silver, David; Huang, Aja; Maddison, Chris J.; Guez, Arthur; Sifre, Laurent; Driessche, George van den; Schrittwieser, Julian; Antonoglou, Ioannis; Panneershelvam, Veda (January 2016). "Mastering the game of Go with deep neural networks and tree search". <i>Nature</i>. <b>529</b> (7587): <span class="nowrap">484–</span>489. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2016Natur.529..484S">2016Natur.529..484S</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fnature16961">10.1038/nature16961</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1476-4687">1476-4687</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/26819042">26819042</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:515925">515925</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature&rft.atitle=Mastering+the+game+of+Go+with+deep+neural+networks+and+tree+search&rft.volume=529&rft.issue=7587&rft.pages=%3Cspan+class%3D%22nowrap%22%3E484-%3C%2Fspan%3E489&rft.date=2016-01&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A515925%23id-name%3DS2CID&rft_id=info%3Abibcode%2F2016Natur.529..484S&rft.issn=1476-4687&rft_id=info%3Adoi%2F10.1038%2Fnature16961&rft_id=info%3Apmid%2F26819042&rft.aulast=Silver&rft.aufirst=David&rft.au=Huang%2C+Aja&rft.au=Maddison%2C+Chris+J.&rft.au=Guez%2C+Arthur&rft.au=Sifre%2C+Laurent&rft.au=Driessche%2C+George+van+den&rft.au=Schrittwieser%2C+Julian&rft.au=Antonoglou%2C+Ioannis&rft.au=Panneershelvam%2C+Veda&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Nokkada-145"><span class="mw-cite-backlink"><b><a href="#cite_ref-Nokkada_145-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation cs2"><a rel="nofollow" class="external text" href="https://serokell.io/blog/deep-learning-and-neural-network-guide#components-of-neural-networks"><i>A Guide to Deep Learning and Neural Networks</i></a>, <a rel="nofollow" class="external text" href="https://web.archive.org/web/20201102151103/https://serokell.io/blog/deep-learning-and-neural-network-guide#components-of-neural-networks">archived</a> from the original on 2020-11-02<span class="reference-accessdate">, retrieved <span class="nowrap">2020-11-16</span></span></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=A+Guide+to+Deep+Learning+and+Neural+Networks&rft_id=https%3A%2F%2Fserokell.io%2Fblog%2Fdeep-learning-and-neural-network-guide%23components-of-neural-networks&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Kumar2021-146"><span class="mw-cite-backlink">^ <a href="#cite_ref-Kumar2021_146-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Kumar2021_146-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFKumarRaubal2021" class="citation journal cs1">Kumar, Nishant; Raubal, Martin (2021). <a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.trc.2021.103432">"Applications of deep learning in congestion detection, prediction and alleviation: A survey"</a>. <i>Transportation Research Part C: Emerging Technologies</i>. <b>133</b>: 103432. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2102.09759">2102.09759</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2021TRPC..13303432K">2021TRPC..13303432K</a>. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.trc.2021.103432">10.1016/j.trc.2021.103432</a></span>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://hdl.handle.net/10230%2F42143">10230/42143</a></span>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:240420107">240420107</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Transportation+Research+Part+C%3A+Emerging+Technologies&rft.atitle=Applications+of+deep+learning+in+congestion+detection%2C+prediction+and+alleviation%3A+A+survey&rft.volume=133&rft.pages=103432&rft.date=2021&rft_id=info%3Ahdl%2F10230%2F42143&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A240420107%23id-name%3DS2CID&rft_id=info%3Abibcode%2F2021TRPC..13303432K&rft_id=info%3Aarxiv%2F2102.09759&rft_id=info%3Adoi%2F10.1016%2Fj.trc.2021.103432&rft.aulast=Kumar&rft.aufirst=Nishant&rft.au=Raubal%2C+Martin&rft_id=https%3A%2F%2Fdoi.org%2F10.1016%252Fj.trc.2021.103432&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-147"><span class="mw-cite-backlink"><b><a href="#cite_ref-147">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSzegedyToshevErhan2013" class="citation journal cs1">Szegedy, Christian; Toshev, Alexander; Erhan, Dumitru (2013). <a rel="nofollow" class="external text" href="https://papers.nips.cc/paper/5207-deep-neural-networks-for-object-detection">"Deep neural networks for object detection"</a>. <i>Advances in Neural Information Processing Systems</i>: <span class="nowrap">2553–</span>2561. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170629172111/http://papers.nips.cc/paper/5207-deep-neural-networks-for-object-detection">Archived</a> from the original on 2017-06-29<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-06-13</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Advances+in+Neural+Information+Processing+Systems&rft.atitle=Deep+neural+networks+for+object+detection&rft.pages=%3Cspan+class%3D%22nowrap%22%3E2553-%3C%2Fspan%3E2561&rft.date=2013&rft.aulast=Szegedy&rft.aufirst=Christian&rft.au=Toshev%2C+Alexander&rft.au=Erhan%2C+Dumitru&rft_id=https%3A%2F%2Fpapers.nips.cc%2Fpaper%2F5207-deep-neural-networks-for-object-detection&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-rolnickpaper-148"><span class="mw-cite-backlink"><b><a href="#cite_ref-rolnickpaper_148-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFRolnickTegmark2018" class="citation conference cs1">Rolnick, David; Tegmark, Max (2018). <a rel="nofollow" class="external text" href="https://openreview.net/pdf?id=SyProzZAW">"The power of deeper networks for expressing natural functions"</a>. 
<i>International Conference on Learning Representations</i>. ICLR 2018. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210107183647/https://openreview.net/pdf?id=SyProzZAW">Archived</a> from the original on 2021-01-07<span class="reference-accessdate">. Retrieved <span class="nowrap">2021-01-05</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=conference&rft.atitle=The+power+of+deeper+networks+for+expressing+natural+functions&rft.btitle=International+Conference+on+Learning+Representations&rft.date=2018&rft.aulast=Rolnick&rft.aufirst=David&rft.au=Tegmark%2C+Max&rft_id=https%3A%2F%2Fopenreview.net%2Fpdf%3Fid%3DSyProzZAW&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-149"><span class="mw-cite-backlink"><b><a href="#cite_ref-149">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHof" class="citation news cs1">Hof, Robert D. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190331092832/https://www.technologyreview.com/s/513696/deep-learning/">"Is Artificial Intelligence Finally Coming into Its Own?"</a>. <i>MIT Technology Review</i>. Archived from <a rel="nofollow" class="external text" href="https://www.technologyreview.com/s/513696/deep-learning/">the original</a> on 31 March 2019<span class="reference-accessdate">. Retrieved <span class="nowrap">10 July</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=MIT+Technology+Review&rft.atitle=Is+Artificial+Intelligence+Finally+Coming+into+Its+Own%3F&rft.aulast=Hof&rft.aufirst=Robert+D.&rft_id=https%3A%2F%2Fwww.technologyreview.com%2Fs%2F513696%2Fdeep-learning%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-gers2001-150"><span class="mw-cite-backlink">^ <a href="#cite_ref-gers2001_150-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-gers2001_150-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGersSchmidhuber2001" class="citation journal cs1">Gers, Felix A.; Schmidhuber, Jürgen (2001). <a rel="nofollow" class="external text" href="http://elartu.tntu.edu.ua/handle/lib/30719">"LSTM Recurrent Networks Learn Simple Context Free and Context Sensitive Languages"</a>. <i>IEEE Transactions on Neural Networks</i>. <b>12</b> (6): <span class="nowrap">1333–</span>1340. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2F72.963769">10.1109/72.963769</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/18249962">18249962</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:10192330">10192330</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200126045722/http://elartu.tntu.edu.ua/handle/lib/30719">Archived</a> from the original on 2020-01-26<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2020-02-25</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Transactions+on+Neural+Networks&rft.atitle=LSTM+Recurrent+Networks+Learn+Simple+Context+Free+and+Context+Sensitive+Languages&rft.volume=12&rft.issue=6&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1333-%3C%2Fspan%3E1340&rft.date=2001&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A10192330%23id-name%3DS2CID&rft_id=info%3Apmid%2F18249962&rft_id=info%3Adoi%2F10.1109%2F72.963769&rft.aulast=Gers&rft.aufirst=Felix+A.&rft.au=Schmidhuber%2C+J%C3%BCrgen&rft_id=http%3A%2F%2Felartu.tntu.edu.ua%2Fhandle%2Flib%2F30719&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-NIPS2014-151"><span class="mw-cite-backlink">^ <a href="#cite_ref-NIPS2014_151-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-NIPS2014_151-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-NIPS2014_151-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSutskeverVinyalsLe2014" class="citation journal cs1">Sutskever, L.; Vinyals, O.; Le, Q. (2014). <a rel="nofollow" class="external text" href="https://papers.nips.cc/paper/5346-sequence-to-sequence-learning-with-neural-networks.pdf">"Sequence to Sequence Learning with Neural Networks"</a> <span class="cs1-format">(PDF)</span>. <i>Proc. NIPS</i>. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1409.3215">1409.3215</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2014arXiv1409.3215S">2014arXiv1409.3215S</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210509123145/https://papers.nips.cc/paper/2014/file/a14ac55a4f27472c5d894ec1c3c743d2-Paper.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2021-05-09<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-06-13</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Proc.+NIPS&rft.atitle=Sequence+to+Sequence+Learning+with+Neural+Networks&rft.date=2014&rft_id=info%3Aarxiv%2F1409.3215&rft_id=info%3Abibcode%2F2014arXiv1409.3215S&rft.aulast=Sutskever&rft.aufirst=L.&rft.au=Vinyals%2C+O.&rft.au=Le%2C+Q.&rft_id=https%3A%2F%2Fpapers.nips.cc%2Fpaper%2F5346-sequence-to-sequence-learning-with-neural-networks.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-vinyals2016-152"><span class="mw-cite-backlink">^ <a href="#cite_ref-vinyals2016_152-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-vinyals2016_152-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFJozefowiczVinyalsSchusterShazeer2016" class="citation arxiv cs1">Jozefowicz, Rafal; Vinyals, Oriol; Schuster, Mike; Shazeer, Noam; Wu, Yonghui (2016). "Exploring the Limits of Language Modeling". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1602.02410">1602.02410</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CL">cs.CL</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Exploring+the+Limits+of+Language+Modeling&rft.date=2016&rft_id=info%3Aarxiv%2F1602.02410&rft.aulast=Jozefowicz&rft.aufirst=Rafal&rft.au=Vinyals%2C+Oriol&rft.au=Schuster%2C+Mike&rft.au=Shazeer%2C+Noam&rft.au=Wu%2C+Yonghui&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-gillick2015-153"><span class="mw-cite-backlink">^ <a href="#cite_ref-gillick2015_153-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-gillick2015_153-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGillickBrunkVinyalsSubramanya2015" class="citation arxiv cs1">Gillick, Dan; Brunk, Cliff; Vinyals, Oriol; Subramanya, Amarnag (2015). "Multilingual Language Processing from Bytes". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1512.00103">1512.00103</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CL">cs.CL</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Multilingual+Language+Processing+from+Bytes&rft.date=2015&rft_id=info%3Aarxiv%2F1512.00103&rft.aulast=Gillick&rft.aufirst=Dan&rft.au=Brunk%2C+Cliff&rft.au=Vinyals%2C+Oriol&rft.au=Subramanya%2C+Amarnag&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-MIKO2010-154"><span class="mw-cite-backlink"><b><a href="#cite_ref-MIKO2010_154-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFMikolov2010" class="citation journal cs1">Mikolov, T.; et al. (2010). <a rel="nofollow" class="external text" href="http://www.fit.vutbr.cz/research/groups/speech/servite/2010/rnnlm_mikolov.pdf">"Recurrent neural network based language model"</a> <span class="cs1-format">(PDF)</span>. <i>Interspeech</i>: <span class="nowrap">1045–</span>1048. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.21437%2FInterspeech.2010-343">10.21437/Interspeech.2010-343</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:17048224">17048224</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170516181940/http://www.fit.vutbr.cz/research/groups/speech/servite/2010/rnnlm_mikolov.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2017-05-16<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2017-06-13</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Interspeech&rft.atitle=Recurrent+neural+network+based+language+model&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1045-%3C%2Fspan%3E1048&rft.date=2010&rft_id=info%3Adoi%2F10.21437%2FInterspeech.2010-343&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A17048224%23id-name%3DS2CID&rft.aulast=Mikolov&rft.aufirst=T.&rft_id=http%3A%2F%2Fwww.fit.vutbr.cz%2Fresearch%2Fgroups%2Fspeech%2Fservite%2F2010%2Frnnlm_mikolov.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-:0-155"><span class="mw-cite-backlink"><b><a href="#cite_ref-:0_155-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHochreiterSchmidhuber1997" class="citation journal cs1">Hochreiter, Sepp; Schmidhuber, Jürgen (1 November 1997). "Long Short-Term Memory". <i>Neural Computation</i>. <b>9</b> (8): <span class="nowrap">1735–</span>1780. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1162%2Fneco.1997.9.8.1735">10.1162/neco.1997.9.8.1735</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0899-7667">0899-7667</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/9377276">9377276</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:1915014">1915014</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Neural+Computation&rft.atitle=Long+Short-Term+Memory&rft.volume=9&rft.issue=8&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1735-%3C%2Fspan%3E1780&rft.date=1997-11-01&rft.issn=0899-7667&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A1915014%23id-name%3DS2CID&rft_id=info%3Apmid%2F9377276&rft_id=info%3Adoi%2F10.1162%2Fneco.1997.9.8.1735&rft.aulast=Hochreiter&rft.aufirst=Sepp&rft.au=Schmidhuber%2C+J%C3%BCrgen&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-:10-156"><span class="mw-cite-backlink">^ <a href="#cite_ref-:10_156-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:10_156-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.researchgate.net/publication/220320057">"Learning Precise Timing with LSTM Recurrent Networks (PDF Download Available)"</a>. <i>ResearchGate</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210509123147/https://www.researchgate.net/publication/220320057_Learning_Precise_Timing_with_LSTM_Recurrent_Networks">Archived</a> from the original on 9 May 2021<span class="reference-accessdate">. 
Retrieved <span class="nowrap">13 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=ResearchGate&rft.atitle=Learning+Precise+Timing+with+LSTM+Recurrent+Networks+%28PDF+Download+Available%29&rft_id=https%3A%2F%2Fwww.researchgate.net%2Fpublication%2F220320057&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-LECUN86-157"><span class="mw-cite-backlink"><b><a href="#cite_ref-LECUN86_157-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFLeCun1998" class="citation journal cs1">LeCun, Y.; et al. (1998). <a rel="nofollow" class="external text" href="http://elartu.tntu.edu.ua/handle/lib/38369">"Gradient-based learning applied to document recognition"</a>. <i>Proceedings of the IEEE</i>. <b>86</b> (11): <span class="nowrap">2278–</span>2324. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2F5.726791">10.1109/5.726791</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:14542261">14542261</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Proceedings+of+the+IEEE&rft.atitle=Gradient-based+learning+applied+to+document+recognition&rft.volume=86&rft.issue=11&rft.pages=%3Cspan+class%3D%22nowrap%22%3E2278-%3C%2Fspan%3E2324&rft.date=1998&rft_id=info%3Adoi%2F10.1109%2F5.726791&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A14542261%23id-name%3DS2CID&rft.aulast=LeCun&rft.aufirst=Y.&rft_id=http%3A%2F%2Felartu.tntu.edu.ua%2Fhandle%2Flib%2F38369&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-:2-158"><span class="mw-cite-backlink"><b><a href="#cite_ref-:2_158-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSainathMohamedKingsburyRamabhadran2013" class="citation book cs1"><a href="/wiki/Tara_Sainath" title="Tara Sainath">Sainath, Tara N.</a>; Mohamed, Abdel-Rahman; Kingsbury, Brian; <a href="/wiki/Bhuvana_Ramabhadran" title="Bhuvana Ramabhadran">Ramabhadran, Bhuvana</a> (2013). "Deep convolutional neural networks for LVCSR". <i>2013 IEEE International Conference on Acoustics, Speech and Signal Processing</i>. pp. <span class="nowrap">8614–</span>8618. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2Ficassp.2013.6639347">10.1109/icassp.2013.6639347</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4799-0356-6" title="Special:BookSources/978-1-4799-0356-6"><bdi>978-1-4799-0356-6</bdi></a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:13816461">13816461</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Deep+convolutional+neural+networks+for+LVCSR&rft.btitle=2013+IEEE+International+Conference+on+Acoustics%2C+Speech+and+Signal+Processing&rft.pages=%3Cspan+class%3D%22nowrap%22%3E8614-%3C%2Fspan%3E8618&rft.date=2013&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A13816461%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2Ficassp.2013.6639347&rft.isbn=978-1-4799-0356-6&rft.aulast=Sainath&rft.aufirst=Tara+N.&rft.au=Mohamed%2C+Abdel-Rahman&rft.au=Kingsbury%2C+Brian&rft.au=Ramabhadran%2C+Bhuvana&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-159"><span class="mw-cite-backlink"><b><a href="#cite_ref-159">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFBengioBoulanger-LewandowskiPascanu2013" class="citation book cs1">Bengio, Yoshua; Boulanger-Lewandowski, Nicolas; Pascanu, Razvan (2013). "Advances in optimizing recurrent networks". <i>2013 IEEE International Conference on Acoustics, Speech and Signal Processing</i>. pp. <span class="nowrap">8624–</span>8628. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1212.0901">1212.0901</a></span>. <a href="/wiki/CiteSeerX_(identifier)" class="mw-redirect" title="CiteSeerX (identifier)">CiteSeerX</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.752.9151">10.1.1.752.9151</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2Ficassp.2013.6639349">10.1109/icassp.2013.6639349</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4799-0356-6" title="Special:BookSources/978-1-4799-0356-6"><bdi>978-1-4799-0356-6</bdi></a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:12485056">12485056</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Advances+in+optimizing+recurrent+networks&rft.btitle=2013+IEEE+International+Conference+on+Acoustics%2C+Speech+and+Signal+Processing&rft.pages=%3Cspan+class%3D%22nowrap%22%3E8624-%3C%2Fspan%3E8628&rft.date=2013&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A12485056%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2Ficassp.2013.6639349&rft_id=https%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.752.9151%23id-name%3DCiteSeerX&rft_id=info%3Aarxiv%2F1212.0901&rft.isbn=978-1-4799-0356-6&rft.aulast=Bengio&rft.aufirst=Yoshua&rft.au=Boulanger-Lewandowski%2C+Nicolas&rft.au=Pascanu%2C+Razvan&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-DAHL2013-160"><span class="mw-cite-backlink"><b><a href="#cite_ref-DAHL2013_160-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFDahl2013" class="citation journal cs1">Dahl, G.; et al. (2013). <a rel="nofollow" class="external text" href="http://www.cs.toronto.edu/~gdahl/papers/reluDropoutBN_icassp2013.pdf">"Improving DNNs for LVCSR using rectified linear units and dropout"</a> <span class="cs1-format">(PDF)</span>. <i>ICASSP</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170812140509/http://www.cs.toronto.edu/~gdahl/papers/reluDropoutBN_icassp2013.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2017-08-12<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-06-13</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=ICASSP&rft.atitle=Improving+DNNs+for+LVCSR+using+rectified+linear+units+and+dropout&rft.date=2013&rft.aulast=Dahl&rft.aufirst=G.&rft_id=http%3A%2F%2Fwww.cs.toronto.edu%2F~gdahl%2Fpapers%2FreluDropoutBN_icassp2013.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Kumar2024-161"><span class="mw-cite-backlink"><b><a href="#cite_ref-Kumar2024_161-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFKumarMartinRaubal2024" class="citation journal cs1">Kumar, Nishant; Martin, Henry; Raubal, Martin (2024). <a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs42421-024-00109-x">"Enhancing Deep Learning-Based City-Wide Traffic Prediction Pipelines Through Complexity Analysis"</a>. <i>Data Science for Transportation</i>. <b>6</b> (3): Article 24. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs42421-024-00109-x">10.1007/s42421-024-00109-x</a></span>. 
<a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://hdl.handle.net/20.500.11850%2F695425">20.500.11850/695425</a></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Data+Science+for+Transportation&rft.atitle=Enhancing+Deep+Learning-Based+City-Wide+Traffic+Prediction+Pipelines+Through+Complexity+Analysis&rft.volume=6&rft.issue=3&rft.pages=Article+24&rft.date=2024&rft_id=info%3Ahdl%2F20.500.11850%2F695425&rft_id=info%3Adoi%2F10.1007%2Fs42421-024-00109-x&rft.aulast=Kumar&rft.aufirst=Nishant&rft.au=Martin%2C+Henry&rft.au=Raubal%2C+Martin&rft_id=https%3A%2F%2Fdoi.org%2F10.1007%252Fs42421-024-00109-x&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-162"><span class="mw-cite-backlink"><b><a href="#cite_ref-162">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.coursera.org/learn/convolutional-neural-networks/lecture/AYzbX/data-augmentation">"Data Augmentation - deeplearning.ai | Coursera"</a>. <i>Coursera</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171201032606/https://www.coursera.org/learn/convolutional-neural-networks/lecture/AYzbX/data-augmentation">Archived</a> from the original on 1 December 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">30 November</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Coursera&rft.atitle=Data+Augmentation+-+deeplearning.ai+%7C+Coursera&rft_id=https%3A%2F%2Fwww.coursera.org%2Flearn%2Fconvolutional-neural-networks%2Flecture%2FAYzbX%2Fdata-augmentation&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-RBMTRAIN-163"><span class="mw-cite-backlink"><b><a href="#cite_ref-RBMTRAIN_163-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHinton2010" class="citation journal cs1">Hinton, G. E. (2010). <a rel="nofollow" class="external text" href="https://www.researchgate.net/publication/221166159">"A Practical Guide to Training Restricted Boltzmann Machines"</a>. <i>Tech. Rep. UTML TR 2010-003</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210509123211/https://www.researchgate.net/publication/221166159_A_brief_introduction_to_Weightless_Neural_Systems">Archived</a> from the original on 2021-05-09<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2017-06-13</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Tech.+Rep.+UTML+TR+2010-003&rft.atitle=A+Practical+Guide+to+Training+Restricted+Boltzmann+Machines&rft.date=2010&rft.aulast=Hinton&rft.aufirst=G.+E.&rft_id=https%3A%2F%2Fwww.researchgate.net%2Fpublication%2F221166159&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-164"><span class="mw-cite-backlink"><b><a href="#cite_ref-164">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFYouBuluçDemmel2017" class="citation book cs1">You, Yang; Buluç, Aydın; Demmel, James (November 2017). <a rel="nofollow" class="external text" href="https://dl.acm.org/citation.cfm?doid=3126908.3126912">"Scaling deep learning on GPU and knights landing clusters"</a>. <a rel="nofollow" class="external text" href="http://www.escholarship.org/uc/item/6ch40821"><i>Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis on - SC '17</i></a>. SC '17, ACM. pp. <span class="nowrap">1–</span>12. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F3126908.3126912">10.1145/3126908.3126912</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9781450351140" title="Special:BookSources/9781450351140"><bdi>9781450351140</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:8869270">8869270</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200729133850/https://escholarship.org/uc/item/6ch40821">Archived</a> from the original on 29 July 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">5 March</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Scaling+deep+learning+on+GPU+and+knights+landing+clusters&rft.btitle=Proceedings+of+the+International+Conference+for+High+Performance+Computing%2C+Networking%2C+Storage+and+Analysis+on+-+SC+%2717&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1-%3C%2Fspan%3E12&rft.pub=SC+%2717%2C+ACM&rft.date=2017-11&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A8869270%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1145%2F3126908.3126912&rft.isbn=9781450351140&rft.aulast=You&rft.aufirst=Yang&rft.au=Bulu%C3%A7%2C+Ayd%C4%B1n&rft.au=Demmel%2C+James&rft_id=https%3A%2F%2Fdl.acm.org%2Fcitation.cfm%3Fdoid%3D3126908.3126912&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-165"><span class="mw-cite-backlink"><b><a href="#cite_ref-165">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFViebkeMemetiPllanaAbraham2019" class="citation journal cs1">Viebke, André; Memeti, Suejb; Pllana, Sabri; Abraham, Ajith (2019). "CHAOS: a parallelization scheme for training convolutional neural networks on Intel Xeon Phi". <i>The Journal of Supercomputing</i>. <b>75</b>: <span class="nowrap">197–</span>227. 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1702.07908">1702.07908</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2017arXiv170207908V">2017arXiv170207908V</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs11227-017-1994-x">10.1007/s11227-017-1994-x</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:14135321">14135321</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+Journal+of+Supercomputing&rft.atitle=CHAOS%3A+a+parallelization+scheme+for+training+convolutional+neural+networks+on+Intel+Xeon+Phi&rft.volume=75&rft.pages=%3Cspan+class%3D%22nowrap%22%3E197-%3C%2Fspan%3E227&rft.date=2019&rft_id=info%3Aarxiv%2F1702.07908&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A14135321%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1007%2Fs11227-017-1994-x&rft_id=info%3Abibcode%2F2017arXiv170207908V&rft.aulast=Viebke&rft.aufirst=Andr%C3%A9&rft.au=Memeti%2C+Suejb&rft.au=Pllana%2C+Sabri&rft.au=Abraham%2C+Ajith&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Qin1-166"><span class="mw-cite-backlink"><b><a href="#cite_ref-Qin1_166-0">^</a></b></span> <span class="reference-text">Ting Qin, et al. "A learning algorithm of CMAC based on RLS". Neural Processing Letters 19.1 (2004): 49-61.</span> </li> <li id="cite_note-Qin2-167"><span class="mw-cite-backlink"><b><a href="#cite_ref-Qin2_167-0">^</a></b></span> <span class="reference-text">Ting Qin, et al. "<a rel="nofollow" class="external text" href="http://www-control.eng.cam.ac.uk/Homepage/papers/cued_control_997.pdf">Continuous CMAC-QRLS and its systolic array</a>". <a rel="nofollow" class="external text" href="https://web.archive.org/web/20181118122850/http://www-control.eng.cam.ac.uk/Homepage/papers/cued_control_997.pdf">Archived</a> 2018-11-18 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>. Neural Processing Letters 22.1 (2005): 1-16.</span> </li> <li id="cite_note-168"><span class="mw-cite-backlink"><b><a href="#cite_ref-168">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFResearch2015" class="citation web cs1">Research, AI (23 October 2015). <a rel="nofollow" class="external text" href="http://airesearch.com/ai-research-papers/deep-neural-networks-for-acoustic-modeling-in-speech-recognition/">"Deep Neural Networks for Acoustic Modeling in Speech Recognition"</a>. <i>airesearch.com</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160201033801/http://airesearch.com/ai-research-papers/deep-neural-networks-for-acoustic-modeling-in-speech-recognition/">Archived</a> from the original on 1 February 2016<span class="reference-accessdate">. 
Retrieved <span class="nowrap">23 October</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=airesearch.com&rft.atitle=Deep+Neural+Networks+for+Acoustic+Modeling+in+Speech+Recognition&rft.date=2015-10-23&rft.aulast=Research&rft.aufirst=AI&rft_id=http%3A%2F%2Fairesearch.com%2Fai-research-papers%2Fdeep-neural-networks-for-acoustic-modeling-in-speech-recognition%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-169"><span class="mw-cite-backlink"><b><a href="#cite_ref-169">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.informationweek.com/big-data/ai-machine-learning/gpus-continue-to-dominate-the-ai-accelerator-market-for-now/a/d-id/1336475">"GPUs Continue to Dominate the AI Accelerator Market for Now"</a>. <i>InformationWeek</i>. December 2019. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200610094310/https://www.informationweek.com/big-data/ai-machine-learning/gpus-continue-to-dominate-the-ai-accelerator-market-for-now/a/d-id/1336475">Archived</a> from the original on 10 June 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">11 June</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=InformationWeek&rft.atitle=GPUs+Continue+to+Dominate+the+AI+Accelerator+Market+for+Now&rft.date=2019-12&rft_id=https%3A%2F%2Fwww.informationweek.com%2Fbig-data%2Fai-machine-learning%2Fgpus-continue-to-dominate-the-ai-accelerator-market-for-now%2Fa%2Fd-id%2F1336475&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-170"><span class="mw-cite-backlink"><b><a href="#cite_ref-170">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFRay2019" class="citation news cs1">Ray, Tiernan (2019). <a rel="nofollow" class="external text" href="https://www.zdnet.com/article/ai-is-changing-the-entire-nature-of-compute/">"AI is changing the entire nature of computation"</a>. <i>ZDNet</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200525144635/https://www.zdnet.com/article/ai-is-changing-the-entire-nature-of-compute/">Archived</a> from the original on 25 May 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">11 June</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=ZDNet&rft.atitle=AI+is+changing+the+entire+nature+of+computation&rft.date=2019&rft.aulast=Ray&rft.aufirst=Tiernan&rft_id=https%3A%2F%2Fwww.zdnet.com%2Farticle%2Fai-is-changing-the-entire-nature-of-compute%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-171"><span class="mw-cite-backlink"><b><a href="#cite_ref-171">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://openai.com/blog/ai-and-compute/">"AI and Compute"</a>. <i>OpenAI</i>. 16 May 2018. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20200617200602/https://openai.com/blog/ai-and-compute/">Archived</a> from the original on 17 June 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">11 June</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=OpenAI&rft.atitle=AI+and+Compute&rft.date=2018-05-16&rft_id=https%3A%2F%2Fopenai.com%2Fblog%2Fai-and-compute%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-172"><span class="mw-cite-backlink"><b><a href="#cite_ref-172">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://consumer.huawei.com/en/press/news/2017/ifa2017-kirin970/">"HUAWEI Reveals the Future of Mobile AI at IFA 2017 | HUAWEI Latest News | HUAWEI Global"</a>. <i>consumer.huawei.com</i>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=consumer.huawei.com&rft.atitle=HUAWEI+Reveals+the+Future+of+Mobile+AI+at+IFA+2017+%26%23124%3B+HUAWEI+Latest+News+%26%23124%3B+HUAWEI+Global&rft_id=https%3A%2F%2Fconsumer.huawei.com%2Fen%2Fpress%2Fnews%2F2017%2Fifa2017-kirin970%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-173"><span class="mw-cite-backlink"><b><a href="#cite_ref-173">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFPYoungCliffPatilNishantPattersonDavid2017" class="citation journal cs1">P, JouppiNorman; YoungCliff; PatilNishant; PattersonDavid; AgrawalGaurav; BajwaRaminder; BatesSarah; BhatiaSuresh; BodenNan; BorchersAl; BoyleRick (2017-06-24). <a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F3140659.3080246">"In-Datacenter Performance Analysis of a Tensor Processing Unit"</a>. <i>ACM SIGARCH Computer Architecture News</i>. <b>45</b> (2): <span class="nowrap">1–</span>12. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1704.04760">1704.04760</a></span>. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F3140659.3080246">10.1145/3140659.3080246</a></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=ACM+SIGARCH+Computer+Architecture+News&rft.atitle=In-Datacenter+Performance+Analysis+of+a+Tensor+Processing+Unit&rft.volume=45&rft.issue=2&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1-%3C%2Fspan%3E12&rft.date=2017-06-24&rft_id=info%3Aarxiv%2F1704.04760&rft_id=info%3Adoi%2F10.1145%2F3140659.3080246&rft.aulast=P&rft.aufirst=JouppiNorman&rft.au=YoungCliff&rft.au=PatilNishant&rft.au=PattersonDavid&rft.au=AgrawalGaurav&rft.au=BajwaRaminder&rft.au=BatesSarah&rft.au=BhatiaSuresh&rft.au=BodenNan&rft.au=BorchersAl&rft.au=BoyleRick&rft_id=https%3A%2F%2Fdoi.org%2F10.1145%252F3140659.3080246&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-174"><span class="mw-cite-backlink"><b><a href="#cite_ref-174">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFWoodie2021" class="citation web cs1">Woodie, Alex (2021-11-01). <a rel="nofollow" class="external text" href="https://www.datanami.com/2021/11/01/cerebras-hits-the-accelerator-for-deep-learning-workloads/">"Cerebras Hits the Accelerator for Deep Learning Workloads"</a>. <i>Datanami</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2022-08-03</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Datanami&rft.atitle=Cerebras+Hits+the+Accelerator+for+Deep+Learning+Workloads&rft.date=2021-11-01&rft.aulast=Woodie&rft.aufirst=Alex&rft_id=https%3A%2F%2Fwww.datanami.com%2F2021%2F11%2F01%2Fcerebras-hits-the-accelerator-for-deep-learning-workloads%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-175"><span class="mw-cite-backlink"><b><a href="#cite_ref-175">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://venturebeat.com/2021/04/20/cerebras-systems-launches-new-ai-supercomputing-processor-with-2-6-trillion-transistors/">"Cerebras launches new AI supercomputing processor with 2.6 trillion transistors"</a>. <i>VentureBeat</i>. 2021-04-20<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2022-08-03</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=VentureBeat&rft.atitle=Cerebras+launches+new+AI+supercomputing+processor+with+2.6+trillion+transistors&rft.date=2021-04-20&rft_id=https%3A%2F%2Fventurebeat.com%2F2021%2F04%2F20%2Fcerebras-systems-launches-new-ai-supercomputing-processor-with-2-6-trillion-transistors%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-atomthin-176"><span class="mw-cite-backlink"><b><a href="#cite_ref-atomthin_176-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFMaregaZhaoAvsarWang2020" class="citation journal cs1">Marega, Guilherme Migliato; Zhao, Yanfei; Avsar, Ahmet; Wang, Zhenyu; Tripati, Mukesh; Radenovic, Aleksandra; Kis, Anras (2020). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7116757">"Logic-in-memory based on an atomically thin semiconductor"</a>. <i>Nature</i>. <b>587</b> (2): <span class="nowrap">72–</span>77. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2020Natur.587...72M">2020Natur.587...72M</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fs41586-020-2861-0">10.1038/s41586-020-2861-0</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7116757">7116757</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/33149289">33149289</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature&rft.atitle=Logic-in-memory+based+on+an+atomically+thin+semiconductor&rft.volume=587&rft.issue=2&rft.pages=%3Cspan+class%3D%22nowrap%22%3E72-%3C%2Fspan%3E77&rft.date=2020&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7116757%23id-name%3DPMC&rft_id=info%3Apmid%2F33149289&rft_id=info%3Adoi%2F10.1038%2Fs41586-020-2861-0&rft_id=info%3Abibcode%2F2020Natur.587...72M&rft.aulast=Marega&rft.aufirst=Guilherme+Migliato&rft.au=Zhao%2C+Yanfei&rft.au=Avsar%2C+Ahmet&rft.au=Wang%2C+Zhenyu&rft.au=Tripati%2C+Mukesh&rft.au=Radenovic%2C+Aleksandra&rft.au=Kis%2C+Anras&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7116757&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-photonic-177"><span class="mw-cite-backlink">^ <a href="#cite_ref-photonic_177-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-photonic_177-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-photonic_177-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFFeldmannYoungbloodKarpovGehring2021" class="citation journal cs1">Feldmann, J.; Youngblood, N.; Karpov, M.; et al. (2021). "Parallel convolutional processing using an integrated photonic tensor". <i>Nature</i>. 
<b>589</b> (2): <span class="nowrap">52–</span>58. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2002.00281">2002.00281</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fs41586-020-03070-1">10.1038/s41586-020-03070-1</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/33408373">33408373</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:211010976">211010976</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature&rft.atitle=Parallel+convolutional+processing+using+an+integrated+photonic+tensor&rft.volume=589&rft.issue=2&rft.pages=%3Cspan+class%3D%22nowrap%22%3E52-%3C%2Fspan%3E58&rft.date=2021&rft_id=info%3Aarxiv%2F2002.00281&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A211010976%23id-name%3DS2CID&rft_id=info%3Apmid%2F33408373&rft_id=info%3Adoi%2F10.1038%2Fs41586-020-03070-1&rft.aulast=Feldmann&rft.aufirst=J.&rft.au=Youngblood%2C+N.&rft.au=Karpov%2C+M.&rft.au=Gehring%2C+H.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-APC_1-178"><span class="mw-cite-backlink"><b><a href="#cite_ref-APC_1_178-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGarofolo,_J.S.Lamel,_L.F.Fisher,_W.M.Fiscus,_J.G.1993" class="citation book cs1">Garofolo, J.S.; Lamel, L.F.; Fisher, W.M.; Fiscus, J.G.; Pallett, D.S.; Dahlgren, N.L.; Zue, V. (1993). <a rel="nofollow" class="external text" href="https://catalog.ldc.upenn.edu/LDC93S1"><i>TIMIT Acoustic-Phonetic Continuous Speech Corpus</i></a>. Linguistic Data Consortium. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.35111%2F17gk-bn40">10.35111/17gk-bn40</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/1-58563-019-5" title="Special:BookSources/1-58563-019-5"><bdi>1-58563-019-5</bdi></a><span class="reference-accessdate">. 
Retrieved <span class="nowrap">27 December</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=TIMIT+Acoustic-Phonetic+Continuous+Speech+Corpus&rft.pub=Linguistic+Data+Consortium&rft.date=1993&rft_id=info%3Adoi%2F10.35111%2F17gk-bn40&rft.isbn=1-58563-019-5&rft.au=Garofolo%2C+J.S.&rft.au=Lamel%2C+L.F.&rft.au=Fisher%2C+W.M.&rft.au=Fiscus%2C+J.G.&rft.au=Pallett%2C+D.S.&rft.au=Dahlgren%2C+N.L.&rft.au=Zue%2C+V.&rft_id=https%3A%2F%2Fcatalog.ldc.upenn.edu%2FLDC93S1&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-179"><span class="mw-cite-backlink"><b><a href="#cite_ref-179">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFRobinson1991" class="citation journal cs1 cs1-prop-long-vol"><a href="/wiki/Tony_Robinson_(speech_recognition)" title="Tony Robinson (speech recognition)">Robinson, Tony</a> (30 September 1991). "Several Improvements to a Recurrent Error Propagation Network Phone Recognition System". <i>Cambridge University Engineering Department Technical Report</i>. CUED/F-INFENG/TR82. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.13140%2FRG.2.2.15418.90567">10.13140/RG.2.2.15418.90567</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Cambridge+University+Engineering+Department+Technical+Report&rft.atitle=Several+Improvements+to+a+Recurrent+Error+Propagation+Network+Phone+Recognition+System&rft.volume=CUED%2FF-INFENG%2FTR82&rft.date=1991-09-30&rft_id=info%3Adoi%2F10.13140%2FRG.2.2.15418.90567&rft.aulast=Robinson&rft.aufirst=Tony&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-CNN-2014-180"><span class="mw-cite-backlink"><b><a href="#cite_ref-CNN-2014_180-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFAbdel-Hamid2014" class="citation journal cs1">Abdel-Hamid, O.; et al. (2014). <a rel="nofollow" class="external text" href="https://zenodo.org/record/891433">"Convolutional Neural Networks for Speech Recognition"</a>. <i>IEEE/ACM Transactions on Audio, Speech, and Language Processing</i>. <b>22</b> (10): <span class="nowrap">1533–</span>1545. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2Ftaslp.2014.2339736">10.1109/taslp.2014.2339736</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:206602362">206602362</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200922180719/https://zenodo.org/record/891433">Archived</a> from the original on 2020-09-22<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2018-04-20</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE%2FACM+Transactions+on+Audio%2C+Speech%2C+and+Language+Processing&rft.atitle=Convolutional+Neural+Networks+for+Speech+Recognition&rft.volume=22&rft.issue=10&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1533-%3C%2Fspan%3E1545&rft.date=2014&rft_id=info%3Adoi%2F10.1109%2Ftaslp.2014.2339736&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A206602362%23id-name%3DS2CID&rft.aulast=Abdel-Hamid&rft.aufirst=O.&rft_id=https%3A%2F%2Fzenodo.org%2Frecord%2F891433&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-EnsembleDL-181"><span class="mw-cite-backlink"><b><a href="#cite_ref-EnsembleDL_181-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFDengPlatt2014" class="citation journal cs1">Deng, L.; Platt, J. (2014). "Ensemble Deep Learning for Speech Recognition". <i>Proc. Interspeech</i>: <span class="nowrap">1915–</span>1919. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.21437%2FInterspeech.2014-433">10.21437/Interspeech.2014-433</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:15641618">15641618</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Proc.+Interspeech&rft.atitle=Ensemble+Deep+Learning+for+Speech+Recognition&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1915-%3C%2Fspan%3E1919&rft.date=2014&rft_id=info%3Adoi%2F10.21437%2FInterspeech.2014-433&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A15641618%23id-name%3DS2CID&rft.aulast=Deng&rft.aufirst=L.&rft.au=Platt%2C+J.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-HCDMM-182"><span class="mw-cite-backlink"><b><a href="#cite_ref-HCDMM_182-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFTóth2015" class="citation journal cs1">Tóth, Laszló (2015). <a rel="nofollow" class="external text" href="http://publicatio.bibl.u-szeged.hu/5976/1/EURASIP2015.pdf">"Phone Recognition with Hierarchical Convolutional Deep Maxout Networks"</a> <span class="cs1-format">(PDF)</span>. <i>EURASIP Journal on Audio, Speech, and Music Processing</i>. <b>2015</b>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1186%2Fs13636-015-0068-3">10.1186/s13636-015-0068-3</a></span>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:217950236">217950236</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200924085514/http://publicatio.bibl.u-szeged.hu/5976/1/EURASIP2015.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2020-09-24<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2019-04-01</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=EURASIP+Journal+on+Audio%2C+Speech%2C+and+Music+Processing&rft.atitle=Phone+Recognition+with+Hierarchical+Convolutional+Deep+Maxout+Networks&rft.volume=2015&rft.date=2015&rft_id=info%3Adoi%2F10.1186%2Fs13636-015-0068-3&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A217950236%23id-name%3DS2CID&rft.aulast=T%C3%B3th&rft.aufirst=Laszl%C3%B3&rft_id=http%3A%2F%2Fpublicatio.bibl.u-szeged.hu%2F5976%2F1%2FEURASIP2015.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-183"><span class="mw-cite-backlink"><b><a href="#cite_ref-183">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFMcMillan2014" class="citation magazine cs1">McMillan, Robert (17 December 2014). <a rel="nofollow" class="external text" href="https://www.wired.com/2014/12/skype-used-ai-build-amazing-new-language-translator/">"How Skype Used AI to Build Its Amazing New Language Translator | WIRED"</a>. <i>Wired</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170608062106/https://www.wired.com/2014/12/skype-used-ai-build-amazing-new-language-translator/">Archived</a> from the original on 8 June 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">14 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Wired&rft.atitle=How+Skype+Used+AI+to+Build+Its+Amazing+New+Language+Translator+%7C+WIRED&rft.date=2014-12-17&rft.aulast=McMillan&rft.aufirst=Robert&rft_id=https%3A%2F%2Fwww.wired.com%2F2014%2F12%2Fskype-used-ai-build-amazing-new-language-translator%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Baidu-184"><span class="mw-cite-backlink"><b><a href="#cite_ref-Baidu_184-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHannunCaseCasperCatanzaro2014" class="citation arxiv cs1">Hannun, Awni; Case, Carl; Casper, Jared; Catanzaro, Bryan; Diamos, Greg; Elsen, Erich; Prenger, Ryan; Satheesh, Sanjeev; Sengupta, Shubho; Coates, Adam; Ng, Andrew Y (2014). "Deep Speech: Scaling up end-to-end speech recognition". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1412.5567">1412.5567</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CL">cs.CL</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Deep+Speech%3A+Scaling+up+end-to-end+speech+recognition&rft.date=2014&rft_id=info%3Aarxiv%2F1412.5567&rft.aulast=Hannun&rft.aufirst=Awni&rft.au=Case%2C+Carl&rft.au=Casper%2C+Jared&rft.au=Catanzaro%2C+Bryan&rft.au=Diamos%2C+Greg&rft.au=Elsen%2C+Erich&rft.au=Prenger%2C+Ryan&rft.au=Satheesh%2C+Sanjeev&rft.au=Sengupta%2C+Shubho&rft.au=Coates%2C+Adam&rft.au=Ng%2C+Andrew+Y&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-YANNMNIST-185"><span class="mw-cite-backlink"><b><a href="#cite_ref-YANNMNIST_185-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://yann.lecun.com/exdb/mnist/.">"MNIST handwritten digit database, Yann LeCun, Corinna Cortes and Chris Burges"</a>. <i>yann.lecun.com</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20140113175237/http://yann.lecun.com/exdb/mnist/">Archived</a> from the original on 2014-01-13<span class="reference-accessdate">. Retrieved <span class="nowrap">2014-01-28</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=yann.lecun.com&rft.atitle=MNIST+handwritten+digit+database%2C+Yann+LeCun%2C+Corinna+Cortes+and+Chris+Burges&rft_id=http%3A%2F%2Fyann.lecun.com%2Fexdb%2Fmnist%2F.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-:7-186"><span class="mw-cite-backlink"><b><a href="#cite_ref-:7_186-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFCireşanMeierMasciSchmidhuber2012" class="citation journal cs1">Cireşan, Dan; Meier, Ueli; Masci, Jonathan; Schmidhuber, Jürgen (August 2012). "Multi-column deep neural network for traffic sign classification". <i>Neural Networks</i>. Selected Papers from IJCNN 2011. <b>32</b>: <span class="nowrap">333–</span>338. <a href="/wiki/CiteSeerX_(identifier)" class="mw-redirect" title="CiteSeerX (identifier)">CiteSeerX</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.226.8219">10.1.1.226.8219</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.neunet.2012.02.023">10.1016/j.neunet.2012.02.023</a>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/22386783">22386783</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Neural+Networks&rft.atitle=Multi-column+deep+neural+network+for+traffic+sign+classification&rft.volume=32&rft.pages=%3Cspan+class%3D%22nowrap%22%3E333-%3C%2Fspan%3E338&rft.date=2012-08&rft_id=https%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.226.8219%23id-name%3DCiteSeerX&rft_id=info%3Apmid%2F22386783&rft_id=info%3Adoi%2F10.1016%2Fj.neunet.2012.02.023&rft.aulast=Cire%C5%9Fan&rft.aufirst=Dan&rft.au=Meier%2C+Ueli&rft.au=Masci%2C+Jonathan&rft.au=Schmidhuber%2C+J%C3%BCrgen&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-surpass1-187"><span class="mw-cite-backlink"><b><a href="#cite_ref-surpass1_187-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFChaochao_LuXiaoou_Tang2014" class="citation arxiv cs1">Chaochao Lu; Xiaoou Tang (2014). "Surpassing Human Level Face Recognition". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1404.3840">1404.3840</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Surpassing+Human+Level+Face+Recognition&rft.date=2014&rft_id=info%3Aarxiv%2F1404.3840&rft.au=Chaochao+Lu&rft.au=Xiaoou+Tang&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-188"><span class="mw-cite-backlink"><b><a href="#cite_ref-188">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://www.technologyreview.com/news/533936/nvidia-demos-a-car-computer-trained-with-deep-learning/">Nvidia Demos a Car Computer Trained with "Deep Learning"</a> (6 January 2015), David Talbot, <i><a href="/wiki/MIT_Technology_Review" title="MIT Technology Review">MIT Technology Review</a></i></span> </li> <li id="cite_note-art1-189"><span class="mw-cite-backlink">^ <a href="#cite_ref-art1_189-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-art1_189-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-art1_189-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFG._W._SmithFrederic_Fol_Leymarie2017" class="citation journal cs1">G. W. Smith; Frederic Fol Leymarie (10 April 2017). <a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Farts6020005">"The Machine as Artist: An Introduction"</a>. <i>Arts</i>. <b>6</b> (4): 5. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Farts6020005">10.3390/arts6020005</a></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Arts&rft.atitle=The+Machine+as+Artist%3A+An+Introduction&rft.volume=6&rft.issue=4&rft.pages=5&rft.date=2017-04-10&rft_id=info%3Adoi%2F10.3390%2Farts6020005&rft.au=G.+W.+Smith&rft.au=Frederic+Fol+Leymarie&rft_id=https%3A%2F%2Fdoi.org%2F10.3390%252Farts6020005&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-art2-190"><span class="mw-cite-backlink">^ <a href="#cite_ref-art2_190-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-art2_190-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-art2_190-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFBlaise_Agüera_y_Arcas2017" class="citation journal cs1">Blaise Agüera y Arcas (29 September 2017). <a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Farts6040018">"Art in the Age of Machine Intelligence"</a>. <i>Arts</i>. <b>6</b> (4): 18. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Farts6040018">10.3390/arts6040018</a></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Arts&rft.atitle=Art+in+the+Age+of+Machine+Intelligence&rft.volume=6&rft.issue=4&rft.pages=18&rft.date=2017-09-29&rft_id=info%3Adoi%2F10.3390%2Farts6040018&rft.au=Blaise+Ag%C3%BCera+y+Arcas&rft_id=https%3A%2F%2Fdoi.org%2F10.3390%252Farts6040018&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-GoldbergLevy2014-191"><span class="mw-cite-backlink"><b><a href="#cite_ref-GoldbergLevy2014_191-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGoldbergLevy2014" class="citation arxiv cs1">Goldberg, Yoav; Levy, Omar (2014). "word2vec Explained: Deriving Mikolov et al.'s Negative-Sampling Word-Embedding Method". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1402.3722">1402.3722</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CL">cs.CL</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=word2vec+Explained%3A+Deriving+Mikolov+et+al.%27s+Negative-Sampling+Word-Embedding+Method&rft.date=2014&rft_id=info%3Aarxiv%2F1402.3722&rft.aulast=Goldberg&rft.aufirst=Yoav&rft.au=Levy%2C+Omar&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-SocherManning2014-192"><span class="mw-cite-backlink">^ <a href="#cite_ref-SocherManning2014_192-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-SocherManning2014_192-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSocherManning" class="citation web cs1">Socher, Richard; Manning, Christopher. <a rel="nofollow" class="external text" href="http://nlp.stanford.edu/courses/NAACL2013/NAACL2013-Socher-Manning-DeepLearning.pdf">"Deep Learning for NLP"</a> <span class="cs1-format">(PDF)</span>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20140706040227/http://nlp.stanford.edu/courses/NAACL2013/NAACL2013-Socher-Manning-DeepLearning.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 6 July 2014<span class="reference-accessdate">. Retrieved <span class="nowrap">26 October</span> 2014</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Deep+Learning+for+NLP&rft.aulast=Socher&rft.aufirst=Richard&rft.au=Manning%2C+Christopher&rft_id=http%3A%2F%2Fnlp.stanford.edu%2Fcourses%2FNAACL2013%2FNAACL2013-Socher-Manning-DeepLearning.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-193"><span class="mw-cite-backlink"><b><a href="#cite_ref-193">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSocherBauerManningNg2013" class="citation journal cs1">Socher, Richard; Bauer, John; Manning, Christopher; Ng, Andrew (2013). <a rel="nofollow" class="external text" href="http://aclweb.org/anthology/P/P13/P13-1045.pdf">"Parsing With Compositional Vector Grammars"</a> <span class="cs1-format">(PDF)</span>. <i>Proceedings of the ACL 2013 Conference</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20141127005912/http://www.aclweb.org/anthology/P/P13/P13-1045.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2014-11-27<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2014-09-03</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Proceedings+of+the+ACL+2013+Conference&rft.atitle=Parsing+With+Compositional+Vector+Grammars&rft.date=2013&rft.aulast=Socher&rft.aufirst=Richard&rft.au=Bauer%2C+John&rft.au=Manning%2C+Christopher&rft.au=Ng%2C+Andrew&rft_id=http%3A%2F%2Faclweb.org%2Fanthology%2FP%2FP13%2FP13-1045.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-RDM_1-194"><span class="mw-cite-backlink"><b><a href="#cite_ref-RDM_1_194-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSocher,_R.Perelygin,_A.Wu,_J.Chuang,_J.2013" class="citation book cs1">Socher, R.; Perelygin, A.; Wu, J.; Chuang, J.; Manning, C.D.; Ng, A.; Potts, C. (October 2013). <a rel="nofollow" class="external text" href="https://nlp.stanford.edu/~socherr/EMNLP2013_RNTN.pdf">"Recursive Deep Models for Semantic Compositionality over a Sentiment Treebank"</a> <span class="cs1-format">(PDF)</span>. <i>Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing</i>. Association for Computational Linguistics. pp. <span class="nowrap">1631–</span>1642. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.18653%2Fv1%2FD13-1170">10.18653/v1/D13-1170</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20161228100300/http://nlp.stanford.edu/%7Esocherr/EMNLP2013_RNTN.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 28 December 2016<span class="reference-accessdate">. Retrieved <span class="nowrap">21 December</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Recursive+Deep+Models+for+Semantic+Compositionality+over+a+Sentiment+Treebank&rft.btitle=Proceedings+of+the+2013+Conference+on+Empirical+Methods+in+Natural+Language+Processing&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1631-%3C%2Fspan%3E1642&rft.pub=Association+for+Computational+Linguistics&rft.date=2013-10&rft_id=info%3Adoi%2F10.18653%2Fv1%2FD13-1170&rft.au=Socher%2C+R.&rft.au=Perelygin%2C+A.&rft.au=Wu%2C+J.&rft.au=Chuang%2C+J.&rft.au=Manning%2C+C.D.&rft.au=Ng%2C+A.&rft.au=Potts%2C+C.&rft_id=https%3A%2F%2Fnlp.stanford.edu%2F~socherr%2FEMNLP2013_RNTN.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-195"><span class="mw-cite-backlink"><b><a href="#cite_ref-195">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFShenHeGaoDeng2014" class="citation journal cs1">Shen, Yelong; He, Xiaodong; Gao, Jianfeng; Deng, Li; Mesnil, Gregoire (1 November 2014). <a rel="nofollow" class="external text" href="https://www.microsoft.com/en-us/research/publication/a-latent-semantic-model-with-convolutional-pooling-structure-for-information-retrieval/">"A Latent Semantic Model with Convolutional-Pooling Structure for Information Retrieval"</a>. <i>Microsoft Research</i>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20171027050418/https://www.microsoft.com/en-us/research/publication/a-latent-semantic-model-with-convolutional-pooling-structure-for-information-retrieval/">Archived</a> from the original on 27 October 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">14 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Microsoft+Research&rft.atitle=A+Latent+Semantic+Model+with+Convolutional-Pooling+Structure+for+Information+Retrieval&rft.date=2014-11-01&rft.aulast=Shen&rft.aufirst=Yelong&rft.au=He%2C+Xiaodong&rft.au=Gao%2C+Jianfeng&rft.au=Deng%2C+Li&rft.au=Mesnil%2C+Gregoire&rft_id=https%3A%2F%2Fwww.microsoft.com%2Fen-us%2Fresearch%2Fpublication%2Fa-latent-semantic-model-with-convolutional-pooling-structure-for-information-retrieval%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-196"><span class="mw-cite-backlink"><b><a href="#cite_ref-196">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHuangHeGaoDeng2013" class="citation journal cs1">Huang, Po-Sen; He, Xiaodong; Gao, Jianfeng; Deng, Li; Acero, Alex; Heck, Larry (1 October 2013). <a rel="nofollow" class="external text" href="https://www.microsoft.com/en-us/research/publication/learning-deep-structured-semantic-models-for-web-search-using-clickthrough-data/">"Learning Deep Structured Semantic Models for Web Search using Clickthrough Data"</a>. <i>Microsoft Research</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171027050414/https://www.microsoft.com/en-us/research/publication/learning-deep-structured-semantic-models-for-web-search-using-clickthrough-data/">Archived</a> from the original on 27 October 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">14 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Microsoft+Research&rft.atitle=Learning+Deep+Structured+Semantic+Models+for+Web+Search+using+Clickthrough+Data&rft.date=2013-10-01&rft.aulast=Huang&rft.aufirst=Po-Sen&rft.au=He%2C+Xiaodong&rft.au=Gao%2C+Jianfeng&rft.au=Deng%2C+Li&rft.au=Acero%2C+Alex&rft.au=Heck%2C+Larry&rft_id=https%3A%2F%2Fwww.microsoft.com%2Fen-us%2Fresearch%2Fpublication%2Flearning-deep-structured-semantic-models-for-web-search-using-clickthrough-data%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-IEEE-TASL2015-197"><span class="mw-cite-backlink"><b><a href="#cite_ref-IEEE-TASL2015_197-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFMesnilDauphinYaoBengio2015" class="citation journal cs1">Mesnil, G.; Dauphin, Y.; Yao, K.; Bengio, Y.; Deng, L.; <a href="/wiki/Dilek_Hakkani-T%C3%BCr" title="Dilek Hakkani-Tür">Hakkani-Tur, D.</a>; He, X.; Heck, L.; Tur, G.; Yu, D.; Zweig, G. (2015). "Using recurrent neural networks for slot filling in spoken language understanding". <i>IEEE Transactions on Audio, Speech, and Language Processing</i>. <b>23</b> (3): <span class="nowrap">530–</span>539. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2Ftaslp.2014.2383614">10.1109/taslp.2014.2383614</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:1317136">1317136</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Transactions+on+Audio%2C+Speech%2C+and+Language+Processing&rft.atitle=Using+recurrent+neural+networks+for+slot+filling+in+spoken+language+understanding&rft.volume=23&rft.issue=3&rft.pages=%3Cspan+class%3D%22nowrap%22%3E530-%3C%2Fspan%3E539&rft.date=2015&rft_id=info%3Adoi%2F10.1109%2Ftaslp.2014.2383614&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A1317136%23id-name%3DS2CID&rft.aulast=Mesnil&rft.aufirst=G.&rft.au=Dauphin%2C+Y.&rft.au=Yao%2C+K.&rft.au=Bengio%2C+Y.&rft.au=Deng%2C+L.&rft.au=Hakkani-Tur%2C+D.&rft.au=He%2C+X.&rft.au=Heck%2C+L.&rft.au=Tur%2C+G.&rft.au=Yu%2C+D.&rft.au=Zweig%2C+G.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-auto-198"><span class="mw-cite-backlink">^ <a href="#cite_ref-auto_198-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-auto_198-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGaoHeYihDeng2014" class="citation journal cs1">Gao, Jianfeng; He, Xiaodong; Yih, Scott Wen-tau; Deng, Li (1 June 2014). <a rel="nofollow" class="external text" href="https://www.microsoft.com/en-us/research/publication/learning-continuous-phrase-representations-for-translation-modeling/">"Learning Continuous Phrase Representations for Translation Modeling"</a>. <i>Microsoft Research</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171027050403/https://www.microsoft.com/en-us/research/publication/learning-continuous-phrase-representations-for-translation-modeling/">Archived</a> from the original on 27 October 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">14 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Microsoft+Research&rft.atitle=Learning+Continuous+Phrase+Representations+for+Translation+Modeling&rft.date=2014-06-01&rft.aulast=Gao&rft.aufirst=Jianfeng&rft.au=He%2C+Xiaodong&rft.au=Yih%2C+Scott+Wen-tau&rft.au=Deng%2C+Li&rft_id=https%3A%2F%2Fwww.microsoft.com%2Fen-us%2Fresearch%2Fpublication%2Flearning-continuous-phrase-representations-for-translation-modeling%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-BROC2017-199"><span class="mw-cite-backlink"><b><a href="#cite_ref-BROC2017_199-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFBrocardoTraoreWoungangObaidat2017" class="citation journal cs1">Brocardo, Marcelo Luiz; Traore, Issa; Woungang, Isaac; Obaidat, Mohammad S. (2017). "Authorship verification using deep belief network systems". <i>International Journal of Communication Systems</i>. <b>30</b> (12): e3259. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1002%2Fdac.3259">10.1002/dac.3259</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:40745740">40745740</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=International+Journal+of+Communication+Systems&rft.atitle=Authorship+verification+using+deep+belief+network+systems&rft.volume=30&rft.issue=12&rft.pages=e3259&rft.date=2017&rft_id=info%3Adoi%2F10.1002%2Fdac.3259&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A40745740%23id-name%3DS2CID&rft.aulast=Brocardo&rft.aufirst=Marcelo+Luiz&rft.au=Traore%2C+Issa&rft.au=Woungang%2C+Isaac&rft.au=Obaidat%2C+Mohammad+S.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-200"><span class="mw-cite-backlink"><b><a href="#cite_ref-200">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFKariampuzhaAlyeaQuSanjak2023" class="citation journal cs1">Kariampuzha, William; Alyea, Gioconda; Qu, Sue; Sanjak, Jaleal; Mathé, Ewy; Sid, Eric; Chatelaine, Haley; Yadaw, Arjun; Xu, Yanji; Zhu, Qian (2023). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9972634">"Precision information extraction for rare disease epidemiology at scale"</a>. <i>Journal of Translational Medicine</i>. <b>21</b> (1): 157. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1186%2Fs12967-023-04011-y">10.1186/s12967-023-04011-y</a></span>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9972634">9972634</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/36855134">36855134</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Journal+of+Translational+Medicine&rft.atitle=Precision+information+extraction+for+rare+disease+epidemiology+at+scale&rft.volume=21&rft.issue=1&rft.pages=157&rft.date=2023&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC9972634%23id-name%3DPMC&rft_id=info%3Apmid%2F36855134&rft_id=info%3Adoi%2F10.1186%2Fs12967-023-04011-y&rft.aulast=Kariampuzha&rft.aufirst=William&rft.au=Alyea%2C+Gioconda&rft.au=Qu%2C+Sue&rft.au=Sanjak%2C+Jaleal&rft.au=Math%C3%A9%2C+Ewy&rft.au=Sid%2C+Eric&rft.au=Chatelaine%2C+Haley&rft.au=Yadaw%2C+Arjun&rft.au=Xu%2C+Yanji&rft.au=Zhu%2C+Qian&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC9972634&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-201"><span class="mw-cite-backlink"><b><a href="#cite_ref-201">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.microsoft.com/en-us/research/project/deep-learning-for-natural-language-processing-theory-and-practice-cikm2014-tutorial/">"Deep Learning for Natural Language Processing: Theory and Practice (CIKM2014 Tutorial) - Microsoft Research"</a>. <i>Microsoft Research</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170313184253/https://www.microsoft.com/en-us/research/project/deep-learning-for-natural-language-processing-theory-and-practice-cikm2014-tutorial/">Archived</a> from the original on 13 March 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">14 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Microsoft+Research&rft.atitle=Deep+Learning+for+Natural+Language+Processing%3A+Theory+and+Practice+%28CIKM2014+Tutorial%29+-+Microsoft+Research&rft_id=https%3A%2F%2Fwww.microsoft.com%2Fen-us%2Fresearch%2Fproject%2Fdeep-learning-for-natural-language-processing-theory-and-practice-cikm2014-tutorial%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-GT_Turovsky_2016-202"><span class="mw-cite-backlink"><b><a href="#cite_ref-GT_Turovsky_2016_202-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFTurovsky2016" class="citation web cs1">Turovsky, Barak (15 November 2016). <a rel="nofollow" class="external text" href="https://blog.google/products/translate/found-translation-more-accurate-fluent-sentences-google-translate/">"Found in translation: More accurate, fluent sentences in Google Translate"</a>. <i>The Keyword Google Blog</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170407071226/https://blog.google/products/translate/found-translation-more-accurate-fluent-sentences-google-translate/">Archived</a> from the original on 7 April 2017<span class="reference-accessdate">. 
Retrieved <span class="nowrap">23 March</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=The+Keyword+Google+Blog&rft.atitle=Found+in+translation%3A+More+accurate%2C+fluent+sentences+in+Google+Translate&rft.date=2016-11-15&rft.aulast=Turovsky&rft.aufirst=Barak&rft_id=https%3A%2F%2Fblog.google%2Fproducts%2Ftranslate%2Ffound-translation-more-accurate-fluent-sentences-google-translate%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-googleblog_GNMT_2016-203"><span class="mw-cite-backlink">^ <a href="#cite_ref-googleblog_GNMT_2016_203-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-googleblog_GNMT_2016_203-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-googleblog_GNMT_2016_203-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-googleblog_GNMT_2016_203-3"><sup><i><b>d</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSchusterJohnsonThorat2016" class="citation web cs1">Schuster, Mike; Johnson, Melvin; Thorat, Nikhil (22 November 2016). <a rel="nofollow" class="external text" href="https://research.googleblog.com/2016/11/zero-shot-translation-with-googles.html">"Zero-Shot Translation with Google's Multilingual Neural Machine Translation System"</a>. <i>Google Research Blog</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170710183732/https://research.googleblog.com/2016/11/zero-shot-translation-with-googles.html">Archived</a> from the original on 10 July 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">23 March</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Google+Research+Blog&rft.atitle=Zero-Shot+Translation+with+Google%27s+Multilingual+Neural+Machine+Translation+System&rft.date=2016-11-22&rft.aulast=Schuster&rft.aufirst=Mike&rft.au=Johnson%2C+Melvin&rft.au=Thorat%2C+Nikhil&rft_id=https%3A%2F%2Fresearch.googleblog.com%2F2016%2F11%2Fzero-shot-translation-with-googles.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-GoogleTranslate-204"><span class="mw-cite-backlink"><b><a href="#cite_ref-GoogleTranslate_204-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFWuSchusterChenLe2016" class="citation arxiv cs1">Wu, Yonghui; Schuster, Mike; Chen, Zhifeng; Le, Quoc V; Norouzi, Mohammad; Macherey, Wolfgang; Krikun, Maxim; Cao, Yuan; Gao, Qin; Macherey, Klaus; Klingner, Jeff; Shah, Apurva; Johnson, Melvin; Liu, Xiaobing; Kaiser, Łukasz; Gouws, Stephan; Kato, Yoshikiyo; Kudo, Taku; Kazawa, Hideto; Stevens, Keith; Kurian, George; Patil, Nishant; Wang, Wei; Young, Cliff; Smith, Jason; Riesa, Jason; Rudnick, Alex; Vinyals, Oriol; Corrado, Greg; et al. (2016). "Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1609.08144">1609.08144</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CL">cs.CL</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Google%27s+Neural+Machine+Translation+System%3A+Bridging+the+Gap+between+Human+and+Machine+Translation&rft.date=2016&rft_id=info%3Aarxiv%2F1609.08144&rft.aulast=Wu&rft.aufirst=Yonghui&rft.au=Schuster%2C+Mike&rft.au=Chen%2C+Zhifeng&rft.au=Le%2C+Quoc+V&rft.au=Norouzi%2C+Mohammad&rft.au=Macherey%2C+Wolfgang&rft.au=Krikun%2C+Maxim&rft.au=Cao%2C+Yuan&rft.au=Gao%2C+Qin&rft.au=Macherey%2C+Klaus&rft.au=Klingner%2C+Jeff&rft.au=Shah%2C+Apurva&rft.au=Johnson%2C+Melvin&rft.au=Liu%2C+Xiaobing&rft.au=Kaiser%2C+%C5%81ukasz&rft.au=Gouws%2C+Stephan&rft.au=Kato%2C+Yoshikiyo&rft.au=Kudo%2C+Taku&rft.au=Kazawa%2C+Hideto&rft.au=Stevens%2C+Keith&rft.au=Kurian%2C+George&rft.au=Patil%2C+Nishant&rft.au=Wang%2C+Wei&rft.au=Young%2C+Cliff&rft.au=Smith%2C+Jason&rft.au=Riesa%2C+Jason&rft.au=Rudnick%2C+Alex&rft.au=Vinyals%2C+Oriol&rft.au=Corrado%2C+Greg&rft.au=Hughes%2C+Macduff&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-WiredGoogleTranslate-205"><span class="mw-cite-backlink"><b><a href="#cite_ref-WiredGoogleTranslate_205-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFMetz2016" class="citation magazine cs1">Metz, Cade (27 September 2016). <a rel="nofollow" class="external text" href="https://www.wired.com/2016/09/google-claims-ai-breakthrough-machine-translation/">"An Infusion of AI Makes Google Translate More Powerful Than Ever"</a>. <i><a href="/wiki/Wired_(magazine)" title="Wired (magazine)">Wired</a></i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20201108101324/https://www.wired.com/2016/09/google-claims-ai-breakthrough-machine-translation/">Archived</a> from the original on 8 November 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">12 October</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Wired&rft.atitle=An+Infusion+of+AI+Makes+Google+Translate+More+Powerful+Than+Ever&rft.date=2016-09-27&rft.aulast=Metz&rft.aufirst=Cade&rft_id=https%3A%2F%2Fwww.wired.com%2F2016%2F09%2Fgoogle-claims-ai-breakthrough-machine-translation%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Biotet-206"><span class="mw-cite-backlink">^ <a href="#cite_ref-Biotet_206-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Biotet_206-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFBoitetBlanchonSeligmanBellynck2010" class="citation web cs1">Boitet, Christian; Blanchon, Hervé; Seligman, Mark; Bellynck, Valérie (2010). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170329125916/http://www-clips.imag.fr/geta/herve.blanchon/Pdfs/NLP-KE-10.pdf">"MT on and for the Web"</a> <span class="cs1-format">(PDF)</span>. 
Archived from <a rel="nofollow" class="external text" href="http://www-clips.imag.fr/geta/herve.blanchon/Pdfs/NLP-KE-10.pdf">the original</a> <span class="cs1-format">(PDF)</span> on 29 March 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">1 December</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=MT+on+and+for+the+Web&rft.date=2010&rft.aulast=Boitet&rft.aufirst=Christian&rft.au=Blanchon%2C+Herv%C3%A9&rft.au=Seligman%2C+Mark&rft.au=Bellynck%2C+Val%C3%A9rie&rft_id=http%3A%2F%2Fwww-clips.imag.fr%2Fgeta%2Fherve.blanchon%2FPdfs%2FNLP-KE-10.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-ARROWSMITH2013-207"><span class="mw-cite-backlink"><b><a href="#cite_ref-ARROWSMITH2013_207-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFArrowsmithMiller2013" class="citation journal cs1">Arrowsmith, J; Miller, P (2013). <a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fnrd4090">"Trial watch: Phase II and phase III attrition rates 2011-2012"</a>. <i>Nature Reviews Drug Discovery</i>. <b>12</b> (8): 569. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fnrd4090">10.1038/nrd4090</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/23903212">23903212</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:20246434">20246434</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature+Reviews+Drug+Discovery&rft.atitle=Trial+watch%3A+Phase+II+and+phase+III+attrition+rates+2011-2012&rft.volume=12&rft.issue=8&rft.pages=569&rft.date=2013&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A20246434%23id-name%3DS2CID&rft_id=info%3Apmid%2F23903212&rft_id=info%3Adoi%2F10.1038%2Fnrd4090&rft.aulast=Arrowsmith&rft.aufirst=J&rft.au=Miller%2C+P&rft_id=https%3A%2F%2Fdoi.org%2F10.1038%252Fnrd4090&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-VERBIEST2015-208"><span class="mw-cite-backlink"><b><a href="#cite_ref-VERBIEST2015_208-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFVerbistKlambauerVervoortTalloen2015" class="citation journal cs1">Verbist, B; Klambauer, G; Vervoort, L; Talloen, W; The Qstar, Consortium; Shkedy, Z; Thas, O; Bender, A; Göhlmann, H. W.; Hochreiter, S (2015). <a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.drudis.2014.12.014">"Using transcriptomics to guide lead optimization in drug discovery projects: Lessons learned from the QSTAR project"</a>. <i>Drug Discovery Today</i>. <b>20</b> (5): <span class="nowrap">505–</span>513. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.drudis.2014.12.014">10.1016/j.drudis.2014.12.014</a></span>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://hdl.handle.net/1942%2F18723">1942/18723</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/25582842">25582842</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Drug+Discovery+Today&rft.atitle=Using+transcriptomics+to+guide+lead+optimization+in+drug+discovery+projects%3A+Lessons+learned+from+the+QSTAR+project&rft.volume=20&rft.issue=5&rft.pages=%3Cspan+class%3D%22nowrap%22%3E505-%3C%2Fspan%3E513&rft.date=2015&rft_id=info%3Ahdl%2F1942%2F18723&rft_id=info%3Apmid%2F25582842&rft_id=info%3Adoi%2F10.1016%2Fj.drudis.2014.12.014&rft.aulast=Verbist&rft.aufirst=B&rft.au=Klambauer%2C+G&rft.au=Vervoort%2C+L&rft.au=Talloen%2C+W&rft.au=The+Qstar%2C+Consortium&rft.au=Shkedy%2C+Z&rft.au=Thas%2C+O&rft.au=Bender%2C+A&rft.au=G%C3%B6hlmann%2C+H.+W.&rft.au=Hochreiter%2C+S&rft_id=https%3A%2F%2Fdoi.org%2F10.1016%252Fj.drudis.2014.12.014&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-MERCK2012-209"><span class="mw-cite-backlink"><b><a href="#cite_ref-MERCK2012_209-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://kaggle.com/c/MerckActivity">"Merck Molecular Activity Challenge"</a>. <i>kaggle.com</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200716190808/https://www.kaggle.com/c/MerckActivity">Archived</a> from the original on 2020-07-16<span class="reference-accessdate">. Retrieved <span class="nowrap">2020-07-16</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=kaggle.com&rft.atitle=Merck+Molecular+Activity+Challenge&rft_id=https%3A%2F%2Fkaggle.com%2Fc%2FMerckActivity&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-:5-210"><span class="mw-cite-backlink"><b><a href="#cite_ref-:5_210-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://www.datascienceassn.org/content/multi-task-neural-networks-qsar-predictions">"Multi-task Neural Networks for QSAR Predictions | Data Science Association"</a>. <i>www.datascienceassn.org</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170430142049/http://www.datascienceassn.org/content/multi-task-neural-networks-qsar-predictions">Archived</a> from the original on 30 April 2017<span class="reference-accessdate">. 
Retrieved <span class="nowrap">14 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=www.datascienceassn.org&rft.atitle=Multi-task+Neural+Networks+for+QSAR+Predictions+%7C+Data+Science+Association&rft_id=http%3A%2F%2Fwww.datascienceassn.org%2Fcontent%2Fmulti-task-neural-networks-qsar-predictions&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-TOX21-211"><span class="mw-cite-backlink"><b><a href="#cite_ref-TOX21_211-0">^</a></b></span> <span class="reference-text">"Toxicology in the 21st century Data Challenge"</span> </li> <li id="cite_note-TOX21Data-212"><span class="mw-cite-backlink"><b><a href="#cite_ref-TOX21Data_212-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://tripod.nih.gov/tox21/challenge/leaderboard.jsp">"NCATS Announces Tox21 Data Challenge Winners"</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150908025122/https://tripod.nih.gov/tox21/challenge/leaderboard.jsp">Archived</a> from the original on 2015-09-08<span class="reference-accessdate">. Retrieved <span class="nowrap">2015-03-05</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=NCATS+Announces+Tox21+Data+Challenge+Winners&rft_id=https%3A%2F%2Ftripod.nih.gov%2Ftox21%2Fchallenge%2Fleaderboard.jsp&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-:11-213"><span class="mw-cite-backlink"><b><a href="#cite_ref-:11_213-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20150228225709/http://www.ncats.nih.gov/news-and-events/features/tox21-challenge-winners.html">"NCATS Announces Tox21 Data Challenge Winners"</a>. Archived from <a rel="nofollow" class="external text" href="http://www.ncats.nih.gov/news-and-events/features/tox21-challenge-winners.html">the original</a> on 28 February 2015<span class="reference-accessdate">. Retrieved <span class="nowrap">5 March</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=NCATS+Announces+Tox21+Data+Challenge+Winners&rft_id=http%3A%2F%2Fwww.ncats.nih.gov%2Fnews-and-events%2Ffeatures%2Ftox21-challenge-winners.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-214"><span class="mw-cite-backlink"><b><a href="#cite_ref-214">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFWallachDzambaHeifets2015" class="citation arxiv cs1">Wallach, Izhar; Dzamba, Michael; Heifets, Abraham (9 October 2015). "AtomNet: A Deep Convolutional Neural Network for Bioactivity Prediction in Structure-based Drug Discovery". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1510.02855">1510.02855</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.LG">cs.LG</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=AtomNet%3A+A+Deep+Convolutional+Neural+Network+for+Bioactivity+Prediction+in+Structure-based+Drug+Discovery&rft.date=2015-10-09&rft_id=info%3Aarxiv%2F1510.02855&rft.aulast=Wallach&rft.aufirst=Izhar&rft.au=Dzamba%2C+Michael&rft.au=Heifets%2C+Abraham&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Toronto-215"><span class="mw-cite-backlink">^ <a href="#cite_ref-Toronto_215-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Toronto_215-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.theglobeandmail.com/report-on-business/small-business/starting-out/toronto-startup-has-a-faster-way-to-discover-effective-medicines/article25660419/">"Toronto startup has a faster way to discover effective medicines"</a>. <i><a href="/wiki/The_Globe_and_Mail" title="The Globe and Mail">The Globe and Mail</a></i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20151020040115/http://www.theglobeandmail.com/report-on-business/small-business/starting-out/toronto-startup-has-a-faster-way-to-discover-effective-medicines/article25660419/">Archived</a> from the original on 20 October 2015<span class="reference-accessdate">. Retrieved <span class="nowrap">9 November</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+Globe+and+Mail&rft.atitle=Toronto+startup+has+a+faster+way+to+discover+effective+medicines&rft_id=https%3A%2F%2Fwww.theglobeandmail.com%2Freport-on-business%2Fsmall-business%2Fstarting-out%2Ftoronto-startup-has-a-faster-way-to-discover-effective-medicines%2Farticle25660419%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-216"><span class="mw-cite-backlink"><b><a href="#cite_ref-216">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://ww2.kqed.org/futureofyou/2015/05/27/startup-harnesses-supercomputers-to-seek-cures/">"Startup Harnesses Supercomputers to Seek Cures"</a>. <i>KQED Future of You</i>. 27 May 2015. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20151224104721/http://ww2.kqed.org/futureofyou/2015/05/27/startup-harnesses-supercomputers-to-seek-cures/">Archived</a> from the original on 24 December 2015<span class="reference-accessdate">. 
Retrieved <span class="nowrap">9 November</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=KQED+Future+of+You&rft.atitle=Startup+Harnesses+Supercomputers+to+Seek+Cures&rft.date=2015-05-27&rft_id=http%3A%2F%2Fww2.kqed.org%2Ffutureofyou%2F2015%2F05%2F27%2Fstartup-harnesses-supercomputers-to-seek-cures%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-217"><span class="mw-cite-backlink"><b><a href="#cite_ref-217">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGilmerSchoenholzRileyVinyals2017" class="citation arxiv cs1">Gilmer, Justin; Schoenholz, Samuel S.; Riley, Patrick F.; Vinyals, Oriol; Dahl, George E. (2017-06-12). "Neural Message Passing for Quantum Chemistry". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1704.01212">1704.01212</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.LG">cs.LG</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Neural+Message+Passing+for+Quantum+Chemistry&rft.date=2017-06-12&rft_id=info%3Aarxiv%2F1704.01212&rft.aulast=Gilmer&rft.aufirst=Justin&rft.au=Schoenholz%2C+Samuel+S.&rft.au=Riley%2C+Patrick+F.&rft.au=Vinyals%2C+Oriol&rft.au=Dahl%2C+George+E.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-218"><span class="mw-cite-backlink"><b><a href="#cite_ref-218">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFZhavoronkov2019" class="citation journal cs1">Zhavoronkov, Alex (2019). "Deep learning enables rapid identification of potent DDR1 kinase inhibitors". <i>Nature Biotechnology</i>. <b>37</b> (9): <span class="nowrap">1038–</span>1040. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fs41587-019-0224-x">10.1038/s41587-019-0224-x</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/31477924">31477924</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:201716327">201716327</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature+Biotechnology&rft.atitle=Deep+learning+enables+rapid+identification+of+potent+DDR1+kinase+inhibitors&rft.volume=37&rft.issue=9&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1038-%3C%2Fspan%3E1040&rft.date=2019&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A201716327%23id-name%3DS2CID&rft_id=info%3Apmid%2F31477924&rft_id=info%3Adoi%2F10.1038%2Fs41587-019-0224-x&rft.aulast=Zhavoronkov&rft.aufirst=Alex&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-219"><span class="mw-cite-backlink"><b><a href="#cite_ref-219">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGregory" class="citation magazine cs1">Gregory, Barber. <a rel="nofollow" class="external text" href="https://www.wired.com/story/molecule-designed-ai-exhibits-druglike-qualities/">"A Molecule Designed By AI Exhibits 'Druglike' Qualities"</a>. <i>Wired</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200430143244/https://www.wired.com/story/molecule-designed-ai-exhibits-druglike-qualities/">Archived</a> from the original on 2020-04-30<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-09-05</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Wired&rft.atitle=A+Molecule+Designed+By+AI+Exhibits+%27Druglike%27+Qualities&rft.aulast=Gregory&rft.aufirst=Barber&rft_id=https%3A%2F%2Fwww.wired.com%2Fstory%2Fmolecule-designed-ai-exhibits-druglike-qualities%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-220"><span class="mw-cite-backlink"><b><a href="#cite_ref-220">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFTkachenko2015" class="citation arxiv cs1">Tkachenko, Yegor (8 April 2015). "Autonomous CRM Control via CLV Approximation with Deep Reinforcement Learning in Discrete and Continuous Action Space". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1504.01840">1504.01840</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.LG">cs.LG</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Autonomous+CRM+Control+via+CLV+Approximation+with+Deep+Reinforcement+Learning+in+Discrete+and+Continuous+Action+Space&rft.date=2015-04-08&rft_id=info%3Aarxiv%2F1504.01840&rft.aulast=Tkachenko&rft.aufirst=Yegor&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-221"><span class="mw-cite-backlink"><b><a href="#cite_ref-221">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFvan_den_OordDielemanSchrauwen2013" class="citation book cs1">van den Oord, Aaron; Dieleman, Sander; Schrauwen, Benjamin (2013). Burges, C. J. C.; Bottou, L.; Welling, M.; Ghahramani, Z.; Weinberger, K. Q. (eds.). <a rel="nofollow" class="external text" href="http://papers.nips.cc/paper/5004-deep-content-based-music-recommendation.pdf"><i>Advances in Neural Information Processing Systems 26</i></a> <span class="cs1-format">(PDF)</span>. Curran Associates, Inc. pp. <span class="nowrap">2643–</span>2651. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170516185259/http://papers.nips.cc/paper/5004-deep-content-based-music-recommendation.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2017-05-16<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-06-14</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Advances+in+Neural+Information+Processing+Systems+26&rft.pages=%3Cspan+class%3D%22nowrap%22%3E2643-%3C%2Fspan%3E2651&rft.pub=Curran+Associates%2C+Inc.&rft.date=2013&rft.aulast=van+den+Oord&rft.aufirst=Aaron&rft.au=Dieleman%2C+Sander&rft.au=Schrauwen%2C+Benjamin&rft_id=http%3A%2F%2Fpapers.nips.cc%2Fpaper%2F5004-deep-content-based-music-recommendation.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-222"><span class="mw-cite-backlink"><b><a href="#cite_ref-222">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFFengZhangRenShang2019" class="citation journal cs1">Feng, X.Y.; Zhang, H.; Ren, Y.J.; Shang, P.H.; Zhu, Y.; Liang, Y.C.; Guan, R.C.; Xu, D. (2019). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6555124">"The Deep Learning–Based Recommender System "Pubmender" for Choosing a Biomedical Publication Venue: Development and Validation Study"</a>. <i><a href="/wiki/Journal_of_Medical_Internet_Research" title="Journal of Medical Internet Research">Journal of Medical Internet Research</a></i>. <b>21</b> (5): e12957. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.2196%2F12957">10.2196/12957</a></span>. 
<a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6555124">6555124</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/31127715">31127715</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Journal+of+Medical+Internet+Research&rft.atitle=The+Deep+Learning%E2%80%93Based+Recommender+System+%22Pubmender%22+for+Choosing+a+Biomedical+Publication+Venue%3A+Development+and+Validation+Study&rft.volume=21&rft.issue=5&rft.pages=e12957&rft.date=2019&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC6555124%23id-name%3DPMC&rft_id=info%3Apmid%2F31127715&rft_id=info%3Adoi%2F10.2196%2F12957&rft.aulast=Feng&rft.aufirst=X.Y.&rft.au=Zhang%2C+H.&rft.au=Ren%2C+Y.J.&rft.au=Shang%2C+P.H.&rft.au=Zhu%2C+Y.&rft.au=Liang%2C+Y.C.&rft.au=Guan%2C+R.C.&rft.au=Xu%2C+D.&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC6555124&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-223"><span class="mw-cite-backlink"><b><a href="#cite_ref-223">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFElkahkySongHe2015" class="citation journal cs1">Elkahky, Ali Mamdouh; Song, Yang; He, Xiaodong (1 May 2015). <a rel="nofollow" class="external text" href="https://www.microsoft.com/en-us/research/publication/a-multi-view-deep-learning-approach-for-cross-domain-user-modeling-in-recommendation-systems/">"A Multi-View Deep Learning Approach for Cross Domain User Modeling in Recommendation Systems"</a>. <i>Microsoft Research</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180125134534/https://www.microsoft.com/en-us/research/publication/a-multi-view-deep-learning-approach-for-cross-domain-user-modeling-in-recommendation-systems/">Archived</a> from the original on 25 January 2018<span class="reference-accessdate">. Retrieved <span class="nowrap">14 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Microsoft+Research&rft.atitle=A+Multi-View+Deep+Learning+Approach+for+Cross+Domain+User+Modeling+in+Recommendation+Systems&rft.date=2015-05-01&rft.aulast=Elkahky&rft.aufirst=Ali+Mamdouh&rft.au=Song%2C+Yang&rft.au=He%2C+Xiaodong&rft_id=https%3A%2F%2Fwww.microsoft.com%2Fen-us%2Fresearch%2Fpublication%2Fa-multi-view-deep-learning-approach-for-cross-domain-user-modeling-in-recommendation-systems%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-224"><span class="mw-cite-backlink"><b><a href="#cite_ref-224">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFChiccoSadowskiBaldi2014" class="citation book cs1">Chicco, Davide; Sadowski, Peter; Baldi, Pierre (1 January 2014). "Deep autoencoder neural networks for gene ontology annotation predictions". 
<a rel="nofollow" class="external text" href="http://dl.acm.org/citation.cfm?id=2649442"><i>Proceedings of the 5th ACM Conference on Bioinformatics, Computational Biology, and Health Informatics</i></a>. ACM. pp. <span class="nowrap">533–</span>540. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F2649387.2649442">10.1145/2649387.2649442</a>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<a rel="nofollow" class="external text" href="https://hdl.handle.net/11311%2F964622">11311/964622</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9781450328944" title="Special:BookSources/9781450328944"><bdi>9781450328944</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:207217210">207217210</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210509123140/https://dl.acm.org/doi/10.1145/2649387.2649442">Archived</a> from the original on 9 May 2021<span class="reference-accessdate">. Retrieved <span class="nowrap">23 November</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Deep+autoencoder+neural+networks+for+gene+ontology+annotation+predictions&rft.btitle=Proceedings+of+the+5th+ACM+Conference+on+Bioinformatics%2C+Computational+Biology%2C+and+Health+Informatics&rft.pages=%3Cspan+class%3D%22nowrap%22%3E533-%3C%2Fspan%3E540&rft.pub=ACM&rft.date=2014-01-01&rft_id=info%3Ahdl%2F11311%2F964622&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A207217210%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1145%2F2649387.2649442&rft.isbn=9781450328944&rft.aulast=Chicco&rft.aufirst=Davide&rft.au=Sadowski%2C+Peter&rft.au=Baldi%2C+Pierre&rft_id=http%3A%2F%2Fdl.acm.org%2Fcitation.cfm%3Fid%3D2649442&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-225"><span class="mw-cite-backlink"><b><a href="#cite_ref-225">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSathyanarayana2016" class="citation journal cs1">Sathyanarayana, Aarti (1 January 2016). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5116102">"Sleep Quality Prediction From Wearable Data Using Deep Learning"</a>. <i>JMIR mHealth and uHealth</i>. <b>4</b> (4): e125. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.2196%2Fmhealth.6562">10.2196/mhealth.6562</a></span>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5116102">5116102</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/27815231">27815231</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:3821594">3821594</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=JMIR+mHealth+and+uHealth&rft.atitle=Sleep+Quality+Prediction+From+Wearable+Data+Using+Deep+Learning&rft.volume=4&rft.issue=4&rft.pages=e125&rft.date=2016-01-01&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC5116102%23id-name%3DPMC&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A3821594%23id-name%3DS2CID&rft_id=info%3Apmid%2F27815231&rft_id=info%3Adoi%2F10.2196%2Fmhealth.6562&rft.aulast=Sathyanarayana&rft.aufirst=Aarti&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC5116102&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-226"><span class="mw-cite-backlink"><b><a href="#cite_ref-226">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFChoiSchuetzStewartSun2016" class="citation journal cs1">Choi, Edward; Schuetz, Andy; Stewart, Walter F.; Sun, Jimeng (13 August 2016). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5391725">"Using recurrent neural network models for early detection of heart failure onset"</a>. <i>Journal of the American Medical Informatics Association</i>. <b>24</b> (2): <span class="nowrap">361–</span>370. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1093%2Fjamia%2Focw112">10.1093/jamia/ocw112</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1067-5027">1067-5027</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5391725">5391725</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/27521897">27521897</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Journal+of+the+American+Medical+Informatics+Association&rft.atitle=Using+recurrent+neural+network+models+for+early+detection+of+heart+failure+onset&rft.volume=24&rft.issue=2&rft.pages=%3Cspan+class%3D%22nowrap%22%3E361-%3C%2Fspan%3E370&rft.date=2016-08-13&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC5391725%23id-name%3DPMC&rft.issn=1067-5027&rft_id=info%3Apmid%2F27521897&rft_id=info%3Adoi%2F10.1093%2Fjamia%2Focw112&rft.aulast=Choi&rft.aufirst=Edward&rft.au=Schuetz%2C+Andy&rft.au=Stewart%2C+Walter+F.&rft.au=Sun%2C+Jimeng&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC5391725&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-227"><span class="mw-cite-backlink"><b><a href="#cite_ref-227">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.technologyreview.com/2020/11/30/1012712/deepmind-protein-folding-ai-solved-biology-science-drugs-disease/">"DeepMind's protein-folding AI has solved a 50-year-old grand challenge of biology"</a>. <i>MIT Technology Review</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2024-05-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=MIT+Technology+Review&rft.atitle=DeepMind%27s+protein-folding+AI+has+solved+a+50-year-old+grand+challenge+of+biology&rft_id=https%3A%2F%2Fwww.technologyreview.com%2F2020%2F11%2F30%2F1012712%2Fdeepmind-protein-folding-ai-solved-biology-science-drugs-disease%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-228"><span class="mw-cite-backlink"><b><a href="#cite_ref-228">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFShead2020" class="citation web cs1">Shead, Sam (2020-11-30). <a rel="nofollow" class="external text" href="https://www.cnbc.com/2020/11/30/deepmind-solves-protein-folding-grand-challenge-with-alphafold-ai.html">"DeepMind solves 50-year-old 'grand challenge' with protein folding A.I."</a> <i>CNBC</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">2024-05-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=CNBC&rft.atitle=DeepMind+solves+50-year-old+%27grand+challenge%27+with+protein+folding+A.I.&rft.date=2020-11-30&rft.aulast=Shead&rft.aufirst=Sam&rft_id=https%3A%2F%2Fwww.cnbc.com%2F2020%2F11%2F30%2Fdeepmind-solves-protein-folding-grand-challenge-with-alphafold-ai.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-SPB22-229"><span class="mw-cite-backlink">^ <a href="#cite_ref-SPB22_229-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-SPB22_229-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFShalevPainskyBen-Gal2022" class="citation journal cs1">Shalev, Y.; Painsky, A.; Ben-Gal, I. (2022). <a rel="nofollow" class="external text" href="https://www.iradbengal.sites.tau.ac.il/_files/ugd/901879_d51bc0a620734585b5d3154488b3ae84.pdf">"Neural Joint Entropy Estimation"</a> <span class="cs1-format">(PDF)</span>. <i>IEEE Transactions on Neural Networks and Learning Systems</i>. <b>PP</b> (4): <span class="nowrap">5488–</span>5500. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2012.11197">2012.11197</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FTNNLS.2022.3204919">10.1109/TNNLS.2022.3204919</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/36155469">36155469</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:229339809">229339809</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Transactions+on+Neural+Networks+and+Learning+Systems&rft.atitle=Neural+Joint+Entropy+Estimation&rft.volume=PP&rft.issue=4&rft.pages=%3Cspan+class%3D%22nowrap%22%3E5488-%3C%2Fspan%3E5500&rft.date=2022&rft_id=info%3Aarxiv%2F2012.11197&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A229339809%23id-name%3DS2CID&rft_id=info%3Apmid%2F36155469&rft_id=info%3Adoi%2F10.1109%2FTNNLS.2022.3204919&rft.aulast=Shalev&rft.aufirst=Y.&rft.au=Painsky%2C+A.&rft.au=Ben-Gal%2C+I.&rft_id=https%3A%2F%2Fwww.iradbengal.sites.tau.ac.il%2F_files%2Fugd%2F901879_d51bc0a620734585b5d3154488b3ae84.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-230"><span class="mw-cite-backlink"><b><a href="#cite_ref-230">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFLitjensKooiBejnordiSetio2017" class="citation journal cs1">Litjens, Geert; Kooi, Thijs; Bejnordi, Babak Ehteshami; Setio, Arnaud Arindra Adiyoso; Ciompi, Francesco; Ghafoorian, Mohsen; van der Laak, Jeroen A.W.M.; van Ginneken, Bram; Sánchez, Clara I. (December 2017). "A survey on deep learning in medical image analysis". <i>Medical Image Analysis</i>. 
<b>42</b>: <span class="nowrap">60–</span>88. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1702.05747">1702.05747</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2017arXiv170205747L">2017arXiv170205747L</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.media.2017.07.005">10.1016/j.media.2017.07.005</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/28778026">28778026</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:2088679">2088679</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Medical+Image+Analysis&rft.atitle=A+survey+on+deep+learning+in+medical+image+analysis&rft.volume=42&rft.pages=%3Cspan+class%3D%22nowrap%22%3E60-%3C%2Fspan%3E88&rft.date=2017-12&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A2088679%23id-name%3DS2CID&rft_id=info%3Abibcode%2F2017arXiv170205747L&rft_id=info%3Aarxiv%2F1702.05747&rft_id=info%3Apmid%2F28778026&rft_id=info%3Adoi%2F10.1016%2Fj.media.2017.07.005&rft.aulast=Litjens&rft.aufirst=Geert&rft.au=Kooi%2C+Thijs&rft.au=Bejnordi%2C+Babak+Ehteshami&rft.au=Setio%2C+Arnaud+Arindra+Adiyoso&rft.au=Ciompi%2C+Francesco&rft.au=Ghafoorian%2C+Mohsen&rft.au=van+der+Laak%2C+Jeroen+A.W.M.&rft.au=van+Ginneken%2C+Bram&rft.au=S%C3%A1nchez%2C+Clara+I.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-231"><span class="mw-cite-backlink"><b><a href="#cite_ref-231">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFForslidWieslanderBengtssonWahlby2017" class="citation book cs1">Forslid, Gustav; Wieslander, Hakan; Bengtsson, Ewert; Wahlby, Carolina; Hirsch, Jan-Michael; Stark, Christina Runow; Sadanandan, Sajith Kecheril (2017). <a rel="nofollow" class="external text" href="http://urn.kb.se/resolve?urn=urn:nbn:se:uu:diva-326160">"Deep Convolutional Neural Networks for Detecting Cellular Changes Due to Malignancy"</a>. <i>2017 IEEE International Conference on Computer Vision Workshops (ICCVW)</i>. pp. <span class="nowrap">82–</span>89. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FICCVW.2017.18">10.1109/ICCVW.2017.18</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9781538610343" title="Special:BookSources/9781538610343"><bdi>9781538610343</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:4728736">4728736</a>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20210509123157/https://d1bxh8uas1mnw7.cloudfront.net/assets/embed.js">Archived</a> from the original on 2021-05-09<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-11-12</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Deep+Convolutional+Neural+Networks+for+Detecting+Cellular+Changes+Due+to+Malignancy&rft.btitle=2017+IEEE+International+Conference+on+Computer+Vision+Workshops+%28ICCVW%29&rft.pages=%3Cspan+class%3D%22nowrap%22%3E82-%3C%2Fspan%3E89&rft.date=2017&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A4728736%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2FICCVW.2017.18&rft.isbn=9781538610343&rft.aulast=Forslid&rft.aufirst=Gustav&rft.au=Wieslander%2C+Hakan&rft.au=Bengtsson%2C+Ewert&rft.au=Wahlby%2C+Carolina&rft.au=Hirsch%2C+Jan-Michael&rft.au=Stark%2C+Christina+Runow&rft.au=Sadanandan%2C+Sajith+Kecheril&rft_id=http%3A%2F%2Furn.kb.se%2Fresolve%3Furn%3Durn%3Anbn%3Ase%3Auu%3Adiva-326160&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-232"><span class="mw-cite-backlink"><b><a href="#cite_ref-232">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFDongZhouWangPeng2020" class="citation journal cs1">Dong, Xin; Zhou, Yizhao; Wang, Lantian; Peng, Jingfeng; Lou, Yanbo; Fan, Yiqun (2020). <a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FACCESS.2020.3006362">"Liver Cancer Detection Using Hybridized Fully Convolutional Neural Network Based on Deep Learning Framework"</a>. <i>IEEE Access</i>. <b>8</b>: <span class="nowrap">129889–</span>129898. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2020IEEEA...8l9889D">2020IEEEA...8l9889D</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FACCESS.2020.3006362">10.1109/ACCESS.2020.3006362</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2169-3536">2169-3536</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:220733699">220733699</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Access&rft.atitle=Liver+Cancer+Detection+Using+Hybridized+Fully+Convolutional+Neural+Network+Based+on+Deep+Learning+Framework&rft.volume=8&rft.pages=%3Cspan+class%3D%22nowrap%22%3E129889-%3C%2Fspan%3E129898&rft.date=2020&rft_id=info%3Adoi%2F10.1109%2FACCESS.2020.3006362&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A220733699%23id-name%3DS2CID&rft.issn=2169-3536&rft_id=info%3Abibcode%2F2020IEEEA...8l9889D&rft.aulast=Dong&rft.aufirst=Xin&rft.au=Zhou%2C+Yizhao&rft.au=Wang%2C+Lantian&rft.au=Peng%2C+Jingfeng&rft.au=Lou%2C+Yanbo&rft.au=Fan%2C+Yiqun&rft_id=https%3A%2F%2Fdoi.org%2F10.1109%252FACCESS.2020.3006362&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-233"><span class="mw-cite-backlink"><b><a href="#cite_ref-233">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFLyakhovLyakhovaNagornov2022" class="citation journal cs1">Lyakhov, Pavel Alekseevich; Lyakhova, Ulyana Alekseevna; Nagornov, Nikolay Nikolaevich (2022-04-03). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8997449">"System for the Recognizing of Pigmented Skin Lesions with Fusion and Analysis of Heterogeneous Data Based on a Multimodal Neural Network"</a>. <i>Cancers</i>. <b>14</b> (7): 1819. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Fcancers14071819">10.3390/cancers14071819</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2072-6694">2072-6694</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8997449">8997449</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/35406591">35406591</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Cancers&rft.atitle=System+for+the+Recognizing+of+Pigmented+Skin+Lesions+with+Fusion+and+Analysis+of+Heterogeneous+Data+Based+on+a+Multimodal+Neural+Network&rft.volume=14&rft.issue=7&rft.pages=1819&rft.date=2022-04-03&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC8997449%23id-name%3DPMC&rft.issn=2072-6694&rft_id=info%3Apmid%2F35406591&rft_id=info%3Adoi%2F10.3390%2Fcancers14071819&rft.aulast=Lyakhov&rft.aufirst=Pavel+Alekseevich&rft.au=Lyakhova%2C+Ulyana+Alekseevna&rft.au=Nagornov%2C+Nikolay+Nikolaevich&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC8997449&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-234"><span class="mw-cite-backlink"><b><a href="#cite_ref-234">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFDeMaityGoelShitole2017" class="citation book cs1">De, Shaunak; Maity, Abhishek; Goel, Vritti; Shitole, Sanjay; Bhattacharya, Avik (2017). "Predicting the popularity of instagram posts for a lifestyle magazine using deep learning". <i>2017 2nd International Conference on Communication Systems, Computing and IT Applications (CSCITA)</i>. pp. <span class="nowrap">174–</span>177. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FCSCITA.2017.8066548">10.1109/CSCITA.2017.8066548</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-5090-4381-1" title="Special:BookSources/978-1-5090-4381-1"><bdi>978-1-5090-4381-1</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:35350962">35350962</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Predicting+the+popularity+of+instagram+posts+for+a+lifestyle+magazine+using+deep+learning&rft.btitle=2017+2nd+International+Conference+on+Communication+Systems%2C+Computing+and+IT+Applications+%28CSCITA%29&rft.pages=%3Cspan+class%3D%22nowrap%22%3E174-%3C%2Fspan%3E177&rft.date=2017&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A35350962%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2FCSCITA.2017.8066548&rft.isbn=978-1-5090-4381-1&rft.aulast=De&rft.aufirst=Shaunak&rft.au=Maity%2C+Abhishek&rft.au=Goel%2C+Vritti&rft.au=Shitole%2C+Sanjay&rft.au=Bhattacharya%2C+Avik&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-235"><span class="mw-cite-backlink"><b><a href="#cite_ref-235">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://blog.floydhub.com/colorizing-and-restoring-old-images-with-deep-learning/">"Colorizing and Restoring Old Images with Deep Learning"</a>. <i>FloydHub Blog</i>. 13 November 2018. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20191011162814/https://blog.floydhub.com/colorizing-and-restoring-old-images-with-deep-learning/">Archived</a> from the original on 11 October 2019<span class="reference-accessdate">. Retrieved <span class="nowrap">11 October</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=FloydHub+Blog&rft.atitle=Colorizing+and+Restoring+Old+Images+with+Deep+Learning&rft.date=2018-11-13&rft_id=https%3A%2F%2Fblog.floydhub.com%2Fcolorizing-and-restoring-old-images-with-deep-learning%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-236"><span class="mw-cite-backlink"><b><a href="#cite_ref-236">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSchmidtRoth" class="citation conference cs1">Schmidt, Uwe; Roth, Stefan. <a rel="nofollow" class="external text" href="http://research.uweschmidt.org/pubs/cvpr14schmidt.pdf"><i>Shrinkage Fields for Effective Image Restoration</i></a> <span class="cs1-format">(PDF)</span>. Computer Vision and Pattern Recognition (CVPR), 2014 IEEE Conference on. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180102013217/http://research.uweschmidt.org/pubs/cvpr14schmidt.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2018-01-02<span class="reference-accessdate">. Retrieved <span class="nowrap">2018-01-01</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=conference&rft.btitle=Shrinkage+Fields+for+Effective+Image+Restoration&rft.aulast=Schmidt&rft.aufirst=Uwe&rft.au=Roth%2C+Stefan&rft_id=http%3A%2F%2Fresearch.uweschmidt.org%2Fpubs%2Fcvpr14schmidt.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-237"><span class="mw-cite-backlink"><b><a href="#cite_ref-237">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFKleanthousChatzis2020" class="citation journal cs1">Kleanthous, Christos; Chatzis, Sotirios (2020). "Gated Mixture Variational Autoencoders for Value Added Tax audit case selection". <i>Knowledge-Based Systems</i>. <b>188</b>: 105048. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.knosys.2019.105048">10.1016/j.knosys.2019.105048</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:204092079">204092079</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Knowledge-Based+Systems&rft.atitle=Gated+Mixture+Variational+Autoencoders+for+Value+Added+Tax+audit+case+selection&rft.volume=188&rft.pages=105048&rft.date=2020&rft_id=info%3Adoi%2F10.1016%2Fj.knosys.2019.105048&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A204092079%23id-name%3DS2CID&rft.aulast=Kleanthous&rft.aufirst=Christos&rft.au=Chatzis%2C+Sotirios&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-238"><span class="mw-cite-backlink"><b><a href="#cite_ref-238">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFCzech2018" class="citation journal cs1">Czech, Tomasz (28 June 2018). <a rel="nofollow" class="external text" href="https://www.globalbankingandfinance.com/deep-learning-the-next-frontier-for-money-laundering-detection/">"Deep learning: the next frontier for money laundering detection"</a>. <i>Global Banking and Finance Review</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20181116082711/https://www.globalbankingandfinance.com/deep-learning-the-next-frontier-for-money-laundering-detection/">Archived</a> from the original on 2018-11-16<span class="reference-accessdate">. Retrieved <span class="nowrap">2018-07-15</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Global+Banking+and+Finance+Review&rft.atitle=Deep+learning%3A+the+next+frontier+for+money+laundering+detection&rft.date=2018-06-28&rft.aulast=Czech&rft.aufirst=Tomasz&rft_id=https%3A%2F%2Fwww.globalbankingandfinance.com%2Fdeep-learning-the-next-frontier-for-money-laundering-detection%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-239"><span class="mw-cite-backlink"><b><a href="#cite_ref-239">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFNuñez2023" class="citation web cs1">Nuñez, Michael (2023-11-29). <a rel="nofollow" class="external text" href="https://venturebeat.com/ai/google-deepminds-materials-ai-has-already-discovered-2-2-million-new-crystals/">"Google DeepMind's materials AI has already discovered 2.2 million new crystals"</a>. <i>VentureBeat</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">2023-12-19</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=VentureBeat&rft.atitle=Google+DeepMind%27s+materials+AI+has+already+discovered+2.2+million+new+crystals&rft.date=2023-11-29&rft.aulast=Nu%C3%B1ez&rft.aufirst=Michael&rft_id=https%3A%2F%2Fventurebeat.com%2Fai%2Fgoogle-deepminds-materials-ai-has-already-discovered-2-2-million-new-crystals%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-240"><span class="mw-cite-backlink"><b><a href="#cite_ref-240">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFMerchantBatznerSchoenholzAykol2023" class="citation journal cs1">Merchant, Amil; Batzner, Simon; Schoenholz, Samuel S.; Aykol, Muratahan; Cheon, Gowoon; Cubuk, Ekin Dogus (December 2023). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10700131">"Scaling deep learning for materials discovery"</a>. <i>Nature</i>. <b>624</b> (7990): <span class="nowrap">80–</span>85. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2023Natur.624...80M">2023Natur.624...80M</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fs41586-023-06735-9">10.1038/s41586-023-06735-9</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1476-4687">1476-4687</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10700131">10700131</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/38030720">38030720</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature&rft.atitle=Scaling+deep+learning+for+materials+discovery&rft.volume=624&rft.issue=7990&rft.pages=%3Cspan+class%3D%22nowrap%22%3E80-%3C%2Fspan%3E85&rft.date=2023-12&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC10700131%23id-name%3DPMC&rft_id=info%3Abibcode%2F2023Natur.624...80M&rft_id=info%3Apmid%2F38030720&rft_id=info%3Adoi%2F10.1038%2Fs41586-023-06735-9&rft.issn=1476-4687&rft.aulast=Merchant&rft.aufirst=Amil&rft.au=Batzner%2C+Simon&rft.au=Schoenholz%2C+Samuel+S.&rft.au=Aykol%2C+Muratahan&rft.au=Cheon%2C+Gowoon&rft.au=Cubuk%2C+Ekin+Dogus&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC10700131&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-241"><span class="mw-cite-backlink"><b><a href="#cite_ref-241">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFPeplow2023" class="citation journal cs1">Peplow, Mark (2023-11-29). 
<a rel="nofollow" class="external text" href="https://www.nature.com/articles/d41586-023-03745-5">"Google AI and robots join forces to build new materials"</a>. <i>Nature</i>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fd41586-023-03745-5">10.1038/d41586-023-03745-5</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/38030771">38030771</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:265503872">265503872</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature&rft.atitle=Google+AI+and+robots+join+forces+to+build+new+materials&rft.date=2023-11-29&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A265503872%23id-name%3DS2CID&rft_id=info%3Apmid%2F38030771&rft_id=info%3Adoi%2F10.1038%2Fd41586-023-03745-5&rft.aulast=Peplow&rft.aufirst=Mark&rft_id=https%3A%2F%2Fwww.nature.com%2Farticles%2Fd41586-023-03745-5&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-:12-242"><span class="mw-cite-backlink">^ <a href="#cite_ref-:12_242-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:12_242-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-:12_242-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.eurekalert.org/pub_releases/2018-02/uarl-ard020218.php">"Army researchers develop new algorithms to train robots"</a>. <i>EurekAlert!</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180828035608/https://www.eurekalert.org/pub_releases/2018-02/uarl-ard020218.php">Archived</a> from the original on 28 August 2018<span class="reference-accessdate">. Retrieved <span class="nowrap">29 August</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=EurekAlert%21&rft.atitle=Army+researchers+develop+new+algorithms+to+train+robots&rft_id=https%3A%2F%2Fwww.eurekalert.org%2Fpub_releases%2F2018-02%2Fuarl-ard020218.php&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-243"><span class="mw-cite-backlink"><b><a href="#cite_ref-243">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFRaissiPerdikarisKarniadakis2019" class="citation journal cs1">Raissi, M.; Perdikaris, P.; Karniadakis, G. E. (2019-02-01). <a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.jcp.2018.10.045">"Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations"</a>. <i>Journal of Computational Physics</i>. <b>378</b>: <span class="nowrap">686–</span>707. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2019JCoPh.378..686R">2019JCoPh.378..686R</a>. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.jcp.2018.10.045">10.1016/j.jcp.2018.10.045</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0021-9991">0021-9991</a>. <a href="/wiki/OSTI_(identifier)" class="mw-redirect" title="OSTI (identifier)">OSTI</a> <a rel="nofollow" class="external text" href="https://www.osti.gov/biblio/1595805">1595805</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:57379996">57379996</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Journal+of+Computational+Physics&rft.atitle=Physics-informed+neural+networks%3A+A+deep+learning+framework+for+solving+forward+and+inverse+problems+involving+nonlinear+partial+differential+equations&rft.volume=378&rft.pages=%3Cspan+class%3D%22nowrap%22%3E686-%3C%2Fspan%3E707&rft.date=2019-02-01&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A57379996%23id-name%3DS2CID&rft_id=https%3A%2F%2Fwww.osti.gov%2Fbiblio%2F1595805%23id-name%3DOSTI&rft_id=info%3Abibcode%2F2019JCoPh.378..686R&rft.issn=0021-9991&rft_id=info%3Adoi%2F10.1016%2Fj.jcp.2018.10.045&rft.aulast=Raissi&rft.aufirst=M.&rft.au=Perdikaris%2C+P.&rft.au=Karniadakis%2C+G.+E.&rft_id=https%3A%2F%2Fdoi.org%2F10.1016%252Fj.jcp.2018.10.045&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-244"><span class="mw-cite-backlink"><b><a href="#cite_ref-244">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFMaoJagtapKarniadakis2020" class="citation journal cs1">Mao, Zhiping; Jagtap, Ameya D.; Karniadakis, George Em (2020-03-01). <a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.cma.2019.112789">"Physics-informed neural networks for high-speed flows"</a>. <i>Computer Methods in Applied Mechanics and Engineering</i>. <b>360</b>: 112789. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2020CMAME.360k2789M">2020CMAME.360k2789M</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.cma.2019.112789">10.1016/j.cma.2019.112789</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0045-7825">0045-7825</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:212755458">212755458</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Computer+Methods+in+Applied+Mechanics+and+Engineering&rft.atitle=Physics-informed+neural+networks+for+high-speed+flows&rft.volume=360&rft.pages=112789&rft.date=2020-03-01&rft_id=info%3Adoi%2F10.1016%2Fj.cma.2019.112789&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A212755458%23id-name%3DS2CID&rft.issn=0045-7825&rft_id=info%3Abibcode%2F2020CMAME.360k2789M&rft.aulast=Mao&rft.aufirst=Zhiping&rft.au=Jagtap%2C+Ameya+D.&rft.au=Karniadakis%2C+George+Em&rft_id=https%3A%2F%2Fdoi.org%2F10.1016%252Fj.cma.2019.112789&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-245"><span class="mw-cite-backlink"><b><a href="#cite_ref-245">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFRaissiYazdaniKarniadakis2020" class="citation journal cs1">Raissi, Maziar; Yazdani, Alireza; Karniadakis, George Em (2020-02-28). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7219083">"Hidden fluid mechanics: Learning velocity and pressure fields from flow visualizations"</a>. <i>Science</i>. <b>367</b> (6481): <span class="nowrap">1026–</span>1030. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2020Sci...367.1026R">2020Sci...367.1026R</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1126%2Fscience.aaw4741">10.1126/science.aaw4741</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7219083">7219083</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/32001523">32001523</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Science&rft.atitle=Hidden+fluid+mechanics%3A+Learning+velocity+and+pressure+fields+from+flow+visualizations&rft.volume=367&rft.issue=6481&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1026-%3C%2Fspan%3E1030&rft.date=2020-02-28&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7219083%23id-name%3DPMC&rft_id=info%3Apmid%2F32001523&rft_id=info%3Adoi%2F10.1126%2Fscience.aaw4741&rft_id=info%3Abibcode%2F2020Sci...367.1026R&rft.aulast=Raissi&rft.aufirst=Maziar&rft.au=Yazdani%2C+Alireza&rft.au=Karniadakis%2C+George+Em&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7219083&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Han2018-246"><span class="mw-cite-backlink"><b><a href="#cite_ref-Han2018_246-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHanJentzenE2018" class="citation journal cs1">Han, J.; Jentzen, A.; E, W. (2018). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6112690">"Solving high-dimensional partial differential equations using deep learning"</a>. <i>Proceedings of the National Academy of Sciences</i>. <b>115</b> (34): <span class="nowrap">8505–</span>8510. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1707.02568">1707.02568</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2018PNAS..115.8505H">2018PNAS..115.8505H</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1073%2Fpnas.1718942115">10.1073/pnas.1718942115</a></span>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6112690">6112690</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/30082389">30082389</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Proceedings+of+the+National+Academy+of+Sciences&rft.atitle=Solving+high-dimensional+partial+differential+equations+using+deep+learning&rft.volume=115&rft.issue=34&rft.pages=%3Cspan+class%3D%22nowrap%22%3E8505-%3C%2Fspan%3E8510&rft.date=2018&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC6112690%23id-name%3DPMC&rft_id=info%3Abibcode%2F2018PNAS..115.8505H&rft_id=info%3Aarxiv%2F1707.02568&rft_id=info%3Apmid%2F30082389&rft_id=info%3Adoi%2F10.1073%2Fpnas.1718942115&rft.aulast=Han&rft.aufirst=J.&rft.au=Jentzen%2C+A.&rft.au=E%2C+W.&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC6112690&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-247"><span class="mw-cite-backlink"><b><a href="#cite_ref-247">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFOktemKarBezekKamalabadi2021" class="citation journal cs1">Oktem, Figen S.; Kar, Oğuzhan Fatih; Bezek, Can Deniz; Kamalabadi, Farzad (2021). <a rel="nofollow" class="external text" href="https://ieeexplore.ieee.org/document/9415140">"High-Resolution Multi-Spectral Imaging With Diffractive Lenses and Learned Reconstruction"</a>. <i>IEEE Transactions on Computational Imaging</i>. <b>7</b>: <span class="nowrap">489–</span>504. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2008.11625">2008.11625</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FTCI.2021.3075349">10.1109/TCI.2021.3075349</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2333-9403">2333-9403</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:235340737">235340737</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Transactions+on+Computational+Imaging&rft.atitle=High-Resolution+Multi-Spectral+Imaging+With+Diffractive+Lenses+and+Learned+Reconstruction&rft.volume=7&rft.pages=%3Cspan+class%3D%22nowrap%22%3E489-%3C%2Fspan%3E504&rft.date=2021&rft_id=info%3Aarxiv%2F2008.11625&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A235340737%23id-name%3DS2CID&rft.issn=2333-9403&rft_id=info%3Adoi%2F10.1109%2FTCI.2021.3075349&rft.aulast=Oktem&rft.aufirst=Figen+S.&rft.au=Kar%2C+O%C4%9Fuzhan+Fatih&rft.au=Bezek%2C+Can+Deniz&rft.au=Kamalabadi%2C+Farzad&rft_id=https%3A%2F%2Fieeexplore.ieee.org%2Fdocument%2F9415140&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-248"><span class="mw-cite-backlink"><b><a href="#cite_ref-248">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFBernhardtVishnevskiyRauGoksel2020" class="citation journal cs1">Bernhardt, Melanie; Vishnevskiy, Valery; Rau, Richard; Goksel, Orcun (December 2020). <a rel="nofollow" class="external text" href="https://ieeexplore.ieee.org/document/9144249">"Training Variational Networks With Multidomain Simulations: Speed-of-Sound Image Reconstruction"</a>. <i>IEEE Transactions on Ultrasonics, Ferroelectrics, and Frequency Control</i>. <b>67</b> (12): <span class="nowrap">2584–</span>2594. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2006.14395">2006.14395</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FTUFFC.2020.3010186">10.1109/TUFFC.2020.3010186</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1525-8955">1525-8955</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/32746211">32746211</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:220055785">220055785</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Transactions+on+Ultrasonics%2C+Ferroelectrics%2C+and+Frequency+Control&rft.atitle=Training+Variational+Networks+With+Multidomain+Simulations%3A+Speed-of-Sound+Image+Reconstruction&rft.volume=67&rft.issue=12&rft.pages=%3Cspan+class%3D%22nowrap%22%3E2584-%3C%2Fspan%3E2594&rft.date=2020-12&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A220055785%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2FTUFFC.2020.3010186&rft_id=info%3Aarxiv%2F2006.14395&rft.issn=1525-8955&rft_id=info%3Apmid%2F32746211&rft.aulast=Bernhardt&rft.aufirst=Melanie&rft.au=Vishnevskiy%2C+Valery&rft.au=Rau%2C+Richard&rft.au=Goksel%2C+Orcun&rft_id=https%3A%2F%2Fieeexplore.ieee.org%2Fdocument%2F9144249&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-249"><span class="mw-cite-backlink"><b><a href="#cite_ref-249">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFLamSanchez-GonzalezWillsonWirnsberger2023" class="citation journal cs1">Lam, Remi; Sanchez-Gonzalez, Alvaro; Willson, Matthew; Wirnsberger, Peter; Fortunato, Meire; Alet, Ferran; Ravuri, Suman; Ewalds, Timo; Eaton-Rosen, Zach; Hu, Weihua; Merose, Alexander; Hoyer, Stephan; Holland, George; Vinyals, Oriol; Stott, Jacklynn (2023-12-22). <a rel="nofollow" class="external text" href="https://doi.org/10.1126%2Fscience.adi2336">"Learning skillful medium-range global weather forecasting"</a>. <i>Science</i>. <b>382</b> (6677): <span class="nowrap">1416–</span>1421. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2212.12794">2212.12794</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2023Sci...382.1416L">2023Sci...382.1416L</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1126%2Fscience.adi2336">10.1126/science.adi2336</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0036-8075">0036-8075</a>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/37962497">37962497</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Science&rft.atitle=Learning+skillful+medium-range+global+weather+forecasting&rft.volume=382&rft.issue=6677&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1416-%3C%2Fspan%3E1421&rft.date=2023-12-22&rft_id=info%3Abibcode%2F2023Sci...382.1416L&rft_id=info%3Aarxiv%2F2212.12794&rft_id=info%3Apmid%2F37962497&rft_id=info%3Adoi%2F10.1126%2Fscience.adi2336&rft.issn=0036-8075&rft.aulast=Lam&rft.aufirst=Remi&rft.au=Sanchez-Gonzalez%2C+Alvaro&rft.au=Willson%2C+Matthew&rft.au=Wirnsberger%2C+Peter&rft.au=Fortunato%2C+Meire&rft.au=Alet%2C+Ferran&rft.au=Ravuri%2C+Suman&rft.au=Ewalds%2C+Timo&rft.au=Eaton-Rosen%2C+Zach&rft.au=Hu%2C+Weihua&rft.au=Merose%2C+Alexander&rft.au=Hoyer%2C+Stephan&rft.au=Holland%2C+George&rft.au=Vinyals%2C+Oriol&rft.au=Stott%2C+Jacklynn&rft_id=https%3A%2F%2Fdoi.org%2F10.1126%252Fscience.adi2336&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-250"><span class="mw-cite-backlink"><b><a href="#cite_ref-250">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSivakumar2023" class="citation web cs1">Sivakumar, Ramakrishnan (2023-11-27). <a rel="nofollow" class="external text" href="https://ramkrishna2910.medium.com/graphcast-a-breakthrough-in-weather-forecasting-d70fae9ac365">"GraphCast: A breakthrough in Weather Forecasting"</a>. <i>Medium</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2024-05-19</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Medium&rft.atitle=GraphCast%3A+A+breakthrough+in+Weather+Forecasting&rft.date=2023-11-27&rft.aulast=Sivakumar&rft.aufirst=Ramakrishnan&rft_id=https%3A%2F%2Framkrishna2910.medium.com%2Fgraphcast-a-breakthrough-in-weather-forecasting-d70fae9ac365&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-251"><span class="mw-cite-backlink"><b><a href="#cite_ref-251">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGalkinMamoshinaKochetovSidorenko2020" class="citation journal cs1">Galkin, F.; Mamoshina, P.; Kochetov, K.; Sidorenko, D.; Zhavoronkov, A. (2020). <a rel="nofollow" class="external text" href="https://doi.org/10.14336%2FAD">"DeepMAge: A Methylation Aging Clock Developed with Deep Learning"</a>. <i>Aging and Disease</i>. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.14336%2FAD">10.14336/AD</a></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Aging+and+Disease&rft.atitle=DeepMAge%3A+A+Methylation+Aging+Clock+Developed+with+Deep+Learning&rft.date=2020&rft_id=info%3Adoi%2F10.14336%2FAD&rft.aulast=Galkin&rft.aufirst=F.&rft.au=Mamoshina%2C+P.&rft.au=Kochetov%2C+K.&rft.au=Sidorenko%2C+D.&rft.au=Zhavoronkov%2C+A.&rft_id=https%3A%2F%2Fdoi.org%2F10.14336%252FAD&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-UTGOFF-252"><span class="mw-cite-backlink"><b><a href="#cite_ref-UTGOFF_252-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFUtgoffStracuzzi2002" class="citation journal cs1">Utgoff, P. E.; Stracuzzi, D. J. (2002). "Many-layered learning". <i>Neural Computation</i>. <b>14</b> (10): <span class="nowrap">2497–</span>2529. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1162%2F08997660260293319">10.1162/08997660260293319</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/12396572">12396572</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:1119517">1119517</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Neural+Computation&rft.atitle=Many-layered+learning&rft.volume=14&rft.issue=10&rft.pages=%3Cspan+class%3D%22nowrap%22%3E2497-%3C%2Fspan%3E2529&rft.date=2002&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A1119517%23id-name%3DS2CID&rft_id=info%3Apmid%2F12396572&rft_id=info%3Adoi%2F10.1162%2F08997660260293319&rft.aulast=Utgoff&rft.aufirst=P.+E.&rft.au=Stracuzzi%2C+D.+J.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-ELMAN-253"><span class="mw-cite-backlink"><b><a href="#cite_ref-ELMAN_253-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFElman1998" class="citation book cs1">Elman, Jeffrey L. (1998). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=vELaRu_MrwoC"><i>Rethinking Innateness: A Connectionist Perspective on Development</i></a>. MIT Press. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-262-55030-7" title="Special:BookSources/978-0-262-55030-7"><bdi>978-0-262-55030-7</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Rethinking+Innateness%3A+A+Connectionist+Perspective+on+Development&rft.pub=MIT+Press&rft.date=1998&rft.isbn=978-0-262-55030-7&rft.aulast=Elman&rft.aufirst=Jeffrey+L.&rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3DvELaRu_MrwoC&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-SHRAGER-254"><span class="mw-cite-backlink"><b><a href="#cite_ref-SHRAGER_254-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFShragerJohnson1996" class="citation journal cs1">Shrager, J.; Johnson, MH (1996). "Dynamic plasticity influences the emergence of function in a simple cortical array". <i>Neural Networks</i>. <b>9</b> (7): <span class="nowrap">1119–</span>1129. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2F0893-6080%2896%2900033-0">10.1016/0893-6080(96)00033-0</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/12662587">12662587</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Neural+Networks&rft.atitle=Dynamic+plasticity+influences+the+emergence+of+function+in+a+simple+cortical+array&rft.volume=9&rft.issue=7&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1119-%3C%2Fspan%3E1129&rft.date=1996&rft_id=info%3Adoi%2F10.1016%2F0893-6080%2896%2900033-0&rft_id=info%3Apmid%2F12662587&rft.aulast=Shrager&rft.aufirst=J.&rft.au=Johnson%2C+MH&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-QUARTZ-255"><span class="mw-cite-backlink"><b><a href="#cite_ref-QUARTZ_255-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFQuartzSejnowski1997" class="citation journal cs1">Quartz, SR; Sejnowski, TJ (1997). "The neural basis of cognitive development: A constructivist manifesto". <i>Behavioral and Brain Sciences</i>. <b>20</b> (4): <span class="nowrap">537–</span>556. <a href="/wiki/CiteSeerX_(identifier)" class="mw-redirect" title="CiteSeerX (identifier)">CiteSeerX</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.41.7854">10.1.1.41.7854</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1017%2Fs0140525x97001581">10.1017/s0140525x97001581</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/10097006">10097006</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:5818342">5818342</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Behavioral+and+Brain+Sciences&rft.atitle=The+neural+basis+of+cognitive+development%3A+A+constructivist+manifesto&rft.volume=20&rft.issue=4&rft.pages=%3Cspan+class%3D%22nowrap%22%3E537-%3C%2Fspan%3E556&rft.date=1997&rft_id=https%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.41.7854%23id-name%3DCiteSeerX&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A5818342%23id-name%3DS2CID&rft_id=info%3Apmid%2F10097006&rft_id=info%3Adoi%2F10.1017%2Fs0140525x97001581&rft.aulast=Quartz&rft.aufirst=SR&rft.au=Sejnowski%2C+TJ&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-BLAKESLEE-256"><span class="mw-cite-backlink"><b><a href="#cite_ref-BLAKESLEE_256-0">^</a></b></span> <span class="reference-text">S. Blakeslee, "In brain's early growth, timetable may be critical", <i>The New York Times, Science Section</i>, pp. B5–B6, 1995.</span> </li> <li id="cite_note-257"><span class="mw-cite-backlink"><b><a href="#cite_ref-257">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFMazzoniAndersenJordan1991" class="citation journal cs1">Mazzoni, P.; Andersen, R. A.; Jordan, M. I. (15 May 1991). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC51674">"A more biologically plausible learning rule for neural networks"</a>. <i>Proceedings of the National Academy of Sciences</i>. <b>88</b> (10): <span class="nowrap">4433–</span>4437. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/1991PNAS...88.4433M">1991PNAS...88.4433M</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1073%2Fpnas.88.10.4433">10.1073/pnas.88.10.4433</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0027-8424">0027-8424</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC51674">51674</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/1903542">1903542</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Proceedings+of+the+National+Academy+of+Sciences&rft.atitle=A+more+biologically+plausible+learning+rule+for+neural+networks.&rft.volume=88&rft.issue=10&rft.pages=%3Cspan+class%3D%22nowrap%22%3E4433-%3C%2Fspan%3E4437&rft.date=1991-05-15&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC51674%23id-name%3DPMC&rft_id=info%3Abibcode%2F1991PNAS...88.4433M&rft_id=info%3Apmid%2F1903542&rft_id=info%3Adoi%2F10.1073%2Fpnas.88.10.4433&rft.issn=0027-8424&rft.aulast=Mazzoni&rft.aufirst=P.&rft.au=Andersen%2C+R.+A.&rft.au=Jordan%2C+M.+I.&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC51674&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-258"><span class="mw-cite-backlink"><b><a href="#cite_ref-258">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFO'Reilly1996" class="citation journal cs1">O'Reilly, Randall C. (1 July 1996). "Biologically Plausible Error-Driven Learning Using Local Activation Differences: The Generalized Recirculation Algorithm". <i>Neural Computation</i>. <b>8</b> (5): <span class="nowrap">895–</span>938. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1162%2Fneco.1996.8.5.895">10.1162/neco.1996.8.5.895</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0899-7667">0899-7667</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:2376781">2376781</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Neural+Computation&rft.atitle=Biologically+Plausible+Error-Driven+Learning+Using+Local+Activation+Differences%3A+The+Generalized+Recirculation+Algorithm&rft.volume=8&rft.issue=5&rft.pages=%3Cspan+class%3D%22nowrap%22%3E895-%3C%2Fspan%3E938&rft.date=1996-07-01&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A2376781%23id-name%3DS2CID&rft.issn=0899-7667&rft_id=info%3Adoi%2F10.1162%2Fneco.1996.8.5.895&rft.aulast=O%27Reilly&rft.aufirst=Randall+C.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-259"><span class="mw-cite-backlink"><b><a href="#cite_ref-259">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFTestolinZorzi2016" class="citation journal cs1">Testolin, Alberto; Zorzi, Marco (2016). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4943066">"Probabilistic Models and Generative Neural Networks: Towards an Unified Framework for Modeling Normal and Impaired Neurocognitive Functions"</a>. <i>Frontiers in Computational Neuroscience</i>. <b>10</b>: 73. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.3389%2Ffncom.2016.00073">10.3389/fncom.2016.00073</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1662-5188">1662-5188</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4943066">4943066</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/27468262">27468262</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:9868901">9868901</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Frontiers+in+Computational+Neuroscience&rft.atitle=Probabilistic+Models+and+Generative+Neural+Networks%3A+Towards+an+Unified+Framework+for+Modeling+Normal+and+Impaired+Neurocognitive+Functions&rft.volume=10&rft.pages=73&rft.date=2016&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC4943066%23id-name%3DPMC&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A9868901%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.3389%2Ffncom.2016.00073&rft.issn=1662-5188&rft_id=info%3Apmid%2F27468262&rft.aulast=Testolin&rft.aufirst=Alberto&rft.au=Zorzi%2C+Marco&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC4943066&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-260"><span class="mw-cite-backlink"><b><a href="#cite_ref-260">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFTestolinStoianovZorzi2017" class="citation journal cs1">Testolin, Alberto; Stoianov, Ivilin; Zorzi, Marco (September 2017). "Letter perception emerges from unsupervised deep learning and recycling of natural image features". <i>Nature Human Behaviour</i>. <b>1</b> (9): <span class="nowrap">657–</span>664. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fs41562-017-0186-2">10.1038/s41562-017-0186-2</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2397-3374">2397-3374</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/31024135">31024135</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:24504018">24504018</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature+Human+Behaviour&rft.atitle=Letter+perception+emerges+from+unsupervised+deep+learning+and+recycling+of+natural+image+features&rft.volume=1&rft.issue=9&rft.pages=%3Cspan+class%3D%22nowrap%22%3E657-%3C%2Fspan%3E664&rft.date=2017-09&rft.issn=2397-3374&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A24504018%23id-name%3DS2CID&rft_id=info%3Apmid%2F31024135&rft_id=info%3Adoi%2F10.1038%2Fs41562-017-0186-2&rft.aulast=Testolin&rft.aufirst=Alberto&rft.au=Stoianov%2C+Ivilin&rft.au=Zorzi%2C+Marco&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-261"><span class="mw-cite-backlink"><b><a href="#cite_ref-261">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFBuesingBillNesslerMaass2011" class="citation journal cs1">Buesing, Lars; Bill, Johannes; Nessler, Bernhard; Maass, Wolfgang (3 November 2011). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3207943">"Neural Dynamics as Sampling: A Model for Stochastic Computation in Recurrent Networks of Spiking Neurons"</a>. <i>PLOS Computational Biology</i>. <b>7</b> (11): e1002211. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2011PLSCB...7E2211B">2011PLSCB...7E2211B</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1371%2Fjournal.pcbi.1002211">10.1371/journal.pcbi.1002211</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1553-7358">1553-7358</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3207943">3207943</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/22096452">22096452</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:7504633">7504633</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=PLOS+Computational+Biology&rft.atitle=Neural+Dynamics+as+Sampling%3A+A+Model+for+Stochastic+Computation+in+Recurrent+Networks+of+Spiking+Neurons&rft.volume=7&rft.issue=11&rft.pages=e1002211&rft.date=2011-11-03&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC3207943%23id-name%3DPMC&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A7504633%23id-name%3DS2CID&rft_id=info%3Abibcode%2F2011PLSCB...7E2211B&rft.issn=1553-7358&rft_id=info%3Adoi%2F10.1371%2Fjournal.pcbi.1002211&rft_id=info%3Apmid%2F22096452&rft.aulast=Buesing&rft.aufirst=Lars&rft.au=Bill%2C+Johannes&rft.au=Nessler%2C+Bernhard&rft.au=Maass%2C+Wolfgang&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC3207943&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-262"><span class="mw-cite-backlink"><b><a href="#cite_ref-262">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFCashYuste1999" class="citation journal cs1">Cash, S.; Yuste, R. (February 1999). <a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fs0896-6273%2800%2981098-3">"Linear summation of excitatory inputs by CA1 pyramidal neurons"</a>. <i>Neuron</i>. <b>22</b> (2): <span class="nowrap">383–</span>394. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fs0896-6273%2800%2981098-3">10.1016/s0896-6273(00)81098-3</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0896-6273">0896-6273</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/10069343">10069343</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:14663106">14663106</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Neuron&rft.atitle=Linear+summation+of+excitatory+inputs+by+CA1+pyramidal+neurons&rft.volume=22&rft.issue=2&rft.pages=%3Cspan+class%3D%22nowrap%22%3E383-%3C%2Fspan%3E394&rft.date=1999-02&rft.issn=0896-6273&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A14663106%23id-name%3DS2CID&rft_id=info%3Apmid%2F10069343&rft_id=info%3Adoi%2F10.1016%2Fs0896-6273%2800%2981098-3&rft.aulast=Cash&rft.aufirst=S.&rft.au=Yuste%2C+R.&rft_id=https%3A%2F%2Fdoi.org%2F10.1016%252Fs0896-6273%252800%252981098-3&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-263"><span class="mw-cite-backlink"><b><a href="#cite_ref-263">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFOlshausenField2004" class="citation journal cs1">Olshausen, B; Field, D (1 August 2004). "Sparse coding of sensory inputs". <i>Current Opinion in Neurobiology</i>. <b>14</b> (4): <span class="nowrap">481–</span>487. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.conb.2004.07.007">10.1016/j.conb.2004.07.007</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0959-4388">0959-4388</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/15321069">15321069</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:16560320">16560320</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Current+Opinion+in+Neurobiology&rft.atitle=Sparse+coding+of+sensory+inputs&rft.volume=14&rft.issue=4&rft.pages=%3Cspan+class%3D%22nowrap%22%3E481-%3C%2Fspan%3E487&rft.date=2004-08-01&rft.issn=0959-4388&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A16560320%23id-name%3DS2CID&rft_id=info%3Apmid%2F15321069&rft_id=info%3Adoi%2F10.1016%2Fj.conb.2004.07.007&rft.aulast=Olshausen&rft.aufirst=B&rft.au=Field%2C+D&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-264"><span class="mw-cite-backlink"><b><a href="#cite_ref-264">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFYaminsDiCarlo2016" class="citation journal cs1">Yamins, Daniel L K; DiCarlo, James J (March 2016). "Using goal-driven deep learning models to understand sensory cortex". <i>Nature Neuroscience</i>. <b>19</b> (3): <span class="nowrap">356–</span>365. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fnn.4244">10.1038/nn.4244</a>. 
<a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1546-1726">1546-1726</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/26906502">26906502</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:16970545">16970545</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature+Neuroscience&rft.atitle=Using+goal-driven+deep+learning+models+to+understand+sensory+cortex&rft.volume=19&rft.issue=3&rft.pages=%3Cspan+class%3D%22nowrap%22%3E356-%3C%2Fspan%3E365&rft.date=2016-03&rft.issn=1546-1726&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A16970545%23id-name%3DS2CID&rft_id=info%3Apmid%2F26906502&rft_id=info%3Adoi%2F10.1038%2Fnn.4244&rft.aulast=Yamins&rft.aufirst=Daniel+L+K&rft.au=DiCarlo%2C+James+J&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-265"><span class="mw-cite-backlink"><b><a href="#cite_ref-265">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFZorziTestolin2018" class="citation journal cs1">Zorzi, Marco; Testolin, Alberto (19 February 2018). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5784047">"An emergentist perspective on the origin of number sense"</a>. <i>Phil. Trans. R. Soc. B</i>. <b>373</b> (1740): 20170043. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1098%2Frstb.2017.0043">10.1098/rstb.2017.0043</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0962-8436">0962-8436</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5784047">5784047</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/29292348">29292348</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:39281431">39281431</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Phil.+Trans.+R.+Soc.+B&rft.atitle=An+emergentist+perspective+on+the+origin+of+number+sense&rft.volume=373&rft.issue=1740&rft.pages=20170043&rft.date=2018-02-19&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC5784047%23id-name%3DPMC&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A39281431%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1098%2Frstb.2017.0043&rft.issn=0962-8436&rft_id=info%3Apmid%2F29292348&rft.aulast=Zorzi&rft.aufirst=Marco&rft.au=Testolin%2C+Alberto&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC5784047&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-266"><span class="mw-cite-backlink"><b><a href="#cite_ref-266">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGüçlüvan_Gerven2015" class="citation journal cs1">Güçlü, Umut; van Gerven, Marcel A. J. (8 July 2015). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6605414">"Deep Neural Networks Reveal a Gradient in the Complexity of Neural Representations across the Ventral Stream"</a>. <i>Journal of Neuroscience</i>. <b>35</b> (27): <span class="nowrap">10005–</span>10014. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1411.6422">1411.6422</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1523%2Fjneurosci.5023-14.2015">10.1523/jneurosci.5023-14.2015</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6605414">6605414</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/26157000">26157000</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Journal+of+Neuroscience&rft.atitle=Deep+Neural+Networks+Reveal+a+Gradient+in+the+Complexity+of+Neural+Representations+across+the+Ventral+Stream&rft.volume=35&rft.issue=27&rft.pages=%3Cspan+class%3D%22nowrap%22%3E10005-%3C%2Fspan%3E10014&rft.date=2015-07-08&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC6605414%23id-name%3DPMC&rft_id=info%3Apmid%2F26157000&rft_id=info%3Aarxiv%2F1411.6422&rft_id=info%3Adoi%2F10.1523%2Fjneurosci.5023-14.2015&rft.aulast=G%C3%BC%C3%A7l%C3%BC&rft.aufirst=Umut&rft.au=van+Gerven%2C+Marcel+A.+J.&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC6605414&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-METZ2013-267"><span class="mw-cite-backlink"><b><a href="#cite_ref-METZ2013_267-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFMetz2013" class="citation magazine cs1">Metz, C. (12 December 2013). <a rel="nofollow" class="external text" href="https://www.wired.com/wiredenterprise/2013/12/facebook-yann-lecun-qa/">"Facebook's 'Deep Learning' Guru Reveals the Future of AI"</a>. <i>Wired</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20140328071226/http://www.wired.com/wiredenterprise/2013/12/facebook-yann-lecun-qa/">Archived</a> from the original on 28 March 2014<span class="reference-accessdate">. Retrieved <span class="nowrap">26 August</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Wired&rft.atitle=Facebook%27s+%27Deep+Learning%27+Guru+Reveals+the+Future+of+AI&rft.date=2013-12-12&rft.aulast=Metz&rft.aufirst=C.&rft_id=https%3A%2F%2Fwww.wired.com%2Fwiredenterprise%2F2013%2F12%2Ffacebook-yann-lecun-qa%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-268"><span class="mw-cite-backlink"><b><a href="#cite_ref-268">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGibney2016" class="citation journal cs1">Gibney, Elizabeth (2016). <a rel="nofollow" class="external text" href="https://doi.org/10.1038%2F529445a">"Google AI algorithm masters ancient game of Go"</a>. <i>Nature</i>. <b>529</b> (7587): <span class="nowrap">445–</span>446. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2016Natur.529..445G">2016Natur.529..445G</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1038%2F529445a">10.1038/529445a</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/26819021">26819021</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:4460235">4460235</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature&rft.atitle=Google+AI+algorithm+masters+ancient+game+of+Go&rft.volume=529&rft.issue=7587&rft.pages=%3Cspan+class%3D%22nowrap%22%3E445-%3C%2Fspan%3E446&rft.date=2016&rft_id=info%3Adoi%2F10.1038%2F529445a&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A4460235%23id-name%3DS2CID&rft_id=info%3Apmid%2F26819021&rft_id=info%3Abibcode%2F2016Natur.529..445G&rft.aulast=Gibney&rft.aufirst=Elizabeth&rft_id=https%3A%2F%2Fdoi.org%2F10.1038%252F529445a&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-269"><span class="mw-cite-backlink"><b><a href="#cite_ref-269">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSilverHuangMaddisonGuez2016" class="citation journal cs1"><a href="/wiki/David_Silver_(programmer)" class="mw-redirect" title="David Silver (programmer)">Silver, David</a>; <a href="/wiki/Aja_Huang" title="Aja Huang">Huang, Aja</a>; Maddison, Chris J.; Guez, Arthur; Sifre, Laurent; Driessche, George van den; Schrittwieser, Julian; Antonoglou, Ioannis; Panneershelvam, Veda; Lanctot, Marc; Dieleman, Sander; Grewe, Dominik; Nham, John; Kalchbrenner, Nal; <a href="/wiki/Ilya_Sutskever" title="Ilya Sutskever">Sutskever, Ilya</a>; Lillicrap, Timothy; Leach, Madeleine; Kavukcuoglu, Koray; Graepel, Thore; <a href="/wiki/Demis_Hassabis" title="Demis Hassabis">Hassabis, Demis</a> (28 January 2016). "Mastering the game of Go with deep neural networks and tree search". <i><a href="/wiki/Nature_(journal)" title="Nature (journal)">Nature</a></i>. <b>529</b> (7587): <span class="nowrap">484–</span>489. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2016Natur.529..484S">2016Natur.529..484S</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fnature16961">10.1038/nature16961</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0028-0836">0028-0836</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/26819042">26819042</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:515925">515925</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature&rft.atitle=Mastering+the+game+of+Go+with+deep+neural+networks+and+tree+search&rft.volume=529&rft.issue=7587&rft.pages=%3Cspan+class%3D%22nowrap%22%3E484-%3C%2Fspan%3E489&rft.date=2016-01-28&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A515925%23id-name%3DS2CID&rft_id=info%3Abibcode%2F2016Natur.529..484S&rft.issn=0028-0836&rft_id=info%3Adoi%2F10.1038%2Fnature16961&rft_id=info%3Apmid%2F26819042&rft.aulast=Silver&rft.aufirst=David&rft.au=Huang%2C+Aja&rft.au=Maddison%2C+Chris+J.&rft.au=Guez%2C+Arthur&rft.au=Sifre%2C+Laurent&rft.au=Driessche%2C+George+van+den&rft.au=Schrittwieser%2C+Julian&rft.au=Antonoglou%2C+Ioannis&rft.au=Panneershelvam%2C+Veda&rft.au=Lanctot%2C+Marc&rft.au=Dieleman%2C+Sander&rft.au=Grewe%2C+Dominik&rft.au=Nham%2C+John&rft.au=Kalchbrenner%2C+Nal&rft.au=Sutskever%2C+Ilya&rft.au=Lillicrap%2C+Timothy&rft.au=Leach%2C+Madeleine&rft.au=Kavukcuoglu%2C+Koray&rft.au=Graepel%2C+Thore&rft.au=Hassabis%2C+Demis&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span><span style="position:relative; top: -2px;"><span typeof="mw:File"><a href="/wiki/Paywall" title="closed access publication – behind paywall"><img alt="Closed access icon" src="//upload.wikimedia.org/wikipedia/commons/thumb/0/0e/Closed_Access_logo_transparent.svg/9px-Closed_Access_logo_transparent.svg.png" decoding="async" width="9" height="14" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/0/0e/Closed_Access_logo_transparent.svg/14px-Closed_Access_logo_transparent.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/0/0e/Closed_Access_logo_transparent.svg/18px-Closed_Access_logo_transparent.svg.png 2x" data-file-width="640" data-file-height="1000" /></a></span></span></span> </li> <li id="cite_note-270"><span class="mw-cite-backlink"><b><a href="#cite_ref-270">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20160201140636/http://www.technologyreview.com/news/546066/googles-ai-masters-the-game-of-go-a-decade-earlier-than-expected/">"A Google DeepMind Algorithm Uses Deep Learning and More to Master the Game of Go | MIT Technology Review"</a>. <i>MIT Technology Review</i>. Archived from <a rel="nofollow" class="external text" href="http://www.technologyreview.com/news/546066/googles-ai-masters-the-game-of-go-a-decade-earlier-than-expected/">the original</a> on 1 February 2016<span class="reference-accessdate">. 
Retrieved <span class="nowrap">30 January</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=MIT+Technology+Review&rft.atitle=A+Google+DeepMind+Algorithm+Uses+Deep+Learning+and+More+to+Master+the+Game+of+Go+%7C+MIT+Technology+Review&rft_id=http%3A%2F%2Fwww.technologyreview.com%2Fnews%2F546066%2Fgoogles-ai-masters-the-game-of-go-a-decade-earlier-than-expected%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-271"><span class="mw-cite-backlink"><b><a href="#cite_ref-271">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFMetz2017" class="citation news cs1">Metz, Cade (6 November 2017). <a rel="nofollow" class="external text" href="https://www.nytimes.com/2017/11/06/technology/artificial-intelligence-start-up.html">"A.I. Researchers Leave Elon Musk Lab to Begin Robotics Start-Up"</a>. <i>The New York Times</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190707161547/https://www.nytimes.com/2017/11/06/technology/artificial-intelligence-start-up.html">Archived</a> from the original on 7 July 2019<span class="reference-accessdate">. Retrieved <span class="nowrap">5 July</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+New+York+Times&rft.atitle=A.I.+Researchers+Leave+Elon+Musk+Lab+to+Begin+Robotics+Start-Up&rft.date=2017-11-06&rft.aulast=Metz&rft.aufirst=Cade&rft_id=https%3A%2F%2Fwww.nytimes.com%2F2017%2F11%2F06%2Ftechnology%2Fartificial-intelligence-start-up.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-272"><span class="mw-cite-backlink"><b><a href="#cite_ref-272">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFBradley_Knox,_W.Stone,_Peter2008" class="citation book cs1">Bradley Knox, W.; Stone, Peter (2008). "TAMER: Training an Agent Manually via Evaluative Reinforcement". <i>2008 7th IEEE International Conference on Development and Learning</i>. pp. <span class="nowrap">292–</span>297. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2Fdevlrn.2008.4640845">10.1109/devlrn.2008.4640845</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4244-2661-4" title="Special:BookSources/978-1-4244-2661-4"><bdi>978-1-4244-2661-4</bdi></a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:5613334">5613334</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=TAMER%3A+Training+an+Agent+Manually+via+Evaluative+Reinforcement&rft.btitle=2008+7th+IEEE+International+Conference+on+Development+and+Learning&rft.pages=%3Cspan+class%3D%22nowrap%22%3E292-%3C%2Fspan%3E297&rft.date=2008&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A5613334%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2Fdevlrn.2008.4640845&rft.isbn=978-1-4244-2661-4&rft.au=Bradley+Knox%2C+W.&rft.au=Stone%2C+Peter&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-273"><span class="mw-cite-backlink"><b><a href="#cite_ref-273">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://governmentciomedia.com/talk-algorithms-ai-becomes-faster-learner">"Talk to the Algorithms: AI Becomes a Faster Learner"</a>. <i>governmentciomedia.com</i>. 16 May 2018. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180828001727/https://governmentciomedia.com/talk-algorithms-ai-becomes-faster-learner">Archived</a> from the original on 28 August 2018<span class="reference-accessdate">. Retrieved <span class="nowrap">29 August</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=governmentciomedia.com&rft.atitle=Talk+to+the+Algorithms%3A+AI+Becomes+a+Faster+Learner&rft.date=2018-05-16&rft_id=https%3A%2F%2Fgovernmentciomedia.com%2Ftalk-algorithms-ai-becomes-faster-learner&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-274"><span class="mw-cite-backlink"><b><a href="#cite_ref-274">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFMarcus2018" class="citation web cs1">Marcus, Gary (14 January 2018). <a rel="nofollow" class="external text" href="https://medium.com/@GaryMarcus/in-defense-of-skepticism-about-deep-learning-6e8bfd5ae0f1">"In defense of skepticism about deep learning"</a>. <i>Gary Marcus</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20181012035405/https://medium.com/@GaryMarcus/in-defense-of-skepticism-about-deep-learning-6e8bfd5ae0f1">Archived</a> from the original on 12 October 2018<span class="reference-accessdate">. 
Retrieved <span class="nowrap">11 October</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Gary+Marcus&rft.atitle=In+defense+of+skepticism+about+deep+learning&rft.date=2018-01-14&rft.aulast=Marcus&rft.aufirst=Gary&rft_id=https%3A%2F%2Fmedium.com%2F%40GaryMarcus%2Fin-defense-of-skepticism-about-deep-learning-6e8bfd5ae0f1&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-Knight_2017-275"><span class="mw-cite-backlink"><b><a href="#cite_ref-Knight_2017_275-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFKnight2017" class="citation web cs1">Knight, Will (14 March 2017). <a rel="nofollow" class="external text" href="https://www.technologyreview.com/s/603795/the-us-military-wants-its-autonomous-machines-to-explain-themselves/">"DARPA is funding projects that will try to open up AI's black boxes"</a>. <i>MIT Technology Review</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20191104033107/https://www.technologyreview.com/s/603795/the-us-military-wants-its-autonomous-machines-to-explain-themselves/">Archived</a> from the original on 4 November 2019<span class="reference-accessdate">. Retrieved <span class="nowrap">2 November</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=MIT+Technology+Review&rft.atitle=DARPA+is+funding+projects+that+will+try+to+open+up+AI%27s+black+boxes&rft.date=2017-03-14&rft.aulast=Knight&rft.aufirst=Will&rft_id=https%3A%2F%2Fwww.technologyreview.com%2Fs%2F603795%2Fthe-us-military-wants-its-autonomous-machines-to-explain-themselves%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-276"><span class="mw-cite-backlink"><b><a href="#cite_ref-276">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFAlexander_MordvintsevChristopher_OlahMike_Tyka2015" class="citation web cs1">Alexander Mordvintsev; Christopher Olah; Mike Tyka (17 June 2015). <a rel="nofollow" class="external text" href="http://googleresearch.blogspot.co.uk/2015/06/inceptionism-going-deeper-into-neural.html">"Inceptionism: Going Deeper into Neural Networks"</a>. Google Research Blog. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150703064823/http://googleresearch.blogspot.co.uk/2015/06/inceptionism-going-deeper-into-neural.html">Archived</a> from the original on 3 July 2015<span class="reference-accessdate">. 
Retrieved <span class="nowrap">20 June</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Inceptionism%3A+Going+Deeper+into+Neural+Networks&rft.pub=Google+Research+Blog&rft.date=2015-06-17&rft.au=Alexander+Mordvintsev&rft.au=Christopher+Olah&rft.au=Mike+Tyka&rft_id=http%3A%2F%2Fgoogleresearch.blogspot.co.uk%2F2015%2F06%2Finceptionism-going-deeper-into-neural.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-277"><span class="mw-cite-backlink"><b><a href="#cite_ref-277">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFAlex_Hern2015" class="citation news cs1">Alex Hern (18 June 2015). <a rel="nofollow" class="external text" href="https://www.theguardian.com/technology/2015/jun/18/google-image-recognition-neural-network-androids-dream-electric-sheep">"Yes, androids do dream of electric sheep"</a>. <i>The Guardian</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150619200845/http://www.theguardian.com/technology/2015/jun/18/google-image-recognition-neural-network-androids-dream-electric-sheep">Archived</a> from the original on 19 June 2015<span class="reference-accessdate">. Retrieved <span class="nowrap">20 June</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+Guardian&rft.atitle=Yes%2C+androids+do+dream+of+electric+sheep&rft.date=2015-06-18&rft.au=Alex+Hern&rft_id=https%3A%2F%2Fwww.theguardian.com%2Ftechnology%2F2015%2Fjun%2F18%2Fgoogle-image-recognition-neural-network-androids-dream-electric-sheep&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-goertzel-278"><span class="mw-cite-backlink">^ <a href="#cite_ref-goertzel_278-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-goertzel_278-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-goertzel_278-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGoertzel2015" class="citation web cs1">Goertzel, Ben (2015). <a rel="nofollow" class="external text" href="http://goertzel.org/DeepLearning_v1.pdf">"Are there Deep Reasons Underlying the Pathologies of Today's Deep Learning Algorithms?"</a> <span class="cs1-format">(PDF)</span>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150513053107/http://goertzel.org/DeepLearning_v1.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2015-05-13<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2015-05-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Are+there+Deep+Reasons+Underlying+the+Pathologies+of+Today%27s+Deep+Learning+Algorithms%3F&rft.date=2015&rft.aulast=Goertzel&rft.aufirst=Ben&rft_id=http%3A%2F%2Fgoertzel.org%2FDeepLearning_v1.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-279"><span class="mw-cite-backlink"><b><a href="#cite_ref-279">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFNguyenYosinskiClune2014" class="citation arxiv cs1">Nguyen, Anh; Yosinski, Jason; Clune, Jeff (2014). "Deep Neural Networks are Easily Fooled: High Confidence Predictions for Unrecognizable Images". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1412.1897">1412.1897</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Deep+Neural+Networks+are+Easily+Fooled%3A+High+Confidence+Predictions+for+Unrecognizable+Images&rft.date=2014&rft_id=info%3Aarxiv%2F1412.1897&rft.aulast=Nguyen&rft.aufirst=Anh&rft.au=Yosinski%2C+Jason&rft.au=Clune%2C+Jeff&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-280"><span class="mw-cite-backlink"><b><a href="#cite_ref-280">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFSzegedyZarembaSutskeverBruna2013" class="citation arxiv cs1">Szegedy, Christian; Zaremba, Wojciech; Sutskever, Ilya; Bruna, Joan; Erhan, Dumitru; Goodfellow, Ian; Fergus, Rob (2013). "Intriguing properties of neural networks". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1312.6199">1312.6199</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Intriguing+properties+of+neural+networks&rft.date=2013&rft_id=info%3Aarxiv%2F1312.6199&rft.aulast=Szegedy&rft.aufirst=Christian&rft.au=Zaremba%2C+Wojciech&rft.au=Sutskever%2C+Ilya&rft.au=Bruna%2C+Joan&rft.au=Erhan%2C+Dumitru&rft.au=Goodfellow%2C+Ian&rft.au=Fergus%2C+Rob&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-281"><span class="mw-cite-backlink"><b><a href="#cite_ref-281">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFZhuMumford2006" class="citation journal cs1">Zhu, S.C.; Mumford, D. (2006). "A stochastic grammar of images". <i>Found. Trends Comput. Graph. Vis</i>. <b>2</b> (4): <span class="nowrap">259–</span>362. 
<a href="/wiki/CiteSeerX_(identifier)" class="mw-redirect" title="CiteSeerX (identifier)">CiteSeerX</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.681.2190">10.1.1.681.2190</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1561%2F0600000018">10.1561/0600000018</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Found.+Trends+Comput.+Graph.+Vis.&rft.atitle=A+stochastic+grammar+of+images&rft.volume=2&rft.issue=4&rft.pages=%3Cspan+class%3D%22nowrap%22%3E259-%3C%2Fspan%3E362&rft.date=2006&rft_id=https%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.681.2190%23id-name%3DCiteSeerX&rft_id=info%3Adoi%2F10.1561%2F0600000018&rft.aulast=Zhu&rft.aufirst=S.C.&rft.au=Mumford%2C+D.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-282"><span class="mw-cite-backlink"><b><a href="#cite_ref-282">^</a></b></span> <span class="reference-text">Miller, G. A., and N. Chomsky. "Pattern conception". Paper for Conference on pattern detection, University of Michigan. 1957.</span> </li> <li id="cite_note-283"><span class="mw-cite-backlink"><b><a href="#cite_ref-283">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFEisner" class="citation web cs1">Eisner, Jason. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171230010335/http://techtalks.tv/talks/deep-learning-of-recursive-structure-grammar-induction/58089/">"Deep Learning of Recursive Structure: Grammar Induction"</a>. Archived from <a rel="nofollow" class="external text" href="http://techtalks.tv/talks/deep-learning-of-recursive-structure-grammar-induction/58089/">the original</a> on 2017-12-30<span class="reference-accessdate">. Retrieved <span class="nowrap">2015-05-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Deep+Learning+of+Recursive+Structure%3A+Grammar+Induction&rft.aulast=Eisner&rft.aufirst=Jason&rft_id=http%3A%2F%2Ftechtalks.tv%2Ftalks%2Fdeep-learning-of-recursive-structure-grammar-induction%2F58089%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-284"><span class="mw-cite-backlink"><b><a href="#cite_ref-284">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://gizmodo.com/hackers-have-already-started-to-weaponize-artificial-in-1797688425">"Hackers Have Already Started to Weaponize Artificial Intelligence"</a>. <i>Gizmodo</i>. 11 September 2017. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20191011162231/https://gizmodo.com/hackers-have-already-started-to-weaponize-artificial-in-1797688425">Archived</a> from the original on 11 October 2019<span class="reference-accessdate">. 
Retrieved <span class="nowrap">11 October</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Gizmodo&rft.atitle=Hackers+Have+Already+Started+to+Weaponize+Artificial+Intelligence&rft.date=2017-09-11&rft_id=https%3A%2F%2Fgizmodo.com%2Fhackers-have-already-started-to-weaponize-artificial-in-1797688425&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-285"><span class="mw-cite-backlink"><b><a href="#cite_ref-285">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.dailydot.com/debug/adversarial-attacks-ai-mistakes/">"How hackers can force AI to make dumb mistakes"</a>. <i>The Daily Dot</i>. 18 June 2018. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20191011162230/https://www.dailydot.com/debug/adversarial-attacks-ai-mistakes/">Archived</a> from the original on 11 October 2019<span class="reference-accessdate">. Retrieved <span class="nowrap">11 October</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=The+Daily+Dot&rft.atitle=How+hackers+can+force+AI+to+make+dumb+mistakes&rft.date=2018-06-18&rft_id=https%3A%2F%2Fwww.dailydot.com%2Fdebug%2Fadversarial-attacks-ai-mistakes%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-:4-286"><span class="mw-cite-backlink">^ <a href="#cite_ref-:4_286-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:4_286-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-:4_286-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-:4_286-3"><sup><i><b>d</b></i></sup></a> <a href="#cite_ref-:4_286-4"><sup><i><b>e</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://singularityhub.com/2017/10/10/ai-is-easy-to-fool-why-that-needs-to-change">"AI Is Easy to Fool—Why That Needs to Change"</a>. <i>Singularity Hub</i>. 10 October 2017. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171011233017/https://singularityhub.com/2017/10/10/ai-is-easy-to-fool-why-that-needs-to-change/">Archived</a> from the original on 11 October 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">11 October</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Singularity+Hub&rft.atitle=AI+Is+Easy+to+Fool%E2%80%94Why+That+Needs+to+Change&rft.date=2017-10-10&rft_id=https%3A%2F%2Fsingularityhub.com%2F2017%2F10%2F10%2Fai-is-easy-to-fool-why-that-needs-to-change&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-287"><span class="mw-cite-backlink"><b><a href="#cite_ref-287">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGibney2017" class="citation journal cs1">Gibney, Elizabeth (2017). <a rel="nofollow" class="external text" href="https://www.nature.com/news/the-scientist-who-spots-fake-videos-1.22784">"The scientist who spots fake videos"</a>. <i>Nature</i>. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fnature.2017.22784">10.1038/nature.2017.22784</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171010011017/http://www.nature.com/news/the-scientist-who-spots-fake-videos-1.22784">Archived</a> from the original on 2017-10-10<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-10-11</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature&rft.atitle=The+scientist+who+spots+fake+videos&rft.date=2017&rft_id=info%3Adoi%2F10.1038%2Fnature.2017.22784&rft.aulast=Gibney&rft.aufirst=Elizabeth&rft_id=https%3A%2F%2Fwww.nature.com%2Fnews%2Fthe-scientist-who-spots-fake-videos-1.22784&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-288"><span class="mw-cite-backlink"><b><a href="#cite_ref-288">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFTubaro2020" class="citation journal cs1">Tubaro, Paola (2020). <a rel="nofollow" class="external text" href="https://hal.science/hal-03029735">"Whose intelligence is artificial intelligence?"</a>. <i>Global Dialogue</i>: <span class="nowrap">38–</span>39.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Global+Dialogue&rft.atitle=Whose+intelligence+is+artificial+intelligence%3F&rft.pages=%3Cspan+class%3D%22nowrap%22%3E38-%3C%2Fspan%3E39&rft.date=2020&rft.aulast=Tubaro&rft.aufirst=Paola&rft_id=https%3A%2F%2Fhal.science%2Fhal-03029735&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> <li id="cite_note-:13-289"><span class="mw-cite-backlink">^ <a href="#cite_ref-:13_289-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:13_289-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFMühlhoff2019" class="citation journal cs1">Mühlhoff, Rainer (6 November 2019). <a rel="nofollow" class="external text" href="https://depositonce.tu-berlin.de/handle/11303/12510">"Human-aided artificial intelligence: Or, how to run large computations in human brains? Toward a media sociology of machine learning"</a>. <i>New Media & Society</i>. <b>22</b> (10): <span class="nowrap">1868–</span>1884. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1177%2F1461444819885334">10.1177/1461444819885334</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1461-4448">1461-4448</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:209363848">209363848</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=New+Media+%26+Society&rft.atitle=Human-aided+artificial+intelligence%3A+Or%2C+how+to+run+large+computations+in+human+brains%3F+Toward+a+media+sociology+of+machine+learning&rft.volume=22&rft.issue=10&rft.pages=%3Cspan+class%3D%22nowrap%22%3E1868-%3C%2Fspan%3E1884&rft.date=2019-11-06&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A209363848%23id-name%3DS2CID&rft.issn=1461-4448&rft_id=info%3Adoi%2F10.1177%2F1461444819885334&rft.aulast=M%C3%BChlhoff&rft.aufirst=Rainer&rft_id=https%3A%2F%2Fdepositonce.tu-berlin.de%2Fhandle%2F11303%2F12510&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></span> </li> </ol></div> <div class="mw-heading mw-heading2"><h2 id="Further_reading">Further reading</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Deep_learning&action=edit&section=42" title="Edit section: Further reading"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1239549316">.mw-parser-output .refbegin{margin-bottom:0.5em}.mw-parser-output .refbegin-hanging-indents>ul{margin-left:0}.mw-parser-output .refbegin-hanging-indents>ul>li{margin-left:0;padding-left:3.2em;text-indent:-3.2em}.mw-parser-output .refbegin-hanging-indents ul,.mw-parser-output .refbegin-hanging-indents ul li{list-style:none}@media(max-width:720px){.mw-parser-output .refbegin-hanging-indents>ul>li{padding-left:1.6em;text-indent:-1.6em}}.mw-parser-output .refbegin-columns{margin-top:0.3em}.mw-parser-output .refbegin-columns ul{margin-top:0}.mw-parser-output .refbegin-columns li{page-break-inside:avoid;break-inside:avoid-column}@media screen{.mw-parser-output .refbegin{font-size:90%}}</style><div class="refbegin" style=""> <ul><li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFBishopBishop2024" class="citation book cs1">Bishop, Christopher M.; Bishop, Hugh (2024). <i>Deep learning: foundations and concepts</i>. Springer. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3-031-45467-7" title="Special:BookSources/978-3-031-45467-7"><bdi>978-3-031-45467-7</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Deep+learning%3A+foundations+and+concepts&rft.pub=Springer&rft.date=2024&rft.isbn=978-3-031-45467-7&rft.aulast=Bishop&rft.aufirst=Christopher+M.&rft.au=Bishop%2C+Hugh&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFPrince2023" class="citation book cs1">Prince, Simon J. D. (2023). <i>Understanding deep learning</i>. The MIT Press. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9780262048644" title="Special:BookSources/9780262048644"><bdi>9780262048644</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Understanding+deep+learning&rft.pub=The+MIT+Press&rft.date=2023&rft.isbn=9780262048644&rft.aulast=Prince&rft.aufirst=Simon+J.+D.&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGoodfellowBengioCourville2016" class="citation book cs1"><a href="/wiki/Ian_Goodfellow" title="Ian Goodfellow">Goodfellow, Ian</a>; <a href="/wiki/Yoshua_Bengio" title="Yoshua Bengio">Bengio, Yoshua</a>; Courville, Aaron (2016). <a rel="nofollow" class="external text" href="http://www.deeplearningbook.org"><i>Deep Learning</i></a>. MIT Press. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-26203561-3" title="Special:BookSources/978-0-26203561-3"><bdi>978-0-26203561-3</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160416111010/http://www.deeplearningbook.org/">Archived</a> from the original on 2016-04-16<span class="reference-accessdate">. Retrieved <span class="nowrap">2021-05-09</span></span>, introductory textbook.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Deep+Learning&rft.pub=MIT+Press&rft.date=2016&rft.isbn=978-0-26203561-3&rft.aulast=Goodfellow&rft.aufirst=Ian&rft.au=Bengio%2C+Yoshua&rft.au=Courville%2C+Aaron&rft_id=http%3A%2F%2Fwww.deeplearningbook.org&rfr_id=info%3Asid%2Fen.wikipedia.org%3ADeep+learning" class="Z3988"></span><span class="cs1-maint citation-comment"><code class="cs1-code">{{<a href="/wiki/Template:Cite_book" title="Template:Cite book">cite book</a>}}</code>: CS1 maint: postscript (<a href="/wiki/Category:CS1_maint:_postscript" title="Category:CS1 maint: postscript">link</a>)</span></li></ul> </div> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374" /><style data-mw-deduplicate="TemplateStyles:r1236075235">.mw-parser-output .navbox{box-sizing:border-box;border:1px solid #a2a9b1;width:100%;clear:both;font-size:88%;text-align:center;padding:1px;margin:1em auto 0}.mw-parser-output .navbox .navbox{margin-top:0}.mw-parser-output .navbox+.navbox,.mw-parser-output .navbox+.navbox-styles+.navbox{margin-top:-1px}.mw-parser-output .navbox-inner,.mw-parser-output .navbox-subgroup{width:100%}.mw-parser-output .navbox-group,.mw-parser-output .navbox-title,.mw-parser-output .navbox-abovebelow{padding:0.25em 1em;line-height:1.5em;text-align:center}.mw-parser-output .navbox-group{white-space:nowrap;text-align:right}.mw-parser-output .navbox,.mw-parser-output .navbox-subgroup{background-color:#fdfdfd}.mw-parser-output .navbox-list{line-height:1.5em;border-color:#fdfdfd}.mw-parser-output .navbox-list-with-group{text-align:left;border-left-width:2px;border-left-style:solid}.mw-parser-output tr+tr>.navbox-abovebelow,.mw-parser-output tr+tr>.navbox-group,.mw-parser-output tr+tr>.navbox-image,.mw-parser-output tr+tr>.navbox-list{border-top:2px solid #fdfdfd}.mw-parser-output .navbox-title{background-color:#ccf}.mw-parser-output .navbox-abovebelow,.mw-parser-output .navbox-group,.mw-parser-output .navbox-subgroup 
learning","url":"https:\/\/en.wikipedia.org\/wiki\/Deep_learning","sameAs":"http:\/\/www.wikidata.org\/entity\/Q197536","mainEntity":"http:\/\/www.wikidata.org\/entity\/Q197536","author":{"@type":"Organization","name":"Contributors to Wikimedia projects"},"publisher":{"@type":"Organization","name":"Wikimedia Foundation, Inc.","logo":{"@type":"ImageObject","url":"https:\/\/www.wikimedia.org\/static\/images\/wmf-hor-googpub.png"}},"datePublished":"2011-07-20T06:24:47Z","dateModified":"2025-03-14T03:34:22Z","image":"https:\/\/upload.wikimedia.org\/wikipedia\/commons\/2\/26\/Deep_Learning.jpg","headline":"branch of machine learning"}</script> </body> </html>