Neural network (machine learning)

<ul id="p-associated-pages" class="minerva__tab-container"> <li class="minerva__tab selected"> <a class="minerva__tab-text" href="/wiki/Neural_network_(machine_learning)" rel="" data-event-name="tabs.subject">Article</a> </li> <li class="minerva__tab "> <a class="minerva__tab-text" href="/wiki/Talk:Neural_network_(machine_learning)" rel="discussion" data-event-name="tabs.talk">Talk</a> </li> </ul> <nav class="page-actions-menu"> <ul id="p-views" class="page-actions-menu__list"> <li id="language-selector" class="page-actions-menu__list-item"> <a role="button" href="#p-lang" data-mw="interface" data-event-name="menu.languages" title="Language" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet language-selector"> <span class="minerva-icon minerva-icon--language"></span> <span>Language</span> </a> </li> <li id="page-actions-watch" class="page-actions-menu__list-item"> <a role="button" id="ca-watch" href="/w/index.php?title=Special:UserLogin&amp;returnto=Neural+network+%28machine+learning%29" data-event-name="menu.watch" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet menu__item--page-actions-watch"> <span class="minerva-icon minerva-icon--star"></span> <span>Watch</span> </a> </li> <li id="page-actions-edit" class="page-actions-menu__list-item"> <a role="button" id="ca-edit" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit" data-event-name="menu.edit" data-mw="interface" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet edit-page menu__item--page-actions-edit"> <span class="minerva-icon minerva-icon--edit"></span> <span>Edit</span> </a> </li> </ul> </nav> <!-- version 1.0.2 (change every time you update a partial) --> <div id="mw-content-subtitle"><span class="mw-redirectedfrom">(Redirected from <a href="/w/index.php?title=Artificial_neural_network&amp;redirect=no" class="mw-redirect" title="Artificial neural network">Artificial neural network</a>)</span></div> </div> <div id="bodyContent" class="content"> <div id="mw-content-text" class="mw-body-content"><script>function mfTempOpenSection(id){var block=document.getElementById("mf-section-"+id);block.className+=" open-block";block.previousSibling.className+=" open-block";}</script><div class="mw-content-ltr mw-parser-output" lang="en" dir="ltr"><section class="mf-section-0" id="mf-section-0"> <style data-mw-deduplicate="TemplateStyles:r1236090951">.mw-parser-output .hatnote{font-style:italic}.mw-parser-output div.hatnote{padding-left:1.6em;margin-bottom:0.5em}.mw-parser-output .hatnote i{font-style:normal}.mw-parser-output .hatnote+link+.hatnote{margin-top:-0.5em}@media print{body.ns-0 .mw-parser-output .hatnote{display:none!important}}</style><div role="note" class="hatnote navigation-not-searchable">This article is about the computational models used for artificial intelligence. 
For other uses, see Neural network (disambiguation).

In machine learning, a neural network (also artificial neural network or neural net, abbreviated ANN or NN) is a model inspired by the structure and function of biological neural networks in animal brains.[1][2]

[Figure: An artificial neural network is an interconnected group of nodes, inspired by a simplification of neurons in a brain. Here, each circular node represents an artificial neuron and an arrow represents a connection from the output of one artificial neuron to the input of another.]
An ANN consists of connected units or nodes called artificial neurons, which loosely model the neurons in the brain. These are connected by edges, which model the synapses in the brain. Each artificial neuron receives signals from connected neurons, then processes them and sends a signal to other connected neurons. The "signal" is a real number, and the output of each neuron is computed by some non-linear function of the sum of its inputs, called the activation function. The strength of the signal at each connection is determined by a weight, which adjusts during the learning process.

Typically, neurons are aggregated into layers. Different layers may perform different transformations on their inputs. Signals travel from the first layer (the input layer) to the last layer (the output layer), possibly passing through multiple intermediate layers (hidden layers). A network is typically called a deep neural network if it has at least two hidden layers.[3]

Artificial neural networks are used for various tasks, including predictive modeling, adaptive control, and solving problems in artificial intelligence. They can learn from experience, and can derive conclusions from a complex and seemingly unrelated set of information.
class="toctext">Deep learning</span></a></li> </ul> </li> <li class="toclevel-1 tocsection-9"><a href="#Models"><span class="tocnumber">3</span> <span class="toctext">Models</span></a> <ul> <li class="toclevel-2 tocsection-10"><a href="#Artificial_neurons"><span class="tocnumber">3.1</span> <span class="toctext">Artificial neurons</span></a></li> <li class="toclevel-2 tocsection-11"><a href="#Organization"><span class="tocnumber">3.2</span> <span class="toctext">Organization</span></a></li> <li class="toclevel-2 tocsection-12"><a href="#Hyperparameter"><span class="tocnumber">3.3</span> <span class="toctext">Hyperparameter</span></a></li> <li class="toclevel-2 tocsection-13"><a href="#Learning"><span class="tocnumber">3.4</span> <span class="toctext">Learning</span></a> <ul> <li class="toclevel-3 tocsection-14"><a href="#Learning_rate"><span class="tocnumber">3.4.1</span> <span class="toctext">Learning rate</span></a></li> <li class="toclevel-3 tocsection-15"><a href="#Cost_function"><span class="tocnumber">3.4.2</span> <span class="toctext">Cost function</span></a></li> <li class="toclevel-3 tocsection-16"><a href="#Backpropagation_2"><span class="tocnumber">3.4.3</span> <span class="toctext">Backpropagation</span></a></li> </ul> </li> <li class="toclevel-2 tocsection-17"><a href="#Learning_paradigms"><span class="tocnumber">3.5</span> <span class="toctext">Learning paradigms</span></a> <ul> <li class="toclevel-3 tocsection-18"><a href="#Supervised_learning"><span class="tocnumber">3.5.1</span> <span class="toctext">Supervised learning</span></a></li> <li class="toclevel-3 tocsection-19"><a href="#Unsupervised_learning"><span class="tocnumber">3.5.2</span> <span class="toctext">Unsupervised learning</span></a></li> <li class="toclevel-3 tocsection-20"><a href="#Reinforcement_learning"><span class="tocnumber">3.5.3</span> <span class="toctext">Reinforcement learning</span></a></li> <li class="toclevel-3 tocsection-21"><a href="#Self-learning"><span class="tocnumber">3.5.4</span> <span class="toctext">Self-learning</span></a></li> <li class="toclevel-3 tocsection-22"><a href="#Neuroevolution"><span class="tocnumber">3.5.5</span> <span class="toctext">Neuroevolution</span></a></li> </ul> </li> <li class="toclevel-2 tocsection-23"><a href="#Stochastic_neural_network"><span class="tocnumber">3.6</span> <span class="toctext">Stochastic neural network</span></a></li> <li class="toclevel-2 tocsection-24"><a href="#Other"><span class="tocnumber">3.7</span> <span class="toctext">Other</span></a> <ul> <li class="toclevel-3 tocsection-25"><a href="#Modes"><span class="tocnumber">3.7.1</span> <span class="toctext">Modes</span></a></li> </ul> </li> </ul> </li> <li class="toclevel-1 tocsection-26"><a href="#Types"><span class="tocnumber">4</span> <span class="toctext">Types</span></a></li> <li class="toclevel-1 tocsection-27"><a href="#Network_design"><span class="tocnumber">5</span> <span class="toctext">Network design</span></a></li> <li class="toclevel-1 tocsection-28"><a href="#Applications"><span class="tocnumber">6</span> <span class="toctext">Applications</span></a></li> <li class="toclevel-1 tocsection-29"><a href="#Theoretical_properties"><span class="tocnumber">7</span> <span class="toctext">Theoretical properties</span></a> <ul> <li class="toclevel-2 tocsection-30"><a href="#Computational_power"><span class="tocnumber">7.1</span> <span class="toctext">Computational power</span></a></li> <li class="toclevel-2 tocsection-31"><a href="#Capacity"><span class="tocnumber">7.2</span> <span 
class="toctext">Capacity</span></a></li> <li class="toclevel-2 tocsection-32"><a href="#Convergence"><span class="tocnumber">7.3</span> <span class="toctext">Convergence</span></a></li> <li class="toclevel-2 tocsection-33"><a href="#Generalization_and_statistics"><span class="tocnumber">7.4</span> <span class="toctext">Generalization and statistics</span></a></li> </ul> </li> <li class="toclevel-1 tocsection-34"><a href="#Criticism"><span class="tocnumber">8</span> <span class="toctext">Criticism</span></a> <ul> <li class="toclevel-2 tocsection-35"><a href="#Training_2"><span class="tocnumber">8.1</span> <span class="toctext">Training</span></a></li> <li class="toclevel-2 tocsection-36"><a href="#Theory"><span class="tocnumber">8.2</span> <span class="toctext">Theory</span></a></li> <li class="toclevel-2 tocsection-37"><a href="#Hardware"><span class="tocnumber">8.3</span> <span class="toctext">Hardware</span></a></li> <li class="toclevel-2 tocsection-38"><a href="#Practical_counterexamples"><span class="tocnumber">8.4</span> <span class="toctext">Practical counterexamples</span></a></li> <li class="toclevel-2 tocsection-39"><a href="#Hybrid_approaches"><span class="tocnumber">8.5</span> <span class="toctext">Hybrid approaches</span></a></li> <li class="toclevel-2 tocsection-40"><a href="#Dataset_bias"><span class="tocnumber">8.6</span> <span class="toctext">Dataset bias</span></a></li> </ul> </li> <li class="toclevel-1 tocsection-41"><a href="#Gallery"><span class="tocnumber">9</span> <span class="toctext">Gallery</span></a></li> <li class="toclevel-1 tocsection-42"><a href="#Recent_advancements_and_future_directions"><span class="tocnumber">10</span> <span class="toctext">Recent advancements and future directions</span></a> <ul> <li class="toclevel-2 tocsection-43"><a href="#Image_processing"><span class="tocnumber">10.1</span> <span class="toctext">Image processing</span></a></li> <li class="toclevel-2 tocsection-44"><a href="#Speech_recognition"><span class="tocnumber">10.2</span> <span class="toctext">Speech recognition</span></a></li> <li class="toclevel-2 tocsection-45"><a href="#Natural_language_processing"><span class="tocnumber">10.3</span> <span class="toctext">Natural language processing</span></a></li> <li class="toclevel-2 tocsection-46"><a href="#Control_systems"><span class="tocnumber">10.4</span> <span class="toctext">Control systems</span></a></li> <li class="toclevel-2 tocsection-47"><a href="#Finance"><span class="tocnumber">10.5</span> <span class="toctext">Finance</span></a></li> <li class="toclevel-2 tocsection-48"><a href="#Medicine"><span class="tocnumber">10.6</span> <span class="toctext">Medicine</span></a></li> <li class="toclevel-2 tocsection-49"><a href="#Content_creation"><span class="tocnumber">10.7</span> <span class="toctext">Content creation</span></a></li> </ul> </li> <li class="toclevel-1 tocsection-50"><a href="#See_also"><span class="tocnumber">11</span> <span class="toctext">See also</span></a></li> <li class="toclevel-1 tocsection-51"><a href="#References"><span class="tocnumber">12</span> <span class="toctext">References</span></a></li> <li class="toclevel-1 tocsection-52"><a href="#Bibliography"><span class="tocnumber">13</span> <span class="toctext">Bibliography</span></a></li> <li class="toclevel-1 tocsection-53"><a href="#External_links"><span class="tocnumber">14</span> <span class="toctext">External links</span></a></li> </ul> </div> </div> </section><div class="mw-heading mw-heading2 section-heading" onclick="mfTempOpenSection(1)"><span 
class="indicator mf-icon mf-icon-expand mf-icon--small"></span><h2 id="Training">Training</h2><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=1" title="Edit section: Training" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div><section class="mf-section-1 collapsible-block" id="mf-section-1"><p> Neural networks are typically trained through <a href="/wiki/Empirical_risk_minimization" title="Empirical risk minimization">empirical risk minimization</a>. This method is based on the idea of optimizing the network's parameters to minimize the difference, or empirical risk, between the predicted output and the actual target values in a given dataset.<sup id="cite_ref-:2_4-0" class="reference"><a href="#cite_note-:2-4"><span class="cite-bracket">[</span>4<span class="cite-bracket">]</span></a></sup> Gradient-based methods such as <a href="/wiki/Backpropagation" title="Backpropagation">backpropagation</a> are usually used to estimate the parameters of the network.<sup id="cite_ref-:2_4-1" class="reference"><a href="#cite_note-:2-4"><span class="cite-bracket">[</span>4<span class="cite-bracket">]</span></a></sup> During the training phase, ANNs learn from <a href="/wiki/Labeled_data" title="Labeled data">labeled</a> training data by iteratively updating their parameters to minimize a defined <a href="/wiki/Loss_functions_for_classification" title="Loss functions for classification">loss function</a>.<sup id="cite_ref-:4_5-0" class="reference"><a href="#cite_note-:4-5"><span class="cite-bracket">[</span>5<span class="cite-bracket">]</span></a></sup> This method allows the network to generalize to unseen data.<style data-mw-deduplicate="TemplateStyles:r1237032888/mw-parser-output/.tmulti">.mw-parser-output .tmulti .multiimageinner{display:flex;flex-direction:column}.mw-parser-output .tmulti .trow{display:flex;flex-direction:row;clear:left;flex-wrap:wrap;width:100%;box-sizing:border-box}.mw-parser-output .tmulti .tsingle{margin:1px;float:left}.mw-parser-output .tmulti .theader{clear:both;font-weight:bold;text-align:center;align-self:center;background-color:transparent;width:100%}.mw-parser-output .tmulti .thumbcaption{background-color:transparent}.mw-parser-output .tmulti .text-align-left{text-align:left}.mw-parser-output .tmulti .text-align-right{text-align:right}.mw-parser-output .tmulti .text-align-center{text-align:center}@media all and (max-width:720px){.mw-parser-output .tmulti .thumbinner{width:100%!important;box-sizing:border-box;max-width:none!important;align-items:center}.mw-parser-output .tmulti .trow{justify-content:center}.mw-parser-output .tmulti .tsingle{float:none!important;max-width:100%!important;box-sizing:border-box;text-align:center}.mw-parser-output .tmulti .tsingle .thumbcaption{text-align:left}.mw-parser-output .tmulti .trow>.thumbcaption{text-align:center}}@media screen{html.skin-theme-clientpref-night .mw-parser-output .tmulti .multiimageinner img{background-color:white}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .tmulti .multiimageinner img{background-color:white}}</style></p><div class="thumb tmulti tright"><div class="thumbinner multiimageinner" style="width:392px;max-width:392px"><div class="trow"><div class="tsingle" 
style="width:167px;max-width:167px"><div class="thumbimage" style="height:188px;overflow:hidden"><span typeof="mw:File"><a href="/wiki/File:Simplified_neural_network_training_example.svg" class="mw-file-description"><noscript><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/0/0c/Simplified_neural_network_training_example.svg/165px-Simplified_neural_network_training_example.svg.png" decoding="async" width="165" height="189" class="mw-file-element" data-file-width="773" data-file-height="884"></noscript><span class="lazy-image-placeholder" style="width: 165px;height: 189px;" data-src="//upload.wikimedia.org/wikipedia/commons/thumb/0/0c/Simplified_neural_network_training_example.svg/165px-Simplified_neural_network_training_example.svg.png" data-alt="" data-width="165" data-height="189" data-srcset="//upload.wikimedia.org/wikipedia/commons/thumb/0/0c/Simplified_neural_network_training_example.svg/248px-Simplified_neural_network_training_example.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/0/0c/Simplified_neural_network_training_example.svg/330px-Simplified_neural_network_training_example.svg.png 2x" data-class="mw-file-element">&nbsp;</span></a></span></div><div class="thumbcaption">Simplified example of training a neural network in object detection: The network is trained by multiple images that are known to depict <a href="/wiki/Starfish" title="Starfish">starfish</a> and <a href="/wiki/Sea_urchin" title="Sea urchin">sea urchins</a>, which are correlated with "nodes" that represent visual <a href="/wiki/Feature_(computer_vision)" title="Feature (computer vision)">features</a>. The starfish match with a ringed texture and a star outline, whereas most sea urchins match with a striped texture and oval shape. However, the instance of a ring textured sea urchin creates a weakly weighted association between them.</div></div><div class="tsingle" style="width:221px;max-width:221px"><div class="thumbimage" style="height:188px;overflow:hidden"><span typeof="mw:File"><a href="/wiki/File:Simplified_neural_network_example.svg" class="mw-file-description"><noscript><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/b/b7/Simplified_neural_network_example.svg/219px-Simplified_neural_network_example.svg.png" decoding="async" width="219" height="188" class="mw-file-element" data-file-width="1028" data-file-height="882"></noscript><span class="lazy-image-placeholder" style="width: 219px;height: 188px;" data-src="//upload.wikimedia.org/wikipedia/commons/thumb/b/b7/Simplified_neural_network_example.svg/219px-Simplified_neural_network_example.svg.png" data-alt="" data-width="219" data-height="188" data-srcset="//upload.wikimedia.org/wikipedia/commons/thumb/b/b7/Simplified_neural_network_example.svg/329px-Simplified_neural_network_example.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/b/b7/Simplified_neural_network_example.svg/438px-Simplified_neural_network_example.svg.png 2x" data-class="mw-file-element">&nbsp;</span></a></span></div><div class="thumbcaption">Subsequent run of the network on an input image (left):<sup id="cite_ref-6" class="reference"><a href="#cite_note-6"><span class="cite-bracket">[</span>6<span class="cite-bracket">]</span></a></sup> The network correctly detects the starfish. However, the weakly weighted association between ringed texture and sea urchin also confers a weak signal to the latter from one of two intermediate nodes. 
In addition, a shell that was not included in the training gives a weak signal for the oval shape, also resulting in a weak signal for the sea urchin output. These weak signals may result in a <a href="/wiki/False_positive" class="mw-redirect" title="False positive">false positive</a> result for sea urchin.<br>In reality, textures and outlines would not be represented by single nodes, but rather by associated weight patterns of multiple nodes.</div></div></div></div></div> </section><div class="mw-heading mw-heading2 section-heading" onclick="mfTempOpenSection(2)"><span class="indicator mf-icon mf-icon-expand mf-icon--small"></span><h2 id="History">History</h2><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=2" title="Edit section: History" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div><section class="mf-section-2 collapsible-block" id="mf-section-2"> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/History_of_artificial_neural_networks" title="History of artificial neural networks">History of artificial neural networks</a></div> <div class="mw-heading mw-heading3"><h3 id="Early_work">Early work</h3><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=3" title="Edit section: Early work" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div> <p>Today's deep neural networks are based on early work in <a href="/wiki/Statistics" title="Statistics">statistics</a> over 200 years ago. The simplest kind of <a href="/wiki/Feedforward_neural_network" title="Feedforward neural network">feedforward neural network</a> (FNN) is a linear network, which consists of a single layer of output nodes with linear activation functions; the inputs are fed directly to the outputs via a series of weights. The sum of the products of the weights and the inputs is calculated at each node. The <a href="/wiki/Mean_squared_error" title="Mean squared error">mean squared errors</a> between these calculated outputs and the given target values are minimized by creating an adjustment to the weights. This technique has been known for over two centuries as the <a href="/wiki/Method_of_least_squares" class="mw-redirect" title="Method of least squares">method of least squares</a> or <a href="/wiki/Linear_regression" title="Linear regression">linear regression</a>. 
<p>Historically, digital computers such as the <a href="/wiki/Von_Neumann_model">von Neumann model</a> operate via the execution of explicit instructions with access to memory by a number of processors. Some neural networks, on the other hand, originated from efforts to model information processing in biological systems through the framework of <a href="/wiki/Connectionism">connectionism</a>. Unlike the von Neumann model, connectionist computing does not separate memory and processing.</p>
<p><a href="/wiki/Warren_McCulloch">Warren McCulloch</a> and <a href="/wiki/Walter_Pitts">Walter Pitts</a><sup>[12]</sup> (1943) considered a non-learning computational model for neural networks.<sup>[13]</sup> This model paved the way for research to split into two approaches. One approach focused on biological processes while the other focused on the application of neural networks to <a href="/wiki/Artificial_intelligence">artificial intelligence</a>.</p>
<p>In the late 1940s, <a href="/wiki/Donald_O._Hebb">D. O. Hebb</a><sup>[14]</sup> proposed a learning <a href="/wiki/Hypothesis">hypothesis</a> based on the mechanism of <a href="/wiki/Neuroplasticity">neural plasticity</a> that became known as <a href="/wiki/Hebbian_learning">Hebbian learning</a>. It was used in many early neural networks, such as Rosenblatt's <a href="/wiki/Perceptron">perceptron</a> and the <a href="/wiki/Hopfield_network">Hopfield network</a>. Farley and <a href="/wiki/Wesley_A._Clark">Clark</a><sup>[15]</sup> (1954) used computational machines to simulate a Hebbian network. Other neural network computational machines were created by <a href="/wiki/Nathaniel_Rochester_(computer_scientist)">Rochester</a>, Holland, Habit and Duda (1956).<sup>[16]</sup></p>
<p>In 1958, psychologist <a href="/wiki/Frank_Rosenblatt">Frank Rosenblatt</a> described the perceptron, one of the first implemented artificial neural networks,<sup>[17][18][19][20]</sup> funded by the United States <a href="/wiki/Office_of_Naval_Research">Office of Naval Research</a>.<sup>[21]</sup> R. D. Joseph (1960)<sup>[22]</sup> mentions an even earlier perceptron-like device by Farley and Clark:<sup>[10]</sup> "Farley and Clark of MIT Lincoln Laboratory actually preceded Rosenblatt in the development of a perceptron-like device." However, "they dropped the subject." The perceptron raised public excitement for research in artificial neural networks, causing the US government to drastically increase funding. This contributed to "the Golden Age of AI", fueled by the optimistic claims made by computer scientists regarding the ability of perceptrons to emulate human intelligence.<sup>[23]</sup></p>
<p>The first perceptrons did not have adaptive hidden units. However, Joseph (1960)<sup>[22]</sup> also discussed <a href="/wiki/Multilayer_perceptrons">multilayer perceptrons</a> with an adaptive hidden layer. Rosenblatt (1962)<sup>[24]: section 16</sup> cited and adopted these ideas, also crediting work by H. D. Block and B. W. Knight.
Unfortunately, these early efforts did not lead to a working learning algorithm for hidden units, i.e., <a href="/wiki/Deep_learning">deep learning</a>.</p>
<h3 id="Deep_learning_breakthroughs_in_the_1960s_and_1970s">Deep learning breakthroughs in the 1960s and 1970s</h3>
<p>Fundamental research was conducted on ANNs in the 1960s and 1970s. The first working <a href="/wiki/Deep_learning">deep learning</a> algorithm was the <a href="/wiki/Group_method_of_data_handling">Group method of data handling</a>, a method to train arbitrarily deep neural networks, published by <a href="/wiki/Alexey_Ivakhnenko">Alexey Ivakhnenko</a> and Lapa in <a href="/wiki/Ukraine">Ukraine</a> (1965). They regarded it as a form of polynomial regression,<sup>[25]</sup> or a generalization of Rosenblatt's perceptron.<sup>[26]</sup> A 1971 paper described a deep network with eight layers trained by this method,<sup>[27]</sup> which is based on layer-by-layer training through regression analysis. Superfluous hidden units are pruned using a separate validation set. Since the activation functions of the nodes are Kolmogorov-Gabor polynomials, these were also the first deep networks with multiplicative units or "gates".<sup>[10]</sup></p>
<p>The first deep learning <a href="/wiki/Multilayer_perceptron">multilayer perceptron</a> trained by <a href="/wiki/Stochastic_gradient_descent">stochastic gradient descent</a><sup>[28]</sup> was published in 1967 by <a href="/wiki/Shun%27ichi_Amari">Shun'ichi Amari</a>.<sup>[29]</sup> In computer experiments conducted by Amari's student Saito, a five-layer MLP with two modifiable layers learned <a href="/wiki/Knowledge_representation">internal representations</a> to classify non-linearly separable pattern classes.<sup>[10]</sup> Subsequent developments in hardware and hyperparameter tuning have made end-to-end <a href="/wiki/Stochastic_gradient_descent">stochastic gradient descent</a> the currently dominant training technique.</p>
<p>In 1969, <a href="/wiki/Kunihiko_Fukushima">Kunihiko Fukushima</a> introduced the <a href="/wiki/Rectifier_(neural_networks)">ReLU</a> (rectified linear unit) <a href="/wiki/Activation_function">activation function</a>.<sup>[10][30][31]</sup> The rectifier has become the most popular activation function for deep learning.<sup>[32]</sup></p>
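<p>Both ingredients are compact enough to state directly. The sketch below (function and parameter names are arbitrary) shows the rectifier and a single stochastic-gradient-descent update:</p>
<pre><code>import numpy as np

def relu(x):
    # The rectifier: max(0, x), applied elementwise.
    return np.maximum(0.0, x)

def sgd_step(w, grad, learning_rate=0.01):
    # One stochastic gradient descent update, where `grad` is the
    # gradient of the loss estimated from a single example (or a
    # small batch) rather than from the whole training set.
    return w - learning_rate * grad
</code></pre>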
<sup id="cite_ref-34" class="reference"><a href="#cite_note-34"><span class="cite-bracket">[</span>34<span class="cite-bracket">]</span></a></sup> <sup id="cite_ref-35" class="reference"><a href="#cite_note-35"><span class="cite-bracket">[</span>35<span class="cite-bracket">]</span></a></sup> </p><p>Deep learning architectures for <a href="/wiki/Convolutional_neural_network" title="Convolutional neural network">convolutional neural networks</a> (CNNs) with convolutional layers and downsampling layers and weight replication began with the <a href="/wiki/Neocognitron" title="Neocognitron">Neocognitron</a> introduced by <a href="/wiki/Kunihiko_Fukushima" title="Kunihiko Fukushima">Kunihiko Fukushima</a> in 1979, though not trained by backpropagation.<sup id="cite_ref-FUKU1979_36-0" class="reference"><a href="#cite_note-FUKU1979-36"><span class="cite-bracket">[</span>36<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-FUKU1980_37-0" class="reference"><a href="#cite_note-FUKU1980-37"><span class="cite-bracket">[</span>37<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-SCHIDHUB4_38-0" class="reference"><a href="#cite_note-SCHIDHUB4-38"><span class="cite-bracket">[</span>38<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Backpropagation">Backpropagation</h3><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=5" title="Edit section: Backpropagation" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div> <p><a href="/wiki/Backpropagation" title="Backpropagation">Backpropagation</a> is an efficient application of the <a href="/wiki/Chain_rule" title="Chain rule">chain rule</a> derived by <a href="/wiki/Gottfried_Wilhelm_Leibniz" title="Gottfried Wilhelm Leibniz">Gottfried Wilhelm Leibniz</a> in 1673<sup id="cite_ref-leibniz16762_39-0" class="reference"><a href="#cite_note-leibniz16762-39"><span class="cite-bracket">[</span>39<span class="cite-bracket">]</span></a></sup> to networks of differentiable nodes. The terminology "back-propagating errors" was actually introduced in 1962 by Rosenblatt,<sup id="cite_ref-rosenblatt1962_24-1" class="reference"><a href="#cite_note-rosenblatt1962-24"><span class="cite-bracket">[</span>24<span class="cite-bracket">]</span></a></sup> but he did not know how to implement this, although <a href="/wiki/Henry_J._Kelley" title="Henry J. Kelley">Henry J. 
Kelley</a> had a continuous precursor of backpropagation in 1960 in the context of <a href="/wiki/Control_theory" title="Control theory">control theory</a>.<sup id="cite_ref-kelley19602_40-0" class="reference"><a href="#cite_note-kelley19602-40"><span class="cite-bracket">[</span>40<span class="cite-bracket">]</span></a></sup> In 1970, <a href="/wiki/Seppo_Linnainmaa" title="Seppo Linnainmaa">Seppo Linnainmaa</a> published the modern form of <a href="/wiki/Backpropagation" title="Backpropagation">backpropagation</a> in his master thesis (1970).<sup id="cite_ref-lin19703_41-0" class="reference"><a href="#cite_note-lin19703-41"><span class="cite-bracket">[</span>41<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-lin19763_42-0" class="reference"><a href="#cite_note-lin19763-42"><span class="cite-bracket">[</span>42<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-DLhistory_10-5" class="reference"><a href="#cite_note-DLhistory-10"><span class="cite-bracket">[</span>10<span class="cite-bracket">]</span></a></sup> G.M. Ostrovski et al. republished it in 1971.<sup id="cite_ref-ostrowski1971_43-0" class="reference"><a href="#cite_note-ostrowski1971-43"><span class="cite-bracket">[</span>43<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-backprop_44-0" class="reference"><a href="#cite_note-backprop-44"><span class="cite-bracket">[</span>44<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Paul_Werbos" title="Paul Werbos">Paul Werbos</a> applied backpropagation to neural networks in 1982<sup id="cite_ref-werbos1982_45-0" class="reference"><a href="#cite_note-werbos1982-45"><span class="cite-bracket">[</span>45<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-:1_46-0" class="reference"><a href="#cite_note-:1-46"><span class="cite-bracket">[</span>46<span class="cite-bracket">]</span></a></sup> (his 1974 PhD thesis, reprinted in a 1994 book,<sup id="cite_ref-werbos1974_47-0" class="reference"><a href="#cite_note-werbos1974-47"><span class="cite-bracket">[</span>47<span class="cite-bracket">]</span></a></sup> did not yet describe the algorithm<sup id="cite_ref-backprop_44-1" class="reference"><a href="#cite_note-backprop-44"><span class="cite-bracket">[</span>44<span class="cite-bracket">]</span></a></sup>). In 1986, <a href="/wiki/David_E._Rumelhart" class="mw-redirect" title="David E. Rumelhart">David E. Rumelhart</a> et al. 
popularised backpropagation but did not cite the original work.<sup>[48]</sup></p>
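<p>In modern notation, the backward pass is the chain rule applied layer by layer, from the output back towards the inputs. A minimal sketch for a two-layer network follows (the shapes, data, and learning rate are invented for the example):</p>
<pre><code>import numpy as np

rng = np.random.default_rng(0)

# Tiny two-layer network: x -> h = relu(W1 x) -> y = W2 h.
W1 = rng.normal(scale=0.5, size=(4, 3))
W2 = rng.normal(scale=0.5, size=(1, 4))
x = rng.normal(size=3)
target = np.array([1.0])

# Forward pass.
a = W1 @ x                # pre-activations of the hidden layer
h = np.maximum(0.0, a)    # ReLU
y = W2 @ h                # network output
loss = 0.5 * np.sum((y - target) ** 2)

# Backward pass: each line is one application of the chain rule.
dy = y - target           # dLoss/dy
dW2 = np.outer(dy, h)     # dLoss/dW2
dh = W2.T @ dy            # dLoss/dh
da = dh * (a > 0)         # the ReLU passes gradient only where a > 0
dW1 = np.outer(da, x)     # dLoss/dW1

# Gradient descent update.
lr = 0.1
W2 -= lr * dW2
W1 -= lr * dW1
</code></pre>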
<h3 id="Convolutional_neural_networks">Convolutional neural networks</h3>
<p><a href="/wiki/Kunihiko_Fukushima">Kunihiko Fukushima</a>'s <a href="/wiki/Convolutional_neural_network">convolutional neural network</a> (CNN) architecture of 1979<sup>[36]</sup> also introduced <a href="/wiki/Max_pooling">max pooling</a>,<sup>[49]</sup> a popular downsampling procedure for CNNs. CNNs have become an essential tool for <a href="/wiki/Computer_vision">computer vision</a>.</p>
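<p>Max pooling itself is a one-line operation: each non-overlapping block of a feature map is replaced by its maximum. A sketch (assuming even dimensions, with invented example data):</p>
<pre><code>import numpy as np

def max_pool_2x2(feature_map):
    # Downsample a 2-D feature map by taking the maximum of each
    # non-overlapping 2x2 block.
    h, w = feature_map.shape
    return feature_map.reshape(h // 2, 2, w // 2, 2).max(axis=(1, 3))

fm = np.arange(16.0).reshape(4, 4)
print(max_pool_2x2(fm))  # [[ 5.  7.]
                         #  [13. 15.]]
</code></pre>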
In 1972, <a href="/wiki/Shun%27ichi_Amari" title="Shun'ichi Amari">Shun'ichi Amari</a> proposed to modify the weights of an <a href="/wiki/Ising_model" title="Ising model">Ising model</a> by <a href="/wiki/Hebbian_theory" title="Hebbian theory">Hebbian learning</a> rule as a model of associative memory, adding in the component of learning.<sup id="cite_ref-61" class="reference"><a href="#cite_note-61"><span class="cite-bracket">[</span>61<span class="cite-bracket">]</span></a></sup> This was popularized as the <a href="/wiki/Hopfield_network" title="Hopfield network">Hopfield network</a> by <a href="/wiki/John_Hopfield" title="John Hopfield">John Hopfield</a>(1982).<sup id="cite_ref-Hopfield19822_62-0" class="reference"><a href="#cite_note-Hopfield19822-62"><span class="cite-bracket">[</span>62<span class="cite-bracket">]</span></a></sup> Another origin of RNN was neuroscience. The word "recurrent" is used to describe loop-like structures in anatomy. In 1901, <a href="/wiki/Santiago_Ram%C3%B3n_y_Cajal" title="Santiago Ramón y Cajal">Cajal</a> observed "recurrent semicircles" in the <a href="/wiki/Cerebellum" title="Cerebellum">cerebellar cortex</a>.<sup id="cite_ref-63" class="reference"><a href="#cite_note-63"><span class="cite-bracket">[</span>63<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Donald_O._Hebb" title="Donald O. Hebb">Hebb</a> considered "reverberating circuit" as an explanation for short-term memory.<sup id="cite_ref-64" class="reference"><a href="#cite_note-64"><span class="cite-bracket">[</span>64<span class="cite-bracket">]</span></a></sup> The McCulloch and Pitts paper (1943) considered neural networks that contains cycles, and noted that the current activity of such networks can be affected by activity indefinitely far in the past.<sup id="cite_ref-WM_12-1" class="reference"><a href="#cite_note-WM-12"><span class="cite-bracket">[</span>12<span class="cite-bracket">]</span></a></sup> </p><p>In 1982 a recurrent neural network, with an array architecture (rather than a multilayer perceptron architecture), named Crossbar Adaptive Array <sup id="cite_ref-CAA1982_65-0" class="reference"><a href="#cite_note-CAA1982-65"><span class="cite-bracket">[</span>65<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-" class="reference"><a href="#cite_note-"><span class="cite-bracket">[</span>66<span class="cite-bracket">]</span></a></sup> used direct recurrent connections from the output to the supervisor (teaching ) inputs. In addition of computing actions (decisions), it computed internal state evaluations (emotions) of the consequence situations. Eliminating the external supervisor, it introduced the self-learning method in neural networks. </p><p>In cognitive psychology, the journal American Psychologist in early 1980's carried out a debate on relation between cognition and emotion. Zajonc in 1980 stated that emotion is computed first and is independent from cognition, while Lazarus in 1982 stated that cognition is computed first and is inseparable from emotion. <sup id="cite_ref-67" class="reference"><a href="#cite_note-67"><span class="cite-bracket">[</span>67<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-68" class="reference"><a href="#cite_note-68"><span class="cite-bracket">[</span>68<span class="cite-bracket">]</span></a></sup> In 1982 the Crossbar Adaptive Array gave a neural network model of cognition-emotion relation. 
<sup id="cite_ref-CAA1982_65-1" class="reference"><a href="#cite_note-CAA1982-65"><span class="cite-bracket">[</span>65<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-69" class="reference"><a href="#cite_note-69"><span class="cite-bracket">[</span>69<span class="cite-bracket">]</span></a></sup> It was an example of a debate where an AI system, a recurrent neural network, contributed to an issue in the same time addressed by cognitive psychology. </p><p>Two early influential works were the <a href="/wiki/Recurrent_neural_network#Jordan_network" title="Recurrent neural network">Jordan network</a> (1986) and the <a href="/wiki/Recurrent_neural_network#Elman_network" title="Recurrent neural network">Elman network</a> (1990), which applied RNN to study <a href="/wiki/Cognitive_psychology" title="Cognitive psychology">cognitive psychology</a>. </p><p>In the 1980s, backpropagation did not work well for deep RNNs. To overcome this problem, in 1991, <a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Jürgen Schmidhuber</a> proposed the "neural sequence chunker" or "neural history compressor"<sup id="cite_ref-chunker1991_70-0" class="reference"><a href="#cite_note-chunker1991-70"><span class="cite-bracket">[</span>70<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-schmidhuber1992_71-0" class="reference"><a href="#cite_note-schmidhuber1992-71"><span class="cite-bracket">[</span>71<span class="cite-bracket">]</span></a></sup> which introduced the important concepts of self-supervised pre-training (the "P" in <a href="/wiki/ChatGPT" title="ChatGPT">ChatGPT</a>) and neural <a href="/wiki/Knowledge_distillation" title="Knowledge distillation">knowledge distillation</a>.<sup id="cite_ref-DLhistory_10-6" class="reference"><a href="#cite_note-DLhistory-10"><span class="cite-bracket">[</span>10<span class="cite-bracket">]</span></a></sup> In 1993, a neural history compressor system solved a "Very Deep Learning" task that required more than 1000 subsequent <a href="/wiki/Layer_(deep_learning)" title="Layer (deep learning)">layers</a> in an RNN unfolded in time.<sup id="cite_ref-schmidhuber19932_72-0" class="reference"><a href="#cite_note-schmidhuber19932-72"><span class="cite-bracket">[</span>72<span class="cite-bracket">]</span></a></sup> </p><p>In 1991, <a href="/wiki/Sepp_Hochreiter" title="Sepp Hochreiter">Sepp Hochreiter</a>'s diploma thesis <sup id="cite_ref-HOCH1991_73-0" class="reference"><a href="#cite_note-HOCH1991-73"><span class="cite-bracket">[</span>73<span class="cite-bracket">]</span></a></sup> identified and analyzed the <a href="/wiki/Vanishing_gradient_problem" title="Vanishing gradient problem">vanishing gradient problem</a><sup id="cite_ref-HOCH1991_73-1" class="reference"><a href="#cite_note-HOCH1991-73"><span class="cite-bracket">[</span>73<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-HOCH2001_74-0" class="reference"><a href="#cite_note-HOCH2001-74"><span class="cite-bracket">[</span>74<span class="cite-bracket">]</span></a></sup> and proposed recurrent <a href="/wiki/Residual_neural_network" title="Residual neural network">residual</a> connections to solve it. 
He and Schmidhuber introduced <a href="/wiki/Long_short-term_memory">long short-term memory</a> (LSTM), which set accuracy records in multiple application domains.<sup>[75][76]</sup> This was not yet the modern version of LSTM, which required the forget gate, introduced in 1999.<sup>[77]</sup> It became the default choice for RNN architecture.</p>
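<p>The following sketch shows a single LSTM step with input, forget, and output gates (a simplified form: biases, batching, and separate weight matrices are omitted, and all names are arbitrary):</p>
<pre><code>import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_step(x, h, c, W):
    # W maps the concatenated [x, h] to the four gate pre-activations.
    z = W @ np.concatenate([x, h])
    i, f, o, g = np.split(z, 4)
    # The forget gate scales the old cell state; the input gate
    # scales the new candidate values.
    c_new = sigmoid(f) * c + sigmoid(i) * np.tanh(g)
    h_new = sigmoid(o) * np.tanh(c_new)
    return h_new, c_new
</code></pre>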
</p> <div class="mw-heading mw-heading3"><h3 id="Deep_learning">Deep learning</h3><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=8" title="Edit section: Deep learning" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div> <p>Between 2009 and 2012, ANNs began winning prizes in image recognition contests, approaching human level performance on various tasks, initially in <a href="/wiki/Pattern_recognition" title="Pattern recognition">pattern recognition</a> and <a href="/wiki/Handwriting_recognition" title="Handwriting recognition">handwriting recognition</a>.<sup id="cite_ref-82" class="reference"><a href="#cite_note-82"><span class="cite-bracket">[</span>82<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-83" class="reference"><a href="#cite_note-83"><span class="cite-bracket">[</span>83<span class="cite-bracket">]</span></a></sup> In 2011, a CNN named <i>DanNet<sup id="cite_ref-:32_84-0" class="reference"><a href="#cite_note-:32-84"><span class="cite-bracket">[</span>84<span class="cite-bracket">]</span></a></sup></i><sup id="cite_ref-:62_85-0" class="reference"><a href="#cite_note-:62-85"><span class="cite-bracket">[</span>85<span class="cite-bracket">]</span></a></sup> by Dan Ciresan, Ueli Meier, Jonathan Masci, <a href="/wiki/Luca_Maria_Gambardella" title="Luca Maria Gambardella">Luca Maria Gambardella</a>, and <a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Jürgen Schmidhuber</a> achieved for the first time superhuman performance in a visual pattern recognition contest, outperforming traditional methods by a factor of 3.<sup id="cite_ref-SCHIDHUB4_38-1" class="reference"><a href="#cite_note-SCHIDHUB4-38"><span class="cite-bracket">[</span>38<span class="cite-bracket">]</span></a></sup> It then won more contests.<sup id="cite_ref-:82_86-0" class="reference"><a href="#cite_note-:82-86"><span class="cite-bracket">[</span>86<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-ciresan2013miccai_87-0" class="reference"><a href="#cite_note-ciresan2013miccai-87"><span class="cite-bracket">[</span>87<span class="cite-bracket">]</span></a></sup> They also showed how <a href="/wiki/Max_pooling" class="mw-redirect" title="Max pooling">max-pooling</a> CNNs on GPU improved performance significantly.<sup id="cite_ref-:9_88-0" class="reference"><a href="#cite_note-:9-88"><span class="cite-bracket">[</span>88<span class="cite-bracket">]</span></a></sup> </p><p>In October 2012, <a href="/wiki/AlexNet" title="AlexNet">AlexNet</a> by <a href="/wiki/Alex_Krizhevsky" title="Alex Krizhevsky">Alex Krizhevsky</a>, <a href="/wiki/Ilya_Sutskever" title="Ilya Sutskever">Ilya Sutskever</a>, and <a href="/wiki/Geoffrey_Hinton" title="Geoffrey Hinton">Geoffrey Hinton</a><sup id="cite_ref-krizhevsky20122_89-0" class="reference"><a href="#cite_note-krizhevsky20122-89"><span class="cite-bracket">[</span>89<span class="cite-bracket">]</span></a></sup> won the large-scale <a href="/wiki/ImageNet_competition" class="mw-redirect" title="ImageNet competition">ImageNet competition</a> by a significant margin over shallow machine learning methods. 
Further incremental improvements included the VGG-16 network by <a href="/wiki/Karen_Simonyan">Karen Simonyan</a> and <a href="/wiki/Andrew_Zisserman">Andrew Zisserman</a><sup>[90]</sup> and Google's <a href="/wiki/Inceptionv3">Inceptionv3</a>.<sup>[91]</sup></p>
<p>In 2012, <a href="/wiki/Andrew_Ng">Ng</a> and <a href="/wiki/Jeff_Dean_(computer_scientist)">Dean</a> created a network that learned to recognize higher-level concepts, such as cats, only from watching unlabeled images.<sup>[92]</sup> Unsupervised pre-training and increased computing power from <a href="/wiki/GPU">GPUs</a> and <a href="/wiki/Distributed_computing">distributed computing</a> allowed the use of larger networks, particularly in image and visual recognition problems, which became known as "deep learning".<sup>[5]</sup></p>
<p><a href="/wiki/Radial_basis_function_network">Radial basis function</a> and wavelet networks were introduced in 2013. These can be shown to offer best approximation properties and have been applied in <a href="/wiki/Nonlinear_system_identification">nonlinear system identification</a> and classification applications.<sup>[93]</sup></p>
<p>The <a href="/wiki/Generative_adversarial_network">generative adversarial network</a> (GAN) (<a href="/wiki/Ian_Goodfellow">Ian Goodfellow</a> et al., 2014)<sup>[94]</sup> became state of the art in generative modeling during the 2014–2018 period.
The GAN principle was originally published in 1991 by <a href="/wiki/J%C3%BCrgen_Schmidhuber">Jürgen Schmidhuber</a>, who called it "artificial curiosity": two neural networks contest with each other in the form of a <a href="/wiki/Zero-sum_game">zero-sum game</a>, where one network's gain is the other network's loss.<sup>[95][96]</sup> The first network is a <a href="/wiki/Generative_model">generative model</a> that models a <a href="/wiki/Probability_distribution">probability distribution</a> over output patterns. The second network learns by <a href="/wiki/Gradient_descent">gradient descent</a> to predict the reactions of the environment to these patterns. Excellent image quality was achieved by <a href="/wiki/Nvidia">Nvidia</a>'s <a href="/wiki/StyleGAN">StyleGAN</a> (2018),<sup>[97]</sup> based on the Progressive GAN by Tero Karras et al.,<sup>[98]</sup> in which the GAN generator is grown from small to large scale in a pyramidal fashion. Image generation by GANs reached popular success, and provoked discussions concerning <a href="/wiki/Deepfake">deepfakes</a>.<sup>[99]</sup> <a href="/wiki/Diffusion_model">Diffusion models</a> (2015)<sup>[100]</sup> have since eclipsed GANs in generative modeling, with systems such as <a href="/wiki/DALL%C2%B7E_2">DALL·E 2</a> (2022) and <a href="/wiki/Stable_Diffusion">Stable Diffusion</a> (2022).</p>
</p><p>In 2014, the state of the art was training "very deep neural network" with 20 to 30 layers.<sup id="cite_ref-101" class="reference"><a href="#cite_note-101"><span class="cite-bracket">[</span>101<span class="cite-bracket">]</span></a></sup> Stacking too many layers led to a steep reduction in <a href="/wiki/Training,_validation,_and_test_data_sets" title="Training, validation, and test data sets">training</a> accuracy,<sup id="cite_ref-prelu2_102-0" class="reference"><a href="#cite_note-prelu2-102"><span class="cite-bracket">[</span>102<span class="cite-bracket">]</span></a></sup> known as the "degradation" problem.<sup id="cite_ref-resnet2_103-0" class="reference"><a href="#cite_note-resnet2-103"><span class="cite-bracket">[</span>103<span class="cite-bracket">]</span></a></sup> In 2015, two techniques were developed to train very deep networks: the <a href="/wiki/Highway_network" title="Highway network">highway network</a> was published in May 2015,<sup id="cite_ref-highway20153_104-0" class="reference"><a href="#cite_note-highway20153-104"><span class="cite-bracket">[</span>104<span class="cite-bracket">]</span></a></sup> and the <a href="/wiki/Residual_neural_network" title="Residual neural network">residual neural network</a> (ResNet) in December 2015.<sup id="cite_ref-resnet20153_105-0" class="reference"><a href="#cite_note-resnet20153-105"><span class="cite-bracket">[</span>105<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-106" class="reference"><a href="#cite_note-106"><span class="cite-bracket">[</span>106<span class="cite-bracket">]</span></a></sup> ResNet behaves like an open-gated Highway Net. </p> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Transformer_(deep_learning_architecture)#History" title="Transformer (deep learning architecture)">Transformer (deep learning architecture) § History</a></div> <p>During the 2010s, the <a href="/wiki/Seq2seq" title="Seq2seq">seq2seq</a> model was developed, and attention mechanisms were added. It led to the modern Transformer architecture in 2017 in <i><a href="/wiki/Attention_Is_All_You_Need" title="Attention Is All You Need">Attention Is All You Need</a></i>.<sup id="cite_ref-vaswani2017_107-0" class="reference"><a href="#cite_note-vaswani2017-107"><span class="cite-bracket">[</span>107<span class="cite-bracket">]</span></a></sup> It requires computation time that is quadratic in the size of the context window. 
<a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Jürgen Schmidhuber</a>'s fast weight controller (1992)<sup id="cite_ref-transform19922_108-0" class="reference"><a href="#cite_note-transform19922-108"><span class="cite-bracket">[</span>108<span class="cite-bracket">]</span></a></sup> scales linearly and was later shown to be equivalent to the unnormalized linear Transformer.<sup id="cite_ref-fastlinear20202_109-0" class="reference"><a href="#cite_note-fastlinear20202-109"><span class="cite-bracket">[</span>109<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-schlag20212_110-0" class="reference"><a href="#cite_note-schlag20212-110"><span class="cite-bracket">[</span>110<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-DLhistory_10-7" class="reference"><a href="#cite_note-DLhistory-10"><span class="cite-bracket">[</span>10<span class="cite-bracket">]</span></a></sup> Transformers have increasingly become the model of choice for <a href="/wiki/Natural_language_processing" title="Natural language processing">natural language processing</a>.<sup id="cite_ref-wolf2020_111-0" class="reference"><a href="#cite_note-wolf2020-111"><span class="cite-bracket">[</span>111<span class="cite-bracket">]</span></a></sup> Many modern <a href="/wiki/Large_language_model" title="Large language model">large language models</a> such as <a href="/wiki/ChatGPT" title="ChatGPT">ChatGPT</a>, <a href="/wiki/GPT-4" title="GPT-4">GPT-4</a>, and <a href="/wiki/BERT_(language_model)" title="BERT (language model)">BERT</a> use this architecture. </p> </section><div class="mw-heading mw-heading2 section-heading" onclick="mfTempOpenSection(3)"><span class="indicator mf-icon mf-icon-expand mf-icon--small"></span><h2 id="Models">Models</h2><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=9" title="Edit section: Models" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div><section class="mf-section-3 collapsible-block" id="mf-section-3"> <style data-mw-deduplicate="TemplateStyles:r1251242444">.mw-parser-output .ambox{border:1px solid #a2a9b1;border-left:10px solid #36c;background-color:#fbfbfb;box-sizing:border-box}.mw-parser-output .ambox+link+.ambox,.mw-parser-output .ambox+link+style+.ambox,.mw-parser-output .ambox+link+link+.ambox,.mw-parser-output .ambox+.mw-empty-elt+link+.ambox,.mw-parser-output .ambox+.mw-empty-elt+link+style+.ambox,.mw-parser-output .ambox+.mw-empty-elt+link+link+.ambox{margin-top:-1px}html body.mediawiki .mw-parser-output .ambox.mbox-small-left{margin:4px 1em 4px 0;overflow:hidden;width:238px;border-collapse:collapse;font-size:88%;line-height:1.25em}.mw-parser-output .ambox-speedy{border-left:10px solid #b32424;background-color:#fee7e6}.mw-parser-output .ambox-delete{border-left:10px solid #b32424}.mw-parser-output .ambox-content{border-left:10px solid #f28500}.mw-parser-output .ambox-style{border-left:10px solid #fc3}.mw-parser-output .ambox-move{border-left:10px solid #9932cc}.mw-parser-output .ambox-protection{border-left:10px solid #a2a9b1}.mw-parser-output .ambox .mbox-text{border:none;padding:0.25em 0.5em;width:100%}.mw-parser-output .ambox .mbox-image{border:none;padding:2px 0 2px 0.5em;text-align:center}.mw-parser-output .ambox .mbox-imageright{border:none;padding:2px 0.5em 2px 
<h2 id="Models">Models</h2>
<div role="note" class="hatnote">Further information: <a href="/wiki/Mathematics_of_artificial_neural_networks">Mathematics of artificial neural networks</a></div>
<figure typeof="mw:File/Thumb"><a href="/wiki/File:Neuron3.png"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/4/44/Neuron3.png/300px-Neuron3.png" width="300" height="159" alt=""></a><figcaption>Neuron and myelinated axon, with signal flow from inputs at dendrites to outputs at axon terminals</figcaption></figure>
<p>ANNs began as an attempt to exploit the architecture of the human brain to perform tasks that conventional algorithms had little success with. They soon reoriented towards improving empirical results, abandoning attempts to remain true to their biological precursors. ANNs have the ability to learn and model non-linearities and complex relationships. This is achieved by neurons being connected in various patterns, allowing the output of some neurons to become the input of others. The network forms a <a href="/wiki/Directed_graph">directed</a>, <a href="/wiki/Weighted_graph">weighted graph</a>.<sup>[112]</sup></p>
<p>An artificial neural network consists of simulated neurons. Each neuron is connected to other <a href="/wiki/Vertex_(graph_theory)">nodes</a> via <a href="/wiki/Glossary_of_graph_theory_terms#edge">links</a>, like a biological axon-synapse-dendrite connection.
All the nodes connected by links take in some data and use it to perform specific operations and tasks on the data. Each link has a weight that determines the strength of one node's influence on another,<sup>[113]</sup> so the weights modulate the signals passed between neurons.</p>
<h3 id="Artificial_neurons">Artificial neurons</h3>
<p>ANNs are composed of <a href="/wiki/Artificial_neurons">artificial neurons</a>, which are conceptually derived from biological <a href="/wiki/Neuron">neurons</a>. Each artificial neuron has inputs and produces a single output, which can be sent to multiple other neurons.<sup>[114]</sup> The inputs can be the feature values of a sample of external data, such as images or documents, or they can be the outputs of other neurons. The outputs of the final <i>output neurons</i> of the neural net accomplish the task, such as recognizing an object in an image.<sup>[citation needed]</sup></p>
<p>To find the output of a neuron, we take the weighted sum of all the inputs, weighted by the <i>weights</i> of the <i>connections</i> from the inputs to the neuron, and add a <i>bias</i> term to this sum.<sup>[115]</sup> This weighted sum is sometimes called the <i>activation</i>. It is then passed through a (usually nonlinear) <a href="/wiki/Activation_function">activation function</a> to produce the output. The initial inputs are external data, such as images and documents. The ultimate outputs accomplish the task, such as recognizing an object in an image.<sup>[116]</sup></p>
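<p>As a sketch (the numbers and the choice of tanh are arbitrary), a single artificial neuron is just a weighted sum, a bias, and an activation function:</p>
<pre><code>import numpy as np

def neuron_output(inputs, weights, bias):
    activation = np.dot(weights, inputs) + bias  # weighted sum plus bias
    return np.tanh(activation)                   # nonlinear activation function

print(neuron_output(np.array([0.5, -1.0]), np.array([0.8, 0.2]), 0.1))
</code></pre>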
<h3 id="Organization">Organization</h3>
<p>The neurons are typically organized into multiple layers, especially in <a href="/wiki/Deep_learning">deep learning</a>. Neurons of one layer connect only to neurons of the immediately preceding and immediately following layers. The layer that receives external data is the <i>input layer</i>. The layer that produces the ultimate result is the <i>output layer</i>. In between them are zero or more <i>hidden layers</i>. Single-layer and unlayered networks are also used. Between two layers, multiple connection patterns are possible. They can be 'fully connected', with every neuron in one layer connecting to every neuron in the next layer. They can be <i>pooling</i>, where a group of neurons in one layer connects to a single neuron in the next layer, thereby reducing the number of neurons in that layer.<sup>[117]</sup> Neurons with only such connections form a <a href="/wiki/Directed_acyclic_graph">directed acyclic graph</a> and are known as <a href="/wiki/Feedforward_neural_network"><i>feedforward networks</i></a>.<sup>[118]</sup> Alternatively, networks that allow connections between neurons in the same or previous layers are known as <a href="/wiki/Recurrent_neural_network"><i>recurrent networks</i></a>.<sup>[119]</sup></p>
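<p>A fully connected feedforward network is then a chain of such neurons evaluated layer by layer. A minimal sketch (layer sizes and weights are invented):</p>
<pre><code>import numpy as np

rng = np.random.default_rng(0)

# 3 inputs -> 5 hidden neurons -> 2 outputs; every neuron in one
# layer connects to every neuron in the next ("fully connected").
layers = [(rng.normal(size=(5, 3)), np.zeros(5)),
          (rng.normal(size=(2, 5)), np.zeros(2))]

def forward(x, layers):
    for W, b in layers:
        x = np.tanh(W @ x + b)  # weighted sums, bias, then activation
    return x

print(forward(np.array([0.1, 0.2, 0.3]), layers))
</code></pre>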
Hyperparameter

Main article: Hyperparameter (machine learning)

A hyperparameter is a constant parameter whose value is set before the learning process begins, whereas the values of parameters are derived via learning. Examples of hyperparameters include the learning rate, the number of hidden layers and the batch size. The values of some hyperparameters can depend on those of other hyperparameters. For example, the size of some layers can depend on the overall number of layers.
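As an illustration (the names and the halving rule below are hypothetical, chosen only to show one hyperparameter depending on another):

```python
# hyperparameters are fixed before training begins
n_layers = 3                                   # number of hidden layers
hyperparams = {
    "learning_rate": 0.01,
    "batch_size": 32,
    # a layer-size schedule derived from n_layers: each hidden layer
    # is half the width of the previous one (hypothetical rule)
    "layer_sizes": [64 // (2 ** i) for i in range(n_layers)],
}
print(hyperparams)  # layer_sizes: [64, 32, 16]
```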
Learning

See also: Mathematical optimization, Estimation theory, and Machine learning

Learning is the adaptation of the network to better handle a task by considering sample observations. Learning involves adjusting the weights (and optional thresholds) of the network to improve the accuracy of the result. This is done by minimizing the observed errors. Learning is complete when examining additional observations does not usefully reduce the error rate. Even after learning, the error rate typically does not reach 0. If after learning the error rate is too high, the network typically must be redesigned. Practically, this is done by defining a cost function that is evaluated periodically during learning; as long as its output continues to decline, learning continues. The cost is frequently defined as a statistic whose value can only be approximated. The outputs are actually numbers, so when the error is low, the difference between the output (almost certainly a cat) and the correct answer (cat) is small. Learning attempts to reduce the total of the differences across the observations. Most learning models can be viewed as a straightforward application of optimization theory and statistical estimation.[112][120]

Learning rate

Main article: Learning rate

The learning rate defines the size of the corrective steps that the model takes to adjust for errors in each observation.[121] A high learning rate shortens the training time but yields lower ultimate accuracy, while a lower learning rate takes longer but can reach greater accuracy. Optimizations such as Quickprop are primarily aimed at speeding up error minimization, while other improvements mainly try to increase reliability. To avoid oscillation inside the network, such as alternating connection weights, and to improve the rate of convergence, refinements use an adaptive learning rate that increases or decreases as appropriate.[122] The concept of momentum allows the balance between the gradient and the previous change to be weighted such that the weight adjustment depends to some degree on the previous change. A momentum close to 0 emphasizes the gradient, while a value close to 1 emphasizes the last change.
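In symbols (this notation is an assumption for illustration: $\eta$ is the learning rate, $\alpha \in [0,1]$ the momentum coefficient, and $C$ the cost function), the update with momentum is

$$\Delta w_t = -\eta \, \nabla C(w_t) + \alpha \, \Delta w_{t-1}, \qquad w_{t+1} = w_t + \Delta w_t .$$

With $\alpha$ near 0 the step is dominated by the current gradient; with $\alpha$ near 1 it mostly repeats the previous change.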
(October 2024)">citation needed</span></a></i>]</sup> </p> <div class="mw-heading mw-heading4"><h4 id="Cost_function">Cost function</h4><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=15" title="Edit section: Cost function" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div> <p>While it is possible to define a cost function <a href="/wiki/Ad_hoc" title="Ad hoc">ad hoc</a>, frequently the choice is determined by the function's desirable properties (such as <a href="/wiki/Convex_function" title="Convex function">convexity</a>) or because it arises from the model (e.g. in a probabilistic model the model's <a href="/wiki/Posterior_probability" title="Posterior probability">posterior probability</a> can be used as an inverse cost).<sup class="noprint Inline-Template Template-Fact" style="white-space:nowrap;">[<i><a href="/wiki/Wikipedia:Citation_needed" title="Wikipedia:Citation needed"><span title="This claim needs references to reliable sources. (October 2024)">citation needed</span></a></i>]</sup> </p> <div class="mw-heading mw-heading4"><h4 id="Backpropagation_2">Backpropagation</h4><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=16" title="Edit section: Backpropagation" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Backpropagation" title="Backpropagation">Backpropagation</a></div> <p>Backpropagation is a method used to adjust the connection weights to compensate for each error found during learning. The error amount is effectively divided among the connections. Technically, backprop calculates the <a href="/wiki/Gradient" title="Gradient">gradient</a> (the derivative) of the <a href="/wiki/Loss_function" title="Loss function">cost function</a> associated with a given state with respect to the weights. 
The weight updates can be done via stochastic gradient descent or other methods, such as extreme learning machines,[123] "no-prop" networks,[124] training without backtracking,[125] "weightless" networks,[126][127] and non-connectionist neural networks.

Learning paradigms

Machine learning is commonly separated into three main learning paradigms: supervised learning,[128] unsupervised learning[129] and reinforcement learning.[130]
learning">supervised learning</a>,<sup id="cite_ref-128" class="reference"><a href="#cite_note-128"><span class="cite-bracket">[</span>128<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Unsupervised_learning" title="Unsupervised learning">unsupervised learning</a><sup id="cite_ref-129" class="reference"><a href="#cite_note-129"><span class="cite-bracket">[</span>129<span class="cite-bracket">]</span></a></sup> and <a href="/wiki/Reinforcement_learning" title="Reinforcement learning">reinforcement learning</a>.<sup id="cite_ref-130" class="reference"><a href="#cite_note-130"><span class="cite-bracket">[</span>130<span class="cite-bracket">]</span></a></sup> Each corresponds to a particular learning task. </p> <div class="mw-heading mw-heading4"><h4 id="Supervised_learning">Supervised learning</h4><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=18" title="Edit section: Supervised learning" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div> <p><a href="/wiki/Supervised_learning" title="Supervised learning">Supervised learning</a> uses a set of paired inputs and desired outputs. The learning task is to produce the desired output for each input. In this case, the cost function is related to eliminating incorrect deductions.<sup id="cite_ref-131" class="reference"><a href="#cite_note-131"><span class="cite-bracket">[</span>131<span class="cite-bracket">]</span></a></sup> A commonly used cost is the <a href="/wiki/Mean-squared_error" class="mw-redirect" title="Mean-squared error">mean-squared error</a>, which tries to minimize the average squared error between the network's output and the desired output. Tasks suited for supervised learning are <a href="/wiki/Pattern_recognition" title="Pattern recognition">pattern recognition</a> (also known as classification) and <a href="/wiki/Regression_analysis" title="Regression analysis">regression</a> (also known as function approximation). Supervised learning is also applicable to sequential data (e.g., for handwriting, speech and <a href="/wiki/Gesture_recognition" title="Gesture recognition">gesture recognition</a>). This can be thought of as learning with a "teacher", in the form of a function that provides continuous feedback on the quality of solutions obtained thus far. 
</p> <div class="mw-heading mw-heading4"><h4 id="Unsupervised_learning">Unsupervised learning</h4><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=19" title="Edit section: Unsupervised learning" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div> <p>In <a href="/wiki/Unsupervised_learning" title="Unsupervised learning">unsupervised learning</a>, input data is given along with the cost function, some function of the data <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle \textstyle x}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mstyle displaystyle="false" scriptlevel="0"> <mi>x</mi> </mstyle> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle \textstyle x}</annotation> </semantics> </math></span><noscript><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/1bd1c9e87a24e459706fa8e63b9e5d94db4540b4" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.338ex; width:1.33ex; height:1.676ex;" alt="{\displaystyle \textstyle x}"></noscript><span class="lazy-image-placeholder" style="width: 1.33ex;height: 1.676ex;vertical-align: -0.338ex;" data-src="https://wikimedia.org/api/rest_v1/media/math/render/svg/1bd1c9e87a24e459706fa8e63b9e5d94db4540b4" data-alt="{\displaystyle \textstyle x}" data-class="mwe-math-fallback-image-inline mw-invert skin-invert">&nbsp;</span></span> and the network's output. The cost function is dependent on the task (the model domain) and any <i><a href="/wiki/A_priori_and_a_posteriori" title="A priori and a posteriori">a priori</a></i> assumptions (the implicit properties of the model, its parameters and the observed variables). 
As a trivial example, consider the model <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle \textstyle f(x)=a}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mstyle displaystyle="false" scriptlevel="0"> <mi>f</mi> <mo stretchy="false">(</mo> <mi>x</mi> <mo stretchy="false">)</mo> <mo>=</mo> <mi>a</mi> </mstyle> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle \textstyle f(x)=a}</annotation> </semantics> </math></span><noscript><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/a0e4cc2a7c5c4f65d59b669d64c7759623c648c5" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.838ex; width:8.746ex; height:2.843ex;" alt="{\displaystyle \textstyle f(x)=a}"></noscript><span class="lazy-image-placeholder" style="width: 8.746ex;height: 2.843ex;vertical-align: -0.838ex;" data-src="https://wikimedia.org/api/rest_v1/media/math/render/svg/a0e4cc2a7c5c4f65d59b669d64c7759623c648c5" data-alt="{\displaystyle \textstyle f(x)=a}" data-class="mwe-math-fallback-image-inline mw-invert skin-invert">&nbsp;</span></span> where <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle \textstyle a}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mstyle displaystyle="false" scriptlevel="0"> <mi>a</mi> </mstyle> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle \textstyle a}</annotation> </semantics> </math></span><noscript><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/7abc4e8a2ef67ff9b7ad050afa4cf286c3ec0cd3" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.338ex; width:1.23ex; height:1.676ex;" alt="{\displaystyle \textstyle a}"></noscript><span class="lazy-image-placeholder" style="width: 1.23ex;height: 1.676ex;vertical-align: -0.338ex;" data-src="https://wikimedia.org/api/rest_v1/media/math/render/svg/7abc4e8a2ef67ff9b7ad050afa4cf286c3ec0cd3" data-alt="{\displaystyle \textstyle a}" data-class="mwe-math-fallback-image-inline mw-invert skin-invert">&nbsp;</span></span> is a constant and the cost <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle \textstyle C=E[(x-f(x))^{2}]}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mstyle displaystyle="false" scriptlevel="0"> <mi>C</mi> <mo>=</mo> <mi>E</mi> <mo stretchy="false">[</mo> <mo stretchy="false">(</mo> <mi>x</mi> <mo>−<!-- − --></mo> <mi>f</mi> <mo stretchy="false">(</mo> <mi>x</mi> <mo stretchy="false">)</mo> <msup> <mo stretchy="false">)</mo> <mrow class="MJX-TeXAtom-ORD"> <mn>2</mn> </mrow> </msup> <mo stretchy="false">]</mo> </mstyle> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle \textstyle C=E[(x-f(x))^{2}]}</annotation> </semantics> </math></span><noscript><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/2929ecb1606fdfeaddc55477d9671e11c034e21c" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.838ex; width:19.385ex; height:3.009ex;" alt="{\displaystyle 
\textstyle C=E[(x-f(x))^{2}]}"></noscript><span class="lazy-image-placeholder" style="width: 19.385ex;height: 3.009ex;vertical-align: -0.838ex;" data-src="https://wikimedia.org/api/rest_v1/media/math/render/svg/2929ecb1606fdfeaddc55477d9671e11c034e21c" data-alt="{\displaystyle \textstyle C=E[(x-f(x))^{2}]}" data-class="mwe-math-fallback-image-inline mw-invert skin-invert">&nbsp;</span></span>. Minimizing this cost produces a value of <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle \textstyle a}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mstyle displaystyle="false" scriptlevel="0"> <mi>a</mi> </mstyle> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle \textstyle a}</annotation> </semantics> </math></span><noscript><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/7abc4e8a2ef67ff9b7ad050afa4cf286c3ec0cd3" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.338ex; width:1.23ex; height:1.676ex;" alt="{\displaystyle \textstyle a}"></noscript><span class="lazy-image-placeholder" style="width: 1.23ex;height: 1.676ex;vertical-align: -0.338ex;" data-src="https://wikimedia.org/api/rest_v1/media/math/render/svg/7abc4e8a2ef67ff9b7ad050afa4cf286c3ec0cd3" data-alt="{\displaystyle \textstyle a}" data-class="mwe-math-fallback-image-inline mw-invert skin-invert">&nbsp;</span></span> that is equal to the mean of the data. The cost function can be much more complicated. Its form depends on the application: for example, in <a href="/wiki/Data_compression" title="Data compression">compression</a> it could be related to the <a href="/wiki/Mutual_information" title="Mutual information">mutual information</a> between <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle \textstyle x}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mstyle displaystyle="false" scriptlevel="0"> <mi>x</mi> </mstyle> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle \textstyle x}</annotation> </semantics> </math></span><noscript><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/1bd1c9e87a24e459706fa8e63b9e5d94db4540b4" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.338ex; width:1.33ex; height:1.676ex;" alt="{\displaystyle \textstyle x}"></noscript><span class="lazy-image-placeholder" style="width: 1.33ex;height: 1.676ex;vertical-align: -0.338ex;" data-src="https://wikimedia.org/api/rest_v1/media/math/render/svg/1bd1c9e87a24e459706fa8e63b9e5d94db4540b4" data-alt="{\displaystyle \textstyle x}" data-class="mwe-math-fallback-image-inline mw-invert skin-invert">&nbsp;</span></span> and <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle \textstyle f(x)}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mstyle displaystyle="false" scriptlevel="0"> <mi>f</mi> <mo stretchy="false">(</mo> <mi>x</mi> <mo stretchy="false">)</mo> </mstyle> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle \textstyle 
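A short numerical check of this trivial example (the data values and the use of gradient descent are illustrative assumptions):

```python
import numpy as np

x = np.array([1.0, 2.0, 3.0, 6.0])  # observed data

# minimize C(a) = E[(x - a)^2] by gradient descent on the constant a;
# the gradient is dC/da = -2 * E[x - a]
a = 0.0
for _ in range(1000):
    a -= 0.1 * (-2.0 * np.mean(x - a))

print(a, np.mean(x))  # both approach 3.0, the mean of the data
```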
Reinforcement learning

Main article: Reinforcement learning

See also: Stochastic control

In applications such as playing video games, an actor takes a string of actions, receiving a generally unpredictable response from the environment after each one. The goal is to win the game, i.e., to generate the most positive (lowest-cost) responses. In reinforcement learning, the aim is to weight the network (devise a policy) to perform actions that minimize long-term (expected cumulative) cost. At each point in time the agent performs an action and the environment generates an observation and an instantaneous cost, according to some (usually unknown) rules. The rules and the long-term cost usually can only be estimated. At any juncture, the agent decides whether to explore new actions to uncover their costs or to exploit prior learning to proceed more quickly.
Formally, the environment is modeled as a Markov decision process (MDP) with states $s_1, \ldots, s_n \in S$ and actions $a_1, \ldots, a_m \in A$. Because the state transitions are not known, probability distributions are used instead: the instantaneous cost distribution $P(c_t \mid s_t)$, the observation distribution $P(x_t \mid s_t)$ and the transition distribution $P(s_{t+1} \mid s_t, a_t)$, while a policy is defined as the conditional distribution over actions given the observations. Taken together, the two define a Markov chain (MC). The aim is to discover the lowest-cost MC.
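In this notation the goal can be stated compactly (the horizon $T$ and this particular form are assumptions for illustration): find a policy $\pi$ minimizing the expected cumulative cost,

$$\min_{\pi} \; E_{\pi} \left[ \sum_{t=0}^{T} c_t \right] .$$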
ANNs serve as the learning component in such applications.[132][133] Dynamic programming coupled with ANNs (giving neurodynamic programming)[134] has been applied to problems such as those involved in vehicle routing,[135] video games, natural resource management[136][137] and medicine,[138] because of ANNs' ability to mitigate losses of accuracy even when reducing the discretization grid density for numerically approximating the solution of control problems. Tasks that fall within the paradigm of reinforcement learning are control problems, games and other sequential decision-making tasks.
</p> <div class="mw-heading mw-heading4"><h4 id="Self-learning">Self-learning</h4><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=21" title="Edit section: Self-learning" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div> <p>Self-learning in neural networks was introduced in 1982 along with a neural network capable of self-learning named <i>crossbar adaptive array</i> (CAA).<sup id="cite_ref-139" class="reference"><a href="#cite_note-139"><span class="cite-bracket">[</span>139<span class="cite-bracket">]</span></a></sup> It is a system with only one input, situation s, and only one output, action (or behavior) a. It has neither external advice input nor external reinforcement input from the environment. The CAA computes, in a crossbar fashion, both decisions about actions and emotions (feelings) about encountered situations. The system is driven by the interaction between cognition and emotion.<sup id="cite_ref-140" class="reference"><a href="#cite_note-140"><span class="cite-bracket">[</span>140<span class="cite-bracket">]</span></a></sup> Given the memory matrix, W =||w(a,s)||, the crossbar self-learning algorithm in each iteration performs the following computation: </p> <pre> In situation s perform action a; Receive consequence situation s'; Compute emotion of being in consequence situation v(s'); Update crossbar memory w'(a,s) = w(a,s) + v(s'). </pre> <p>The backpropagated value (secondary reinforcement) is the emotion toward the consequence situation. The CAA exists in two environments, one is behavioral environment where it behaves, and the other is genetic environment, where from it initially and only once receives initial emotions about to be encountered situations in the behavioral environment. Having received the genome vector (species vector) from the genetic environment, the CAA will learn a goal-seeking behavior, in the behavioral environment that contains both desirable and undesirable situations.<sup id="cite_ref-141" class="reference"><a href="#cite_note-141"><span class="cite-bracket">[</span>141<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading4"><h4 id="Neuroevolution">Neuroevolution</h4><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=22" title="Edit section: Neuroevolution" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Neuroevolution" title="Neuroevolution">Neuroevolution</a></div> <p><a href="/wiki/Neuroevolution" title="Neuroevolution">Neuroevolution</a> can create neural network topologies and weights using <a href="/wiki/Evolutionary_computation" title="Evolutionary computation">evolutionary computation</a>. 
Neuroevolution

Main article: Neuroevolution

Neuroevolution can create neural network topologies and weights using evolutionary computation. It is competitive with sophisticated gradient descent approaches.[142][143] One advantage of neuroevolution is that it may be less prone to getting caught in "dead ends".[144]

Stochastic neural network

Stochastic neural networks originating from Sherrington–Kirkpatrick models are a type of artificial neural network built by introducing random variations into the network, either by giving the network's artificial neurons stochastic transfer functions or by giving them stochastic weights. This makes them useful tools for optimization problems, since the random fluctuations help the network escape from local minima.[145] Stochastic neural networks trained using a Bayesian approach are known as Bayesian neural networks.[146]
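One way to realize a stochastic transfer function is to perturb each unit's weighted sum with noise before applying the nonlinearity (Gaussian noise, its scale, and the tanh activation are illustrative assumptions):

```python
import numpy as np

rng = np.random.default_rng(1)

def stochastic_unit(x, w, b, noise_scale=0.1):
    # add random noise to the weighted sum before the nonlinearity;
    # the fluctuations can help optimization escape local minima
    z = np.dot(w, x) + b + noise_scale * rng.standard_normal()
    return np.tanh(z)

x = np.array([0.2, -0.4])
w = np.array([1.0, 0.5])
print(stochastic_unit(x, w, 0.0))  # output varies from call to call
```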
<a href="/wiki/Evolutionary_methods" class="mw-redirect" title="Evolutionary methods">Evolutionary methods</a>,<sup id="cite_ref-147" class="reference"><a href="#cite_note-147"><span class="cite-bracket">[</span>147<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Gene_expression_programming" title="Gene expression programming">gene expression programming</a>,<sup id="cite_ref-148" class="reference"><a href="#cite_note-148"><span class="cite-bracket">[</span>148<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Simulated_annealing" title="Simulated annealing">simulated annealing</a>,<sup id="cite_ref-149" class="reference"><a href="#cite_note-149"><span class="cite-bracket">[</span>149<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Expectation%E2%80%93maximization_algorithm" title="Expectation–maximization algorithm">expectation–maximization</a>, <a href="/wiki/Non-parametric_methods" class="mw-redirect" title="Non-parametric methods">non-parametric methods</a> and <a href="/wiki/Particle_swarm_optimization" title="Particle swarm optimization">particle swarm optimization</a><sup id="cite_ref-150" class="reference"><a href="#cite_note-150"><span class="cite-bracket">[</span>150<span class="cite-bracket">]</span></a></sup> are other learning algorithms. Convergent recursion is a learning algorithm for <a href="/wiki/Cerebellar_model_articulation_controller" title="Cerebellar model articulation controller">cerebellar model articulation controller</a> (CMAC) neural networks.<sup id="cite_ref-Qin1_151-0" class="reference"><a href="#cite_note-Qin1-151"><span class="cite-bracket">[</span>151<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Qin2_152-0" class="reference"><a href="#cite_note-Qin2-152"><span class="cite-bracket">[</span>152<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading4"><h4 id="Modes">Modes</h4><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=25" title="Edit section: Modes" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1251242444"><table class="box-No_footnotes plainlinks metadata ambox ambox-style ambox-No_footnotes" role="presentation"><tbody><tr><td class="mbox-text"><div class="mbox-text-span">This section includes a <a href="/wiki/Wikipedia:Citing_sources" title="Wikipedia:Citing sources">list of references</a>, <a href="/wiki/Wikipedia:Further_reading" title="Wikipedia:Further reading">related reading</a>, or <a href="/wiki/Wikipedia:External_links" title="Wikipedia:External links">external links</a>, <b>but its sources remain unclear because it lacks <a href="/wiki/Wikipedia:Citing_sources#Inline_citations" title="Wikipedia:Citing sources">inline citations</a></b>.<span class="hide-when-compact"> Please help <a href="/wiki/Wikipedia:WikiProject_Fact_and_Reference_Check" class="mw-redirect" title="Wikipedia:WikiProject Fact and Reference Check">improve</a> this section by <a href="/wiki/Wikipedia:When_to_cite" title="Wikipedia:When to cite">introducing</a> more precise citations.</span> <span class="date-container"><i>(<span class="date">August 2019</span>)</i></span><span class="hide-when-compact"><i> (<small><a 
href="/wiki/Help:Maintenance_template_removal" title="Help:Maintenance template removal">Learn how and when to remove this message</a></small>)</i></span></div></td></tr></tbody></table> <p>Two modes of learning are available: <a href="/wiki/Stochastic_gradient_descent" title="Stochastic gradient descent">stochastic</a> and batch. In stochastic learning, each input creates a weight adjustment. In batch learning weights are adjusted based on a batch of inputs, accumulating errors over the batch. Stochastic learning introduces "noise" into the process, using the local gradient calculated from one data point; this reduces the chance of the network getting stuck in local minima. However, batch learning typically yields a faster, more stable descent to a local minimum, since each update is performed in the direction of the batch's average error. A common compromise is to use "mini-batches", small batches with samples in each batch selected stochastically from the entire data set. </p> </section><div class="mw-heading mw-heading2 section-heading" onclick="mfTempOpenSection(4)"><span class="indicator mf-icon mf-icon-expand mf-icon--small"></span><h2 id="Types">Types</h2><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=26" title="Edit section: Types" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div><section class="mf-section-4 collapsible-block" id="mf-section-4"> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Types_of_artificial_neural_networks" title="Types of artificial neural networks">Types of artificial neural networks</a></div> <p>ANNs have evolved into a broad family of techniques that have advanced the state of the art across multiple domains. The simplest types have one or more static components, including number of units, number of layers, unit weights and <a href="/wiki/Topology" title="Topology">topology</a>. Dynamic types allow one or more of these to evolve via learning. The latter is much more complicated but can shorten learning periods and produce better results. Some types allow/require learning to be "supervised" by the operator, while others operate independently. Some types operate purely in hardware, while others are purely software and run on general purpose computers. 
Types

Main article: Types of artificial neural networks

ANNs have evolved into a broad family of techniques that have advanced the state of the art across multiple domains. The simplest types have one or more static components, including number of units, number of layers, unit weights and topology. Dynamic types allow one or more of these to evolve via learning. The latter is much more complicated but can shorten learning periods and produce better results. Some types allow/require learning to be "supervised" by the operator, while others operate independently. Some types operate purely in hardware, while others are purely software and run on general-purpose computers.

Some of the main breakthroughs include:

- Convolutional neural networks, which have proven particularly successful in processing visual and other two-dimensional data;[153][154]
- Long short-term memory, which avoids the vanishing gradient problem[155] and can handle signals that have a mix of low- and high-frequency components, aiding large-vocabulary speech recognition,[156][157] text-to-speech synthesis,[158][159][160] and photo-real talking heads;[161]
- Competitive networks such as generative adversarial networks, in which multiple networks (of varying structure) compete with each other, on tasks such as winning a game[162] or deceiving the opponent about the authenticity of an input.[94]

Network design

Using artificial neural networks requires an understanding of their characteristics.
- Choice of model: This depends on the data representation and the application. Model parameters include the number, type, and connectedness of network layers, as well as the size of each and the connection type (full, pooling, etc.). Overly complex models learn slowly.
- Learning algorithm: Numerous trade-offs exist between learning algorithms. Almost any algorithm will work well with the correct hyperparameters[163] for training on a particular data set. However, selecting and tuning an algorithm for training on unseen data requires significant experimentation.
- Robustness: If the model, cost function and learning algorithm are selected appropriately, the resulting ANN can become robust.

Neural architecture search (NAS) uses machine learning to automate ANN design. Various approaches to NAS have designed networks that compare well with hand-designed systems. The basic search algorithm is to propose a candidate model, evaluate it against a dataset, and use the results as feedback to teach the NAS network.[164] Available systems include AutoML and AutoKeras.[165] The scikit-learn library provides functions to help with building a deep network from scratch; a deep network can then be implemented with TensorFlow or Keras.
<p>Hyperparameters must also be defined as part of the design (they are not learned), governing matters such as the number of neurons in each layer, the learning rate, step, stride, depth, receptive field and padding (for CNNs), etc.<sup>[166]</sup></p>
<p>The following <a href="/wiki/Python_(programming_language)" title="Python (programming language)">Python</a> code snippet provides an overview of a training function, which uses the training dataset, number of hidden layer units, learning rate, and number of iterations as parameters:</p>
<pre>
import numpy as np

def sigmoid(z):
    # standard logistic sigmoid activation
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_derivative(z):
    s = sigmoid(z)
    return s * (1.0 - s)

def train(X, y, n_hidden, learning_rate, n_iter):
    m, n_input = X.shape

    # 1. randomly initialize weights and biases
    w1 = np.random.randn(n_input, n_hidden)
    b1 = np.zeros((1, n_hidden))
    w2 = np.random.randn(n_hidden, 1)
    b2 = np.zeros((1, 1))

    # 2. in each iteration, feed all layers with the latest weights and biases
    for i in range(n_iter + 1):
        # forward pass: sigmoid hidden layer, linear output
        z2 = np.dot(X, w1) + b1
        a2 = sigmoid(z2)
        z3 = np.dot(a2, w2) + b2
        a3 = z3

        # backward pass: gradients of the mean squared error
        dz3 = a3 - y
        dw2 = np.dot(a2.T, dz3)
        db2 = np.sum(dz3, axis=0, keepdims=True)
        dz2 = np.dot(dz3, w2.T) * sigmoid_derivative(z2)
        dw1 = np.dot(X.T, dz2)
        db1 = np.sum(dz2, axis=0)

        # 3. update weights and biases with gradients
        w1 -= learning_rate * dw1 / m
        w2 -= learning_rate * dw2 / m
        b1 -= learning_rate * db1 / m
        b2 -= learning_rate * db2 / m

        if i % 1000 == 0:
            print("Epoch", i, "loss: ", np.mean(np.square(dz3)))

    model = {"w1": w1, "b1": b1, "w2": w2, "b2": b2}
    return model
</pre>
<p><sup>[<i>citation needed</i>]</sup></p>
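<p>As a usage sketch, the trained parameters can be applied to new inputs by repeating the forward pass; the <code>predict</code> helper and the toy data below are illustrative additions, not part of the snippet above:</p>
<pre>
def predict(model, X):
    # repeat the forward pass with the trained parameters
    a2 = sigmoid(np.dot(X, model["w1"]) + model["b1"])
    return np.dot(a2, model["w2"]) + model["b2"]

# toy one-dimensional regression problem
X = np.linspace(-3.0, 3.0, 200).reshape(-1, 1)
y = np.sin(X)
model = train(X, y, n_hidden=20, learning_rate=0.5, n_iter=5000)
print(predict(model, np.array([[0.5]])))  # should be close to sin(0.5)
</pre>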
<h2 id="Applications">Applications</h2>
<p>Because of their ability to reproduce and model nonlinear processes, artificial neural networks have found applications in many disciplines. These include:</p>
<ul>
<li><a href="/wiki/Function_approximation" title="Function approximation">Function approximation</a>,<sup>[167]</sup> or <a href="/wiki/Regression_analysis" title="Regression analysis">regression analysis</a><sup>[168]</sup> (including <a href="/wiki/Time_series#Prediction_and_forecasting" title="Time series">time series prediction</a>, <a href="/wiki/Fitness_approximation" title="Fitness approximation">fitness approximation</a>,<sup>[169]</sup> and modeling)</li>
<li><a href="/wiki/Data_processing" title="Data processing">Data processing</a><sup>[170]</sup> (including filtering, clustering, <a href="/wiki/Blind_source_separation" title="Blind source separation">blind source separation</a>,<sup>[171]</sup> and compression)</li>
<li><a href="/wiki/Nonlinear_system_identification" title="Nonlinear system identification">Nonlinear system identification</a><sup>[93]</sup> and control (including vehicle control, trajectory prediction,<sup>[172]</sup> <a href="/wiki/Adaptive_control" title="Adaptive control">adaptive control</a>, <a href="/wiki/Process_control" title="Process control">process control</a>, and <a href="/wiki/Natural_resource_management" title="Natural resource management">natural resource management</a>)</li>
<li><a href="/wiki/Pattern_recognition" title="Pattern recognition">Pattern recognition</a> (including radar systems, <a href="/wiki/Facial_recognition_system" title="Facial recognition system">face identification</a>, signal classification,<sup>[173]</sup> <a href="/wiki/Novelty_detection" title="Novelty detection">novelty detection</a>, <a href="/wiki/3D_reconstruction" title="3D reconstruction">3D reconstruction</a>,<sup>[174]</sup> object recognition, and sequential decision making<sup>[175]</sup>)</li>
<li>Sequence recognition (including <a href="/wiki/Gesture_recognition" title="Gesture recognition">gesture</a>, <a href="/wiki/Speech_recognition" title="Speech recognition">speech</a>, and <a href="/wiki/Handwriting_recognition" title="Handwriting recognition">handwritten</a> and printed text recognition<sup>[176]</sup>)</li>
<li>Sensor data analysis<sup>[177]</sup> (including <a href="/wiki/Image_analysis" title="Image analysis">image analysis</a>)</li>
<li><a href="/wiki/Robotics" title="Robotics">Robotics</a> (including directing manipulators and <a href="/wiki/Prosthesis" title="Prosthesis">prostheses</a>)</li>
<li><a href="/wiki/Data_mining" title="Data mining">Data mining</a> (including <a href="/wiki/Knowledge_discovery_in_databases" title="Knowledge discovery in databases">knowledge discovery in databases</a>)</li>
<li>Finance<sup>[178]</sup> (such as <a href="/wiki/Ex-ante" title="Ex-ante">ex-ante</a> models for specific financial long-run forecasts and <a href="/wiki/Artificial_financial_market" title="Artificial financial market">artificial financial markets</a>)</li>
<li><a href="/wiki/Quantum_chemistry" title="Quantum chemistry">Quantum chemistry</a><sup>[179]</sup></li>
<li><a href="/wiki/General_game_playing" title="General game playing">General game playing</a><sup>[180]</sup></li>
<li><a href="/wiki/Generative_AI" title="Generative AI">Generative AI</a><sup>[181]</sup></li>
<li><a href="/wiki/Data_visualization" title="Data visualization">Data visualization</a></li>
<li><a href="/wiki/Machine_translation" title="Machine translation">Machine translation</a></li>
<li>Social network filtering<sup>[182]</sup></li>
<li><a href="/wiki/E-mail_spam" title="E-mail spam">E-mail spam</a> filtering</li>
<li><a href="/wiki/Medical_diagnosis" title="Medical diagnosis">Medical diagnosis</a><sup>[183]</sup></li>
</ul>
<p>ANNs have been used to diagnose several types of cancers<sup>[184]</sup><sup>[185]</sup> and to distinguish highly invasive cancer cell lines from less invasive lines using only cell shape information.<sup>[186]</sup><sup>[187]</sup></p>
<p>ANNs have been used to accelerate reliability analysis of infrastructures subject to natural disasters<sup>[188]</sup><sup>[189]</sup> and to predict foundation settlements.<sup>[190]</sup> They can also help mitigate flooding when used to model rainfall-runoff.<sup>[191]</sup> ANNs have also been used for building black-box models in <a href="/wiki/Geoscience" title="Geoscience">geoscience</a>: <a href="/wiki/Hydrology" title="Hydrology">hydrology</a>,<sup>[192]</sup><sup>[193]</sup> ocean modelling and <a href="/wiki/Coastal_engineering" title="Coastal engineering">coastal engineering</a>,<sup>[194]</sup><sup>[195]</sup> and <a href="/wiki/Geomorphology" title="Geomorphology">geomorphology</a>.<sup>[196]</sup> ANNs have been employed in <a href="/wiki/Computer_security" title="Computer security">cybersecurity</a>, with the objective of discriminating between legitimate activities and malicious ones.
For example, machine learning has been used for classifying Android malware,<sup>[197]</sup> for identifying domains belonging to threat actors, and for detecting URLs posing a security risk.<sup>[198]</sup> Research is underway on ANN systems designed for penetration testing, and for detecting botnets,<sup>[199]</sup> credit card fraud<sup>[200]</sup> and network intrusions.</p>
<p>ANNs have been proposed as a tool to solve <a href="/wiki/Partial_differential_equation" title="Partial differential equation">partial differential equations</a> in physics<sup>[201]</sup><sup>[202]</sup><sup>[203]</sup> and to simulate the properties of many-body <a href="/wiki/Open_quantum_system" title="Open quantum system">open quantum systems</a>.<sup>[204]</sup><sup>[205]</sup><sup>[206]</sup><sup>[207]</sup> In brain research, ANNs have been used to study the short-term behavior of <a href="/wiki/Biological_neuron_models" title="Biological neuron models">individual neurons</a>,<sup>[208]</sup> how the dynamics of neural circuitry arise from interactions between individual neurons, and how behavior can arise from abstract neural modules that represent complete subsystems. Studies have considered the long- and short-term plasticity of neural systems and their relation to learning and memory, from the individual neuron to the system level.</p>
<p>It is possible to create a profile of a user's interests from pictures, using artificial neural networks trained for object recognition.<sup>[209]</sup></p>
<p>Beyond their traditional applications, artificial neural networks are increasingly being utilized in interdisciplinary research, such as materials science.
For instance, graph neural networks (GNNs) have demonstrated their capability in scaling deep learning for the discovery of new stable materials by efficiently predicting the total energy of crystals. This application underscores the adaptability and potential of ANNs in tackling complex problems beyond the realms of predictive modeling and artificial intelligence, opening new pathways for scientific discovery and innovation.<sup>[210]</sup></p>
<h2 id="Theoretical_properties">Theoretical properties</h2>
<h3 id="Computational_power">Computational power</h3>
<p>The <a href="/wiki/Multilayer_perceptron" title="Multilayer perceptron">multilayer perceptron</a> is a <a href="/wiki/UTM_theorem" title="UTM theorem">universal function</a> approximator, as proven by the <a href="/wiki/Universal_approximation_theorem" title="Universal approximation theorem">universal approximation theorem</a>. However, the proof is not constructive regarding the number of neurons required, the network topology, the weights and the learning parameters.</p>
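<p>The theorem can be illustrated empirically with the <code>train</code> and <code>predict</code> sketches above: on a compact interval, a single hidden layer tends to fit a smooth target better as it widens. The target function and training settings are illustrative assumptions:</p>
<pre>
# Illustrative check of universal approximation on a compact interval,
# reusing the train/predict sketches above; all settings are assumptions.
X = np.linspace(-np.pi, np.pi, 200).reshape(-1, 1)
y = np.sin(X)

for n_hidden in (2, 8, 32):
    model = train(X, y, n_hidden=n_hidden, learning_rate=0.5, n_iter=5000)
    err = np.max(np.abs(predict(model, X) - y))
    print(f"hidden units: {n_hidden:3d}  max |error|: {err:.3f}")
</pre>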
<p>A specific recurrent architecture with <a href="/wiki/Rational_number" title="Rational number">rational</a>-valued weights (as opposed to full-precision <a href="/wiki/Real_number" title="Real number">real number</a>-valued weights) has the power of a <a href="/wiki/Universal_Turing_machine" title="Universal Turing machine">universal Turing machine</a>,<sup>[211]</sup> using a finite number of neurons and standard linear connections. Further, the use of <a href="/wiki/Irrational_number" title="Irrational number">irrational</a> values for weights results in a machine with <a href="/wiki/Hypercomputation" title="Hypercomputation">super-Turing</a> power.<sup>[212]</sup><sup>[213]</sup><sup>[<i>failed verification</i>]</sup></p>
<h3 id="Capacity">Capacity</h3>
<p>A model's "capacity" property corresponds to its ability to model any given function. It is related to the amount of information that can be stored in the network and to the notion of complexity. Two notions of capacity are in use: the information capacity and the VC dimension. The information capacity of a perceptron is intensively discussed in Sir David MacKay's book,<sup>[214]</sup> which summarizes work by Thomas Cover.<sup>[215]</sup> The capacity of a network of standard neurons (not convolutional) can be derived using four rules<sup>[216]</sup> that follow from understanding a neuron as an electrical element. The information capacity captures the functions modelable by the network given any data as input. The second notion is the <a href="/wiki/VC_dimension" title="VC dimension">VC dimension</a>, which uses the principles of <a href="/wiki/Measure_theory" title="Measure theory">measure theory</a> to find the maximum capacity under the best possible circumstances: that is, with input data given in a specific form. As noted in MacKay's book,<sup>[214]</sup> the VC dimension for arbitrary inputs is half the information capacity of a perceptron. The VC dimension for arbitrary points is sometimes referred to as memory capacity.<sup>[217]</sup></p>
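<p>Cover's function-counting result makes the perceptron's information capacity concrete: for \(m\) points in general position in \(n\) dimensions, the number of linearly separable dichotomies is \(C(m,n) = 2\sum_{k=0}^{n-1}\binom{m-1}{k}\), which equals \(2^m\) for \(m \le n\) and falls to half of \(2^m\) at \(m = 2n\). A small sketch of the computation (the formula is Cover's; the code itself is illustrative):</p>
<pre>
# Cover's function-counting theorem: number of dichotomies of m points
# in general position in R^n that a perceptron can realize.
from math import comb

def cover_count(m, n):
    return 2 * sum(comb(m - 1, k) for k in range(n))

n = 10  # input dimension (illustrative)
for m in (10, 20, 21, 40):
    frac = cover_count(m, n) / 2**m
    print(f"m={m:3d}: separable fraction = {frac:.3f}")
# the fraction stays at 1.0 up to m = n points and is exactly 0.5 at m = 2n,
# matching the rule of thumb that perceptron capacity is about 2n points
</pre>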
<h3 id="Convergence">Convergence</h3>
<p>Models may not consistently converge on a single solution, firstly because local minima may exist, depending on the cost function and the model. Secondly, the optimization method used might not be guaranteed to converge when it begins far from any local minimum. Thirdly, for sufficiently large data or parameters, some methods become impractical.</p>
<p>Another issue worth mentioning is that training may cross a <a href="/wiki/Saddle_point" title="Saddle point">saddle point</a>, which may steer convergence in the wrong direction.</p>
<p>The convergence behavior of certain types of ANN architectures is better understood than that of others. As the width of the network approaches infinity, the ANN is well described by its first-order Taylor expansion throughout training, and so inherits the convergence behavior of <a href="/wiki/Linear_model" title="Linear model">affine models</a>.<sup>[218]</sup><sup>[219]</sup> Another example: when parameters are small, ANNs are often observed to fit target functions from low to high frequencies. This behavior is referred to as the spectral bias, or frequency principle, of neural networks.<sup>[220]</sup><sup>[221]</sup><sup>[222]</sup><sup>[223]</sup> This phenomenon is the opposite of the behavior of some well-studied iterative numerical schemes such as the <a href="/wiki/Jacobi_method" title="Jacobi method">Jacobi method</a>. Deeper neural networks have been observed to be more biased towards low-frequency functions.<sup>[224]</sup></p>
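<p>The frequency principle can be observed with the small network sketched earlier: after a modest amount of training on a two-frequency target, typically far less of the low-frequency component remains in the residual than of the high-frequency one. The target and settings below are illustrative assumptions:</p>
<pre>
# Illustrative spectral-bias check: fit a two-frequency target briefly,
# then compare how much of each frequency remains in the residual.
X = np.linspace(-np.pi, np.pi, 400).reshape(-1, 1)
y = np.sin(X) + np.sin(5 * X)          # low + high frequency components

model = train(X, y, n_hidden=40, learning_rate=0.2, n_iter=2000)
residual = (y - predict(model, X)).ravel()

# project the residual onto each component (nearly orthogonal on this grid)
for k in (1, 5):
    basis = np.sin(k * X).ravel()
    amp = abs(residual @ basis) / (basis @ basis)
    print(f"remaining amplitude at frequency {k}: {amp:.3f}")
# typically the k=1 amplitude is much smaller: low frequencies are fit first
</pre>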
<h3 id="Generalization_and_statistics">Generalization and statistics</h3>
<p>Applications whose goal is to create a system that generalizes well to unseen examples face the possibility of over-training. This arises in convoluted or over-specified systems when the network capacity significantly exceeds the needed free parameters. Two approaches address over-training. The first is to use <a href="/wiki/Cross-validation_(statistics)" title="Cross-validation (statistics)">cross-validation</a> and similar techniques to check for the presence of over-training and to select hyperparameters that minimize the generalization error.</p>
<p>The second is to use some form of <i><a href="/wiki/Regularization_(mathematics)" title="Regularization (mathematics)">regularization</a></i>. This concept emerges in a probabilistic (Bayesian) framework, where regularization can be performed by selecting a larger prior probability over simpler models, but also in statistical learning theory, where the goal is to minimize over two quantities: the 'empirical risk' and the 'structural risk', which roughly correspond to the error over the training set and the predicted error in unseen data due to overfitting.</p>
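<p>In the gradient-descent sketch above, the simplest regularizer of this kind is L2 weight decay, which penalizes large weights; only the update step changes. The helper and penalty strength below are illustrative assumptions:</p>
<pre>
# L2 weight decay in the update step of the earlier train() sketch: the
# penalty (lam/2)*(||w1||^2 + ||w2||^2) adds lam*w to each weight gradient.
lam = 1e-3  # illustrative penalty strength

def update_with_decay(w, dw, learning_rate, m, lam):
    # gradient step plus weight decay; biases are usually left unregularized
    return w - learning_rate * (dw / m + lam * w)
</pre>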
<figure>
<a href="/wiki/File:Synapse_deployment.jpg">[Image: Synapse deployment]</a>
<figcaption>Confidence analysis of a neural network</figcaption>
</figure>
<p>Supervised neural networks that use a <a href="/wiki/Mean_squared_error" title="Mean squared error">mean squared error</a> (MSE) cost function can use formal statistical methods to determine the confidence of the trained model. The MSE on a validation set can be used as an estimate for variance. This value can then be used to calculate the <a href="/wiki/Confidence_interval" title="Confidence interval">confidence interval</a> of network output, assuming a <a href="/wiki/Normal_distribution" title="Normal distribution">normal distribution</a>. A confidence analysis made this way is statistically valid as long as the output <a href="/wiki/Probability_distribution" title="Probability distribution">probability distribution</a> stays the same and the network is not modified.</p>
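<p>Under those assumptions the interval is the usual normal one, prediction ± 1.96·√MSE<sub>val</sub> for 95% confidence. A minimal sketch, reusing the <code>train</code> and <code>predict</code> helpers from above with an assumed train/validation split:</p>
<pre>
# Minimal sketch of a normal-theory confidence interval for network output.
X_train = np.random.uniform(-np.pi, np.pi, (200, 1))
y_train = np.sin(X_train)
X_val = np.random.uniform(-np.pi, np.pi, (50, 1))
y_val = np.sin(X_val)

model = train(X_train, y_train, n_hidden=20, learning_rate=0.5, n_iter=5000)
mse_val = np.mean((predict(model, X_val) - y_val) ** 2)  # variance estimate
half_width = 1.96 * np.sqrt(mse_val)                     # 95% normal quantile

x_new = np.array([[0.3]])
center = predict(model, x_new).item()
print(f"95% CI at x=0.3: [{center - half_width:.3f}, {center + half_width:.3f}]")
</pre>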
<p>By assigning a <a href="/wiki/Softmax_activation_function" title="Softmax activation function">softmax activation function</a>, a generalization of the <a href="/wiki/Logistic_function" title="Logistic function">logistic function</a>, on the output layer of the neural network (or a softmax component in a component-based network) for categorical target variables, the outputs can be interpreted as posterior probabilities. This is useful in classification, as it gives a certainty measure on classifications.</p>
<p>The softmax activation function is:</p>
<dl><dd>\(y_i = \dfrac{e^{x_i}}{\sum_{j=1}^{c} e^{x_j}}\)</dd></dl>
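<p>A direct transcription in NumPy; subtracting the maximum before exponentiating is a standard numerical-stability detail that does not change the result:</p>
<pre>
# Softmax over a vector of c scores; the max subtraction is a standard
# numerical-stability trick and leaves the output unchanged.
def softmax(x):
    e = np.exp(x - np.max(x))
    return e / e.sum()

scores = np.array([2.0, 1.0, 0.1])
print(softmax(scores))        # posterior-probability interpretation
print(softmax(scores).sum())  # components sum to 1
</pre>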
<h2 id="Criticism">Criticism</h2>
<h3 id="Training_2">Training</h3>
<p>A common criticism of neural networks, particularly in robotics, is that they require too many training samples for real-world operation.<sup>[225]</sup> Any learning machine needs sufficient representative examples in order to capture the underlying structure that allows it to generalize to new cases. Potential solutions include randomly shuffling training examples, using a numerical optimization algorithm that does not take too large steps when changing the network connections following an example, grouping examples in so-called mini-batches (sketched below), and/or introducing a recursive least squares algorithm for <a href="/wiki/Cerebellar_model_articulation_controller" title="Cerebellar model articulation controller">CMAC</a>.<sup>[151]</sup> Dean Pomerleau used a neural network to train a robotic vehicle to drive on multiple types of roads (single lane, multi-lane, dirt, etc.), and a large amount of his research was devoted to extrapolating multiple training scenarios from a single training experience, and to preserving past training diversity so that the system does not become overtrained (if, for example, it is presented with a series of right turns, it should not learn to always turn right).<sup>[226]</sup></p>
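<p>A minimal mini-batch variant of one epoch of the earlier training loop; the batch size and shuffling scheme are illustrative assumptions:</p>
<pre>
# Sketch of mini-batch processing for one epoch of the earlier train() loop.
batch_size = 32
perm = np.random.permutation(len(X))        # shuffle examples each epoch
for start in range(0, len(X), batch_size):
    idx = perm[start:start + batch_size]
    Xb, yb = X[idx], y[idx]
    # run the forward/backward pass of train() on (Xb, yb) instead of (X, y),
    # dividing the gradients by len(Xb) rather than m
</pre>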
<h3 id="Theory">Theory</h3>
<p>A central claim<sup>[<i>citation needed</i>]</sup> of ANNs is that they embody new and powerful general principles for processing information. These principles are ill-defined. It is often claimed<sup>[<i>by whom?</i>]</sup> that they are <a href="/wiki/Emergent_properties" title="Emergent properties">emergent</a> from the network itself. This allows simple statistical association (the basic function of artificial neural networks) to be described as learning or recognition. In 1997, <a href="/wiki/Alexander_Dewdney" title="Alexander Dewdney">Alexander Dewdney</a>, a former <i><a href="/wiki/Scientific_American" title="Scientific American">Scientific American</a></i> columnist, commented that as a result, artificial neural networks have a "something-for-nothing quality, one that imparts a peculiar aura of laziness and a distinct lack of curiosity about just how good these computing systems are. No human hand (or mind) intervenes; solutions are found as if by magic; and no one, it seems, has learned anything".<sup>[227]</sup> One response to Dewdney is that neural networks have been successfully used to handle many complex and diverse tasks, ranging from autonomously flying aircraft<sup>[228]</sup> to detecting credit card fraud to mastering the game of <a href="/wiki/Go_(game)" title="Go (game)">Go</a>.</p>
<p>Technology writer Roger Bridgman commented:</p>
<blockquote>
<p>Neural networks, for instance, are in the dock not only because they have been hyped to high heaven, (what hasn't?) but also because you could create a successful net without understanding how it worked: the bunch of numbers that captures its behaviour would in all probability be "an opaque, unreadable table...valueless as a scientific resource".</p>
<p>In spite of his emphatic declaration that science is not technology, Dewdney seems here to pillory neural nets as bad science when most of those devising them are just trying to be good engineers. An unreadable table that a useful machine could read would still be well worth having.<sup>[229]</sup></p>
</blockquote>
<p>Although it is true that analyzing what has been learned by an artificial neural network is difficult, it is much easier to do so than to analyze what has been learned by a biological neural network. Moreover, the recent emphasis on the <a href="/wiki/Explainable_artificial_intelligence" title="Explainable artificial intelligence">explainability</a> of AI has contributed to the development of methods, notably those based on <a href="/wiki/Attention_(machine_learning)" title="Attention (machine learning)">attention</a> mechanisms, for visualizing and explaining learned neural networks. Furthermore, researchers involved in exploring learning algorithms for neural networks are gradually uncovering generic principles that allow a learning machine to be successful; for example, Bengio and LeCun (2007) wrote an article regarding local vs. non-local learning, as well as shallow vs. deep architecture.<sup>[230]</sup></p>
<p>Biological brains use both shallow and deep circuits as reported by brain anatomy,<sup>[231]</sup> displaying a wide variety of invariance.
Weng<sup>[232]</sup> argued that the brain self-wires largely according to signal statistics and that, therefore, a serial cascade cannot catch all major statistical dependencies.</p>
<h3 id="Hardware">Hardware</h3>
<p>Large and effective neural networks require considerable computing resources.<sup>[233]</sup> While the brain has hardware tailored to the task of processing signals through a <a href="/wiki/Graph_(discrete_mathematics)" title="Graph (discrete mathematics)">graph</a> of neurons, simulating even a simplified neuron on a <a href="/wiki/Von_Neumann_architecture" title="Von Neumann architecture">von Neumann architecture</a> may consume vast amounts of <a href="/wiki/Random-access_memory" title="Random-access memory">memory</a> and storage. Furthermore, the designer often needs to transmit signals through many of these connections and their associated neurons, a task that requires enormous <a href="/wiki/Central_processing_unit" title="Central processing unit">CPU</a> power and time.<sup>[<i>citation needed</i>]</sup></p>
<p>Some argue that the resurgence of neural networks in the twenty-first century is largely attributable to advances in hardware: from 1991 to 2015, computing power, especially as delivered by <a href="/wiki/General-purpose_computing_on_graphics_processing_units" title="General-purpose computing on graphics processing units">GPGPUs</a> (on <a href="/wiki/Graphics_processing_unit" title="Graphics processing unit">GPUs</a>), increased around a million-fold, making the standard backpropagation algorithm feasible for training networks that are several layers deeper than before.<sup>[38]</sup> The use of accelerators such as <a href="/wiki/Field-programmable_gate_array" title="Field-programmable gate array">FPGAs</a> and GPUs can reduce training times from months to days.<sup>[233]</sup><sup>[234]</sup></p>
<p><a href="/wiki/Neuromorphic_engineering" title="Neuromorphic engineering">Neuromorphic engineering</a> or a <a href="/wiki/Physical_neural_network" title="Physical neural network">physical neural network</a> addresses the hardware difficulty directly, by constructing non-von-Neumann chips that implement neural networks directly in circuitry. Another type of chip optimized for neural network processing is called a <a href="/wiki/Tensor_Processing_Unit" title="Tensor Processing Unit">Tensor Processing Unit</a>, or TPU.<sup>[235]</sup></p>
<h3 id="Practical_counterexamples">Practical counterexamples</h3>
<p>Analyzing what has been learned by an ANN is much easier than analyzing what has been learned by a biological neural network. Furthermore, researchers involved in exploring learning algorithms for neural networks are gradually uncovering general principles that allow a learning machine to be successful; examples include local vs. non-local learning and shallow vs.
deep architecture.<sup>[236]</sup></p>
<h3 id="Hybrid_approaches">Hybrid approaches</h3>
<p>Advocates of <a href="/wiki/Hybrid_neural_network" title="Hybrid neural network">hybrid</a> models (combining neural networks and symbolic approaches) say that such a mixture can better capture the mechanisms of the human mind.<sup>[237]</sup><sup>[238]</sup></p>
<h3 id="Dataset_bias">Dataset bias</h3>
<p>Neural networks are dependent on the quality of the data they are trained on; low-quality data with imbalanced representativeness can lead to the model learning and perpetuating societal biases.<sup>[239]</sup><sup>[240]</sup> These inherited biases become especially critical when the ANNs are integrated into real-world scenarios where the training data may be imbalanced due to the scarcity of data for a specific race, gender or other attribute.<sup>[239]</sup> This imbalance can result in the model having inadequate representation and understanding of underrepresented groups, leading to discriminatory outcomes that exacerbate societal inequalities, especially in applications like <a href="/wiki/Facial_recognition_system" title="Facial recognition system">facial recognition</a>, hiring processes, and <a href="/wiki/Law_enforcement" title="Law enforcement">law enforcement</a>.<sup>[240]</sup><sup>[241]</sup> For example, in 2018, <a href="/wiki/Amazon_(company)" title="Amazon (company)">Amazon</a> had to scrap a recruiting tool because the model favored men over women for jobs in software engineering, due to the
higher number of male workers in the field.<sup>[241]</sup> The program would penalize any resume with the word "woman" or the name of any women's college. However, the use of <a href="/wiki/Synthetic_data" title="Synthetic data">synthetic data</a> can help reduce dataset bias and increase representation in datasets.<sup>[242]</sup></p>
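<p>One common, if partial, mitigation is to reweight training examples inversely to class frequency so that under-represented groups contribute comparably to the loss; the rule below is one standard choice, sketched on assumed toy labels, not a complete fairness remedy:</p>
<pre>
# Inverse-frequency class weights for an imbalanced binary dataset.
labels = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1])  # illustrative imbalance
classes, counts = np.unique(labels, return_counts=True)
weights = len(labels) / (len(classes) * counts)     # inverse-frequency rule
print(dict(zip(classes, weights)))                  # {0: 0.625, 1: 2.5}

# per-example weights to multiply into the loss/gradient of each sample
sample_w = weights[labels]
</pre>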
- A two-layer feedforward artificial neural network.
- An artificial neural network.
- An ANN dependency graph.
- A single-layer feedforward artificial neural network with 4 inputs, 6 hidden nodes and 2 outputs. Given position state and direction, it outputs wheel-based control values.
- A two-layer feedforward artificial neural network with 8 inputs, 2×8 hidden nodes and 2 outputs. Given position state, direction and other environment values, it outputs thruster-based control values.
- Parallel pipeline structure of a CMAC neural network. This learning algorithm can converge in one step.
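To make the first caption's formula concrete, here is a minimal Python/NumPy sketch of the single-layer computation $y_q = K*(\sum_i (x_i * w_{iq}) - b_q)$. The caption does not fix the activation $K$, so a sigmoid is assumed purely for illustration; all names and numbers are likewise illustrative.

```python
import numpy as np

def sigmoid(z):
    # One possible choice for the activation K; the caption leaves K unspecified.
    return 1.0 / (1.0 + np.exp(-z))

def single_layer_forward(x, W, b, K=sigmoid):
    """Compute y_q = K(sum_i x_i * w_iq - b_q) for every output q at once.

    x : (p,)   input vector
    W : (p, q) weight matrix with W[i, j] = w_ij
    b : (q,)   per-output bias b_q
    """
    return K(x @ W - b)

# Example with p = 3 inputs and q = 2 outputs (arbitrary numbers).
rng = np.random.default_rng(0)
x = rng.normal(size=3)
W = rng.normal(size=(3, 2))
b = rng.normal(size=2)
print(single_layer_forward(x, W, b))  # the two output activations y_1, y_2
```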
## Recent advancements and future directions

Artificial neural networks (ANNs) have undergone significant advancements, particularly in their ability to model complex systems, handle large data sets, and adapt to various types of applications. Their evolution over the past few decades has been marked by a broad range of applications in fields such as image processing, speech recognition, natural language processing, finance, and medicine.[citation needed]

### Image processing

In the realm of image processing, ANNs are employed in tasks such as image classification, object recognition, and image segmentation. For instance, deep convolutional neural networks (CNNs) have been important in handwritten digit recognition, achieving state-of-the-art performance.[243] This demonstrates the ability of ANNs to effectively process and interpret complex visual information, leading to advancements in fields ranging from automated surveillance to medical imaging.[243]
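The feature extraction inside such CNNs rests on two stacked operations: convolving the image with small learned filters and pooling the responses. The sketch below is a generic illustration (not the method of any system cited above): one convolution with a hand-chosen dark-to-light vertical-edge filter, a ReLU nonlinearity, and max pooling. In a real CNN the filter values are learned rather than fixed.

```python
import numpy as np

def conv2d(image, kernel):
    """Valid 2D cross-correlation of a grayscale image with one filter."""
    kh, kw = kernel.shape
    oh, ow = image.shape[0] - kh + 1, image.shape[1] - kw + 1
    out = np.empty((oh, ow))
    for i in range(oh):
        for j in range(ow):
            out[i, j] = np.sum(image[i:i + kh, j:j + kw] * kernel)
    return out

def max_pool(feature_map, size=2):
    """Downsample by taking the max over non-overlapping size-by-size windows."""
    h = (feature_map.shape[0] // size) * size
    w = (feature_map.shape[1] // size) * size
    fm = feature_map[:h, :w].reshape(h // size, size, w // size, size)
    return fm.max(axis=(1, 3))

# Toy 8x8 "image": dark on the left, bright on the right (illustrative only).
image = np.zeros((8, 8))
image[:, 4:] = 1.0
edge_filter = np.array([[-1, 0, 1],
                        [-1, 0, 1],
                        [-1, 0, 1]], dtype=float)  # responds to dark-to-light edges

features = np.maximum(conv2d(image, edge_filter), 0.0)  # ReLU
print(max_pool(features))  # the pooled column covering the edge responds strongly
```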
### Speech recognition

By modeling speech signals, ANNs are used for tasks like speaker identification and speech-to-text conversion. Deep neural network architectures have introduced significant improvements in large vocabulary continuous speech recognition, outperforming traditional techniques.[243][244] These advancements have enabled the development of more accurate and efficient voice-activated systems, enhancing user interfaces in technology products.[citation needed]

### Natural language processing

In natural language processing, ANNs are used for tasks such as text classification, sentiment analysis, and machine translation. They have enabled the development of models that can accurately translate between languages, understand the context and sentiment in textual data, and categorize text based on content.[243][244] This has implications for automated customer service, content moderation, and language understanding technologies.[citation needed]

### Control systems

In the domain of control systems, ANNs are used to model dynamic systems for tasks such as system identification, control design, and optimization. For instance, deep feedforward neural networks are important in system identification and control applications.[citation needed]
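As a toy illustration of neural system identification (a generic sketch under assumed choices, not drawn from a cited source), the following fits a one-hidden-layer network to predict the next state of a simple nonlinear plant $x_{t+1} = 0.8x_t + \tanh(u_t)$ from recorded state/input pairs; the plant, network size, and learning rate are all arbitrary.

```python
import numpy as np

rng = np.random.default_rng(1)

def plant(x, u):
    # Assumed toy system to identify: x_{t+1} = 0.8*x_t + tanh(u_t)
    return 0.8 * x + np.tanh(u)

# Record a trajectory driven by random control inputs.
T = 500
u = rng.uniform(-2, 2, size=T)
x = np.zeros(T + 1)
for t in range(T):
    x[t + 1] = plant(x[t], u[t])

inputs = np.stack([x[:-1], u], axis=1)   # (T, 2): current state and control
targets = x[1:]                          # (T,): observed next state

# One-hidden-layer model: next_state ~= tanh([x, u] @ W1 + b1) @ W2 + b2
H = 16
W1 = rng.normal(scale=0.5, size=(2, H)); b1 = np.zeros(H)
W2 = rng.normal(scale=0.5, size=(H, 1)); b2 = np.zeros(1)

lr = 0.05
for epoch in range(2000):
    h = np.tanh(inputs @ W1 + b1)            # hidden activations, (T, H)
    pred = (h @ W2 + b2).ravel()             # predicted next states, (T,)
    err = pred - targets
    g_pred = (2.0 / T) * err[:, None]        # gradient of mean squared error
    gW2 = h.T @ g_pred;   gb2 = g_pred.sum(axis=0)
    g_h = (g_pred @ W2.T) * (1.0 - h**2)     # backprop through tanh
    gW1 = inputs.T @ g_h; gb1 = g_h.sum(axis=0)
    W1 -= lr * gW1; b1 -= lr * gb1; W2 -= lr * gW2; b2 -= lr * gb2

print("final MSE:", float(np.mean(err**2)))  # small value => plant identified
```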
### Finance

Further information: Applications of artificial intelligence § Trading and investment

ANNs are used for stock market prediction and credit scoring:

- In investing, ANNs can process vast amounts of financial data, recognize complex patterns, and forecast stock market trends, aiding investors and risk managers in making informed decisions.[243]
- In credit scoring, ANNs offer data-driven, personalized assessments of creditworthiness, improving the accuracy of default predictions and automating the lending process.[244]

ANNs require high-quality data and careful tuning, and their "black-box" nature can pose challenges in interpretation. Nevertheless, ongoing advancements suggest that ANNs continue to play a role in finance, offering valuable insights and enhancing risk management strategies.[citation needed]

### Medicine

ANNs are able to process and analyze vast medical datasets.
They enhance diagnostic accuracy, especially by interpreting complex medical imaging for early disease detection, and by predicting patient outcomes for personalized treatment planning.[244] In drug discovery, ANNs speed up the identification of potential drug candidates and predict their efficacy and safety, significantly reducing development time and costs.[243] Additionally, their application in personalized medicine and healthcare data analysis allows tailored therapies and efficient patient care management.[244] Ongoing research is aimed at addressing remaining challenges such as data privacy and model interpretability, as well as expanding the scope of ANN applications in medicine.[citation needed]

### Content creation

ANNs such as generative adversarial networks (GANs) and transformers are used for content creation across numerous industries.[245] Deep learning models can learn the style of an artist or musician from huge datasets and generate completely new artworks and music compositions.
For instance, DALL-E is a deep neural network trained on 650 million pairs of images and texts across the internet that can create artworks based on text entered by the user.[246] In the field of music, transformers are used to create original music for commercials and documentaries through companies such as AIVA and Jukedeck.[247] In the marketing industry, generative models are used to create personalized advertisements for consumers.[245] Additionally, major film companies are partnering with technology companies to analyze the financial success of a film, such as the partnership between Warner Bros and technology company Cinelytic established in 2020.[248] Furthermore, neural networks have found uses in video game creation, where non-player characters (NPCs) can make decisions based on all the characters currently in the game.[249]

## See also

- ADALINE
- Autoencoder
- Bio-inspired computing
- Blue Brain Project
- Catastrophic interference
- Cognitive architecture
- Connectionist expert system
<li><a href="/wiki/Connectomics" title="Connectomics">Connectomics</a></li> <li><a href="/wiki/Deep_image_prior" title="Deep image prior">Deep image prior</a></li> <li><a href="/wiki/Digital_morphogenesis" title="Digital morphogenesis">Digital morphogenesis</a></li> <li><a href="/wiki/Efficiently_updatable_neural_network" title="Efficiently updatable neural network">Efficiently updatable neural network</a></li> <li><a href="/wiki/Evolutionary_algorithm" title="Evolutionary algorithm">Evolutionary algorithm</a></li> <li><a href="/wiki/Genetic_algorithm" title="Genetic algorithm">Genetic algorithm</a></li> <li><a href="/wiki/Hyperdimensional_computing" title="Hyperdimensional computing">Hyperdimensional computing</a></li> <li><a href="/wiki/In_situ_adaptive_tabulation" title="In situ adaptive tabulation">In situ adaptive tabulation</a></li> <li><a href="/wiki/Large_width_limits_of_neural_networks" title="Large width limits of neural networks">Large width limits of neural networks</a></li> <li><a href="/wiki/List_of_machine_learning_concepts" class="mw-redirect" title="List of machine learning concepts">List of machine learning concepts</a></li> <li><a href="/wiki/Memristor" title="Memristor">Memristor</a></li> <li><a href="/wiki/Neural_gas" title="Neural gas">Neural gas</a></li> <li><a href="/wiki/Neural_network_software" title="Neural network software">Neural network software</a></li> <li><a href="/wiki/Optical_neural_network" title="Optical neural network">Optical neural network</a></li> <li><a href="/wiki/Parallel_distributed_processing" class="mw-redirect" title="Parallel distributed processing">Parallel distributed processing</a></li> <li><a href="/wiki/Philosophy_of_artificial_intelligence" title="Philosophy of artificial intelligence">Philosophy of artificial intelligence</a></li> <li><a href="/wiki/Predictive_analytics" title="Predictive analytics">Predictive analytics</a></li> <li><a href="/wiki/Quantum_neural_network" title="Quantum neural network">Quantum neural network</a></li> <li><a href="/wiki/Support_vector_machine" title="Support vector machine">Support vector machine</a></li> <li><a href="/wiki/Spiking_neural_network" title="Spiking neural network">Spiking neural network</a></li> <li><a href="/wiki/Stochastic_parrot" title="Stochastic parrot">Stochastic parrot</a></li> <li><a href="/wiki/Tensor_product_network" title="Tensor product network">Tensor product network</a></li></ul> </div> </section><div class="mw-heading mw-heading2 section-heading" onclick="mfTempOpenSection(12)"><span class="indicator mf-icon mf-icon-expand mf-icon--small"></span><h2 id="References">References</h2><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=51" title="Edit section: References" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div><section class="mf-section-12 collapsible-block" id="mf-section-12"> <style data-mw-deduplicate="TemplateStyles:r1239543626">.mw-parser-output .reflist{margin-bottom:0.5em;list-style-type:decimal}@media screen{.mw-parser-output .reflist{font-size:90%}}.mw-parser-output .reflist .references{font-size:100%;margin-bottom:0;list-style-type:inherit}.mw-parser-output .reflist-columns-2{column-width:30em}.mw-parser-output .reflist-columns-3{column-width:25em}.mw-parser-output 
.reflist-columns{margin-top:0.3em}.mw-parser-output .reflist-columns ol{margin-top:0}.mw-parser-output .reflist-columns li{page-break-inside:avoid;break-inside:avoid-column}.mw-parser-output .reflist-upper-alpha{list-style-type:upper-alpha}.mw-parser-output .reflist-upper-roman{list-style-type:upper-roman}.mw-parser-output .reflist-lower-alpha{list-style-type:lower-alpha}.mw-parser-output .reflist-lower-greek{list-style-type:lower-greek}.mw-parser-output .reflist-lower-roman{list-style-type:lower-roman}</style><div class="reflist reflist-columns references-column-width" style="column-width: 30em;"> <ol class="references"> <li id="cite_note-1"><span class="mw-cite-backlink"><b><a href="#cite_ref-1">^</a></b></span> <span class="reference-text"><style data-mw-deduplicate="TemplateStyles:r1238218222">.mw-parser-output cite.citation{font-style:inherit;word-wrap:break-word}.mw-parser-output .citation q{quotes:"\"""\"""'""'"}.mw-parser-output .citation:target{background-color:rgba(0,127,255,0.133)}.mw-parser-output .id-lock-free.id-lock-free a{background:url("//upload.wikimedia.org/wikipedia/commons/6/65/Lock-green.svg")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-limited.id-lock-limited a,.mw-parser-output .id-lock-registration.id-lock-registration a{background:url("//upload.wikimedia.org/wikipedia/commons/d/d6/Lock-gray-alt-2.svg")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-subscription.id-lock-subscription a{background:url("//upload.wikimedia.org/wikipedia/commons/a/aa/Lock-red-alt-2.svg")right 0.1em center/9px no-repeat}.mw-parser-output .cs1-ws-icon a{background:url("//upload.wikimedia.org/wikipedia/commons/4/4c/Wikisource-logo.svg")right 0.1em center/12px no-repeat}body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-free a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-limited a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-registration a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-subscription a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .cs1-ws-icon a{background-size:contain;padding:0 1em 0 0}.mw-parser-output .cs1-code{color:inherit;background:inherit;border:none;padding:inherit}.mw-parser-output .cs1-hidden-error{display:none;color:var(--color-error,#d33)}.mw-parser-output .cs1-visible-error{color:var(--color-error,#d33)}.mw-parser-output .cs1-maint{display:none;color:#085;margin-left:0.3em}.mw-parser-output .cs1-kern-left{padding-left:0.2em}.mw-parser-output .cs1-kern-right{padding-right:0.2em}.mw-parser-output .citation .mw-selflink{font-weight:inherit}@media screen{.mw-parser-output .cs1-format{font-size:95%}html.skin-theme-clientpref-night .mw-parser-output .cs1-maint{color:#18911f}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .cs1-maint{color:#18911f}}</style><cite id="CITEREFHardesty2017" class="citation web cs1">Hardesty L (14 April 2017). <a rel="nofollow" class="external text" href="https://news.mit.edu/2017/explained-neural-networks-deep-learning-0414">"Explained: Neural networks"</a>. MIT News Office. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240318120205/https://news.mit.edu/2017/explained-neural-networks-deep-learning-0414">Archived</a> from the original on 18 March 2024<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2 June</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Explained%3A+Neural+networks&amp;rft.pub=MIT+News+Office&amp;rft.date=2017-04-14&amp;rft.aulast=Hardesty&amp;rft.aufirst=Larry&amp;rft_id=https%3A%2F%2Fnews.mit.edu%2F2017%2Fexplained-neural-networks-deep-learning-0414&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-2"><span class="mw-cite-backlink"><b><a href="#cite_ref-2">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFYangYang2014" class="citation book cs1">Yang Z, Yang Z (2014). <a rel="nofollow" class="external text" href="https://www.sciencedirect.com/topics/neuroscience/artificial-neural-network"><i>Comprehensive Biomedical Physics</i></a>. Karolinska Institute, Stockholm, Sweden: Elsevier. p. 1. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-444-53633-4" title="Special:BookSources/978-0-444-53633-4"><bdi>978-0-444-53633-4</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220728183237/https://www.sciencedirect.com/topics/neuroscience/artificial-neural-network">Archived</a> from the original on 28 July 2022<span class="reference-accessdate">. Retrieved <span class="nowrap">28 July</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Comprehensive+Biomedical+Physics&amp;rft.place=Karolinska+Institute%2C+Stockholm%2C+Sweden&amp;rft.pages=1&amp;rft.pub=Elsevier&amp;rft.date=2014&amp;rft.isbn=978-0-444-53633-4&amp;rft.aulast=Yang&amp;rft.aufirst=Z.R.&amp;rft.au=Yang%2C+Z.&amp;rft_id=https%3A%2F%2Fwww.sciencedirect.com%2Ftopics%2Fneuroscience%2Fartificial-neural-network&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-3"><span class="mw-cite-backlink"><b><a href="#cite_ref-3">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBishop2006" class="citation book cs1">Bishop CM (17 August 2006). <i>Pattern Recognition and Machine Learning</i>. New York: Springer. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-387-31073-2" title="Special:BookSources/978-0-387-31073-2"><bdi>978-0-387-31073-2</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Pattern+Recognition+and+Machine+Learning&amp;rft.place=New+York&amp;rft.pub=Springer&amp;rft.date=2006-08-17&amp;rft.isbn=978-0-387-31073-2&amp;rft.aulast=Bishop&amp;rft.aufirst=Christopher+M.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-:2-4"><span class="mw-cite-backlink">^ <a href="#cite_ref-:2_4-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:2_4-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFVapnikVapnik1998" class="citation book cs1">Vapnik VN, Vapnik VN (1998). 
<i>The nature of statistical learning theory</i> (Corrected 2nd print. ed.). New York Berlin Heidelberg: Springer. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-387-94559-0" title="Special:BookSources/978-0-387-94559-0"><bdi>978-0-387-94559-0</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=The+nature+of+statistical+learning+theory&amp;rft.place=New+York+Berlin+Heidelberg&amp;rft.edition=Corrected+2nd+print.&amp;rft.pub=Springer&amp;rft.date=1998&amp;rft.isbn=978-0-387-94559-0&amp;rft.aulast=Vapnik&amp;rft.aufirst=Vladimir+N.&amp;rft.au=Vapnik%2C+Vladimir+Naumovich&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-:4-5"><span class="mw-cite-backlink">^ <a href="#cite_ref-:4_5-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:4_5-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFIan_Goodfellow_and_Yoshua_Bengio_and_Aaron_Courville2016" class="citation book cs1">Ian Goodfellow and Yoshua Bengio and Aaron Courville (2016). <a rel="nofollow" class="external text" href="http://www.deeplearningbook.org/"><i>Deep Learning</i></a>. MIT Press. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160416111010/http://www.deeplearningbook.org/">Archived</a> from the original on 16 April 2016<span class="reference-accessdate">. Retrieved <span class="nowrap">1 June</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Deep+Learning&amp;rft.pub=MIT+Press&amp;rft.date=2016&amp;rft.au=Ian+Goodfellow+and+Yoshua+Bengio+and+Aaron+Courville&amp;rft_id=http%3A%2F%2Fwww.deeplearningbook.org%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-6"><span class="mw-cite-backlink"><b><a href="#cite_ref-6">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFerrie,_C.Kaiser,_S.2019" class="citation book cs1">Ferrie, C., Kaiser, S. (2019). <i>Neural Networks for Babies</i>. Sourcebooks. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4926-7120-6" title="Special:BookSources/978-1-4926-7120-6"><bdi>978-1-4926-7120-6</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Neural+Networks+for+Babies&amp;rft.pub=Sourcebooks&amp;rft.date=2019&amp;rft.isbn=978-1-4926-7120-6&amp;rft.au=Ferrie%2C+C.&amp;rft.au=Kaiser%2C+S.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-legendre1805-7"><span class="mw-cite-backlink"><b><a href="#cite_ref-legendre1805_7-0">^</a></b></span> <span class="reference-text">Mansfield Merriman, "A List of Writings Relating to the Method of Least Squares"</span> </li> <li id="cite_note-gauss1795-8"><span class="mw-cite-backlink"><b><a href="#cite_ref-gauss1795_8-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFStigler1981" class="citation journal cs1">Stigler SM (1981). <a rel="nofollow" class="external text" href="https://doi.org/10.1214%2Faos%2F1176345451">"Gauss and the Invention of Least Squares"</a>. <i>Ann. Stat</i>. <b>9</b> (3): 465–474. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1214%2Faos%2F1176345451">10.1214/aos/1176345451</a></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Ann.+Stat.&amp;rft.atitle=Gauss+and+the+Invention+of+Least+Squares&amp;rft.volume=9&amp;rft.issue=3&amp;rft.pages=465-474&amp;rft.date=1981&amp;rft_id=info%3Adoi%2F10.1214%2Faos%2F1176345451&amp;rft.aulast=Stigler&amp;rft.aufirst=Stephen+M.&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1214%252Faos%252F1176345451&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-brertscher-9"><span class="mw-cite-backlink"><b><a href="#cite_ref-brertscher_9-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBretscher1995" class="citation book cs1">Bretscher O (1995). <i>Linear Algebra With Applications</i> (3rd ed.). 
Upper Saddle River, NJ: Prentice Hall.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Linear+Algebra+With+Applications&amp;rft.place=Upper+Saddle+River%2C+NJ&amp;rft.edition=3rd&amp;rft.pub=Prentice+Hall&amp;rft.date=1995&amp;rft.aulast=Bretscher&amp;rft.aufirst=Otto&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-DLhistory-10"><span class="mw-cite-backlink">^ <a href="#cite_ref-DLhistory_10-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-DLhistory_10-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-DLhistory_10-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-DLhistory_10-3"><sup><i><b>d</b></i></sup></a> <a href="#cite_ref-DLhistory_10-4"><sup><i><b>e</b></i></sup></a> <a href="#cite_ref-DLhistory_10-5"><sup><i><b>f</b></i></sup></a> <a href="#cite_ref-DLhistory_10-6"><sup><i><b>g</b></i></sup></a> <a href="#cite_ref-DLhistory_10-7"><sup><i><b>h</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSchmidhuber2022" class="citation arxiv cs1"><a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Schmidhuber J</a> (2022). "Annotated History of Modern AI and Deep Learning". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2212.11279">2212.11279</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.NE">cs.NE</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Annotated+History+of+Modern+AI+and+Deep+Learning&amp;rft.date=2022&amp;rft_id=info%3Aarxiv%2F2212.11279&amp;rft.aulast=Schmidhuber&amp;rft.aufirst=J%C3%BCrgen&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-stigler-11"><span class="mw-cite-backlink"><b><a href="#cite_ref-stigler_11-0">^</a></b></span> <span class="reference-text"> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFStigler1986" class="citation book cs1"><a href="/wiki/Stephen_Stigler" title="Stephen Stigler">Stigler SM</a> (1986). <span class="id-lock-registration" title="Free registration required"><a rel="nofollow" class="external text" href="https://archive.org/details/historyofstatist00stig"><i>The History of Statistics: The Measurement of Uncertainty before 1900</i></a></span>. Cambridge: Harvard. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-674-40340-1" title="Special:BookSources/0-674-40340-1"><bdi>0-674-40340-1</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=The+History+of+Statistics%3A+The+Measurement+of+Uncertainty+before+1900&amp;rft.place=Cambridge&amp;rft.pub=Harvard&amp;rft.date=1986&amp;rft.isbn=0-674-40340-1&amp;rft.aulast=Stigler&amp;rft.aufirst=Stephen+M.&amp;rft_id=https%3A%2F%2Farchive.org%2Fdetails%2Fhistoryofstatist00stig&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-WM-12"><span class="mw-cite-backlink">^ <a href="#cite_ref-WM_12-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-WM_12-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMcCullochPitts1943" class="citation journal cs1">McCulloch WS, Pitts W (December 1943). <a rel="nofollow" class="external text" href="http://link.springer.com/10.1007/BF02478259">"A logical calculus of the ideas immanent in nervous activity"</a>. <i>The Bulletin of Mathematical Biophysics</i>. <b>5</b> (4): 115–133. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2FBF02478259">10.1007/BF02478259</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0007-4985">0007-4985</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Bulletin+of+Mathematical+Biophysics&amp;rft.atitle=A+logical+calculus+of+the+ideas+immanent+in+nervous+activity&amp;rft.volume=5&amp;rft.issue=4&amp;rft.pages=115-133&amp;rft.date=1943-12&amp;rft_id=info%3Adoi%2F10.1007%2FBF02478259&amp;rft.issn=0007-4985&amp;rft.aulast=McCulloch&amp;rft.aufirst=Warren+S.&amp;rft.au=Pitts%2C+Walter&amp;rft_id=http%3A%2F%2Flink.springer.com%2F10.1007%2FBF02478259&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-13"><span class="mw-cite-backlink"><b><a href="#cite_ref-13">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKleene1956" class="citation news cs1">Kleene S (1956). <a rel="nofollow" class="external text" href="https://www.degruyter.com/view/books/9781400882618/9781400882618-002/9781400882618-002.xml">"Representation of Events in Nerve Nets and Finite Automata"</a>. <i>Annals of Mathematics Studies</i>. No. 34. Princeton University Press. pp. 3–41<span class="reference-accessdate">. 
Retrieved <span class="nowrap">17 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Annals+of+Mathematics+Studies&amp;rft.atitle=Representation+of+Events+in+Nerve+Nets+and+Finite+Automata&amp;rft.issue=34&amp;rft.pages=3-41&amp;rft.date=1956&amp;rft.aulast=Kleene&amp;rft.aufirst=S.C.&amp;rft_id=https%3A%2F%2Fwww.degruyter.com%2Fview%2Fbooks%2F9781400882618%2F9781400882618-002%2F9781400882618-002.xml&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-14"><span class="mw-cite-backlink"><b><a href="#cite_ref-14">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHebb1949" class="citation book cs1">Hebb D (1949). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=ddB4AgAAQBAJ"><i>The Organization of Behavior</i></a>. New York: Wiley. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-135-63190-1" title="Special:BookSources/978-1-135-63190-1"><bdi>978-1-135-63190-1</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=The+Organization+of+Behavior&amp;rft.place=New+York&amp;rft.pub=Wiley&amp;rft.date=1949&amp;rft.isbn=978-1-135-63190-1&amp;rft.aulast=Hebb&amp;rft.aufirst=Donald&amp;rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3DddB4AgAAQBAJ&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-15"><span class="mw-cite-backlink"><b><a href="#cite_ref-15">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFarleyW.A._Clark1954" class="citation journal cs1">Farley B, W.A. Clark (1954). "Simulation of Self-Organizing Systems by Digital Computer". <i>IRE Transactions on Information Theory</i>. <b>4</b> (4): 76–84. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FTIT.1954.1057468">10.1109/TIT.1954.1057468</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=IRE+Transactions+on+Information+Theory&amp;rft.atitle=Simulation+of+Self-Organizing+Systems+by+Digital+Computer&amp;rft.volume=4&amp;rft.issue=4&amp;rft.pages=76-84&amp;rft.date=1954&amp;rft_id=info%3Adoi%2F10.1109%2FTIT.1954.1057468&amp;rft.aulast=Farley&amp;rft.aufirst=B.G.&amp;rft.au=W.A.+Clark&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-16"><span class="mw-cite-backlink"><b><a href="#cite_ref-16">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRochesterJ.H._HollandL.H._HabitW.L._Duda1956" class="citation journal cs1">Rochester N, J.H. Holland, L.H. Habit, W.L. Duda (1956). "Tests on a cell assembly theory of the action of the brain, using a large digital computer". <i>IRE Transactions on Information Theory</i>. <b>2</b> (3): 80–93. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FTIT.1956.1056810">10.1109/TIT.1956.1056810</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=IRE+Transactions+on+Information+Theory&amp;rft.atitle=Tests+on+a+cell+assembly+theory+of+the+action+of+the+brain%2C+using+a+large+digital+computer&amp;rft.volume=2&amp;rft.issue=3&amp;rft.pages=80-93&amp;rft.date=1956&amp;rft_id=info%3Adoi%2F10.1109%2FTIT.1956.1056810&amp;rft.aulast=Rochester&amp;rft.aufirst=N.&amp;rft.au=J.H.+Holland&amp;rft.au=L.H.+Habit&amp;rft.au=W.L.+Duda&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-17"><span class="mw-cite-backlink"><b><a href="#cite_ref-17">^</a></b></span> <span class="reference-text">Haykin (2008) Neural Networks and Learning Machines, 3rd edition</span> </li> <li id="cite_note-18"><span class="mw-cite-backlink"><b><a href="#cite_ref-18">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRosenblatt1958" class="citation journal cs1">Rosenblatt F (1958). "The Perceptron: A Probabilistic Model For Information Storage And Organization in the Brain". <i>Psychological Review</i>. <b>65</b> (6): 386–408. <a href="/wiki/CiteSeerX_(identifier)" class="mw-redirect" title="CiteSeerX (identifier)">CiteSeerX</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.588.3775">10.1.1.588.3775</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1037%2Fh0042519">10.1037/h0042519</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/13602029">13602029</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:12781225">12781225</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Psychological+Review&amp;rft.atitle=The+Perceptron%3A+A+Probabilistic+Model+For+Information+Storage+And+Organization+in+the+Brain&amp;rft.volume=65&amp;rft.issue=6&amp;rft.pages=386-408&amp;rft.date=1958&amp;rft_id=https%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.588.3775%23id-name%3DCiteSeerX&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A12781225%23id-name%3DS2CID&amp;rft_id=info%3Apmid%2F13602029&amp;rft_id=info%3Adoi%2F10.1037%2Fh0042519&amp;rft.aulast=Rosenblatt&amp;rft.aufirst=F.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-Werbos_1975-19"><span class="mw-cite-backlink"><b><a href="#cite_ref-Werbos_1975_19-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWerbos1975" class="citation book cs1">Werbos P (1975). 
<a rel="nofollow" class="external text" href="https://books.google.com/books?id=z81XmgEACAAJ"><i>Beyond Regression: New Tools for Prediction and Analysis in the Behavioral Sciences</i></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Beyond+Regression%3A+New+Tools+for+Prediction+and+Analysis+in+the+Behavioral+Sciences&amp;rft.date=1975&amp;rft.aulast=Werbos&amp;rft.aufirst=P.J.&amp;rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3Dz81XmgEACAAJ&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-20"><span class="mw-cite-backlink"><b><a href="#cite_ref-20">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRosenblatt1957" class="citation journal cs1">Rosenblatt F (1957). "The Perceptron—a perceiving and recognizing automaton". <i>Report 85-460-1</i>. Cornell Aeronautical Laboratory.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Report+85-460-1&amp;rft.atitle=The+Perceptron%E2%80%94a+perceiving+and+recognizing+automaton&amp;rft.date=1957&amp;rft.aulast=Rosenblatt&amp;rft.aufirst=Frank&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-Olazaran-21"><span class="mw-cite-backlink"><b><a href="#cite_ref-Olazaran_21-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFOlazaran1996" class="citation journal cs1">Olazaran M (1996). "A Sociological Study of the Official History of the Perceptrons Controversy". <i>Social Studies of Science</i>. <b>26</b> (3): 611–659. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1177%2F030631296026003005">10.1177/030631296026003005</a>. <a href="/wiki/JSTOR_(identifier)" class="mw-redirect" title="JSTOR (identifier)">JSTOR</a> <a rel="nofollow" class="external text" href="https://www.jstor.org/stable/285702">285702</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:16786738">16786738</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Social+Studies+of+Science&amp;rft.atitle=A+Sociological+Study+of+the+Official+History+of+the+Perceptrons+Controversy&amp;rft.volume=26&amp;rft.issue=3&amp;rft.pages=611-659&amp;rft.date=1996&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A16786738%23id-name%3DS2CID&amp;rft_id=https%3A%2F%2Fwww.jstor.org%2Fstable%2F285702%23id-name%3DJSTOR&amp;rft_id=info%3Adoi%2F10.1177%2F030631296026003005&amp;rft.aulast=Olazaran&amp;rft.aufirst=Mikel&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-joseph1960-22"><span class="mw-cite-backlink">^ <a href="#cite_ref-joseph1960_22-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-joseph1960_22-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFJoseph1960" class="citation book cs1">Joseph RD (1960). <i>Contributions to Perceptron Theory, Cornell Aeronautical Laboratory Report No. VG-11 96--G-7, Buffalo</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Contributions+to+Perceptron+Theory%2C+Cornell+Aeronautical+Laboratory+Report+No.+VG-11+96--G-7%2C+Buffalo&amp;rft.date=1960&amp;rft.aulast=Joseph&amp;rft.aufirst=R.+D.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-:08-23"><span class="mw-cite-backlink"><b><a href="#cite_ref-:08_23-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRussel,_StuartNorvig,_Peter2010" class="citation book cs1">Russel, Stuart, Norvig, Peter (2010). <a rel="nofollow" class="external text" href="https://people.engr.tamu.edu/guni/csce421/files/AI_Russell_Norvig.pdf"><i>Artificial Intelligence A Modern Approach</i></a> <span class="cs1-format">(PDF)</span> (3rd ed.). United States of America: Pearson Education. pp. 16–28. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-13-604259-4" title="Special:BookSources/978-0-13-604259-4"><bdi>978-0-13-604259-4</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Artificial+Intelligence+A+Modern+Approach&amp;rft.place=United+States+of+America&amp;rft.pages=16-28&amp;rft.edition=3rd&amp;rft.pub=Pearson+Education&amp;rft.date=2010&amp;rft.isbn=978-0-13-604259-4&amp;rft.au=Russel%2C+Stuart&amp;rft.au=Norvig%2C+Peter&amp;rft_id=https%3A%2F%2Fpeople.engr.tamu.edu%2Fguni%2Fcsce421%2Ffiles%2FAI_Russell_Norvig.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-rosenblatt1962-24"><span class="mw-cite-backlink">^ <a href="#cite_ref-rosenblatt1962_24-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-rosenblatt1962_24-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRosenblatt1962" class="citation book cs1"><a href="/wiki/Frank_Rosenblatt" title="Frank Rosenblatt">Rosenblatt F</a> (1962). <i>Principles of Neurodynamics</i>. Spartan, New York.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Principles+of+Neurodynamics&amp;rft.pub=Spartan%2C+New+York&amp;rft.date=1962&amp;rft.aulast=Rosenblatt&amp;rft.aufirst=Frank&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-ivak1965-25"><span class="mw-cite-backlink"><b><a href="#cite_ref-ivak1965_25-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFIvakhnenkoLapa1967" class="citation book cs1">Ivakhnenko AG, Lapa VG (1967). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=rGFgAAAAMAAJ"><i>Cybernetics and Forecasting Techniques</i></a>. American Elsevier Publishing Co. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-444-00020-0" title="Special:BookSources/978-0-444-00020-0"><bdi>978-0-444-00020-0</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Cybernetics+and+Forecasting+Techniques&amp;rft.pub=American+Elsevier+Publishing+Co.&amp;rft.date=1967&amp;rft.isbn=978-0-444-00020-0&amp;rft.aulast=Ivakhnenko&amp;rft.aufirst=A.+G.&amp;rft.au=Lapa%2C+V.+G.&amp;rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3DrGFgAAAAMAAJ&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-26"><span class="mw-cite-backlink"><b><a href="#cite_ref-26">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFIvakhnenko1970" class="citation journal cs1">Ivakhnenko A (March 1970). <a rel="nofollow" class="external text" href="https://linkinghub.elsevier.com/retrieve/pii/0005109870900920">"Heuristic self-organization in problems of engineering cybernetics"</a>. <i>Automatica</i>. <b>6</b> (2): 207–219. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2F0005-1098%2870%2990092-0">10.1016/0005-1098(70)90092-0</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Automatica&amp;rft.atitle=Heuristic+self-organization+in+problems+of+engineering+cybernetics&amp;rft.volume=6&amp;rft.issue=2&amp;rft.pages=207-219&amp;rft.date=1970-03&amp;rft_id=info%3Adoi%2F10.1016%2F0005-1098%2870%2990092-0&amp;rft.aulast=Ivakhnenko&amp;rft.aufirst=A.G.&amp;rft_id=https%3A%2F%2Flinkinghub.elsevier.com%2Fretrieve%2Fpii%2F0005109870900920&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-ivak1971-27"><span class="mw-cite-backlink"><b><a href="#cite_ref-ivak1971_27-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFIvakhnenko1971" class="citation journal cs1 cs1-prop-long-vol">Ivakhnenko A (1971). <a rel="nofollow" class="external text" href="http://gmdh.net/articles/history/polynomial.pdf">"Polynomial theory of complex systems"</a> <span class="cs1-format">(PDF)</span>. <i>IEEE Transactions on Systems, Man, and Cybernetics</i>. SMC-1 (4): 364–378. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FTSMC.1971.4308320">10.1109/TSMC.1971.4308320</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170829230621/http://www.gmdh.net/articles/history/polynomial.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 29 August 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">5 November</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=IEEE+Transactions+on+Systems%2C+Man%2C+and+Cybernetics&amp;rft.atitle=Polynomial+theory+of+complex+systems&amp;rft.volume=SMC-1&amp;rft.issue=4&amp;rft.pages=364-378&amp;rft.date=1971&amp;rft_id=info%3Adoi%2F10.1109%2FTSMC.1971.4308320&amp;rft.aulast=Ivakhnenko&amp;rft.aufirst=Alexey&amp;rft_id=http%3A%2F%2Fgmdh.net%2Farticles%2Fhistory%2Fpolynomial.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-robbins1951-28"><span class="mw-cite-backlink"><b><a href="#cite_ref-robbins1951_28-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRobbinsMonro1951" class="citation journal cs1"><a href="/wiki/Herbert_Robbins" title="Herbert Robbins">Robbins H</a>, Monro S (1951). <a rel="nofollow" class="external text" href="https://doi.org/10.1214%2Faoms%2F1177729586">"A Stochastic Approximation Method"</a>. <i>The Annals of Mathematical Statistics</i>. <b>22</b> (3): 400. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1214%2Faoms%2F1177729586">10.1214/aoms/1177729586</a></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Annals+of+Mathematical+Statistics&amp;rft.atitle=A+Stochastic+Approximation+Method&amp;rft.volume=22&amp;rft.issue=3&amp;rft.pages=400&amp;rft.date=1951&amp;rft_id=info%3Adoi%2F10.1214%2Faoms%2F1177729586&amp;rft.aulast=Robbins&amp;rft.aufirst=H.&amp;rft.au=Monro%2C+S.&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1214%252Faoms%252F1177729586&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-Amari1967-29"><span class="mw-cite-backlink"><b><a href="#cite_ref-Amari1967_29-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAmari1967" class="citation journal cs1"><a href="/wiki/Shun%27ichi_Amari" title="Shun'ichi Amari">Amari S</a> (1967). "A theory of adaptive pattern classifier". <i>IEEE Transactions</i>. <b>EC</b> (16): 279–307.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=IEEE+Transactions&amp;rft.atitle=A+theory+of+adaptive+pattern+classifier&amp;rft.volume=EC&amp;rft.issue=16&amp;rft.pages=279-307&amp;rft.date=1967&amp;rft.aulast=Amari&amp;rft.aufirst=Shun%27ichi&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-Fukushima1969-30"><span class="mw-cite-backlink"><b><a href="#cite_ref-Fukushima1969_30-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFukushima1969" class="citation journal cs1">Fukushima K (1969). "Visual feature extraction by a multilayered network of analog threshold elements". <i>IEEE Transactions on Systems Science and Cybernetics</i>. <b>5</b> (4): 322–333. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FTSSC.1969.300225">10.1109/TSSC.1969.300225</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=IEEE+Transactions+on+Systems+Science+and+Cybernetics&amp;rft.atitle=Visual+feature+extraction+by+a+multilayered+network+of+analog+threshold+elements&amp;rft.volume=5&amp;rft.issue=4&amp;rft.pages=322-333&amp;rft.date=1969&amp;rft_id=info%3Adoi%2F10.1109%2FTSSC.1969.300225&amp;rft.aulast=Fukushima&amp;rft.aufirst=K.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-sonoda17-31"><span class="mw-cite-backlink"><b><a href="#cite_ref-sonoda17_31-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSonodaMurata2017" class="citation journal cs1">Sonoda S, Murata N (2017). "Neural network with unbounded activation functions is universal approximator". <i>Applied and Computational Harmonic Analysis</i>. <b>43</b> (2): 233–268. 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1505.03654">1505.03654</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.acha.2015.12.005">10.1016/j.acha.2015.12.005</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:12149203">12149203</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Applied+and+Computational+Harmonic+Analysis&amp;rft.atitle=Neural+network+with+unbounded+activation+functions+is+universal+approximator&amp;rft.volume=43&amp;rft.issue=2&amp;rft.pages=233-268&amp;rft.date=2017&amp;rft_id=info%3Aarxiv%2F1505.03654&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A12149203%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1016%2Fj.acha.2015.12.005&amp;rft.aulast=Sonoda&amp;rft.aufirst=Sho&amp;rft.au=Murata%2C+Noboru&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-32"><span class="mw-cite-backlink"><b><a href="#cite_ref-32">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRamachandranBarretQuoc2017" class="citation arxiv cs1">Ramachandran P, Barret Z, Quoc VL (16 October 2017). "Searching for Activation Functions". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1710.05941">1710.05941</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.NE">cs.NE</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Searching+for+Activation+Functions&amp;rft.date=2017-10-16&amp;rft_id=info%3Aarxiv%2F1710.05941&amp;rft.aulast=Ramachandran&amp;rft.aufirst=Prajit&amp;rft.au=Barret%2C+Zoph&amp;rft.au=Quoc%2C+V.+Le&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-:132-33"><span class="mw-cite-backlink"><b><a href="#cite_ref-:132_33-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMinskyPapert1969" class="citation book cs1">Minsky M, Papert S (1969). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=Ow1OAQAAIAAJ"><i>Perceptrons: An Introduction to Computational Geometry</i></a>. MIT Press. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-262-63022-1" title="Special:BookSources/978-0-262-63022-1"><bdi>978-0-262-63022-1</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Perceptrons%3A+An+Introduction+to+Computational+Geometry&amp;rft.pub=MIT+Press&amp;rft.date=1969&amp;rft.isbn=978-0-262-63022-1&amp;rft.aulast=Minsky&amp;rft.aufirst=Marvin&amp;rft.au=Papert%2C+Seymour&amp;rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3DOw1OAQAAIAAJ&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-34"><span class="mw-cite-backlink"><b><a href="#cite_ref-34">^</a></b></span> <span class="reference-text"> Bozinovski S. and Fulgosi A. (1976). "The influence of pattern similarity and transfer learning on the base perceptron training" (original in Croatian) Proceedings of Symposium Informatica 3-121-5, Bled. </span> </li> <li id="cite_note-35"><span class="mw-cite-backlink"><b><a href="#cite_ref-35">^</a></b></span> <span class="reference-text"> Bozinovski S.(2020) "Reminder of the first paper on transfer learning in neural networks, 1976". Informatica 44: 291–302. </span> </li> <li id="cite_note-FUKU1979-36"><span class="mw-cite-backlink">^ <a href="#cite_ref-FUKU1979_36-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-FUKU1979_36-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFukushima1979" class="citation journal cs1 cs1-prop-long-vol">Fukushima K (1979). "Neural network model for a mechanism of pattern recognition unaffected by shift in position—Neocognitron". <i>Trans. IECE (In Japanese)</i>. J62-A (10): 658–665. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fbf00344251">10.1007/bf00344251</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/7370364">7370364</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:206775608">206775608</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Trans.+IECE+%28In+Japanese%29&amp;rft.atitle=Neural+network+model+for+a+mechanism+of+pattern+recognition+unaffected+by+shift+in+position%E2%80%94Neocognitron&amp;rft.volume=J62-A&amp;rft.issue=10&amp;rft.pages=658-665&amp;rft.date=1979&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A206775608%23id-name%3DS2CID&amp;rft_id=info%3Apmid%2F7370364&amp;rft_id=info%3Adoi%2F10.1007%2Fbf00344251&amp;rft.aulast=Fukushima&amp;rft.aufirst=K.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-FUKU1980-37"><span class="mw-cite-backlink"><b><a href="#cite_ref-FUKU1980_37-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFukushima1980" class="citation journal cs1">Fukushima K (1980). 
"Neocognitron: A self-organizing neural network model for a mechanism of pattern recognition unaffected by shift in position". <i>Biol. Cybern</i>. <b>36</b> (4): 193–202. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fbf00344251">10.1007/bf00344251</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/7370364">7370364</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:206775608">206775608</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Biol.+Cybern.&amp;rft.atitle=Neocognitron%3A+A+self-organizing+neural+network+model+for+a+mechanism+of+pattern+recognition+unaffected+by+shift+in+position&amp;rft.volume=36&amp;rft.issue=4&amp;rft.pages=193-202&amp;rft.date=1980&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A206775608%23id-name%3DS2CID&amp;rft_id=info%3Apmid%2F7370364&amp;rft_id=info%3Adoi%2F10.1007%2Fbf00344251&amp;rft.aulast=Fukushima&amp;rft.aufirst=K.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-SCHIDHUB4-38"><span class="mw-cite-backlink">^ <a href="#cite_ref-SCHIDHUB4_38-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-SCHIDHUB4_38-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-SCHIDHUB4_38-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSchmidhuber2015" class="citation journal cs1">Schmidhuber J (2015). "Deep Learning in Neural Networks: An Overview". <i>Neural Networks</i>. <b>61</b>: 85–117. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1404.7828">1404.7828</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.neunet.2014.09.003">10.1016/j.neunet.2014.09.003</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/25462637">25462637</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:11715509">11715509</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Neural+Networks&amp;rft.atitle=Deep+Learning+in+Neural+Networks%3A+An+Overview&amp;rft.volume=61&amp;rft.pages=85-117&amp;rft.date=2015&amp;rft_id=info%3Aarxiv%2F1404.7828&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A11715509%23id-name%3DS2CID&amp;rft_id=info%3Apmid%2F25462637&amp;rft_id=info%3Adoi%2F10.1016%2Fj.neunet.2014.09.003&amp;rft.aulast=Schmidhuber&amp;rft.aufirst=J.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-leibniz16762-39"><span class="mw-cite-backlink"><b><a href="#cite_ref-leibniz16762_39-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLeibniz1920" class="citation book cs1">Leibniz GW (1920). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=bOIGAAAAYAAJ&amp;q=leibniz+altered+manuscripts&amp;pg=PA90"><i>The Early Mathematical Manuscripts of Leibniz: Translated from the Latin Texts Published by Carl Immanuel Gerhardt with Critical and Historical Notes (Leibniz published the chain rule in a 1676 memoir)</i></a>. Open court publishing Company. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9780598818461" title="Special:BookSources/9780598818461"><bdi>9780598818461</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=The+Early+Mathematical+Manuscripts+of+Leibniz%3A+Translated+from+the+Latin+Texts+Published+by+Carl+Immanuel+Gerhardt+with+Critical+and+Historical+Notes+%28Leibniz+published+the+chain+rule+in+a+1676+memoir%29&amp;rft.pub=Open+court+publishing+Company&amp;rft.date=1920&amp;rft.isbn=9780598818461&amp;rft.aulast=Leibniz&amp;rft.aufirst=Gottfried+Wilhelm+Freiherr+von&amp;rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3DbOIGAAAAYAAJ%26q%3Dleibniz%2Baltered%2Bmanuscripts%26pg%3DPA90&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-kelley19602-40"><span class="mw-cite-backlink"><b><a href="#cite_ref-kelley19602_40-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKelley1960" class="citation journal cs1"><a href="/wiki/Henry_J._Kelley" title="Henry J. Kelley">Kelley HJ</a> (1960). "Gradient theory of optimal flight paths". <i>ARS Journal</i>. <b>30</b> (10): 947–954. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.2514%2F8.5282">10.2514/8.5282</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=ARS+Journal&amp;rft.atitle=Gradient+theory+of+optimal+flight+paths&amp;rft.volume=30&amp;rft.issue=10&amp;rft.pages=947-954&amp;rft.date=1960&amp;rft_id=info%3Adoi%2F10.2514%2F8.5282&amp;rft.aulast=Kelley&amp;rft.aufirst=Henry+J.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-lin19703-41"><span class="mw-cite-backlink"><b><a href="#cite_ref-lin19703_41-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLinnainmaa1970" class="citation thesis cs1 cs1-prop-foreign-lang-source"><a href="/wiki/Seppo_Linnainmaa" title="Seppo Linnainmaa">Linnainmaa S</a> (1970). <i>The representation of the cumulative rounding error of an algorithm as a Taylor expansion of the local rounding errors</i> (Masters) (in Finnish). University of Helsinki. p. 6–7.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Adissertation&amp;rft.title=The+representation+of+the+cumulative+rounding+error+of+an+algorithm+as+a+Taylor+expansion+of+the+local+rounding+errors&amp;rft.inst=University+of+Helsinki&amp;rft.date=1970&amp;rft.aulast=Linnainmaa&amp;rft.aufirst=Seppo&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-lin19763-42"><span class="mw-cite-backlink"><b><a href="#cite_ref-lin19763_42-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLinnainmaa1976" class="citation journal cs1"><a href="/wiki/Seppo_Linnainmaa" title="Seppo Linnainmaa">Linnainmaa S</a> (1976). "Taylor expansion of the accumulated rounding error". <i>BIT Numerical Mathematics</i>. <b>16</b> (2): 146–160. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fbf01931367">10.1007/bf01931367</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:122357351">122357351</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=BIT+Numerical+Mathematics&amp;rft.atitle=Taylor+expansion+of+the+accumulated+rounding+error&amp;rft.volume=16&amp;rft.issue=2&amp;rft.pages=146-160&amp;rft.date=1976&amp;rft_id=info%3Adoi%2F10.1007%2Fbf01931367&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A122357351%23id-name%3DS2CID&amp;rft.aulast=Linnainmaa&amp;rft.aufirst=Seppo&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-ostrowski1971-43"><span class="mw-cite-backlink"><b><a href="#cite_ref-ostrowski1971_43-0">^</a></b></span> <span class="reference-text">Ostrovski, G.M., Volin,Y.M., and Boris, W.W. (1971). On the computation of derivatives. Wiss. Z. Tech. 
Hochschule for Chemistry, 13:382–384.</span> </li> <li id="cite_note-backprop-44"><span class="mw-cite-backlink">^ <a href="#cite_ref-backprop_44-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-backprop_44-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSchmidhuber2014" class="citation web cs1"><a href="/wiki/Juergen_Schmidhuber" class="mw-redirect" title="Juergen Schmidhuber">Schmidhuber J</a> (25 October 2014). <a rel="nofollow" class="external text" href="http://web.archive.org/web/20240730110408/https://people.idsia.ch/~juergen/who-invented-backpropagation.html">"Who Invented Backpropagation?"</a>. IDSIA, Switzerland. Archived from <a rel="nofollow" class="external text" href="https://people.idsia.ch/~juergen/who-invented-backpropagation.html">the original</a> on 30 July 2024<span class="reference-accessdate">. Retrieved <span class="nowrap">14 September</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Who+Invented+Backpropagation%3F&amp;rft.pub=IDSIA%2C+Switzerland&amp;rft.date=2014-10-25&amp;rft.aulast=Schmidhuber&amp;rft.aufirst=Juergen&amp;rft_id=https%3A%2F%2Fpeople.idsia.ch%2F~juergen%2Fwho-invented-backpropagation.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-werbos1982-45"><span class="mw-cite-backlink"><b><a href="#cite_ref-werbos1982_45-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWerbos1982" class="citation book cs1"><a href="/wiki/Paul_Werbos" title="Paul Werbos">Werbos P</a> (1982). <a rel="nofollow" class="external text" href="http://werbos.com/Neural/SensitivityIFIPSeptember1981.pdf">"Applications of advances in nonlinear sensitivity analysis"</a> <span class="cs1-format">(PDF)</span>. <i>System modeling and optimization</i>. Springer. pp. 762–770. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160414055503/http://werbos.com/Neural/SensitivityIFIPSeptember1981.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 14 April 2016<span class="reference-accessdate">. Retrieved <span class="nowrap">2 July</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Applications+of+advances+in+nonlinear+sensitivity+analysis&amp;rft.btitle=System+modeling+and+optimization&amp;rft.pages=762-770&amp;rft.pub=Springer&amp;rft.date=1982&amp;rft.aulast=Werbos&amp;rft.aufirst=Paul&amp;rft_id=http%3A%2F%2Fwerbos.com%2FNeural%2FSensitivityIFIPSeptember1981.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-:1-46"><span class="mw-cite-backlink"><b><a href="#cite_ref-:1_46-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAndersonRosenfeld2000" class="citation book cs1">Anderson JA, Rosenfeld E, eds. (2000). <a rel="nofollow" class="external text" href="https://direct.mit.edu/books/book/4886/Talking-NetsAn-Oral-History-of-Neural-Networks"><i>Talking Nets: An Oral History of Neural Networks</i></a>. The MIT Press. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.7551%2Fmitpress%2F6626.003.0016">10.7551/mitpress/6626.003.0016</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-262-26715-1" title="Special:BookSources/978-0-262-26715-1"><bdi>978-0-262-26715-1</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Talking+Nets%3A+An+Oral+History+of+Neural+Networks&amp;rft.pub=The+MIT+Press&amp;rft.date=2000&amp;rft_id=info%3Adoi%2F10.7551%2Fmitpress%2F6626.003.0016&amp;rft.isbn=978-0-262-26715-1&amp;rft_id=https%3A%2F%2Fdirect.mit.edu%2Fbooks%2Fbook%2F4886%2FTalking-NetsAn-Oral-History-of-Neural-Networks&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-werbos1974-47"><span class="mw-cite-backlink"><b><a href="#cite_ref-werbos1974_47-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWerbos1994" class="citation book cs1">Werbos PJ (1994). <i>The Roots of Backpropagation : From Ordered Derivatives to Neural Networks and Political Forecasting</i>. New York: John Wiley &amp; Sons. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-471-59897-6" title="Special:BookSources/0-471-59897-6"><bdi>0-471-59897-6</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=The+Roots+of+Backpropagation+%3A+From+Ordered+Derivatives+to+Neural+Networks+and+Political+Forecasting&amp;rft.place=New+York&amp;rft.pub=John+Wiley+%26+Sons&amp;rft.date=1994&amp;rft.isbn=0-471-59897-6&amp;rft.aulast=Werbos&amp;rft.aufirst=Paul+J.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-48"><span class="mw-cite-backlink"><b><a href="#cite_ref-48">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRumelhartHintonWilliams1986" class="citation journal cs1">Rumelhart DE, Hinton GE, Williams RJ (October 1986). <a rel="nofollow" class="external text" href="https://www.nature.com/articles/323533a0">"Learning representations by back-propagating errors"</a>. <i>Nature</i>. <b>323</b> (6088): 533–536. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/1986Natur.323..533R">1986Natur.323..533R</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2F323533a0">10.1038/323533a0</a>. 
<a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1476-4687">1476-4687</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Nature&amp;rft.atitle=Learning+representations+by+back-propagating+errors&amp;rft.volume=323&amp;rft.issue=6088&amp;rft.pages=533-536&amp;rft.date=1986-10&amp;rft.issn=1476-4687&amp;rft_id=info%3Adoi%2F10.1038%2F323533a0&amp;rft_id=info%3Abibcode%2F1986Natur.323..533R&amp;rft.aulast=Rumelhart&amp;rft.aufirst=David+E.&amp;rft.au=Hinton%2C+Geoffrey+E.&amp;rft.au=Williams%2C+Ronald+J.&amp;rft_id=https%3A%2F%2Fwww.nature.com%2Farticles%2F323533a0&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-49"><span class="mw-cite-backlink"><b><a href="#cite_ref-49">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFukushimaMiyake1982" class="citation journal cs1">Fukushima K, Miyake S (1 January 1982). <a rel="nofollow" class="external text" href="https://www.sciencedirect.com/science/article/abs/pii/0031320382900243">"Neocognitron: A new algorithm for pattern recognition tolerant of deformations and shifts in position"</a>. <i>Pattern Recognition</i>. <b>15</b> (6): 455–469. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/1982PatRe..15..455F">1982PatRe..15..455F</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2F0031-3203%2882%2990024-3">10.1016/0031-3203(82)90024-3</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0031-3203">0031-3203</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Pattern+Recognition&amp;rft.atitle=Neocognitron%3A+A+new+algorithm+for+pattern+recognition+tolerant+of+deformations+and+shifts+in+position&amp;rft.volume=15&amp;rft.issue=6&amp;rft.pages=455-469&amp;rft.date=1982-01-01&amp;rft.issn=0031-3203&amp;rft_id=info%3Adoi%2F10.1016%2F0031-3203%2882%2990024-3&amp;rft_id=info%3Abibcode%2F1982PatRe..15..455F&amp;rft.aulast=Fukushima&amp;rft.aufirst=Kunihiko&amp;rft.au=Miyake%2C+Sei&amp;rft_id=https%3A%2F%2Fwww.sciencedirect.com%2Fscience%2Farticle%2Fabs%2Fpii%2F0031320382900243&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-Waibel1987-50"><span class="mw-cite-backlink"><b><a href="#cite_ref-Waibel1987_50-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWaibel1987" class="citation conference cs1">Waibel A (December 1987). <a rel="nofollow" class="external text" href="https://isl.anthropomatik.kit.edu/pdf/Waibel1987a.pdf"><i>Phoneme Recognition Using Time-Delay Neural Networks</i></a> <span class="cs1-format">(PDF)</span>. Meeting of the Institute of Electrical, Information and Communication Engineers (IEICE). 
51. Waibel A, et al. (March 1989). "Phoneme Recognition Using Time-Delay Neural Networks". IEEE Transactions on Acoustics, Speech, and Signal Processing. 37 (3): 328–339. http://www.inf.ufrgs.br/~engel/data/media/file/cmp121/waibel89_TDNN.pdf
52. Zhang W (1988). "Shift-invariant pattern recognition neural network and its optical architecture". Proceedings of Annual Conference of the Japan Society of Applied Physics. https://drive.google.com/file/d/1nN_5odSG_QVae54EsQN_qSz-0ZsX6wA0/view?usp=sharing
53. LeCun Y, et al. (1989). "Backpropagation Applied to Handwritten Zip Code Recognition". Neural Computation. 1: 541–551.
54. Zhang W (1990). "Parallel distributed processing model with local space-invariant interconnections and its optical architecture". Applied Optics. 29 (32): 4790–4797. Bibcode:1990ApOpt..29.4790Z. doi:10.1364/AO.29.004790. PMID 20577468.
55. Zhang W (1991). "Image processing of human corneal endothelium based on a learning network". Applied Optics. 30 (29): 4211–4217. Bibcode:1991ApOpt..30.4211Z. doi:10.1364/AO.30.004211. PMID 20706526.
56. Zhang W (1994). "Computerized detection of clustered microcalcifications in digital mammograms using a shift-invariant artificial neural network". Medical Physics. 21 (4): 517–524. Bibcode:1994MedPh..21..517Z. doi:10.1118/1.597177. PMID 8058017.
57. LeCun Y, Bottou L, Bengio Y, Haffner P (1998). "Gradient-based learning applied to document recognition" (PDF). Proceedings of the IEEE. 86 (11): 2278–2324. CiteSeerX 10.1.1.32.9552. doi:10.1109/5.726791. S2CID 14542261. Retrieved 7 October 2016. http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf
58. Qian N, Sejnowski TJ (1988). "Predicting the secondary structure of globular proteins using neural network models". Journal of Molecular Biology. 202 (4): 865–884.
59. Bohr H, Bohr J, Brunak S, Cotterill RM, Lautrup B, Nørskov L, Olsen OH, Petersen SB (1988). "Protein secondary structure and homology by neural networks: The α-helices in rhodopsin". FEBS Letters. 241: 223–228.
60. Rost B, Sander C (1993). "Prediction of protein secondary structure at better than 70% accuracy". Journal of Molecular Biology. 232 (2): 584–599.
61. Amari SI (November 1972). "Learning Patterns and Pattern Sequences by Self-Organizing Nets of Threshold Elements". IEEE Transactions on Computers. C-21 (11): 1197–1206. doi:10.1109/T-C.1972.223477. ISSN 0018-9340.
<a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0018-9340">0018-9340</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=IEEE+Transactions+on+Computers&amp;rft.atitle=Learning+Patterns+and+Pattern+Sequences+by+Self-Organizing+Nets+of+Threshold+Elements&amp;rft.volume=C-21&amp;rft.issue=11&amp;rft.pages=1197-1206&amp;rft.date=1972-11&amp;rft_id=info%3Adoi%2F10.1109%2FT-C.1972.223477&amp;rft.issn=0018-9340&amp;rft.aulast=Amari&amp;rft.aufirst=S.-I.&amp;rft_id=https%3A%2F%2Fieeexplore.ieee.org%2Fdocument%2F1672070&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-Hopfield19822-62"><span class="mw-cite-backlink"><b><a href="#cite_ref-Hopfield19822_62-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHopfield1982" class="citation journal cs1">Hopfield JJ (1982). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC346238">"Neural networks and physical systems with emergent collective computational abilities"</a>. <i>Proceedings of the National Academy of Sciences</i>. <b>79</b> (8): 2554–2558. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/1982PNAS...79.2554H">1982PNAS...79.2554H</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1073%2Fpnas.79.8.2554">10.1073/pnas.79.8.2554</a></span>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC346238">346238</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/6953413">6953413</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Proceedings+of+the+National+Academy+of+Sciences&amp;rft.atitle=Neural+networks+and+physical+systems+with+emergent+collective+computational+abilities&amp;rft.volume=79&amp;rft.issue=8&amp;rft.pages=2554-2558&amp;rft.date=1982&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC346238%23id-name%3DPMC&amp;rft_id=info%3Apmid%2F6953413&amp;rft_id=info%3Adoi%2F10.1073%2Fpnas.79.8.2554&amp;rft_id=info%3Abibcode%2F1982PNAS...79.2554H&amp;rft.aulast=Hopfield&amp;rft.aufirst=J.+J.&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC346238&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-63"><span class="mw-cite-backlink"><b><a href="#cite_ref-63">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFEspinosa-SanchezGomez-Marinde_Castro2023" class="citation journal cs1">Espinosa-Sanchez JM, Gomez-Marin A, de Castro F (5 July 2023). 
<a rel="nofollow" class="external text" href="http://journals.sagepub.com/doi/10.1177/10738584231179932">"The Importance of Cajal's and Lorente de Nó's Neuroscience to the Birth of Cybernetics"</a>. <i>The Neuroscientist</i>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1177%2F10738584231179932">10.1177/10738584231179932</a>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://hdl.handle.net/10261%2F348372">10261/348372</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1073-8584">1073-8584</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/37403768">37403768</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Neuroscientist&amp;rft.atitle=The+Importance+of+Cajal%27s+and+Lorente+de+N%C3%B3%27s+Neuroscience+to+the+Birth+of+Cybernetics&amp;rft.date=2023-07-05&amp;rft_id=info%3Ahdl%2F10261%2F348372&amp;rft.issn=1073-8584&amp;rft_id=info%3Apmid%2F37403768&amp;rft_id=info%3Adoi%2F10.1177%2F10738584231179932&amp;rft.aulast=Espinosa-Sanchez&amp;rft.aufirst=Juan+Manuel&amp;rft.au=Gomez-Marin%2C+Alex&amp;rft.au=de+Castro%2C+Fernando&amp;rft_id=http%3A%2F%2Fjournals.sagepub.com%2Fdoi%2F10.1177%2F10738584231179932&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-64"><span class="mw-cite-backlink"><b><a href="#cite_ref-64">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.oxfordreference.com/display/10.1093/oi/authority.20110803100417461">"reverberating circuit"</a>. <i>Oxford Reference</i><span class="reference-accessdate">. Retrieved <span class="nowrap">27 July</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Oxford+Reference&amp;rft.atitle=reverberating+circuit&amp;rft_id=https%3A%2F%2Fwww.oxfordreference.com%2Fdisplay%2F10.1093%2Foi%2Fauthority.20110803100417461&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-CAA1982-65"><span class="mw-cite-backlink">^ <a href="#cite_ref-CAA1982_65-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-CAA1982_65-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"> Bozinovski, S. (1982). "A self-learning system using secondary reinforcement". In Trappl, Robert (ed.). Cybernetics and Systems Research: Proceedings of the Sixth European Meeting on Cybernetics and Systems Research. North-Holland. pp. 397–402. ISBN 978-0-444-86488-8 </span> </li> <li id="cite_note-22-66"><span class="mw-cite-backlink"><b><a href="#cite_ref-22_66-0">^</a></b></span> <span class="reference-text"> Bozinovski S. (1995) "Neuro genetic agents and structural theory of self-reinforcement learning systems". 
CMPSCI Technical Report 95-107, University of Massachusetts at Amherst <a rel="nofollow" class="external autonumber" href="https://web.cs.umass.edu/publication/docs/1995/UM-CS-1995-107.pdf">[1]</a></span> </li> <li id="cite_note-67"><span class="mw-cite-backlink"><b><a href="#cite_ref-67">^</a></b></span> <span class="reference-text">R. Zajonc (1980) "Feeling and thinking: Preferences need no inferences". American Psychologist 35 (2): 151-175</span> </li> <li id="cite_note-68"><span class="mw-cite-backlink"><b><a href="#cite_ref-68">^</a></b></span> <span class="reference-text">Lazarus R. (1982) "Thoughts on the relations between emotion and cognition" American Psychologist 37 (9): 1019-1024</span> </li> <li id="cite_note-69"><span class="mw-cite-backlink"><b><a href="#cite_ref-69">^</a></b></span> <span class="reference-text"> Bozinovski, S. (2014) "Modeling mechanisms of cognition-emotion interaction in artificial neural networks, since 1981" Procedia Computer Science p. 255-263 (<a rel="nofollow" class="external free" href="https://core.ac.uk/download/pdf/81973924.pdf">https://core.ac.uk/download/pdf/81973924.pdf</a>) </span> </li> <li id="cite_note-chunker1991-70"><span class="mw-cite-backlink"><b><a href="#cite_ref-chunker1991_70-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSchmidhuber1991" class="citation journal cs1"><a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Schmidhuber J</a> (April 1991). <a rel="nofollow" class="external text" href="https://people.idsia.ch/~juergen/FKI-148-91ocr.pdf">"Neural Sequence Chunkers"</a> <span class="cs1-format">(PDF)</span>. <i>TR FKI-148, TU Munich</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=TR+FKI-148%2C+TU+Munich&amp;rft.atitle=Neural+Sequence+Chunkers&amp;rft.date=1991-04&amp;rft.aulast=Schmidhuber&amp;rft.aufirst=J%C3%BCrgen&amp;rft_id=https%3A%2F%2Fpeople.idsia.ch%2F~juergen%2FFKI-148-91ocr.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-schmidhuber1992-71"><span class="mw-cite-backlink"><b><a href="#cite_ref-schmidhuber1992_71-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSchmidhuber1992" class="citation journal cs1">Schmidhuber J (1992). <a rel="nofollow" class="external text" href="https://sferics.idsia.ch/pub/juergen/chunker.pdf">"Learning complex, extended sequences using the principle of history compression (based on TR FKI-148, 1991)"</a> <span class="cs1-format">(PDF)</span>. <i>Neural Computation</i>. <b>4</b> (2): 234–242. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1162%2Fneco.1992.4.2.234">10.1162/neco.1992.4.2.234</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:18271205">18271205</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Neural+Computation&amp;rft.atitle=Learning+complex%2C+extended+sequences+using+the+principle+of+history+compression+%28based+on+TR+FKI-148%2C+1991%29&amp;rft.volume=4&amp;rft.issue=2&amp;rft.pages=234-242&amp;rft.date=1992&amp;rft_id=info%3Adoi%2F10.1162%2Fneco.1992.4.2.234&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A18271205%23id-name%3DS2CID&amp;rft.aulast=Schmidhuber&amp;rft.aufirst=J%C3%BCrgen&amp;rft_id=https%3A%2F%2Fsferics.idsia.ch%2Fpub%2Fjuergen%2Fchunker.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-schmidhuber19932-72"><span class="mw-cite-backlink"><b><a href="#cite_ref-schmidhuber19932_72-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSchmidhuber1993" class="citation book cs1">Schmidhuber J (1993). <a rel="nofollow" class="external text" href="https://sferics.idsia.ch/pub/juergen/habilitation.pdf"><i>Habilitation thesis: System modeling and optimization</i></a> <span class="cs1-format">(PDF)</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Habilitation+thesis%3A+System+modeling+and+optimization&amp;rft.date=1993&amp;rft.aulast=Schmidhuber&amp;rft.aufirst=J%C3%BCrgen&amp;rft_id=https%3A%2F%2Fsferics.idsia.ch%2Fpub%2Fjuergen%2Fhabilitation.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span> Page 150 ff demonstrates credit assignment across the equivalent of 1,200 layers in an unfolded RNN.</span> </li> <li id="cite_note-HOCH1991-73"><span class="mw-cite-backlink">^ <a href="#cite_ref-HOCH1991_73-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-HOCH1991_73-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text">S. Hochreiter., "<a rel="nofollow" class="external text" href="http://people.idsia.ch/~juergen/SeppHochreiter1991ThesisAdvisorSchmidhuber.pdf">Untersuchungen zu dynamischen neuronalen Netzen</a>", <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150306075401/http://people.idsia.ch/~juergen/SeppHochreiter1991ThesisAdvisorSchmidhuber.pdf">Archived</a> 2015-03-06 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>, <i>Diploma thesis. Institut f. Informatik, Technische Univ. Munich. Advisor: J. Schmidhuber</i>, 1991.</span> </li> <li id="cite_note-HOCH2001-74"><span class="mw-cite-backlink"><b><a href="#cite_ref-HOCH2001_74-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHochreiter2001" class="citation book cs1">Hochreiter S, et al. (15 January 2001). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=NWOcMVA64aAC">"Gradient flow in recurrent nets: the difficulty of learning long-term dependencies"</a>. In Kolen JF, Kremer SC (eds.). <i>A Field Guide to Dynamical Recurrent Networks</i>. John Wiley &amp; Sons. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-7803-5369-5" title="Special:BookSources/978-0-7803-5369-5"><bdi>978-0-7803-5369-5</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519081124/https://books.google.com/books?id=NWOcMVA64aAC">Archived</a> from the original on 19 May 2024<span class="reference-accessdate">. Retrieved <span class="nowrap">26 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Gradient+flow+in+recurrent+nets%3A+the+difficulty+of+learning+long-term+dependencies&amp;rft.btitle=A+Field+Guide+to+Dynamical+Recurrent+Networks&amp;rft.pub=John+Wiley+%26+Sons&amp;rft.date=2001-01-15&amp;rft.isbn=978-0-7803-5369-5&amp;rft.aulast=Hochreiter&amp;rft.aufirst=S.&amp;rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3DNWOcMVA64aAC&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-75"><span class="mw-cite-backlink"><b><a href="#cite_ref-75">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSepp_HochreiterJürgen_Schmidhuber1995" class="citation cs2"><a href="/wiki/Sepp_Hochreiter" title="Sepp Hochreiter">Sepp Hochreiter</a>, <a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Jürgen Schmidhuber</a> (21 August 1995), <a rel="nofollow" class="external text" href="ftp://ftp.idsia.ch/pub/juergen/fki-207-95.ps.gz"><i>Long Short Term Memory</i></a>, <a href="/wiki/WDQ_(identifier)" class="mw-redirect" title="WDQ (identifier)">Wikidata</a> <a href="https://www.wikidata.org/wiki/Q98967430" class="extiw" title="d:Q98967430">Q98967430</a></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Long+Short+Term+Memory&amp;rft.date=1995-08-21&amp;rft.au=Sepp+Hochreiter&amp;rft.au=J%C3%BCrgen+Schmidhuber&amp;rft_id=ftp%3A%2F%2Fftp.idsia.ch%2Fpub%2Fjuergen%2Ffki-207-95.ps.gz&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-lstm2-76"><span class="mw-cite-backlink"><b><a href="#cite_ref-lstm2_76-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHochreiterSchmidhuber1997" class="citation journal cs1"><a href="/wiki/Sepp_Hochreiter" title="Sepp Hochreiter">Hochreiter S</a>, Schmidhuber J (1 November 1997). "Long Short-Term Memory". <i>Neural Computation</i>. <b>9</b> (8): 1735–1780. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1162%2Fneco.1997.9.8.1735">10.1162/neco.1997.9.8.1735</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/9377276">9377276</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:1915014">1915014</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Neural+Computation&amp;rft.atitle=Long+Short-Term+Memory&amp;rft.volume=9&amp;rft.issue=8&amp;rft.pages=1735-1780&amp;rft.date=1997-11-01&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A1915014%23id-name%3DS2CID&amp;rft_id=info%3Apmid%2F9377276&amp;rft_id=info%3Adoi%2F10.1162%2Fneco.1997.9.8.1735&amp;rft.aulast=Hochreiter&amp;rft.aufirst=Sepp&amp;rft.au=Schmidhuber%2C+J%C3%BCrgen&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-lstm1999-77"><span class="mw-cite-backlink"><b><a href="#cite_ref-lstm1999_77-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGersSchmidhuberCummins1999" class="citation book cs1">Gers F, Schmidhuber J, Cummins F (1999). "Learning to forget: Continual prediction with LSTM". <i>9th International Conference on Artificial Neural Networks: ICANN '99</i>. Vol. 1999. pp. 850–855. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1049%2Fcp%3A19991218">10.1049/cp:19991218</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-85296-721-7" title="Special:BookSources/0-85296-721-7"><bdi>0-85296-721-7</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Learning+to+forget%3A+Continual+prediction+with+LSTM&amp;rft.btitle=9th+International+Conference+on+Artificial+Neural+Networks%3A+ICANN+%2799&amp;rft.pages=850-855&amp;rft.date=1999&amp;rft_id=info%3Adoi%2F10.1049%2Fcp%3A19991218&amp;rft.isbn=0-85296-721-7&amp;rft.aulast=Gers&amp;rft.aufirst=Felix&amp;rft.au=Schmidhuber%2C+J%C3%BCrgen&amp;rft.au=Cummins%2C+Fred&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-78"><span class="mw-cite-backlink"><b><a href="#cite_ref-78">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAckleyHintonSejnowski1985" class="citation journal cs1">Ackley DH, Hinton GE, Sejnowski TJ (1 January 1985). <a rel="nofollow" class="external text" href="https://www.sciencedirect.com/science/article/pii/S0364021385800124">"A learning algorithm for boltzmann machines"</a>. <i>Cognitive Science</i>. <b>9</b> (1): 147–169. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2FS0364-0213%2885%2980012-4">10.1016/S0364-0213(85)80012-4</a>. 
<a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0364-0213">0364-0213</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Cognitive+Science&amp;rft.atitle=A+learning+algorithm+for+boltzmann+machines&amp;rft.volume=9&amp;rft.issue=1&amp;rft.pages=147-169&amp;rft.date=1985-01-01&amp;rft_id=info%3Adoi%2F10.1016%2FS0364-0213%2885%2980012-4&amp;rft.issn=0364-0213&amp;rft.aulast=Ackley&amp;rft.aufirst=David+H.&amp;rft.au=Hinton%2C+Geoffrey+E.&amp;rft.au=Sejnowski%2C+Terrence+J.&amp;rft_id=https%3A%2F%2Fwww.sciencedirect.com%2Fscience%2Farticle%2Fpii%2FS0364021385800124&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-79"><span class="mw-cite-backlink"><b><a href="#cite_ref-79">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSmolensky1986" class="citation book cs1">Smolensky P (1986). <a rel="nofollow" class="external text" href="https://stanford.edu/~jlmcc/papers/PDP/Volume%201/Chap6_PDP86.pdf">"Chapter 6: Information Processing in Dynamical Systems: Foundations of Harmony Theory"</a> <span class="cs1-format">(PDF)</span>. In Rumelhart DE, McLelland JL (eds.). <a href="/wiki/Connectionism" title="Connectionism"><i>Parallel Distributed Processing: Explorations in the Microstructure of Cognition, Volume 1: Foundations</i></a>. MIT Press. pp. <a rel="nofollow" class="external text" href="https://archive.org/details/paralleldistribu00rume/page/194">194–281</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-262-68053-X" title="Special:BookSources/0-262-68053-X"><bdi>0-262-68053-X</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Chapter+6%3A+Information+Processing+in+Dynamical+Systems%3A+Foundations+of+Harmony+Theory&amp;rft.btitle=Parallel+Distributed+Processing%3A+Explorations+in+the+Microstructure+of+Cognition%2C+Volume+1%3A+Foundations&amp;rft.pages=194-281&amp;rft.pub=MIT+Press&amp;rft.date=1986&amp;rft.isbn=0-262-68053-X&amp;rft.aulast=Smolensky&amp;rft.aufirst=Paul&amp;rft_id=https%3A%2F%2Fstanford.edu%2F~jlmcc%2Fpapers%2FPDP%2FVolume%25201%2FChap6_PDP86.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-“nc95“-80"><span class="mw-cite-backlink"><b><a href="#cite_ref-%E2%80%9Cnc95%E2%80%9C_80-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFPeterHintonNealZemel1995" class="citation journal cs1"><a href="/wiki/Peter_Dayan" title="Peter Dayan">Peter D</a>, <a href="/wiki/Geoffrey_Hinton" title="Geoffrey Hinton">Hinton GE</a>, <a href="/wiki/Radford_M._Neal" title="Radford M. Neal">Neal RM</a>, <a href="/wiki/Richard_Zemel" title="Richard Zemel">Zemel RS</a> (1995). "The Helmholtz machine". <i>Neural Computation</i>. <b>7</b> (5): 889–904. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1162%2Fneco.1995.7.5.889">10.1162/neco.1995.7.5.889</a>. 
<a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://hdl.handle.net/21.11116%2F0000-0002-D6D3-E">21.11116/0000-0002-D6D3-E</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/7584891">7584891</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:1890561">1890561</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Neural+Computation&amp;rft.atitle=The+Helmholtz+machine.&amp;rft.volume=7&amp;rft.issue=5&amp;rft.pages=889-904&amp;rft.date=1995&amp;rft_id=info%3Ahdl%2F21.11116%2F0000-0002-D6D3-E&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A1890561%23id-name%3DS2CID&amp;rft_id=info%3Apmid%2F7584891&amp;rft_id=info%3Adoi%2F10.1162%2Fneco.1995.7.5.889&amp;rft.aulast=Peter&amp;rft.aufirst=Dayan&amp;rft.au=Hinton%2C+Geoffrey+E.&amp;rft.au=Neal%2C+Radford+M.&amp;rft.au=Zemel%2C+Richard+S.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span> <span style="position:relative; top: -2px;"><span typeof="mw:File"><a href="/wiki/Paywall" title="closed access publication – behind paywall"><noscript><img alt="Closed access icon" src="//upload.wikimedia.org/wikipedia/commons/thumb/0/0e/Closed_Access_logo_transparent.svg/9px-Closed_Access_logo_transparent.svg.png" decoding="async" width="9" height="14" class="mw-file-element" data-file-width="640" data-file-height="1000"></noscript><span class="lazy-image-placeholder" style="width: 9px;height: 14px;" data-src="//upload.wikimedia.org/wikipedia/commons/thumb/0/0e/Closed_Access_logo_transparent.svg/9px-Closed_Access_logo_transparent.svg.png" data-alt="Closed access icon" data-width="9" data-height="14" data-srcset="//upload.wikimedia.org/wikipedia/commons/thumb/0/0e/Closed_Access_logo_transparent.svg/14px-Closed_Access_logo_transparent.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/0/0e/Closed_Access_logo_transparent.svg/18px-Closed_Access_logo_transparent.svg.png 2x" data-class="mw-file-element">&nbsp;</span></a></span></span></span> </li> <li id="cite_note-:13-81"><span class="mw-cite-backlink"><b><a href="#cite_ref-:13_81-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHintonDayanFreyNeal1995" class="citation journal cs1"><a href="/wiki/Geoffrey_Hinton" title="Geoffrey Hinton">Hinton GE</a>, <a href="/wiki/Peter_Dayan" title="Peter Dayan">Dayan P</a>, <a href="/wiki/Brendan_Frey" title="Brendan Frey">Frey BJ</a>, Neal R (26 May 1995). "The wake-sleep algorithm for unsupervised neural networks". <i>Science</i>. <b>268</b> (5214): 1158–1161. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/1995Sci...268.1158H">1995Sci...268.1158H</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1126%2Fscience.7761831">10.1126/science.7761831</a>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/7761831">7761831</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:871473">871473</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Science&amp;rft.atitle=The+wake-sleep+algorithm+for+unsupervised+neural+networks&amp;rft.volume=268&amp;rft.issue=5214&amp;rft.pages=1158-1161&amp;rft.date=1995-05-26&amp;rft_id=info%3Adoi%2F10.1126%2Fscience.7761831&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A871473%23id-name%3DS2CID&amp;rft_id=info%3Apmid%2F7761831&amp;rft_id=info%3Abibcode%2F1995Sci...268.1158H&amp;rft.aulast=Hinton&amp;rft.aufirst=Geoffrey+E.&amp;rft.au=Dayan%2C+Peter&amp;rft.au=Frey%2C+Brendan+J.&amp;rft.au=Neal%2C+Radford&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-82"><span class="mw-cite-backlink"><b><a href="#cite_ref-82">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://www.kurzweilai.net/how-bio-inspired-deep-learning-keeps-winning-competitions">2012 Kurzweil AI Interview</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180831075249/http://www.kurzweilai.net/how-bio-inspired-deep-learning-keeps-winning-competitions">Archived</a> 31 August 2018 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a> with Juergen Schmidhuber on the eight competitions won by his Deep Learning team 2009–2012</span> </li> <li id="cite_note-83"><span class="mw-cite-backlink"><b><a href="#cite_ref-83">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20180831075249/http://www.kurzweilai.net/how-bio-inspired-deep-learning-keeps-winning-competitions">"How bio-inspired deep learning keeps winning competitions | KurzweilAI"</a>. <i>kurzweilai.net</i>. Archived from <a rel="nofollow" class="external text" href="http://www.kurzweilai.net/how-bio-inspired-deep-learning-keeps-winning-competitions">the original</a> on 31 August 2018<span class="reference-accessdate">. Retrieved <span class="nowrap">16 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=kurzweilai.net&amp;rft.atitle=How+bio-inspired+deep+learning+keeps+winning+competitions+%7C+KurzweilAI&amp;rft_id=http%3A%2F%2Fwww.kurzweilai.net%2Fhow-bio-inspired-deep-learning-keeps-winning-competitions&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-:32-84"><span class="mw-cite-backlink"><b><a href="#cite_ref-:32_84-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCireşanMeierGambardellaSchmidhuber2010" class="citation journal cs1">Cireşan DC, Meier U, Gambardella LM, Schmidhuber J (21 September 2010). "Deep, Big, Simple Neural Nets for Handwritten Digit Recognition". <i>Neural Computation</i>. 
<b>22</b> (12): 3207–3220. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1003.0358">1003.0358</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1162%2Fneco_a_00052">10.1162/neco_a_00052</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0899-7667">0899-7667</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/20858131">20858131</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:1918673">1918673</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Neural+Computation&amp;rft.atitle=Deep%2C+Big%2C+Simple+Neural+Nets+for+Handwritten+Digit+Recognition&amp;rft.volume=22&amp;rft.issue=12&amp;rft.pages=3207-3220&amp;rft.date=2010-09-21&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A1918673%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1162%2Fneco_a_00052&amp;rft_id=info%3Aarxiv%2F1003.0358&amp;rft.issn=0899-7667&amp;rft_id=info%3Apmid%2F20858131&amp;rft.aulast=Cire%C5%9Fan&amp;rft.aufirst=Dan+Claudiu&amp;rft.au=Meier%2C+Ueli&amp;rft.au=Gambardella%2C+Luca+Maria&amp;rft.au=Schmidhuber%2C+J%C3%BCrgen&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-:62-85"><span class="mw-cite-backlink"><b><a href="#cite_ref-:62_85-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCiresanMeierMasciGambardella2011" class="citation journal cs1">Ciresan DC, Meier U, Masci J, Gambardella L, Schmidhuber J (2011). <a rel="nofollow" class="external text" href="http://ijcai.org/papers11/Papers/IJCAI11-210.pdf">"Flexible, High Performance Convolutional Neural Networks for Image Classification"</a> <span class="cs1-format">(PDF)</span>. <i>International Joint Conference on Artificial Intelligence</i>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.5591%2F978-1-57735-516-8%2Fijcai11-210">10.5591/978-1-57735-516-8/ijcai11-210</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20140929094040/http://ijcai.org/papers11/Papers/IJCAI11-210.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 29 September 2014<span class="reference-accessdate">. 
Retrieved <span class="nowrap">13 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=International+Joint+Conference+on+Artificial+Intelligence&amp;rft.atitle=Flexible%2C+High+Performance+Convolutional+Neural+Networks+for+Image+Classification&amp;rft.date=2011&amp;rft_id=info%3Adoi%2F10.5591%2F978-1-57735-516-8%2Fijcai11-210&amp;rft.aulast=Ciresan&amp;rft.aufirst=D.+C.&amp;rft.au=Meier%2C+U.&amp;rft.au=Masci%2C+J.&amp;rft.au=Gambardella%2C+L.M.&amp;rft.au=Schmidhuber%2C+J.&amp;rft_id=http%3A%2F%2Fijcai.org%2Fpapers11%2FPapers%2FIJCAI11-210.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-:82-86"><span class="mw-cite-backlink"><b><a href="#cite_ref-:82_86-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCiresanGiustiGambardellaSchmidhuber2012" class="citation book cs1">Ciresan D, Giusti A, Gambardella LM, Schmidhuber J (2012). Pereira F, Burges CJ, Bottou L, Weinberger KQ (eds.). <a rel="nofollow" class="external text" href="http://papers.nips.cc/paper/4741-deep-neural-networks-segment-neuronal-membranes-in-electron-microscopy-images.pdf"><i>Advances in Neural Information Processing Systems 25</i></a> <span class="cs1-format">(PDF)</span>. Curran Associates, Inc. pp. 2843–2851. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170809081713/http://papers.nips.cc/paper/4741-deep-neural-networks-segment-neuronal-membranes-in-electron-microscopy-images.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 9 August 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">13 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Advances+in+Neural+Information+Processing+Systems+25&amp;rft.pages=2843-2851&amp;rft.pub=Curran+Associates%2C+Inc.&amp;rft.date=2012&amp;rft.aulast=Ciresan&amp;rft.aufirst=Dan&amp;rft.au=Giusti%2C+Alessandro&amp;rft.au=Gambardella%2C+Luca+M.&amp;rft.au=Schmidhuber%2C+J%C3%BCrgen&amp;rft_id=http%3A%2F%2Fpapers.nips.cc%2Fpaper%2F4741-deep-neural-networks-segment-neuronal-membranes-in-electron-microscopy-images.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-ciresan2013miccai-87"><span class="mw-cite-backlink"><b><a href="#cite_ref-ciresan2013miccai_87-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCiresanGiustiGambardellaSchmidhuber2013" class="citation book cs1">Ciresan D, Giusti A, Gambardella L, Schmidhuber J (2013). "Mitosis Detection in Breast Cancer Histology Images with Deep Neural Networks". <i>Medical Image Computing and Computer-Assisted Intervention – MICCAI 2013</i>. Lecture Notes in Computer Science. Vol. 7908. pp. 411–418. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-3-642-40763-5_51">10.1007/978-3-642-40763-5_51</a>. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3-642-38708-1" title="Special:BookSources/978-3-642-38708-1"><bdi>978-3-642-38708-1</bdi></a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/24579167">24579167</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Mitosis+Detection+in+Breast+Cancer+Histology+Images+with+Deep+Neural+Networks&amp;rft.btitle=Medical+Image+Computing+and+Computer-Assisted+Intervention+%E2%80%93+MICCAI+2013&amp;rft.series=Lecture+Notes+in+Computer+Science&amp;rft.pages=411-418&amp;rft.date=2013&amp;rft_id=info%3Apmid%2F24579167&amp;rft_id=info%3Adoi%2F10.1007%2F978-3-642-40763-5_51&amp;rft.isbn=978-3-642-38708-1&amp;rft.aulast=Ciresan&amp;rft.aufirst=D.&amp;rft.au=Giusti%2C+A.&amp;rft.au=Gambardella%2C+L.M.&amp;rft.au=Schmidhuber%2C+J.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-:9-88"><span class="mw-cite-backlink"><b><a href="#cite_ref-:9_88-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCiresanMeierSchmidhuber2012" class="citation book cs1">Ciresan D, Meier U, Schmidhuber J (2012). "Multi-column deep neural networks for image classification". <i>2012 IEEE Conference on Computer Vision and Pattern Recognition</i>. pp. 3642–3649. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1202.2745">1202.2745</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2Fcvpr.2012.6248110">10.1109/cvpr.2012.6248110</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4673-1228-8" title="Special:BookSources/978-1-4673-1228-8"><bdi>978-1-4673-1228-8</bdi></a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:2161592">2161592</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Multi-column+deep+neural+networks+for+image+classification&amp;rft.btitle=2012+IEEE+Conference+on+Computer+Vision+and+Pattern+Recognition&amp;rft.pages=3642-3649&amp;rft.date=2012&amp;rft_id=info%3Aarxiv%2F1202.2745&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A2161592%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1109%2Fcvpr.2012.6248110&amp;rft.isbn=978-1-4673-1228-8&amp;rft.aulast=Ciresan&amp;rft.aufirst=D.&amp;rft.au=Meier%2C+U.&amp;rft.au=Schmidhuber%2C+J.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-krizhevsky20122-89"><span class="mw-cite-backlink"><b><a href="#cite_ref-krizhevsky20122_89-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKrizhevskySutskeverHinton2012" class="citation journal cs1">Krizhevsky A, Sutskever I, Hinton G (2012). <a rel="nofollow" class="external text" href="https://www.cs.toronto.edu/~kriz/imagenet_classification_with_deep_convolutional.pdf">"ImageNet Classification with Deep Convolutional Neural Networks"</a> <span class="cs1-format">(PDF)</span>. <i>NIPS 2012: Neural Information Processing Systems, Lake Tahoe, Nevada</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170110123024/http://www.cs.toronto.edu/~kriz/imagenet_classification_with_deep_convolutional.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 10 January 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">24 May</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=NIPS+2012%3A+Neural+Information+Processing+Systems%2C+Lake+Tahoe%2C+Nevada&amp;rft.atitle=ImageNet+Classification+with+Deep+Convolutional+Neural+Networks&amp;rft.date=2012&amp;rft.aulast=Krizhevsky&amp;rft.aufirst=Alex&amp;rft.au=Sutskever%2C+Ilya&amp;rft.au=Hinton%2C+Geoffrey&amp;rft_id=https%3A%2F%2Fwww.cs.toronto.edu%2F~kriz%2Fimagenet_classification_with_deep_convolutional.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-VGG-90"><span class="mw-cite-backlink"><b><a href="#cite_ref-VGG_90-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSimonyanAndrew2014" class="citation arxiv cs1">Simonyan K, Andrew Z (2014). "Very Deep Convolution Networks for Large Scale Image Recognition". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1409.1556">1409.1556</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Very+Deep+Convolution+Networks+for+Large+Scale+Image+Recognition&amp;rft.date=2014&amp;rft_id=info%3Aarxiv%2F1409.1556&amp;rft.aulast=Simonyan&amp;rft.aufirst=Karen&amp;rft.au=Andrew%2C+Zisserman&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-szegedy-91"><span class="mw-cite-backlink"><b><a href="#cite_ref-szegedy_91-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSzegedy2015" class="citation journal cs1">Szegedy C (2015). <a rel="nofollow" class="external text" href="https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43022.pdf">"Going deeper with convolutions"</a> <span class="cs1-format">(PDF)</span>. <i>Cvpr2015</i>. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1409.4842">1409.4842</a></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Cvpr2015&amp;rft.atitle=Going+deeper+with+convolutions&amp;rft.date=2015&amp;rft_id=info%3Aarxiv%2F1409.4842&amp;rft.aulast=Szegedy&amp;rft.aufirst=Christian&amp;rft_id=https%3A%2F%2Fstatic.googleusercontent.com%2Fmedia%2Fresearch.google.com%2Fen%2F%2Fpubs%2Farchive%2F43022.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-ng2012-92"><span class="mw-cite-backlink"><b><a href="#cite_ref-ng2012_92-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFNgDean2012" class="citation arxiv cs1">Ng A, Dean J (2012). "Building High-level Features Using Large Scale Unsupervised Learning". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1112.6209">1112.6209</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.LG">cs.LG</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Building+High-level+Features+Using+Large+Scale+Unsupervised+Learning&amp;rft.date=2012&amp;rft_id=info%3Aarxiv%2F1112.6209&amp;rft.aulast=Ng&amp;rft.aufirst=Andrew&amp;rft.au=Dean%2C+Jeff&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-SAB1-93"><span class="mw-cite-backlink">^ <a href="#cite_ref-SAB1_93-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-SAB1_93-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBillings2013" class="citation book cs1">Billings SA (2013). <i>Nonlinear System Identification: NARMAX Methods in the Time, Frequency, and Spatio-Temporal Domains</i>. Wiley. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-119-94359-4" title="Special:BookSources/978-1-119-94359-4"><bdi>978-1-119-94359-4</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Nonlinear+System+Identification%3A+NARMAX+Methods+in+the+Time%2C+Frequency%2C+and+Spatio-Temporal+Domains&amp;rft.pub=Wiley&amp;rft.date=2013&amp;rft.isbn=978-1-119-94359-4&amp;rft.aulast=Billings&amp;rft.aufirst=S.+A.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-GANnips-94"><span class="mw-cite-backlink">^ <a href="#cite_ref-GANnips_94-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-GANnips_94-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGoodfellowPouget-AbadieMirzaXu2014" class="citation conference cs1">Goodfellow I, Pouget-Abadie J, Mirza M, Xu B, Warde-Farley D, Ozair S, et al. (2014). <a rel="nofollow" class="external text" href="https://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf"><i>Generative Adversarial Networks</i></a> <span class="cs1-format">(PDF)</span>. Proceedings of the International Conference on Neural Information Processing Systems (NIPS 2014). pp. 2672–2680. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20191122034612/http://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 22 November 2019<span class="reference-accessdate">. 
Retrieved <span class="nowrap">20 August</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=conference&amp;rft.btitle=Generative+Adversarial+Networks&amp;rft.pages=2672-2680&amp;rft.date=2014&amp;rft.aulast=Goodfellow&amp;rft.aufirst=Ian&amp;rft.au=Pouget-Abadie%2C+Jean&amp;rft.au=Mirza%2C+Mehdi&amp;rft.au=Xu%2C+Bing&amp;rft.au=Warde-Farley%2C+David&amp;rft.au=Ozair%2C+Sherjil&amp;rft.au=Courville%2C+Aaron&amp;rft.au=Bengio%2C+Yoshua&amp;rft_id=https%3A%2F%2Fpapers.nips.cc%2Fpaper%2F5423-generative-adversarial-nets.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-curiosity1991-95"><span class="mw-cite-backlink"><b><a href="#cite_ref-curiosity1991_95-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSchmidhuber1991" class="citation conference cs1"><a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Schmidhuber J</a> (1991). "A possibility for implementing curiosity and boredom in model-building neural controllers". <i>Proc. SAB'1991</i>. MIT Press/Bradford Books. pp. 222–227.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=conference&amp;rft.atitle=A+possibility+for+implementing+curiosity+and+boredom+in+model-building+neural+controllers&amp;rft.btitle=Proc.+SAB%271991&amp;rft.pages=222-227&amp;rft.pub=MIT+Press%2FBradford+Books&amp;rft.date=1991&amp;rft.aulast=Schmidhuber&amp;rft.aufirst=J%C3%BCrgen&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-gancurpm2020-96"><span class="mw-cite-backlink"><b><a href="#cite_ref-gancurpm2020_96-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSchmidhuber2020" class="citation journal cs1"><a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Schmidhuber J</a> (2020). "Generative Adversarial Networks are Special Cases of Artificial Curiosity (1990) and also Closely Related to Predictability Minimization (1991)". <i>Neural Networks</i>. <b>127</b>: 58–66. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1906.04493">1906.04493</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.neunet.2020.04.008">10.1016/j.neunet.2020.04.008</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/32334341">32334341</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:216056336">216056336</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Neural+Networks&amp;rft.atitle=Generative+Adversarial+Networks+are+Special+Cases+of+Artificial+Curiosity+%281990%29+and+also+Closely+Related+to+Predictability+Minimization+%281991%29&amp;rft.volume=127&amp;rft.pages=58-66&amp;rft.date=2020&amp;rft_id=info%3Aarxiv%2F1906.04493&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A216056336%23id-name%3DS2CID&amp;rft_id=info%3Apmid%2F32334341&amp;rft_id=info%3Adoi%2F10.1016%2Fj.neunet.2020.04.008&amp;rft.aulast=Schmidhuber&amp;rft.aufirst=J%C3%BCrgen&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-SyncedReview201822-97"><span class="mw-cite-backlink"><b><a href="#cite_ref-SyncedReview201822_97-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://syncedreview.com/2018/12/14/gan-2-0-nvidias-hyperrealistic-face-generator/">"GAN 2.0: NVIDIA's Hyperrealistic Face Generator"</a>. <i>SyncedReview.com</i>. 14 December 2018<span class="reference-accessdate">. Retrieved <span class="nowrap">3 October</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=SyncedReview.com&amp;rft.atitle=GAN+2.0%3A+NVIDIA%27s+Hyperrealistic+Face+Generator&amp;rft.date=2018-12-14&amp;rft_id=https%3A%2F%2Fsyncedreview.com%2F2018%2F12%2F14%2Fgan-2-0-nvidias-hyperrealistic-face-generator%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-progressiveGAN201722-98"><span class="mw-cite-backlink"><b><a href="#cite_ref-progressiveGAN201722_98-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKarrasAilaLaineLehtinen2018" class="citation arxiv cs1">Karras T, Aila T, Laine S, Lehtinen J (26 February 2018). "Progressive Growing of GANs for Improved Quality, Stability, and Variation". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1710.10196">1710.10196</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.NE">cs.NE</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Progressive+Growing+of+GANs+for+Improved+Quality%2C+Stability%2C+and+Variation&amp;rft.date=2018-02-26&amp;rft_id=info%3Aarxiv%2F1710.10196&amp;rft.aulast=Karras&amp;rft.aufirst=T.&amp;rft.au=Aila%2C+T.&amp;rft.au=Laine%2C+S.&amp;rft.au=Lehtinen%2C+J.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-99"><span class="mw-cite-backlink"><b><a href="#cite_ref-99">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://lab.witness.org/projects/synthetic-media-and-deep-fakes/">"Prepare, Don't Panic: Synthetic Media and Deepfakes"</a>. witness.org. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20201202231744/https://lab.witness.org/projects/synthetic-media-and-deep-fakes/">Archived</a> from the original on 2 December 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">25 November</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Prepare%2C+Don%27t+Panic%3A+Synthetic+Media+and+Deepfakes&amp;rft.pub=witness.org&amp;rft_id=https%3A%2F%2Flab.witness.org%2Fprojects%2Fsynthetic-media-and-deep-fakes%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-100"><span class="mw-cite-backlink"><b><a href="#cite_ref-100">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSohl-DicksteinWeissMaheswaranathanGanguli2015" class="citation journal cs1">Sohl-Dickstein J, Weiss E, Maheswaranathan N, Ganguli S (1 June 2015). <a rel="nofollow" class="external text" href="http://proceedings.mlr.press/v37/sohl-dickstein15.pdf">"Deep Unsupervised Learning using Nonequilibrium Thermodynamics"</a> <span class="cs1-format">(PDF)</span>. <i>Proceedings of the 32nd International Conference on Machine Learning</i>. <b>37</b>. PMLR: 2256–2265. 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1503.03585">1503.03585</a></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Proceedings+of+the+32nd+International+Conference+on+Machine+Learning&amp;rft.atitle=Deep+Unsupervised+Learning+using+Nonequilibrium+Thermodynamics&amp;rft.volume=37&amp;rft.pages=2256-2265&amp;rft.date=2015-06-01&amp;rft_id=info%3Aarxiv%2F1503.03585&amp;rft.aulast=Sohl-Dickstein&amp;rft.aufirst=Jascha&amp;rft.au=Weiss%2C+Eric&amp;rft.au=Maheswaranathan%2C+Niru&amp;rft.au=Ganguli%2C+Surya&amp;rft_id=http%3A%2F%2Fproceedings.mlr.press%2Fv37%2Fsohl-dickstein15.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-101"><span class="mw-cite-backlink"><b><a href="#cite_ref-101">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSimonyanZisserman2015" class="citation cs2">Simonyan K, Zisserman A (10 April 2015), <i>Very Deep Convolutional Networks for Large-Scale Image Recognition</i>, <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1409.1556">1409.1556</a></span></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Very+Deep+Convolutional+Networks+for+Large-Scale+Image+Recognition&amp;rft.date=2015-04-10&amp;rft_id=info%3Aarxiv%2F1409.1556&amp;rft.aulast=Simonyan&amp;rft.aufirst=Karen&amp;rft.au=Zisserman%2C+Andrew&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-prelu2-102"><span class="mw-cite-backlink"><b><a href="#cite_ref-prelu2_102-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHeZhangRenSun2016" class="citation arxiv cs1">He K, Zhang X, Ren S, Sun J (2016). "Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1502.01852">1502.01852</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Delving+Deep+into+Rectifiers%3A+Surpassing+Human-Level+Performance+on+ImageNet+Classification&amp;rft.date=2016&amp;rft_id=info%3Aarxiv%2F1502.01852&amp;rft.aulast=He&amp;rft.aufirst=Kaiming&amp;rft.au=Zhang%2C+Xiangyu&amp;rft.au=Ren%2C+Shaoqing&amp;rft.au=Sun%2C+Jian&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-resnet2-103"><span class="mw-cite-backlink"><b><a href="#cite_ref-resnet2_103-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHeZhangRenSun2015" class="citation conference cs1">He K, Zhang X, Ren S, Sun J (10 December 2015). <i>Deep Residual Learning for Image Recognition</i>. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1512.03385">1512.03385</a></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=conference&amp;rft.btitle=Deep+Residual+Learning+for+Image+Recognition&amp;rft.date=2015-12-10&amp;rft_id=info%3Aarxiv%2F1512.03385&amp;rft.aulast=He&amp;rft.aufirst=Kaiming&amp;rft.au=Zhang%2C+Xiangyu&amp;rft.au=Ren%2C+Shaoqing&amp;rft.au=Sun%2C+Jian&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-highway20153-104"><span class="mw-cite-backlink"><b><a href="#cite_ref-highway20153_104-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSrivastavaGreffSchmidhuber2015" class="citation arxiv cs1">Srivastava RK, Greff K, Schmidhuber J (2 May 2015). "Highway Networks". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1505.00387">1505.00387</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.LG">cs.LG</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Highway+Networks&amp;rft.date=2015-05-02&amp;rft_id=info%3Aarxiv%2F1505.00387&amp;rft.aulast=Srivastava&amp;rft.aufirst=Rupesh+Kumar&amp;rft.au=Greff%2C+Klaus&amp;rft.au=Schmidhuber%2C+J%C3%BCrgen&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-resnet20153-105"><span class="mw-cite-backlink"><b><a href="#cite_ref-resnet20153_105-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHeZhangRenSun2016" class="citation conference cs1">He K, Zhang X, Ren S, Sun J (2016). 
<a rel="nofollow" class="external text" href="https://ieeexplore.ieee.org/document/7780459"><i>Deep Residual Learning for Image Recognition</i></a>. <i>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</i>. Las Vegas, NV, USA: IEEE. pp. 770–778. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1512.03385">1512.03385</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FCVPR.2016.90">10.1109/CVPR.2016.90</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4673-8851-1" title="Special:BookSources/978-1-4673-8851-1"><bdi>978-1-4673-8851-1</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=conference&amp;rft.jtitle=2016+IEEE+Conference+on+Computer+Vision+and+Pattern+Recognition+%28CVPR%29&amp;rft.atitle=Deep+Residual+Learning+for+Image+Recognition&amp;rft.pages=770-778&amp;rft.date=2016&amp;rft_id=info%3Aarxiv%2F1512.03385&amp;rft_id=info%3Adoi%2F10.1109%2FCVPR.2016.90&amp;rft.isbn=978-1-4673-8851-1&amp;rft.aulast=He&amp;rft.aufirst=Kaiming&amp;rft.au=Zhang%2C+Xiangyu&amp;rft.au=Ren%2C+Shaoqing&amp;rft.au=Sun%2C+Jian&amp;rft_id=https%3A%2F%2Fieeexplore.ieee.org%2Fdocument%2F7780459&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-106"><span class="mw-cite-backlink"><b><a href="#cite_ref-106">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLinn2015" class="citation web cs1">Linn A (10 December 2015). <a rel="nofollow" class="external text" href="https://blogs.microsoft.com/ai/microsoft-researchers-win-imagenet-computer-vision-challenge/">"Microsoft researchers win ImageNet computer vision challenge"</a>. <i>The AI Blog</i><span class="reference-accessdate">. Retrieved <span class="nowrap">29 June</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=The+AI+Blog&amp;rft.atitle=Microsoft+researchers+win+ImageNet+computer+vision+challenge&amp;rft.date=2015-12-10&amp;rft.aulast=Linn&amp;rft.aufirst=Allison&amp;rft_id=https%3A%2F%2Fblogs.microsoft.com%2Fai%2Fmicrosoft-researchers-win-imagenet-computer-vision-challenge%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-vaswani2017-107"><span class="mw-cite-backlink"><b><a href="#cite_ref-vaswani2017_107-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFVaswaniShazeerParmarUszkoreit2017" class="citation arxiv cs1">Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, et al. (12 June 2017). "Attention Is All You Need". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1706.03762">1706.03762</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CL">cs.CL</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Attention+Is+All+You+Need&amp;rft.date=2017-06-12&amp;rft_id=info%3Aarxiv%2F1706.03762&amp;rft.aulast=Vaswani&amp;rft.aufirst=Ashish&amp;rft.au=Shazeer%2C+Noam&amp;rft.au=Parmar%2C+Niki&amp;rft.au=Uszkoreit%2C+Jakob&amp;rft.au=Jones%2C+Llion&amp;rft.au=Gomez%2C+Aidan+N.&amp;rft.au=Kaiser%2C+Lukasz&amp;rft.au=Polosukhin%2C+Illia&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-transform19922-108"><span class="mw-cite-backlink"><b><a href="#cite_ref-transform19922_108-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSchmidhuber1992" class="citation journal cs1"><a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Schmidhuber J</a> (1992). <a rel="nofollow" class="external text" href="https://archive.org/download/wikipedia-scholarly-sources-corpus/10.1162.zip/10.1162%252Fneco.1992.4.1.131.pdf">"Learning to control fast-weight memories: an alternative to recurrent nets"</a> <span class="cs1-format">(PDF)</span>. <i>Neural Computation</i>. <b>4</b> (1): 131–139. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1162%2Fneco.1992.4.1.131">10.1162/neco.1992.4.1.131</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:16683347">16683347</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Neural+Computation&amp;rft.atitle=Learning+to+control+fast-weight+memories%3A+an+alternative+to+recurrent+nets.&amp;rft.volume=4&amp;rft.issue=1&amp;rft.pages=131-139&amp;rft.date=1992&amp;rft_id=info%3Adoi%2F10.1162%2Fneco.1992.4.1.131&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A16683347%23id-name%3DS2CID&amp;rft.aulast=Schmidhuber&amp;rft.aufirst=J%C3%BCrgen&amp;rft_id=https%3A%2F%2Farchive.org%2Fdownload%2Fwikipedia-scholarly-sources-corpus%2F10.1162.zip%2F10.1162%25252Fneco.1992.4.1.131.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-fastlinear20202-109"><span class="mw-cite-backlink"><b><a href="#cite_ref-fastlinear20202_109-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKatharopoulosVyasPappasFleuret2020" class="citation conference cs1">Katharopoulos A, Vyas A, Pappas N, Fleuret F (2020). <a rel="nofollow" class="external text" href="https://paperswithcode.com/paper/a-decomposable-attention-model-for-natural">"Transformers are RNNs: Fast autoregressive Transformers with linear attention"</a>. <i>ICML 2020</i>. PMLR. pp. 
5156–5165.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=conference&amp;rft.atitle=Transformers+are+RNNs%3A+Fast+autoregressive+Transformers+with+linear+attention&amp;rft.btitle=ICML+2020&amp;rft.pages=5156-5165&amp;rft.pub=PMLR&amp;rft.date=2020&amp;rft.aulast=Katharopoulos&amp;rft.aufirst=Angelos&amp;rft.au=Vyas%2C+Apoorv&amp;rft.au=Pappas%2C+Nikolaos&amp;rft.au=Fleuret%2C+Fran%C3%A7ois&amp;rft_id=https%3A%2F%2Fpaperswithcode.com%2Fpaper%2Fa-decomposable-attention-model-for-natural&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-schlag20212-110"><span class="mw-cite-backlink"><b><a href="#cite_ref-schlag20212_110-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSchlagIrieSchmidhuber2021" class="citation conference cs1">Schlag I, Irie K, <a href="/wiki/Juergen_Schmidhuber" class="mw-redirect" title="Juergen Schmidhuber">Schmidhuber J</a> (2021). "Linear Transformers Are Secretly Fast Weight Programmers". <i>ICML 2021</i>. Springer. pp. 9355–9366.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=conference&amp;rft.atitle=Linear+Transformers+Are+Secretly+Fast+Weight+Programmers&amp;rft.btitle=ICML+2021&amp;rft.pages=9355-9366&amp;rft.pub=Springer&amp;rft.date=2021&amp;rft.aulast=Schlag&amp;rft.aufirst=Imanol&amp;rft.au=Irie%2C+Kazuki&amp;rft.au=Schmidhuber%2C+J%C3%BCrgen&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-wolf2020-111"><span class="mw-cite-backlink"><b><a href="#cite_ref-wolf2020_111-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWolfDebutSanhChaumond2020" class="citation book cs1">Wolf T, Debut L, Sanh V, Chaumond J, Delangue C, Moi A, et al. (2020). "Transformers: State-of-the-Art Natural Language Processing". <i>Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations</i>. pp. 38–45. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.18653%2Fv1%2F2020.emnlp-demos.6">10.18653/v1/2020.emnlp-demos.6</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:208117506">208117506</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Transformers%3A+State-of-the-Art+Natural+Language+Processing&amp;rft.btitle=Proceedings+of+the+2020+Conference+on+Empirical+Methods+in+Natural+Language+Processing%3A+System+Demonstrations&amp;rft.pages=38-45&amp;rft.date=2020&amp;rft_id=info%3Adoi%2F10.18653%2Fv1%2F2020.emnlp-demos.6&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A208117506%23id-name%3DS2CID&amp;rft.aulast=Wolf&amp;rft.aufirst=Thomas&amp;rft.au=Debut%2C+Lysandre&amp;rft.au=Sanh%2C+Victor&amp;rft.au=Chaumond%2C+Julien&amp;rft.au=Delangue%2C+Clement&amp;rft.au=Moi%2C+Anthony&amp;rft.au=Cistac%2C+Pierric&amp;rft.au=Rault%2C+Tim&amp;rft.au=Louf%2C+Remi&amp;rft.au=Funtowicz%2C+Morgan&amp;rft.au=Davison%2C+Joe&amp;rft.au=Shleifer%2C+Sam&amp;rft.au=von+Platen%2C+Patrick&amp;rft.au=Ma%2C+Clara&amp;rft.au=Jernite%2C+Yacine&amp;rft.au=Plu%2C+Julien&amp;rft.au=Xu%2C+Canwen&amp;rft.au=Le+Scao%2C+Teven&amp;rft.au=Gugger%2C+Sylvain&amp;rft.au=Drame%2C+Mariama&amp;rft.au=Lhoest%2C+Quentin&amp;rft.au=Rush%2C+Alexander&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-Zell1994ch5.2-112"><span class="mw-cite-backlink">^ <a href="#cite_ref-Zell1994ch5.2_112-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Zell1994ch5.2_112-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFZell2003" class="citation book cs1 cs1-prop-foreign-lang-source">Zell A (2003). "chapter 5.2". <i>Simulation neuronaler Netze</i> [<i>Simulation of Neural Networks</i>] (in German) (1st ed.). Addison-Wesley. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3-89319-554-1" title="Special:BookSources/978-3-89319-554-1"><bdi>978-3-89319-554-1</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/249017987">249017987</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=chapter+5.2&amp;rft.btitle=Simulation+neuronaler+Netze&amp;rft.edition=1st&amp;rft.pub=Addison-Wesley&amp;rft.date=2003&amp;rft_id=info%3Aoclcnum%2F249017987&amp;rft.isbn=978-3-89319-554-1&amp;rft.aulast=Zell&amp;rft.aufirst=Andreas&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-Winston-113"><span class="mw-cite-backlink"><b><a href="#cite_ref-Winston_113-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation book cs1"><i>Artificial intelligence</i> (3rd ed.). Addison-Wesley Pub. Co. 1992. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-201-53377-4" title="Special:BookSources/0-201-53377-4"><bdi>0-201-53377-4</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Artificial+intelligence&amp;rft.edition=3rd&amp;rft.pub=Addison-Wesley+Pub.+Co&amp;rft.date=1992&amp;rft.isbn=0-201-53377-4&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-Abbod2007-114"><span class="mw-cite-backlink"><b><a href="#cite_ref-Abbod2007_114-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAbbod2007" class="citation journal cs1">Abbod MF (2007). "Application of Artificial Intelligence to the Management of Urological Cancer". <i>The Journal of Urology</i>. <b>178</b> (4): 1150–1156. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.juro.2007.05.122">10.1016/j.juro.2007.05.122</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/17698099">17698099</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+Journal+of+Urology&amp;rft.atitle=Application+of+Artificial+Intelligence+to+the+Management+of+Urological+Cancer&amp;rft.volume=178&amp;rft.issue=4&amp;rft.pages=1150-1156&amp;rft.date=2007&amp;rft_id=info%3Adoi%2F10.1016%2Fj.juro.2007.05.122&amp;rft_id=info%3Apmid%2F17698099&amp;rft.aulast=Abbod&amp;rft.aufirst=Maysam+F.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-DAWSON1998-115"><span class="mw-cite-backlink"><b><a href="#cite_ref-DAWSON1998_115-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDawson1998" class="citation journal cs1">Dawson CW (1998). <a rel="nofollow" class="external text" href="https://doi.org/10.1080%2F02626669809492102">"An artificial neural network approach to rainfall-runoff modelling"</a>. <i>Hydrological Sciences Journal</i>. <b>43</b> (1): 47–66. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/1998HydSJ..43...47D">1998HydSJ..43...47D</a>. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1080%2F02626669809492102">10.1080/02626669809492102</a></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Hydrological+Sciences+Journal&amp;rft.atitle=An+artificial+neural+network+approach+to+rainfall-runoff+modelling&amp;rft.volume=43&amp;rft.issue=1&amp;rft.pages=47-66&amp;rft.date=1998&amp;rft_id=info%3Adoi%2F10.1080%2F02626669809492102&amp;rft_id=info%3Abibcode%2F1998HydSJ..43...47D&amp;rft.aulast=Dawson&amp;rft.aufirst=Christian+W.&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1080%252F02626669809492102&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-116"><span class="mw-cite-backlink"><b><a href="#cite_ref-116">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20180826151959/http://www.cse.unsw.edu.au/~billw/mldict.html#activnfn">"The Machine Learning Dictionary"</a>. <i>cse.unsw.edu.au</i>. Archived from <a rel="nofollow" class="external text" href="http://www.cse.unsw.edu.au/~billw/mldict.html#activnfn">the original</a> on 26 August 2018<span class="reference-accessdate">. Retrieved <span class="nowrap">4 November</span> 2009</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=cse.unsw.edu.au&amp;rft.atitle=The+Machine+Learning+Dictionary&amp;rft_id=http%3A%2F%2Fwww.cse.unsw.edu.au%2F~billw%2Fmldict.html%23activnfn&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-flexible-117"><span class="mw-cite-backlink"><b><a href="#cite_ref-flexible_117-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCiresanUeli_MeierJonathan_MasciLuca_M._Gambardella2011" class="citation journal cs1">Ciresan D, Ueli Meier, Jonathan Masci, Luca M. Gambardella, Jurgen Schmidhuber (2011). <a rel="nofollow" class="external text" href="https://people.idsia.ch/~juergen/ijcai2011.pdf">"Flexible, High Performance Convolutional Neural Networks for Image Classification"</a> <span class="cs1-format">(PDF)</span>. <i>Proceedings of the Twenty-Second International Joint Conference on Artificial Intelligence-Volume Volume Two</i>. <b>2</b>: 1237–1242. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220405190128/https://people.idsia.ch/~juergen/ijcai2011.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 5 April 2022<span class="reference-accessdate">. 
Retrieved <span class="nowrap">7 July</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Proceedings+of+the+Twenty-Second+International+Joint+Conference+on+Artificial+Intelligence-Volume+Volume+Two&amp;rft.atitle=Flexible%2C+High+Performance+Convolutional+Neural+Networks+for+Image+Classification&amp;rft.volume=2&amp;rft.pages=1237-1242&amp;rft.date=2011&amp;rft.aulast=Ciresan&amp;rft.aufirst=Dan&amp;rft.au=Ueli+Meier&amp;rft.au=Jonathan+Masci&amp;rft.au=Luca+M.+Gambardella&amp;rft.au=Jurgen+Schmidhuber&amp;rft_id=https%3A%2F%2Fpeople.idsia.ch%2F~juergen%2Fijcai2011.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-Zell1994p73-118"><span class="mw-cite-backlink"><b><a href="#cite_ref-Zell1994p73_118-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFZell1994" class="citation book cs1 cs1-prop-foreign-lang-source">Zell A (1994). <i>Simulation Neuronaler Netze</i> [<i>Simulation of Neural Networks</i>] (in German) (1st ed.). Addison-Wesley. p. 73. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/3-89319-554-8" title="Special:BookSources/3-89319-554-8"><bdi>3-89319-554-8</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Simulation+Neuronaler+Netze&amp;rft.pages=73&amp;rft.edition=1st&amp;rft.pub=Addison-Wesley&amp;rft.date=1994&amp;rft.isbn=3-89319-554-8&amp;rft.aulast=Zell&amp;rft.aufirst=Andreas&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-119"><span class="mw-cite-backlink"><b><a href="#cite_ref-119">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMiljanovic2012" class="citation journal cs1">Miljanovic M (February–March 2012). <a rel="nofollow" class="external text" href="http://www.ijcse.com/docs/INDJCSE12-03-01-028.pdf">"Comparative analysis of Recurrent and Finite Impulse Response Neural Networks in Time Series Prediction"</a> <span class="cs1-format">(PDF)</span>. <i>Indian Journal of Computer and Engineering</i>. <b>3</b> (1). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519081156/http://www.ijcse.com/docs/INDJCSE12-03-01-028.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 19 May 2024<span class="reference-accessdate">. 
Retrieved <span class="nowrap">21 August</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Indian+Journal+of+Computer+and+Engineering&amp;rft.atitle=Comparative+analysis+of+Recurrent+and+Finite+Impulse+Response+Neural+Networks+in+Time+Series+Prediction&amp;rft.volume=3&amp;rft.issue=1&amp;rft.date=2012-02%2F2012-03&amp;rft.aulast=Miljanovic&amp;rft.aufirst=Milos&amp;rft_id=http%3A%2F%2Fwww.ijcse.com%2Fdocs%2FINDJCSE12-03-01-028.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-120"><span class="mw-cite-backlink"><b><a href="#cite_ref-120">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKelleherMac_NameeD'Arcy2020" class="citation book cs1">Kelleher JD, Mac Namee B, D'Arcy A (2020). "7-8". <i>Fundamentals of machine learning for predictive data analytics: algorithms, worked examples, and case studies</i> (2nd ed.). Cambridge, MA: The MIT Press. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-262-36110-1" title="Special:BookSources/978-0-262-36110-1"><bdi>978-0-262-36110-1</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/1162184998">1162184998</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=7-8&amp;rft.btitle=Fundamentals+of+machine+learning+for+predictive+data+analytics%3A+algorithms%2C+worked+examples%2C+and+case+studies&amp;rft.place=Cambridge%2C+MA&amp;rft.edition=2nd&amp;rft.pub=The+MIT+Press&amp;rft.date=2020&amp;rft_id=info%3Aoclcnum%2F1162184998&amp;rft.isbn=978-0-262-36110-1&amp;rft.aulast=Kelleher&amp;rft.aufirst=John+D.&amp;rft.au=Mac+Namee%2C+Brian&amp;rft.au=D%27Arcy%2C+Aoife&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-121"><span class="mw-cite-backlink"><b><a href="#cite_ref-121">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWei2019" class="citation arxiv cs1">Wei J (26 April 2019). "Forget the Learning Rate, Decay Loss". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1905.00094">1905.00094</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.LG">cs.LG</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Forget+the+Learning+Rate%2C+Decay+Loss&amp;rft.date=2019-04-26&amp;rft_id=info%3Aarxiv%2F1905.00094&amp;rft.aulast=Wei&amp;rft.aufirst=Jiakai&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-122"><span class="mw-cite-backlink"><b><a href="#cite_ref-122">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLiFuLiZhang2009" class="citation book cs1">Li Y, Fu Y, Li H, Zhang SW (1 June 2009). "The Improved Training Algorithm of Back Propagation Neural Network with Self-adaptive Learning Rate". <i>2009 International Conference on Computational Intelligence and Natural Computing</i>. Vol. 1. pp. 73–76. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FCINC.2009.111">10.1109/CINC.2009.111</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-7695-3645-3" title="Special:BookSources/978-0-7695-3645-3"><bdi>978-0-7695-3645-3</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:10557754">10557754</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=The+Improved+Training+Algorithm+of+Back+Propagation+Neural+Network+with+Self-adaptive+Learning+Rate&amp;rft.btitle=2009+International+Conference+on+Computational+Intelligence+and+Natural+Computing&amp;rft.pages=73-76&amp;rft.date=2009-06-01&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A10557754%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1109%2FCINC.2009.111&amp;rft.isbn=978-0-7695-3645-3&amp;rft.aulast=Li&amp;rft.aufirst=Y.&amp;rft.au=Fu%2C+Y.&amp;rft.au=Li%2C+H.&amp;rft.au=Zhang%2C+S.+W.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-123"><span class="mw-cite-backlink"><b><a href="#cite_ref-123">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHuangZhuSiew2006" class="citation journal cs1">Huang GB, Zhu QY, Siew CK (2006). "Extreme learning machine: theory and applications". <i>Neurocomputing</i>. <b>70</b> (1): 489–501. <a href="/wiki/CiteSeerX_(identifier)" class="mw-redirect" title="CiteSeerX (identifier)">CiteSeerX</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.3692">10.1.1.217.3692</a></span>. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.neucom.2005.12.126">10.1016/j.neucom.2005.12.126</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:116858">116858</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Neurocomputing&amp;rft.atitle=Extreme+learning+machine%3A+theory+and+applications&amp;rft.volume=70&amp;rft.issue=1&amp;rft.pages=489-501&amp;rft.date=2006&amp;rft_id=https%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.217.3692%23id-name%3DCiteSeerX&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A116858%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1016%2Fj.neucom.2005.12.126&amp;rft.aulast=Huang&amp;rft.aufirst=Guang-Bin&amp;rft.au=Zhu%2C+Qin-Yu&amp;rft.au=Siew%2C+Chee-Kheong&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-124"><span class="mw-cite-backlink"><b><a href="#cite_ref-124">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWidrow2013" class="citation journal cs1">Widrow B, et al. (2013). "The no-prop algorithm: A new learning algorithm for multilayer neural networks". <i>Neural Networks</i>. <b>37</b>: 182–188. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.neunet.2012.09.020">10.1016/j.neunet.2012.09.020</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/23140797">23140797</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Neural+Networks&amp;rft.atitle=The+no-prop+algorithm%3A+A+new+learning+algorithm+for+multilayer+neural+networks&amp;rft.volume=37&amp;rft.pages=182-188&amp;rft.date=2013&amp;rft_id=info%3Adoi%2F10.1016%2Fj.neunet.2012.09.020&amp;rft_id=info%3Apmid%2F23140797&amp;rft.aulast=Widrow&amp;rft.aufirst=Bernard&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-125"><span class="mw-cite-backlink"><b><a href="#cite_ref-125">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFOllivierCharpiat2015" class="citation arxiv cs1">Ollivier Y, Charpiat G (2015). "Training recurrent networks without backtracking". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1507.07680">1507.07680</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.NE">cs.NE</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Training+recurrent+networks+without+backtracking&amp;rft.date=2015&amp;rft_id=info%3Aarxiv%2F1507.07680&amp;rft.aulast=Ollivier&amp;rft.aufirst=Yann&amp;rft.au=Charpiat%2C+Guillaume&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-RBMTRAIN-126"><span class="mw-cite-backlink"><b><a href="#cite_ref-RBMTRAIN_126-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHinton2010" class="citation journal cs1">Hinton GE (2010). <a rel="nofollow" class="external text" href="https://www.researchgate.net/publication/221166159">"A Practical Guide to Training Restricted Boltzmann Machines"</a>. <i>Tech. Rep. UTML TR 2010-003</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210509123211/https://www.researchgate.net/publication/221166159_A_brief_introduction_to_Weightless_Neural_Systems">Archived</a> from the original on 9 May 2021<span class="reference-accessdate">. Retrieved <span class="nowrap">27 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Tech.+Rep.+UTML+TR+2010-003&amp;rft.atitle=A+Practical+Guide+to+Training+Restricted+Boltzmann+Machines&amp;rft.date=2010&amp;rft.aulast=Hinton&amp;rft.aufirst=G.+E.&amp;rft_id=https%3A%2F%2Fwww.researchgate.net%2Fpublication%2F221166159&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-127"><span class="mw-cite-backlink"><b><a href="#cite_ref-127">^</a></b></span> <span class="reference-text">ESANN. 2009.<sup class="noprint Inline-Template" style="white-space:nowrap;">[<i><a href="/wiki/Wikipedia:Citing_sources#What_information_to_include" title="Wikipedia:Citing sources"><span title="A complete citation is needed. (June 2022)">full citation needed</span></a></i>]</sup></span> </li> <li id="cite_note-128"><span class="mw-cite-backlink"><b><a href="#cite_ref-128">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBernard2021" class="citation book cs1">Bernard E (2021). <a rel="nofollow" class="external text" href="https://www.wolfram.com/language/introduction-machine-learning/machine-learning-paradigms/#p-9"><i>Introduction to machine learning</i></a>. Champaign: Wolfram Media. p. 9. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-57955-048-6" title="Special:BookSources/978-1-57955-048-6"><bdi>978-1-57955-048-6</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519081126/https://www.wolfram.com/language/introduction-machine-learning/machine-learning-paradigms/#p-9">Archived</a> from the original on 19 May 2024<span class="reference-accessdate">. 
Retrieved <span class="nowrap">22 March</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Introduction+to+machine+learning&amp;rft.place=Champaign&amp;rft.pages=9&amp;rft.pub=Wolfram+Media&amp;rft.date=2021&amp;rft.isbn=978-1-57955-048-6&amp;rft.aulast=Bernard&amp;rft.aufirst=Etienne&amp;rft_id=https%3A%2F%2Fwww.wolfram.com%2Flanguage%2Fintroduction-machine-learning%2Fmachine-learning-paradigms%2F%23p-9&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-129"><span class="mw-cite-backlink"><b><a href="#cite_ref-129">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBernard2021" class="citation book cs1">Bernard E (2021). <a rel="nofollow" class="external text" href="https://www.wolfram.com/language/introduction-machine-learning/machine-learning-paradigms/#p-9"><i>Introduction to machine learning</i></a>. Champaign: Wolfram Media. p. 12. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-57955-048-6" title="Special:BookSources/978-1-57955-048-6"><bdi>978-1-57955-048-6</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519081126/https://www.wolfram.com/language/introduction-machine-learning/machine-learning-paradigms/#p-9">Archived</a> from the original on 19 May 2024<span class="reference-accessdate">. Retrieved <span class="nowrap">22 March</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Introduction+to+machine+learning&amp;rft.place=Champaign&amp;rft.pages=12&amp;rft.pub=Wolfram+Media&amp;rft.date=2021&amp;rft.isbn=978-1-57955-048-6&amp;rft.aulast=Bernard&amp;rft.aufirst=Etienne&amp;rft_id=https%3A%2F%2Fwww.wolfram.com%2Flanguage%2Fintroduction-machine-learning%2Fmachine-learning-paradigms%2F%23p-9&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-130"><span class="mw-cite-backlink"><b><a href="#cite_ref-130">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBernard2021" class="citation book cs1">Bernard E (2021). <a rel="nofollow" class="external text" href="https://www.wolfram.com/language/introduction-machine-learning/"><i>Introduction to Machine Learning</i></a>. Wolfram Media Inc. p. 9. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-57955-048-6" title="Special:BookSources/978-1-57955-048-6"><bdi>978-1-57955-048-6</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519081126/https://www.wolfram.com/language/introduction-machine-learning/">Archived</a> from the original on 19 May 2024<span class="reference-accessdate">. 
Retrieved <span class="nowrap">28 July</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Introduction+to+Machine+Learning&amp;rft.pages=9&amp;rft.pub=Wolfram+Media+Inc&amp;rft.date=2021&amp;rft.isbn=978-1-57955-048-6&amp;rft.aulast=Bernard&amp;rft.aufirst=Etienne&amp;rft_id=https%3A%2F%2Fwww.wolfram.com%2Flanguage%2Fintroduction-machine-learning%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-131"><span class="mw-cite-backlink"><b><a href="#cite_ref-131">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFOjhaAbrahamSnášel2017" class="citation journal cs1">Ojha VK, Abraham A, Snášel V (1 April 2017). "Metaheuristic design of feedforward neural networks: A review of two decades of research". <i>Engineering Applications of Artificial Intelligence</i>. <b>60</b>: 97–116. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1705.05584">1705.05584</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2017arXiv170505584O">2017arXiv170505584O</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.engappai.2017.01.013">10.1016/j.engappai.2017.01.013</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:27910748">27910748</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Engineering+Applications+of+Artificial+Intelligence&amp;rft.atitle=Metaheuristic+design+of+feedforward+neural+networks%3A+A+review+of+two+decades+of+research&amp;rft.volume=60&amp;rft.pages=97-116&amp;rft.date=2017-04-01&amp;rft_id=info%3Aarxiv%2F1705.05584&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A27910748%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1016%2Fj.engappai.2017.01.013&amp;rft_id=info%3Abibcode%2F2017arXiv170505584O&amp;rft.aulast=Ojha&amp;rft.aufirst=Varun+Kumar&amp;rft.au=Abraham%2C+Ajith&amp;rft.au=Sn%C3%A1%C5%A1el%2C+V%C3%A1clav&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-132"><span class="mw-cite-backlink"><b><a href="#cite_ref-132">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDominic,_S.Das,_R.Whitley,_D.Anderson,_C.1991" class="citation conference cs1">Dominic, S., Das, R., Whitley, D., Anderson, C. (July 1991). <span class="id-lock-registration" title="Free registration required"><a rel="nofollow" class="external text" href="https://archive.org/details/ijcnn91seattlein01ieee">"Genetic reinforcement learning for neural networks"</a></span>. <i>IJCNN-91-Seattle International Joint Conference on Neural Networks</i>. IJCNN-91-Seattle International Joint Conference on Neural Networks. Seattle, Washington, US: IEEE. pp. 71–76. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FIJCNN.1991.155315">10.1109/IJCNN.1991.155315</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-7803-0164-1" title="Special:BookSources/0-7803-0164-1"><bdi>0-7803-0164-1</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=conference&amp;rft.atitle=Genetic+reinforcement+learning+for+neural+networks&amp;rft.btitle=IJCNN-91-Seattle+International+Joint+Conference+on+Neural+Networks&amp;rft.place=Seattle%2C+Washington%2C+US&amp;rft.pages=71-76&amp;rft.pub=IEEE&amp;rft.date=1991-07&amp;rft_id=info%3Adoi%2F10.1109%2FIJCNN.1991.155315&amp;rft.isbn=0-7803-0164-1&amp;rft.au=Dominic%2C+S.&amp;rft.au=Das%2C+R.&amp;rft.au=Whitley%2C+D.&amp;rft.au=Anderson%2C+C.&amp;rft_id=https%3A%2F%2Farchive.org%2Fdetails%2Fijcnn91seattlein01ieee&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-133"><span class="mw-cite-backlink"><b><a href="#cite_ref-133">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHoskinsHimmelblau,_D.M.1992" class="citation journal cs1">Hoskins J, Himmelblau, D.M. (1992). "Process control via artificial neural networks and reinforcement learning". <i>Computers &amp; Chemical Engineering</i>. <b>16</b> (4): 241–251. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2F0098-1354%2892%2980045-B">10.1016/0098-1354(92)80045-B</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Computers+%26+Chemical+Engineering&amp;rft.atitle=Process+control+via+artificial+neural+networks+and+reinforcement+learning&amp;rft.volume=16&amp;rft.issue=4&amp;rft.pages=241-251&amp;rft.date=1992&amp;rft_id=info%3Adoi%2F10.1016%2F0098-1354%2892%2980045-B&amp;rft.aulast=Hoskins&amp;rft.aufirst=J.C.&amp;rft.au=Himmelblau%2C+D.M.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-134"><span class="mw-cite-backlink"><b><a href="#cite_ref-134">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBertsekasTsitsiklis1996" class="citation book cs1">Bertsekas D, Tsitsiklis J (1996). <a rel="nofollow" class="external text" href="https://papers.nips.cc/paper/4741-deep-neural-networks-segment-neuronal-membranes-in-electron-microscopy-images"><i>Neuro-dynamic programming</i></a>. Athena Scientific. p. 512. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-886529-10-6" title="Special:BookSources/978-1-886529-10-6"><bdi>978-1-886529-10-6</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170629172039/http://papers.nips.cc/paper/4741-deep-neural-networks-segment-neuronal-membranes-in-electron-microscopy-images">Archived</a> from the original on 29 June 2017<span class="reference-accessdate">. 
Retrieved <span class="nowrap">17 June</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Neuro-dynamic+programming&amp;rft.pages=512&amp;rft.pub=Athena+Scientific&amp;rft.date=1996&amp;rft.isbn=978-1-886529-10-6&amp;rft.aulast=Bertsekas&amp;rft.aufirst=D.P.&amp;rft.au=Tsitsiklis%2C+J.N.&amp;rft_id=https%3A%2F%2Fpapers.nips.cc%2Fpaper%2F4741-deep-neural-networks-segment-neuronal-membranes-in-electron-microscopy-images&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-135"><span class="mw-cite-backlink"><b><a href="#cite_ref-135">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSecomandi2000" class="citation journal cs1">Secomandi N (2000). "Comparing neuro-dynamic programming algorithms for the vehicle routing problem with stochastic demands". <i>Computers &amp; Operations Research</i>. <b>27</b> (11–12): 1201–1225. <a href="/wiki/CiteSeerX_(identifier)" class="mw-redirect" title="CiteSeerX (identifier)">CiteSeerX</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.392.4034">10.1.1.392.4034</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2FS0305-0548%2899%2900146-X">10.1016/S0305-0548(99)00146-X</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Computers+%26+Operations+Research&amp;rft.atitle=Comparing+neuro-dynamic+programming+algorithms+for+the+vehicle+routing+problem+with+stochastic+demands&amp;rft.volume=27&amp;rft.issue=11%E2%80%9312&amp;rft.pages=1201-1225&amp;rft.date=2000&amp;rft_id=https%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.392.4034%23id-name%3DCiteSeerX&amp;rft_id=info%3Adoi%2F10.1016%2FS0305-0548%2899%2900146-X&amp;rft.aulast=Secomandi&amp;rft.aufirst=Nicola&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-136"><span class="mw-cite-backlink"><b><a href="#cite_ref-136">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFde_Rigo,_D.Rizzoli,_A._E.Soncini-Sessa,_R.Weber,_E.2001" class="citation conference cs1">de Rigo, D., Rizzoli, A. E., Soncini-Sessa, R., Weber, E., Zenesi, P. (2001). <a rel="nofollow" class="external text" href="http://www.mssanz.org.au/MODSIM01/MODSIM01.htm">"Neuro-dynamic programming for the efficient management of reservoir networks"</a>. <i>Proceedings of MODSIM 2001, International Congress on Modelling and Simulation</i>. MODSIM 2001, International Congress on Modelling and Simulation. Canberra, Australia: Modelling and Simulation Society of Australia and New Zealand. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.5281%2Fzenodo.7481">10.5281/zenodo.7481</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-86740-525-2" title="Special:BookSources/0-86740-525-2"><bdi>0-86740-525-2</bdi></a>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20130807223658/http://mssanz.org.au/MODSIM01/MODSIM01.htm">Archived</a> from the original on 7 August 2013<span class="reference-accessdate">. Retrieved <span class="nowrap">29 July</span> 2013</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=conference&amp;rft.atitle=Neuro-dynamic+programming+for+the+efficient+management+of+reservoir+networks&amp;rft.btitle=Proceedings+of+MODSIM+2001%2C+International+Congress+on+Modelling+and+Simulation&amp;rft.place=Canberra%2C+Australia&amp;rft.pub=Modelling+and+Simulation+Society+of+Australia+and+New+Zealand&amp;rft.date=2001&amp;rft_id=info%3Adoi%2F10.5281%2Fzenodo.7481&amp;rft.isbn=0-86740-525-2&amp;rft.au=de+Rigo%2C+D.&amp;rft.au=Rizzoli%2C+A.+E.&amp;rft.au=Soncini-Sessa%2C+R.&amp;rft.au=Weber%2C+E.&amp;rft.au=Zenesi%2C+P.&amp;rft_id=http%3A%2F%2Fwww.mssanz.org.au%2FMODSIM01%2FMODSIM01.htm&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-137"><span class="mw-cite-backlink"><b><a href="#cite_ref-137">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDamas,_M.Salmeron,_M.Diaz,_A.Ortega,_J.2000" class="citation conference cs1">Damas, M., Salmeron, M., Diaz, A., Ortega, J., Prieto, A., Olivares, G. (2000). "Genetic algorithms and neuro-dynamic programming: application to water supply networks". <i>Proceedings of 2000 Congress on Evolutionary Computation</i>. 2000 Congress on Evolutionary Computation. Vol. 1. La Jolla, California, US: IEEE. pp. 7–14. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FCEC.2000.870269">10.1109/CEC.2000.870269</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-7803-6375-2" title="Special:BookSources/0-7803-6375-2"><bdi>0-7803-6375-2</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=conference&amp;rft.atitle=Genetic+algorithms+and+neuro-dynamic+programming%3A+application+to+water+supply+networks&amp;rft.btitle=Proceedings+of+2000+Congress+on+Evolutionary+Computation&amp;rft.place=La+Jolla%2C+California%2C+US&amp;rft.pages=7-14&amp;rft.pub=IEEE&amp;rft.date=2000&amp;rft_id=info%3Adoi%2F10.1109%2FCEC.2000.870269&amp;rft.isbn=0-7803-6375-2&amp;rft.au=Damas%2C+M.&amp;rft.au=Salmeron%2C+M.&amp;rft.au=Diaz%2C+A.&amp;rft.au=Ortega%2C+J.&amp;rft.au=Prieto%2C+A.&amp;rft.au=Olivares%2C+G.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-138"><span class="mw-cite-backlink"><b><a href="#cite_ref-138">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDengFerris,_M.C.2008" class="citation book cs1">Deng G, Ferris, M.C. (2008). "Neuro-dynamic programming for fractionated radiotherapy planning". <i>Optimization in Medicine</i>. Springer Optimization and Its Applications. Vol. 12. pp. 47–70. 
<a href="/wiki/CiteSeerX_(identifier)" class="mw-redirect" title="CiteSeerX (identifier)">CiteSeerX</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.137.8288">10.1.1.137.8288</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-0-387-73299-2_3">10.1007/978-0-387-73299-2_3</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-387-73298-5" title="Special:BookSources/978-0-387-73298-5"><bdi>978-0-387-73298-5</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Neuro-dynamic+programming+for+fractionated+radiotherapy+planning&amp;rft.btitle=Optimization+in+Medicine&amp;rft.series=Springer+Optimization+and+Its+Applications&amp;rft.pages=47-70&amp;rft.date=2008&amp;rft_id=https%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.137.8288%23id-name%3DCiteSeerX&amp;rft_id=info%3Adoi%2F10.1007%2F978-0-387-73299-2_3&amp;rft.isbn=978-0-387-73298-5&amp;rft.aulast=Deng&amp;rft.aufirst=Geng&amp;rft.au=Ferris%2C+M.C.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-139"><span class="mw-cite-backlink"><b><a href="#cite_ref-139">^</a></b></span> <span class="reference-text">Bozinovski, S. (1982). "A self-learning system using secondary reinforcement". In R. Trappl (ed.) Cybernetics and Systems Research: Proceedings of the Sixth European Meeting on Cybernetics and Systems Research. North Holland. pp. 397–402. <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-444-86488-8" title="Special:BookSources/978-0-444-86488-8">978-0-444-86488-8</a>.</span> </li> <li id="cite_note-140"><span class="mw-cite-backlink"><b><a href="#cite_ref-140">^</a></b></span> <span class="reference-text">Bozinovski, S. (2014) "<a rel="nofollow" class="external text" href="https://core.ac.uk/download/pdf/81973924.pdf">Modeling mechanisms of cognition-emotion interaction in artificial neural networks, since 1981</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190323204838/https://core.ac.uk/download/pdf/81973924.pdf">Archived</a> 23 March 2019 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>." Procedia Computer Science p. 255-263</span> </li> <li id="cite_note-141"><span class="mw-cite-backlink"><b><a href="#cite_ref-141">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBozinovskiBozinovska2001" class="citation journal cs1">Bozinovski S, Bozinovska L (2001). "Self-learning agents: A connectionist theory of emotion based on crossbar value judgment". <i>Cybernetics and Systems</i>. <b>32</b> (6): 637–667. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1080%2F01969720118145">10.1080/01969720118145</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:8944741">8944741</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Cybernetics+and+Systems&amp;rft.atitle=Self-learning+agents%3A+A+connectionist+theory+of+emotion+based+on+crossbar+value+judgment&amp;rft.volume=32&amp;rft.issue=6&amp;rft.pages=637-667&amp;rft.date=2001&amp;rft_id=info%3Adoi%2F10.1080%2F01969720118145&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A8944741%23id-name%3DS2CID&amp;rft.aulast=Bozinovski&amp;rft.aufirst=Stevo&amp;rft.au=Bozinovska%2C+Liljana&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-142"><span class="mw-cite-backlink"><b><a href="#cite_ref-142">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSalimansHoChenSidor2017" class="citation arxiv cs1">Salimans T, Ho J, Chen X, Sidor S, Sutskever I (7 September 2017). "Evolution Strategies as a Scalable Alternative to Reinforcement Learning". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1703.03864">1703.03864</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/stat.ML">stat.ML</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Evolution+Strategies+as+a+Scalable+Alternative+to+Reinforcement+Learning&amp;rft.date=2017-09-07&amp;rft_id=info%3Aarxiv%2F1703.03864&amp;rft.aulast=Salimans&amp;rft.aufirst=Tim&amp;rft.au=Ho%2C+Jonathan&amp;rft.au=Chen%2C+Xi&amp;rft.au=Sidor%2C+Szymon&amp;rft.au=Sutskever%2C+Ilya&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-143"><span class="mw-cite-backlink"><b><a href="#cite_ref-143">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSuchMadhavanContiLehman2018" class="citation arxiv cs1">Such FP, Madhavan V, Conti E, Lehman J, Stanley KO, Clune J (20 April 2018). "Deep Neuroevolution: Genetic Algorithms Are a Competitive Alternative for Training Deep Neural Networks for Reinforcement Learning". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1712.06567">1712.06567</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.NE">cs.NE</a>].</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=preprint&amp;rft.jtitle=arXiv&amp;rft.atitle=Deep+Neuroevolution%3A+Genetic+Algorithms+Are+a+Competitive+Alternative+for+Training+Deep+Neural+Networks+for+Reinforcement+Learning&amp;rft.date=2018-04-20&amp;rft_id=info%3Aarxiv%2F1712.06567&amp;rft.aulast=Such&amp;rft.aufirst=Felipe+Petroski&amp;rft.au=Madhavan%2C+Vashisht&amp;rft.au=Conti%2C+Edoardo&amp;rft.au=Lehman%2C+Joel&amp;rft.au=Stanley%2C+Kenneth+O.&amp;rft.au=Clune%2C+Jeff&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-144"><span class="mw-cite-backlink"><b><a href="#cite_ref-144">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.science.org/content/article/artificial-intelligence-can-evolve-solve-problems">"Artificial intelligence can 'evolve' to solve problems"</a>. <i>Science | AAAS</i>. 10 January 2018. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20211209231714/https://www.science.org/content/article/artificial-intelligence-can-evolve-solve-problems">Archived</a> from the original on 9 December 2021<span class="reference-accessdate">. Retrieved <span class="nowrap">7 February</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Science+%7C+AAAS&amp;rft.atitle=Artificial+intelligence+can+%27evolve%27+to+solve+problems&amp;rft.date=2018-01-10&amp;rft_id=https%3A%2F%2Fwww.science.org%2Fcontent%2Farticle%2Fartificial-intelligence-can-evolve-solve-problems&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-145"><span class="mw-cite-backlink"><b><a href="#cite_ref-145">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFTurchetti2004" class="citation cs2">Turchetti C (2004), <i>Stochastic Models of Neural Networks</i>, Frontiers in artificial intelligence and applications: Knowledge-based intelligent engineering systems, vol. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/26873952">26873952</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Biology+Open&amp;rft.atitle=Changes+in+cell+shape+are+correlated+with+metastatic+potential+in+murine&amp;rft.volume=5&amp;rft.issue=3&amp;rft.pages=289-299&amp;rft.date=2016&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC4810736%23id-name%3DPMC&amp;rft_id=info%3Apmid%2F26873952&amp;rft_id=info%3Adoi%2F10.1242%2Fbio.013409&amp;rft.aulast=Lyons&amp;rft.aufirst=Samanthe&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC4810736&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-188"><span class="mw-cite-backlink"><b><a href="#cite_ref-188">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFNabianMeidani2017" class="citation journal cs1">Nabian MA, Meidani H (28 August 2017). "Deep Learning for Accelerated Reliability Analysis of Infrastructure Networks". <i>Computer-Aided Civil and Infrastructure Engineering</i>. <b>33</b> (6): 443–458. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1708.08551">1708.08551</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2017arXiv170808551N">2017arXiv170808551N</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1111%2Fmice.12359">10.1111/mice.12359</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:36661983">36661983</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Computer-Aided+Civil+and+Infrastructure+Engineering&amp;rft.atitle=Deep+Learning+for+Accelerated+Reliability+Analysis+of+Infrastructure+Networks&amp;rft.volume=33&amp;rft.issue=6&amp;rft.pages=443-458&amp;rft.date=2017-08-28&amp;rft_id=info%3Aarxiv%2F1708.08551&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A36661983%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1111%2Fmice.12359&amp;rft_id=info%3Abibcode%2F2017arXiv170808551N&amp;rft.aulast=Nabian&amp;rft.aufirst=Mohammad+Amin&amp;rft.au=Meidani%2C+Hadi&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-189"><span class="mw-cite-backlink"><b><a href="#cite_ref-189">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFNabianMeidani2018" class="citation journal cs1">Nabian MA, Meidani H (2018). <a rel="nofollow" class="external text" href="https://trid.trb.org/view/1496617">"Accelerating Stochastic Assessment of Post-Earthquake Transportation Network Connectivity via Machine-Learning-Based Surrogates"</a>. 
<i>Transportation Research Board 97th Annual Meeting</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180309120108/https://trid.trb.org/view/1496617">Archived</a> from the original on 9 March 2018<span class="reference-accessdate">. Retrieved <span class="nowrap">14 March</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Transportation+Research+Board+97th+Annual+Meeting&amp;rft.atitle=Accelerating+Stochastic+Assessment+of+Post-Earthquake+Transportation+Network+Connectivity+via+Machine-Learning-Based+Surrogates&amp;rft.date=2018&amp;rft.aulast=Nabian&amp;rft.aufirst=Mohammad+Amin&amp;rft.au=Meidani%2C+Hadi&amp;rft_id=https%3A%2F%2Ftrid.trb.org%2Fview%2F1496617&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-190"><span class="mw-cite-backlink"><b><a href="#cite_ref-190">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDíazBrotonsTomás2018" class="citation journal cs1">Díaz E, Brotons V, Tomás R (September 2018). <a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.sandf.2018.08.001">"Use of artificial neural networks to predict 3-D elastic settlement of foundations on soils with inclined bedrock"</a>. <i>Soils and Foundations</i>. <b>58</b> (6): 1414–1422. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2018SoFou..58.1414D">2018SoFou..58.1414D</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.sandf.2018.08.001">10.1016/j.sandf.2018.08.001</a></span>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://hdl.handle.net/10045%2F81208">10045/81208</a></span>. 
<a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0038-0806">0038-0806</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Soils+and+Foundations&amp;rft.atitle=Use+of+artificial+neural+networks+to+predict+3-D+elastic+settlement+of+foundations+on+soils+with+inclined+bedrock&amp;rft.volume=58&amp;rft.issue=6&amp;rft.pages=1414-1422&amp;rft.date=2018-09&amp;rft_id=info%3Ahdl%2F10045%2F81208&amp;rft.issn=0038-0806&amp;rft_id=info%3Adoi%2F10.1016%2Fj.sandf.2018.08.001&amp;rft_id=info%3Abibcode%2F2018SoFou..58.1414D&amp;rft.aulast=D%C3%ADaz&amp;rft.aufirst=E.&amp;rft.au=Brotons%2C+V.&amp;rft.au=Tom%C3%A1s%2C+R.&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1016%252Fj.sandf.2018.08.001&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-191"><span class="mw-cite-backlink"><b><a href="#cite_ref-191">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFTayebiyanMohammadGhazaliMashohor" class="citation journal cs1">Tayebiyan A, Mohammad TA, Ghazali AH, Mashohor S. <a rel="nofollow" class="external text" href="http://www.pertanika.upm.edu.my/pjtas/browse/regular-issue?article=JST-0566-2015">"Artificial Neural Network for Modelling Rainfall-Runoff"</a>. <i>Pertanika Journal of Science &amp; Technology</i>. <b>24</b> (2): 319–330. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230517014047/http://www.pertanika.upm.edu.my/pjtas/browse/regular-issue?article=JST-0566-2015">Archived</a> from the original on 17 May 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">17 May</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Pertanika+Journal+of+Science+%26+Technology&amp;rft.atitle=Artificial+Neural+Network+for+Modelling+Rainfall-Runoff&amp;rft.volume=24&amp;rft.issue=2&amp;rft.pages=319-330&amp;rft.aulast=Tayebiyan&amp;rft.aufirst=A.&amp;rft.au=Mohammad%2C+T.+A.&amp;rft.au=Ghazali%2C+A.+H.&amp;rft.au=Mashohor%2C+S.&amp;rft_id=http%3A%2F%2Fwww.pertanika.upm.edu.my%2Fpjtas%2Fbrowse%2Fregular-issue%3Farticle%3DJST-0566-2015&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-192"><span class="mw-cite-backlink"><b><a href="#cite_ref-192">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGovindaraju2000" class="citation journal cs1">Govindaraju RS (1 April 2000). "Artificial Neural Networks in Hydrology. I: Preliminary Concepts". <i>Journal of Hydrologic Engineering</i>. <b>5</b> (2): 115–123. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1061%2F%28ASCE%291084-0699%282000%295%3A2%28115%29">10.1061/(ASCE)1084-0699(2000)5:2(115)</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Journal+of+Hydrologic+Engineering&amp;rft.atitle=Artificial+Neural+Networks+in+Hydrology.+I%3A+Preliminary+Concepts&amp;rft.volume=5&amp;rft.issue=2&amp;rft.pages=115-123&amp;rft.date=2000-04-01&amp;rft_id=info%3Adoi%2F10.1061%2F%28ASCE%291084-0699%282000%295%3A2%28115%29&amp;rft.aulast=Govindaraju&amp;rft.aufirst=Rao+S.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-193"><span class="mw-cite-backlink"><b><a href="#cite_ref-193">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGovindaraju2000" class="citation journal cs1">Govindaraju RS (1 April 2000). "Artificial Neural Networks in Hydrology. II: Hydrologic Applications". <i>Journal of Hydrologic Engineering</i>. <b>5</b> (2): 124–137. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1061%2F%28ASCE%291084-0699%282000%295%3A2%28124%29">10.1061/(ASCE)1084-0699(2000)5:2(124)</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Journal+of+Hydrologic+Engineering&amp;rft.atitle=Artificial+Neural+Networks+in+Hydrology.+II%3A+Hydrologic+Applications&amp;rft.volume=5&amp;rft.issue=2&amp;rft.pages=124-137&amp;rft.date=2000-04-01&amp;rft_id=info%3Adoi%2F10.1061%2F%28ASCE%291084-0699%282000%295%3A2%28124%29&amp;rft.aulast=Govindaraju&amp;rft.aufirst=Rao+S.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-194"><span class="mw-cite-backlink"><b><a href="#cite_ref-194">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFPeresIuppaCavallaroCancelliere2015" class="citation journal cs1">Peres DJ, Iuppa C, Cavallaro L, Cancelliere A, Foti E (1 October 2015). "Significant wave height record extension by neural networks and reanalysis wind data". <i>Ocean Modelling</i>. <b>94</b>: 128–140. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2015OcMod..94..128P">2015OcMod..94..128P</a>. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.ocemod.2015.08.002">10.1016/j.ocemod.2015.08.002</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Ocean+Modelling&amp;rft.atitle=Significant+wave+height+record+extension+by+neural+networks+and+reanalysis+wind+data&amp;rft.volume=94&amp;rft.pages=128-140&amp;rft.date=2015-10-01&amp;rft_id=info%3Adoi%2F10.1016%2Fj.ocemod.2015.08.002&amp;rft_id=info%3Abibcode%2F2015OcMod..94..128P&amp;rft.aulast=Peres&amp;rft.aufirst=D.+J.&amp;rft.au=Iuppa%2C+C.&amp;rft.au=Cavallaro%2C+L.&amp;rft.au=Cancelliere%2C+A.&amp;rft.au=Foti%2C+E.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-195"><span class="mw-cite-backlink"><b><a href="#cite_ref-195">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDwarakishRakshithNatesan2013" class="citation journal cs1">Dwarakish GS, Rakshith S, Natesan U (2013). <a rel="nofollow" class="external text" href="http://www.ciitresearch.org/dl/index.php/aiml/article/view/AIML072013007">"Review on Applications of Neural Network in Coastal Engineering"</a>. <i>Artificial Intelligent Systems and Machine Learning</i>. <b>5</b> (7): 324–331. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170815185634/http://www.ciitresearch.org/dl/index.php/aiml/article/view/AIML072013007">Archived</a> from the original on 15 August 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">5 July</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Artificial+Intelligent+Systems+and+Machine+Learning&amp;rft.atitle=Review+on+Applications+of+Neural+Network+in+Coastal+Engineering&amp;rft.volume=5&amp;rft.issue=7&amp;rft.pages=324-331&amp;rft.date=2013&amp;rft.aulast=Dwarakish&amp;rft.aufirst=G.+S.&amp;rft.au=Rakshith%2C+Shetty&amp;rft.au=Natesan%2C+Usha&amp;rft_id=http%3A%2F%2Fwww.ciitresearch.org%2Fdl%2Findex.php%2Faiml%2Farticle%2Fview%2FAIML072013007&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-196"><span class="mw-cite-backlink"><b><a href="#cite_ref-196">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFErminiCataniCasagli2005" class="citation journal cs1">Ermini L, Catani F, Casagli N (1 March 2005). "Artificial Neural Networks applied to landslide susceptibility assessment". <i>Geomorphology</i>. Geomorphological hazard and human impact in mountain environments. <b>66</b> (1): 327–343. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2005Geomo..66..327E">2005Geomo..66..327E</a>. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.geomorph.2004.09.025">10.1016/j.geomorph.2004.09.025</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Geomorphology&amp;rft.atitle=Artificial+Neural+Networks+applied+to+landslide+susceptibility+assessment&amp;rft.volume=66&amp;rft.issue=1&amp;rft.pages=327-343&amp;rft.date=2005-03-01&amp;rft_id=info%3Adoi%2F10.1016%2Fj.geomorph.2004.09.025&amp;rft_id=info%3Abibcode%2F2005Geomo..66..327E&amp;rft.aulast=Ermini&amp;rft.aufirst=Leonardo&amp;rft.au=Catani%2C+Filippo&amp;rft.au=Casagli%2C+Nicola&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-197"><span class="mw-cite-backlink"><b><a href="#cite_ref-197">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFNixZhang2017" class="citation book cs1">Nix R, Zhang J (May 2017). "Classification of Android apps and malware using deep neural networks". <i>2017 International Joint Conference on Neural Networks (IJCNN)</i>. pp. 1871–1878. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FIJCNN.2017.7966078">10.1109/IJCNN.2017.7966078</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-5090-6182-2" title="Special:BookSources/978-1-5090-6182-2"><bdi>978-1-5090-6182-2</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:8838479">8838479</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Classification+of+Android+apps+and+malware+using+deep+neural+networks&amp;rft.btitle=2017+International+Joint+Conference+on+Neural+Networks+%28IJCNN%29&amp;rft.pages=1871-1878&amp;rft.date=2017-05&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A8838479%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1109%2FIJCNN.2017.7966078&amp;rft.isbn=978-1-5090-6182-2&amp;rft.aulast=Nix&amp;rft.aufirst=R.&amp;rft.au=Zhang%2C+J.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-198"><span class="mw-cite-backlink"><b><a href="#cite_ref-198">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20190714201955/http://www.sysnet.ucsd.edu/projects/url/">"Detecting Malicious URLs"</a>. <i>The systems and networking group at UCSD</i>. Archived from <a rel="nofollow" class="external text" href="http://www.sysnet.ucsd.edu/projects/url/">the original</a> on 14 July 2019<span class="reference-accessdate">. 
Retrieved <span class="nowrap">15 February</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=The+systems+and+networking+group+at+UCSD&amp;rft.atitle=Detecting+Malicious+URLs&amp;rft_id=http%3A%2F%2Fwww.sysnet.ucsd.edu%2Fprojects%2Furl%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-199"><span class="mw-cite-backlink"><b><a href="#cite_ref-199">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHomayounAhmadzadehHashemiDehghantanha2018" class="citation cs2">Homayoun S, Ahmadzadeh M, Hashemi S, Dehghantanha A, Khayami R (2018), Dehghantanha A, Conti M, Dargahi T (eds.), "BoTShark: A Deep Learning Approach for Botnet Traffic Detection", <i>Cyber Threat Intelligence</i>, Advances in Information Security, vol. 70, Springer International Publishing, pp. 137–153, <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-3-319-73951-9_7">10.1007/978-3-319-73951-9_7</a>, <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3-319-73951-9" title="Special:BookSources/978-3-319-73951-9"><bdi>978-3-319-73951-9</bdi></a></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Cyber+Threat+Intelligence&amp;rft.atitle=BoTShark%3A+A+Deep+Learning+Approach+for+Botnet+Traffic+Detection&amp;rft.volume=70&amp;rft.pages=137-153&amp;rft.date=2018&amp;rft_id=info%3Adoi%2F10.1007%2F978-3-319-73951-9_7&amp;rft.isbn=978-3-319-73951-9&amp;rft.aulast=Homayoun&amp;rft.aufirst=Sajad&amp;rft.au=Ahmadzadeh%2C+Marzieh&amp;rft.au=Hashemi%2C+Sattar&amp;rft.au=Dehghantanha%2C+Ali&amp;rft.au=Khayami%2C+Raouf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-200"><span class="mw-cite-backlink"><b><a href="#cite_ref-200">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGhoshReilly1994" class="citation book cs1">Ghosh, Reilly (January 1994). "Credit card fraud detection with a neural-network". <i>Proceedings of the Twenty-Seventh Hawaii International Conference on System Sciences HICSS-94</i>. Vol. 3. pp. 621–630. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FHICSS.1994.323314">10.1109/HICSS.1994.323314</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-8186-5090-1" title="Special:BookSources/978-0-8186-5090-1"><bdi>978-0-8186-5090-1</bdi></a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:13260377">13260377</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Credit+card+fraud+detection+with+a+neural-network&amp;rft.btitle=Proceedings+of+the+Twenty-Seventh+Hawaii+International+Conference+on+System+Sciences+HICSS-94&amp;rft.pages=621-630&amp;rft.date=1994-01&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A13260377%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1109%2FHICSS.1994.323314&amp;rft.isbn=978-0-8186-5090-1&amp;rft.au=Ghosh&amp;rft.au=Reilly&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-201"><span class="mw-cite-backlink"><b><a href="#cite_ref-201">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAnanthaswamy2021" class="citation web cs1">Ananthaswamy A (19 April 2021). <a rel="nofollow" class="external text" href="https://www.quantamagazine.org/new-neural-networks-solve-hardest-equations-faster-than-ever-20210419/">"Latest Neural Nets Solve World's Hardest Equations Faster Than Ever Before"</a>. <i>Quanta Magazine</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519082138/https://www.quantamagazine.org/new-neural-networks-solve-hardest-equations-faster-than-ever-20210419/">Archived</a> from the original on 19 May 2024<span class="reference-accessdate">. Retrieved <span class="nowrap">12 May</span> 2021</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Quanta+Magazine&amp;rft.atitle=Latest+Neural+Nets+Solve+World%27s+Hardest+Equations+Faster+Than+Ever+Before&amp;rft.date=2021-04-19&amp;rft.aulast=Ananthaswamy&amp;rft.aufirst=Anil&amp;rft_id=https%3A%2F%2Fwww.quantamagazine.org%2Fnew-neural-networks-solve-hardest-equations-faster-than-ever-20210419%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-202"><span class="mw-cite-backlink"><b><a href="#cite_ref-202">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.technologyreview.com/2020/10/30/1011435/ai-fourier-neural-network-cracks-navier-stokes-and-partial-differential-equations/">"AI has cracked a key mathematical puzzle for understanding our world"</a>. <i>MIT Technology Review</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519082138/https://www.technologyreview.com/2020/10/30/1011435/ai-fourier-neural-network-cracks-navier-stokes-and-partial-differential-equations/">Archived</a> from the original on 19 May 2024<span class="reference-accessdate">. 
Retrieved <span class="nowrap">19 November</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=MIT+Technology+Review&amp;rft.atitle=AI+has+cracked+a+key+mathematical+puzzle+for+understanding+our+world&amp;rft_id=https%3A%2F%2Fwww.technologyreview.com%2F2020%2F10%2F30%2F1011435%2Fai-fourier-neural-network-cracks-navier-stokes-and-partial-differential-equations%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-203"><span class="mw-cite-backlink"><b><a href="#cite_ref-203">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.infoq.com/news/2020/12/caltech-ai-pde/">"Caltech Open-Sources AI for Solving Partial Differential Equations"</a>. <i>InfoQ</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210125233952/https://www.infoq.com/news/2020/12/caltech-ai-pde/">Archived</a> from the original on 25 January 2021<span class="reference-accessdate">. Retrieved <span class="nowrap">20 January</span> 2021</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=InfoQ&amp;rft.atitle=Caltech+Open-Sources+AI+for+Solving+Partial+Differential+Equations&amp;rft_id=https%3A%2F%2Fwww.infoq.com%2Fnews%2F2020%2F12%2Fcaltech-ai-pde%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-204"><span class="mw-cite-backlink"><b><a href="#cite_ref-204">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFNagy2019" class="citation journal cs1">Nagy A (28 June 2019). "Variational Quantum Monte Carlo Method with a Neural-Network Ansatz for Open Quantum Systems". <i><a href="/wiki/Physical_Review_Letters" title="Physical Review Letters">Physical Review Letters</a></i>. <b>122</b> (25): 250501. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1902.09483">1902.09483</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2019PhRvL.122y0501N">2019PhRvL.122y0501N</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1103%2FPhysRevLett.122.250501">10.1103/PhysRevLett.122.250501</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/31347886">31347886</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:119074378">119074378</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Physical+Review+Letters&amp;rft.atitle=Variational+Quantum+Monte+Carlo+Method+with+a+Neural-Network+Ansatz+for+Open+Quantum+Systems&amp;rft.volume=122&amp;rft.issue=25&amp;rft.pages=250501&amp;rft.date=2019-06-28&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A119074378%23id-name%3DS2CID&amp;rft_id=info%3Abibcode%2F2019PhRvL.122y0501N&amp;rft_id=info%3Aarxiv%2F1902.09483&amp;rft_id=info%3Apmid%2F31347886&amp;rft_id=info%3Adoi%2F10.1103%2FPhysRevLett.122.250501&amp;rft.aulast=Nagy&amp;rft.aufirst=Alexandra&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-205"><span class="mw-cite-backlink"><b><a href="#cite_ref-205">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFYoshiokaHamazaki2019" class="citation journal cs1">Yoshioka N, Hamazaki R (28 June 2019). "Constructing neural stationary states for open quantum many-body systems". <i>Physical Review B</i>. <b>99</b> (21): 214306. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1902.07006">1902.07006</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2019PhRvB..99u4306Y">2019PhRvB..99u4306Y</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1103%2FPhysRevB.99.214306">10.1103/PhysRevB.99.214306</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:119470636">119470636</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Physical+Review+B&amp;rft.atitle=Constructing+neural+stationary+states+for+open+quantum+many-body+systems&amp;rft.volume=99&amp;rft.issue=21&amp;rft.pages=214306&amp;rft.date=2019-06-28&amp;rft_id=info%3Aarxiv%2F1902.07006&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A119470636%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1103%2FPhysRevB.99.214306&amp;rft_id=info%3Abibcode%2F2019PhRvB..99u4306Y&amp;rft.aulast=Yoshioka&amp;rft.aufirst=Nobuyuki&amp;rft.au=Hamazaki%2C+Ryusuke&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-206"><span class="mw-cite-backlink"><b><a href="#cite_ref-206">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHartmannCarleo2019" class="citation journal cs1">Hartmann MJ, Carleo G (28 June 2019). "Neural-Network Approach to Dissipative Quantum Many-Body Dynamics". <i>Physical Review Letters</i>. <b>122</b> (25): 250502. 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1902.05131">1902.05131</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2019PhRvL.122y0502H">2019PhRvL.122y0502H</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1103%2FPhysRevLett.122.250502">10.1103/PhysRevLett.122.250502</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/31347862">31347862</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:119357494">119357494</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Physical+Review+Letters&amp;rft.atitle=Neural-Network+Approach+to+Dissipative+Quantum+Many-Body+Dynamics&amp;rft.volume=122&amp;rft.issue=25&amp;rft.pages=250502&amp;rft.date=2019-06-28&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A119357494%23id-name%3DS2CID&amp;rft_id=info%3Abibcode%2F2019PhRvL.122y0502H&amp;rft_id=info%3Aarxiv%2F1902.05131&amp;rft_id=info%3Apmid%2F31347862&amp;rft_id=info%3Adoi%2F10.1103%2FPhysRevLett.122.250502&amp;rft.aulast=Hartmann&amp;rft.aufirst=Michael+J.&amp;rft.au=Carleo%2C+Giuseppe&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-207"><span class="mw-cite-backlink"><b><a href="#cite_ref-207">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFVicentiniBiellaRegnaultCiuti2019" class="citation journal cs1">Vicentini F, Biella A, Regnault N, Ciuti C (28 June 2019). "Variational Neural-Network Ansatz for Steady States in Open Quantum Systems". <i>Physical Review Letters</i>. <b>122</b> (25): 250503. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1902.10104">1902.10104</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2019PhRvL.122y0503V">2019PhRvL.122y0503V</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1103%2FPhysRevLett.122.250503">10.1103/PhysRevLett.122.250503</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/31347877">31347877</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:119504484">119504484</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Physical+Review+Letters&amp;rft.atitle=Variational+Neural-Network+Ansatz+for+Steady+States+in+Open+Quantum+Systems&amp;rft.volume=122&amp;rft.issue=25&amp;rft.pages=250503&amp;rft.date=2019-06-28&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A119504484%23id-name%3DS2CID&amp;rft_id=info%3Abibcode%2F2019PhRvL.122y0503V&amp;rft_id=info%3Aarxiv%2F1902.10104&amp;rft_id=info%3Apmid%2F31347877&amp;rft_id=info%3Adoi%2F10.1103%2FPhysRevLett.122.250503&amp;rft.aulast=Vicentini&amp;rft.aufirst=Filippo&amp;rft.au=Biella%2C+Alberto&amp;rft.au=Regnault%2C+Nicolas&amp;rft.au=Ciuti%2C+Cristiano&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-208"><span class="mw-cite-backlink"><b><a href="#cite_ref-208">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFForrest_MD2015" class="citation journal cs1">Forrest MD (April 2015). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4417229">"Simulation of alcohol action upon a detailed Purkinje neuron model and a simpler surrogate model that runs &gt;400 times faster"</a>. <i>BMC Neuroscience</i>. <b>16</b> (27): 27. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1186%2Fs12868-015-0162-6">10.1186/s12868-015-0162-6</a></span>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4417229">4417229</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/25928094">25928094</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=BMC+Neuroscience&amp;rft.atitle=Simulation+of+alcohol+action+upon+a+detailed+Purkinje+neuron+model+and+a+simpler+surrogate+model+that+runs+%3E400+times+faster&amp;rft.volume=16&amp;rft.issue=27&amp;rft.pages=27&amp;rft.date=2015-04&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC4417229%23id-name%3DPMC&amp;rft_id=info%3Apmid%2F25928094&amp;rft_id=info%3Adoi%2F10.1186%2Fs12868-015-0162-6&amp;rft.au=Forrest+MD&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC4417229&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-209"><span class="mw-cite-backlink"><b><a href="#cite_ref-209">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWieczorekFilipiakFilipowska2018" class="citation journal cs1">Wieczorek S, Filipiak D, Filipowska A (2018). 
<a rel="nofollow" class="external text" href="https://www.researchgate.net/publication/328964756">"Semantic Image-Based Profiling of Users' Interests with Neural Networks"</a>. <i>Studies on the Semantic Web</i>. <b>36</b> (Emerging Topics in Semantic Technologies). <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.3233%2F978-1-61499-894-5-179">10.3233/978-1-61499-894-5-179</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519082144/https://www.researchgate.net/publication/328964756_Semantic_Image-Based_Profiling_of_Users%27_Interests_with_Neural_Networks">Archived</a> from the original on 19 May 2024<span class="reference-accessdate">. Retrieved <span class="nowrap">20 January</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Studies+on+the+Semantic+Web&amp;rft.atitle=Semantic+Image-Based+Profiling+of+Users%27+Interests+with+Neural+Networks&amp;rft.volume=36&amp;rft.issue=Emerging+Topics+in+Semantic+Technologies&amp;rft.date=2018&amp;rft_id=info%3Adoi%2F10.3233%2F978-1-61499-894-5-179&amp;rft.aulast=Wieczorek&amp;rft.aufirst=Szymon&amp;rft.au=Filipiak%2C+Dominik&amp;rft.au=Filipowska%2C+Agata&amp;rft_id=https%3A%2F%2Fwww.researchgate.net%2Fpublication%2F328964756&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-210"><span class="mw-cite-backlink"><b><a href="#cite_ref-210">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMerchantBatznerSchoenholzAykol2023" class="citation journal cs1">Merchant A, Batzner S, Schoenholz SS, Aykol M, Cheon G, Cubuk ED (December 2023). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10700131">"Scaling deep learning for materials discovery"</a>. <i>Nature</i>. <b>624</b> (7990): 80–85. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2023Natur.624...80M">2023Natur.624...80M</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fs41586-023-06735-9">10.1038/s41586-023-06735-9</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1476-4687">1476-4687</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10700131">10700131</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/38030720">38030720</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Nature&amp;rft.atitle=Scaling+deep+learning+for+materials+discovery&amp;rft.volume=624&amp;rft.issue=7990&amp;rft.pages=80-85&amp;rft.date=2023-12&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC10700131%23id-name%3DPMC&amp;rft_id=info%3Abibcode%2F2023Natur.624...80M&amp;rft_id=info%3Apmid%2F38030720&amp;rft_id=info%3Adoi%2F10.1038%2Fs41586-023-06735-9&amp;rft.issn=1476-4687&amp;rft.aulast=Merchant&amp;rft.aufirst=Amil&amp;rft.au=Batzner%2C+Simon&amp;rft.au=Schoenholz%2C+Samuel+S.&amp;rft.au=Aykol%2C+Muratahan&amp;rft.au=Cheon%2C+Gowoon&amp;rft.au=Cubuk%2C+Ekin+Dogus&amp;rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC10700131&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-211"><span class="mw-cite-backlink"><b><a href="#cite_ref-211">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSiegelmannSontag1991" class="citation journal cs1">Siegelmann H, Sontag E (1991). <a rel="nofollow" class="external text" href="http://www.math.rutgers.edu/~sontag/FTPDIR/aml-turing.pdf">"Turing computability with neural nets"</a> <span class="cs1-format">(PDF)</span>. <i>Appl. Math. Lett</i>. <b>4</b> (6): 77–80. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2F0893-9659%2891%2990080-F">10.1016/0893-9659(91)90080-F</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519082138/http://www.math.rutgers.edu/~sontag/FTPDIR/aml-turing.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 19 May 2024<span class="reference-accessdate">. Retrieved <span class="nowrap">10 January</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Appl.+Math.+Lett.&amp;rft.atitle=Turing+computability+with+neural+nets&amp;rft.volume=4&amp;rft.issue=6&amp;rft.pages=77-80&amp;rft.date=1991&amp;rft_id=info%3Adoi%2F10.1016%2F0893-9659%2891%2990080-F&amp;rft.aulast=Siegelmann&amp;rft.aufirst=H.T.&amp;rft.au=Sontag%2C+E.D.&amp;rft_id=http%3A%2F%2Fwww.math.rutgers.edu%2F~sontag%2FFTPDIR%2Faml-turing.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> <li id="cite_note-212"><span class="mw-cite-backlink"><b><a href="#cite_ref-212">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBains1998" class="citation news cs1">Bains S (3 November 1998). <a rel="nofollow" class="external text" href="https://www.eetimes.com/analog-computer-trumps-turing-model/">"Analog computer trumps Turing model"</a>. <i>EE Times</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230511152308/https://www.eetimes.com/analog-computer-trumps-turing-model/">Archived</a> from the original on 11 May 2023<span class="reference-accessdate">. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:17555595">17555595</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Dynamic+difficulty+adjustment+of+game+AI+for+video+game+Dead-End&amp;rft.btitle=The+3rd+International+Conference+on+Information+Sciences+and+Interaction+Sciences&amp;rft.pages=583-587&amp;rft.pub=IEEE&amp;rft.date=2010-06&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A17555595%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1109%2Ficicis.2010.5534761&amp;rft.isbn=978-1-4244-7384-7&amp;rft.aulast=Yu&amp;rft.aufirst=Xinrui&amp;rft.au=He%2C+Suoju&amp;rft.au=Gao%2C+Yuan&amp;rft.au=Yang%2C+Jiajian&amp;rft.au=Sha%2C+Lingdao&amp;rft.au=Zhang%2C+Yidan&amp;rft.au=Ai%2C+Zhaobo&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></span> </li> </ol></div> </section><div class="mw-heading mw-heading2 section-heading" onclick="mfTempOpenSection(13)"><span class="indicator mf-icon mf-icon-expand mf-icon--small"></span><h2 id="Bibliography">Bibliography</h2><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=52" title="Edit section: Bibliography" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div><section class="mf-section-13 collapsible-block" id="mf-section-13"> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1184024115"><div class="div-col" style="column-width: 30em;"> <ul><li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBhadeshia_H._K._D._H.1999" class="citation journal cs1">Bhadeshia H. K. D. H. (1999). <a rel="nofollow" class="external text" href="http://www.msm.cam.ac.uk/phase-trans/abstracts/neural.review.pdf">"Neural Networks in Materials Science"</a> <span class="cs1-format">(PDF)</span>. <i>ISIJ International</i>. <b>39</b> (10): 966–979. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.2355%2Fisijinternational.39.966">10.2355/isijinternational.39.966</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=ISIJ+International&amp;rft.atitle=Neural+Networks+in+Materials+Science&amp;rft.volume=39&amp;rft.issue=10&amp;rft.pages=966-979&amp;rft.date=1999&amp;rft_id=info%3Adoi%2F10.2355%2Fisijinternational.39.966&amp;rft.au=Bhadeshia+H.+K.+D.+H.&amp;rft_id=http%3A%2F%2Fwww.msm.cam.ac.uk%2Fphase-trans%2Fabstracts%2Fneural.review.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBishop1995" class="citation book cs1">Bishop CM (1995). <i>Neural networks for pattern recognition</i>. Clarendon Press. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-19-853849-3" title="Special:BookSources/978-0-19-853849-3"><bdi>978-0-19-853849-3</bdi></a>. 
<a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/33101074">33101074</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Neural+networks+for+pattern+recognition&amp;rft.pub=Clarendon+Press&amp;rft.date=1995&amp;rft_id=info%3Aoclcnum%2F33101074&amp;rft.isbn=978-0-19-853849-3&amp;rft.aulast=Bishop&amp;rft.aufirst=Christopher+M.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBorgelt2003" class="citation book cs1">Borgelt C (2003). <i>Neuro-Fuzzy-Systeme: von den Grundlagen künstlicher Neuronaler Netze zur Kopplung mit Fuzzy-Systemen</i>. Vieweg. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3-528-25265-6" title="Special:BookSources/978-3-528-25265-6"><bdi>978-3-528-25265-6</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/76538146">76538146</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Neuro-Fuzzy-Systeme%3A+von+den+Grundlagen+k%C3%BCnstlicher+Neuronaler+Netze+zur+Kopplung+mit+Fuzzy-Systemen&amp;rft.pub=Vieweg&amp;rft.date=2003&amp;rft_id=info%3Aoclcnum%2F76538146&amp;rft.isbn=978-3-528-25265-6&amp;rft.aulast=Borgelt&amp;rft.aufirst=Christian&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCybenko2006" class="citation book cs1"><a href="/wiki/George_Cybenko" title="George Cybenko">Cybenko G</a> (2006). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=4RtVAAAAMAAJ&amp;pg=PA303">"Approximation by Superpositions of a Sigmoidal function"</a>. In van Schuppen JH (ed.). <a href="/wiki/Mathematics_of_Control,_Signals,_and_Systems" title="Mathematics of Control, Signals, and Systems"><i>Mathematics of Control, Signals, and Systems</i></a>. Springer International. pp. 303–314.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Approximation+by+Superpositions+of+a+Sigmoidal+function&amp;rft.btitle=Mathematics+of+Control%2C+Signals%2C+and+Systems&amp;rft.pages=303-314&amp;rft.pub=Springer+International&amp;rft.date=2006&amp;rft.aulast=Cybenko&amp;rft.aufirst=G.V.&amp;rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3D4RtVAAAAMAAJ%26pg%3DPA303&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20110719183058/http://actcomm.dartmouth.edu/gvc/papers/approx_by_superposition.pdf">PDF</a></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDewdney1997" class="citation book cs1">Dewdney AK (1997). <i>Yes, we have no neutrons: an eye-opening tour through the twists and turns of bad science</i>. New York: Wiley. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-471-10806-1" title="Special:BookSources/978-0-471-10806-1"><bdi>978-0-471-10806-1</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/35558945">35558945</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Yes%2C+we+have+no+neutrons%3A+an+eye-opening+tour+through+the+twists+and+turns+of+bad+science&amp;rft.place=New+York&amp;rft.pub=Wiley&amp;rft.date=1997&amp;rft_id=info%3Aoclcnum%2F35558945&amp;rft.isbn=978-0-471-10806-1&amp;rft.aulast=Dewdney&amp;rft.aufirst=A.+K.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDudaHartStork2001" class="citation book cs1">Duda RO, Hart PE, Stork DG (2001). <i>Pattern classification</i> (2 ed.). Wiley. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-471-05669-0" title="Special:BookSources/978-0-471-05669-0"><bdi>978-0-471-05669-0</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/41347061">41347061</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Pattern+classification&amp;rft.edition=2&amp;rft.pub=Wiley&amp;rft.date=2001&amp;rft_id=info%3Aoclcnum%2F41347061&amp;rft.isbn=978-0-471-05669-0&amp;rft.aulast=Duda&amp;rft.aufirst=Richard+O.&amp;rft.au=Hart%2C+Peter+Elliot&amp;rft.au=Stork%2C+David+G.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFEgmont-Petersende_RidderHandels2002" class="citation journal cs1">Egmont-Petersen M, de Ridder D, Handels H (2002). "Image processing with neural networks – a review". <i>Pattern Recognition</i>. <b>35</b> (10): 2279–2301. <a href="/wiki/CiteSeerX_(identifier)" class="mw-redirect" title="CiteSeerX (identifier)">CiteSeerX</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.21.5444">10.1.1.21.5444</a></span>. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2FS0031-3203%2801%2900178-9">10.1016/S0031-3203(01)00178-9</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Pattern+Recognition&amp;rft.atitle=Image+processing+with+neural+networks+%E2%80%93+a+review&amp;rft.volume=35&amp;rft.issue=10&amp;rft.pages=2279-2301&amp;rft.date=2002&amp;rft_id=https%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.21.5444%23id-name%3DCiteSeerX&amp;rft_id=info%3Adoi%2F10.1016%2FS0031-3203%2801%2900178-9&amp;rft.aulast=Egmont-Petersen&amp;rft.aufirst=M.&amp;rft.au=de+Ridder%2C+D.&amp;rft.au=Handels%2C+H.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFahlmanLebiere1991" class="citation web cs1">Fahlman S, Lebiere C (1991). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20130503184045/http://www.cs.iastate.edu/~honavar/fahlman.pdf">"The Cascade-Correlation Learning Architecture"</a> <span class="cs1-format">(PDF)</span>. Archived from <a rel="nofollow" class="external text" href="http://www.cs.iastate.edu/~honavar/fahlman.pdf">the original</a> <span class="cs1-format">(PDF)</span> on 3 May 2013<span class="reference-accessdate">. Retrieved <span class="nowrap">28 August</span> 2006</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=The+Cascade-Correlation+Learning+Architecture&amp;rft.date=1991&amp;rft.aulast=Fahlman&amp;rft.aufirst=S.&amp;rft.au=Lebiere%2C+C&amp;rft_id=http%3A%2F%2Fwww.cs.iastate.edu%2F~honavar%2Ffahlman.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span> <ul><li>created for <a href="/wiki/National_Science_Foundation" title="National Science Foundation">National Science Foundation</a>, Contract Number EET-8716324, and <a href="/wiki/Defense_Advanced_Research_Projects_Agency" class="mw-redirect" title="Defense Advanced Research Projects Agency">Defense Advanced Research Projects Agency</a> (DOD), ARPA Order No. 4976 under Contract F33615-87-C-1499.</li></ul></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGurney1997" class="citation book cs1">Gurney K (1997). <i>An introduction to neural networks</i>. UCL Press. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-85728-673-1" title="Special:BookSources/978-1-85728-673-1"><bdi>978-1-85728-673-1</bdi></a>. 
<a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/37875698">37875698</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=An+introduction+to+neural+networks&amp;rft.pub=UCL+Press&amp;rft.date=1997&amp;rft_id=info%3Aoclcnum%2F37875698&amp;rft.isbn=978-1-85728-673-1&amp;rft.aulast=Gurney&amp;rft.aufirst=Kevin&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHaykin1999" class="citation book cs1">Haykin SS (1999). <i>Neural networks: a comprehensive foundation</i>. Prentice Hall. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-13-273350-2" title="Special:BookSources/978-0-13-273350-2"><bdi>978-0-13-273350-2</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/38908586">38908586</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Neural+networks%3A+a+comprehensive+foundation&amp;rft.pub=Prentice+Hall&amp;rft.date=1999&amp;rft_id=info%3Aoclcnum%2F38908586&amp;rft.isbn=978-0-13-273350-2&amp;rft.aulast=Haykin&amp;rft.aufirst=Simon+S.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHertzPalmerKrogh1991" class="citation book cs1">Hertz J, Palmer RG, Krogh AS (1991). <i>Introduction to the theory of neural computation</i>. Addison-Wesley. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-201-51560-2" title="Special:BookSources/978-0-201-51560-2"><bdi>978-0-201-51560-2</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/21522159">21522159</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Introduction+to+the+theory+of+neural+computation&amp;rft.pub=Addison-Wesley&amp;rft.date=1991&amp;rft_id=info%3Aoclcnum%2F21522159&amp;rft.isbn=978-0-201-51560-2&amp;rft.aulast=Hertz&amp;rft.aufirst=J.&amp;rft.au=Palmer%2C+Richard+G.&amp;rft.au=Krogh%2C+Anders+S.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation book cs1"><i>Information theory, inference, and learning algorithms</i>. Cambridge University Press. 25 September 2003. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2003itil.book.....M">2003itil.book.....M</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-521-64298-9" title="Special:BookSources/978-0-521-64298-9"><bdi>978-0-521-64298-9</bdi></a>. 
<a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/52377690">52377690</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Information+theory%2C+inference%2C+and+learning+algorithms&amp;rft.pub=Cambridge+University+Press&amp;rft.date=2003-09-25&amp;rft_id=info%3Aoclcnum%2F52377690&amp;rft_id=info%3Abibcode%2F2003itil.book.....M&amp;rft.isbn=978-0-521-64298-9&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKruseBorgeltKlawonnMoewes2013" class="citation book cs1">Kruse R, Borgelt C, Klawonn F, Moewes C, Steinbrecher M, Held P (2013). <i>Computational intelligence: a methodological introduction</i>. Springer. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4471-5012-1" title="Special:BookSources/978-1-4471-5012-1"><bdi>978-1-4471-5012-1</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/837524179">837524179</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Computational+intelligence%3A+a+methodological+introduction&amp;rft.pub=Springer&amp;rft.date=2013&amp;rft_id=info%3Aoclcnum%2F837524179&amp;rft.isbn=978-1-4471-5012-1&amp;rft.aulast=Kruse&amp;rft.aufirst=Rudolf&amp;rft.au=Borgelt%2C+Christian&amp;rft.au=Klawonn%2C+F.&amp;rft.au=Moewes%2C+Christian&amp;rft.au=Steinbrecher%2C+Matthias&amp;rft.au=Held%2C+Pascal&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLawrence1994" class="citation book cs1">Lawrence J (1994). <i>Introduction to neural networks: design, theory and applications</i>. California Scientific Software. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-883157-00-5" title="Special:BookSources/978-1-883157-00-5"><bdi>978-1-883157-00-5</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/32179420">32179420</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Introduction+to+neural+networks%3A+design%2C+theory+and+applications&amp;rft.pub=California+Scientific+Software&amp;rft.date=1994&amp;rft_id=info%3Aoclcnum%2F32179420&amp;rft.isbn=978-1-883157-00-5&amp;rft.aulast=Lawrence&amp;rft.aufirst=Jeanette&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMasters1994" class="citation book cs1">Masters T (1994). <i>Signal and image processing with neural networks: a C++ sourcebook</i>. J. Wiley. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-471-04963-0" title="Special:BookSources/978-0-471-04963-0"><bdi>978-0-471-04963-0</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/29877717">29877717</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Signal+and+image+processing+with+neural+networks%3A+a+C%2B%2B+sourcebook&amp;rft.pub=J.+Wiley&amp;rft.date=1994&amp;rft_id=info%3Aoclcnum%2F29877717&amp;rft.isbn=978-0-471-04963-0&amp;rft.aulast=Masters&amp;rft.aufirst=Timothy&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMaurer2021" class="citation book cs1">Maurer H (2021). <i>Cognitive science: integrative synchronization mechanisms in cognitive neuroarchitectures of the modern connectionism</i>. CRC Press. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1201%2F9781351043526">10.1201/9781351043526</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-351-04352-6" title="Special:BookSources/978-1-351-04352-6"><bdi>978-1-351-04352-6</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:242963768">242963768</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Cognitive+science%3A+integrative+synchronization+mechanisms+in+cognitive+neuroarchitectures+of+the+modern+connectionism&amp;rft.pub=CRC+Press&amp;rft.date=2021&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A242963768%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1201%2F9781351043526&amp;rft.isbn=978-1-351-04352-6&amp;rft.aulast=Maurer&amp;rft.aufirst=Harald&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRipley2007" class="citation book cs1"><a href="/wiki/Brian_D._Ripley" title="Brian D. Ripley">Ripley BD</a> (2007). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=m12UR8QmLqoC"><i>Pattern Recognition and Neural Networks</i></a>. Cambridge University Press. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-521-71770-0" title="Special:BookSources/978-0-521-71770-0"><bdi>978-0-521-71770-0</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Pattern+Recognition+and+Neural+Networks&amp;rft.pub=Cambridge+University+Press&amp;rft.date=2007&amp;rft.isbn=978-0-521-71770-0&amp;rft.aulast=Ripley&amp;rft.aufirst=Brian+D.&amp;rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3Dm12UR8QmLqoC&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSiegelmannSontag1994" class="citation journal cs1">Siegelmann H, Sontag ED (1994). <a rel="nofollow" class="external text" href="https://doi.org/10.1016%2F0304-3975%2894%2990178-3">"Analog computation via neural networks"</a>. <i>Theoretical Computer Science</i>. <b>131</b> (2): 331–360. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1016%2F0304-3975%2894%2990178-3">10.1016/0304-3975(94)90178-3</a></span>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:2456483">2456483</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Theoretical+Computer+Science&amp;rft.atitle=Analog+computation+via+neural+networks&amp;rft.volume=131&amp;rft.issue=2&amp;rft.pages=331-360&amp;rft.date=1994&amp;rft_id=info%3Adoi%2F10.1016%2F0304-3975%2894%2990178-3&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A2456483%23id-name%3DS2CID&amp;rft.aulast=Siegelmann&amp;rft.aufirst=H.T.&amp;rft.au=Sontag%2C+Eduardo+D.&amp;rft_id=https%3A%2F%2Fdoi.org%2F10.1016%252F0304-3975%252894%252990178-3&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSmith1993" class="citation book cs1">Smith M (1993). <i>Neural networks for statistical modeling</i>. Van Nostrand Reinhold. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-442-01310-3" title="Special:BookSources/978-0-442-01310-3"><bdi>978-0-442-01310-3</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/27145760">27145760</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Neural+networks+for+statistical+modeling&amp;rft.pub=Van+Nostrand+Reinhold&amp;rft.date=1993&amp;rft_id=info%3Aoclcnum%2F27145760&amp;rft.isbn=978-0-442-01310-3&amp;rft.aulast=Smith&amp;rft.aufirst=Murray&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWasserman1993" class="citation book cs1">Wasserman PD (1993). 
<i>Advanced methods in neural computing</i>. Van Nostrand Reinhold. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-442-00461-3" title="Special:BookSources/978-0-442-00461-3"><bdi>978-0-442-00461-3</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/27429729">27429729</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Advanced+methods+in+neural+computing&amp;rft.pub=Van+Nostrand+Reinhold&amp;rft.date=1993&amp;rft_id=info%3Aoclcnum%2F27429729&amp;rft.isbn=978-0-442-00461-3&amp;rft.aulast=Wasserman&amp;rft.aufirst=Philip+D.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWilson2018" class="citation book cs1">Wilson H (2018). <i>Artificial intelligence</i>. Grey House Publishing. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-68217-867-6" title="Special:BookSources/978-1-68217-867-6"><bdi>978-1-68217-867-6</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Artificial+intelligence&amp;rft.pub=Grey+House+Publishing&amp;rft.date=2018&amp;rft.isbn=978-1-68217-867-6&amp;rft.aulast=Wilson&amp;rft.aufirst=Halsey&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li></ul> </div> </section><div class="mw-heading mw-heading2 section-heading" onclick="mfTempOpenSection(14)"><span class="indicator mf-icon mf-icon-expand mf-icon--small"></span><h2 id="External_links">External links</h2><span class="mw-editsection"> <a role="button" href="/w/index.php?title=Neural_network_(machine_learning)&amp;action=edit&amp;section=53" title="Edit section: External links" class="cdx-button cdx-button--size-large cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--icon-only cdx-button--weight-quiet "> <span class="minerva-icon minerva-icon--edit"></span> <span>edit</span> </a> </span> </div><section class="mf-section-14 collapsible-block" id="mf-section-14"> <style data-mw-deduplicate="TemplateStyles:r1235611614">.mw-parser-output .spoken-wikipedia{border:1px solid #a2a9b1;background-color:var(--background-color-interactive-subtle,#f8f9fa);margin:0.5em 0;padding:0.2em;line-height:1.5em;font-size:90%}.mw-parser-output .spoken-wikipedia-header{text-align:center}.mw-parser-output .spoken-wikipedia-listen-to{font-weight:bold}.mw-parser-output .spoken-wikipedia-files{text-align:center;margin-top:10px;margin-bottom:0.4em}.mw-parser-output .spoken-wikipedia-icon{float:left;margin-left:5px;margin-top:10px}.mw-parser-output .spoken-wikipedia-disclaimer{margin-left:60px;margin-top:10px;font-size:95%;line-height:1.4em}.mw-parser-output .spoken-wikipedia-footer{margin-top:10px;text-align:center}@media(min-width:720px){.mw-parser-output .spoken-wikipedia{width:20em;float:right;clear:right;margin-left:1em}}</style><div class="spoken-wikipedia noprint haudio"><div class="spoken-wikipedia-header"><span class="spoken-wikipedia-listen-to">Listen to this article</span> (<span class="duration"><span class="min">31</span> minutes</span>)</div><div 
class="spoken-wikipedia-files"><figure class="mw-halign-center" typeof="mw:File"><span><audio id="mwe_player_0" controls="" preload="none" data-mw-tmh="" class="mw-file-element" width="200" style="width:200px;" data-durationhint="1883" data-mwtitle="En-Neural_network.ogg" data-mwprovider="wikimediacommons"><source src="//upload.wikimedia.org/wikipedia/commons/a/a3/En-Neural_network.ogg" type='audio/ogg; codecs="vorbis"' data-width="0" data-height="0"></source><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/a/a3/En-Neural_network.ogg/En-Neural_network.ogg.mp3" type="audio/mpeg" data-transcodekey="mp3" data-width="0" data-height="0"></source></audio></span><figcaption></figcaption></figure> </div><div class="spoken-wikipedia-icon"><span typeof="mw:File"><span title="Spoken Wikipedia"><noscript><img alt="Spoken Wikipedia icon" src="//upload.wikimedia.org/wikipedia/commons/thumb/4/47/Sound-icon.svg/45px-Sound-icon.svg.png" decoding="async" width="45" height="34" class="mw-file-element" data-file-width="128" data-file-height="96"></noscript><span class="lazy-image-placeholder" style="width: 45px;height: 34px;" data-src="//upload.wikimedia.org/wikipedia/commons/thumb/4/47/Sound-icon.svg/45px-Sound-icon.svg.png" data-alt="Spoken Wikipedia icon" data-width="45" data-height="34" data-srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/47/Sound-icon.svg/68px-Sound-icon.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/4/47/Sound-icon.svg/90px-Sound-icon.svg.png 2x" data-class="mw-file-element">&nbsp;</span></span></span></div><div class="spoken-wikipedia-disclaimer"><a href="/wiki/File:En-Neural_network.ogg" title="File:En-Neural network.ogg">This audio file</a> was created from a revision of this article dated 27 November 2011<span style="display:none"> (<span class="bday dtstart published updated itvstart">2011-11-27</span>)</span>, and does not reflect subsequent edits.</div><div class="spoken-wikipedia-footer">(<a href="/wiki/Wikipedia:Media_help" class="mw-redirect" title="Wikipedia:Media help">Audio help</a> · <a href="/wiki/Wikipedia:Spoken_articles" title="Wikipedia:Spoken articles">More spoken articles</a>)</div></div> <ul><li><a rel="nofollow" class="external text" href="http://www.dkriesel.com/en/science/neural_networks">A Brief Introduction to Neural Networks (D. Kriesel)</a> – Illustrated, bilingual manuscript about artificial neural networks; Topics so far: Perceptrons, Backpropagation, Radial Basis Functions, Recurrent Neural Networks, Self Organizing Maps, Hopfield Networks.</li> <li><a rel="nofollow" class="external text" href="http://www.msm.cam.ac.uk/phase-trans/abstracts/neural.review.html">Review of Neural Networks in Materials Science</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150607101310/http://www.msm.cam.ac.uk/phase-trans/abstracts/neural.review.html">Archived</a> 7 June 2015 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a></li> <li><a rel="nofollow" class="external text" href="https://web.archive.org/web/20090318133122/http://www.gc.ssr.upm.es/inves/neural/ann1/anntutorial.html">Artificial Neural Networks Tutorial in three languages (Univ. 
Politécnica de Madrid)</a></li> <li><a rel="nofollow" class="external text" href="https://web.archive.org/web/20091216110504/http://www.doc.ic.ac.uk/~nd/surprise_96/journal/vol4/cs11/report.html">Another introduction to ANN</a></li> <li><a rel="nofollow" class="external text" href="https://www.youtube.com/watch?v=AyzOUbkUf3M">Next Generation of Neural Networks</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20110124234328/http://www.youtube.com/watch?v=AyzOUbkUf3M">Archived</a> 24 January 2011 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a> – Google Tech Talks</li> <li><a rel="nofollow" class="external text" href="http://www.msm.cam.ac.uk/phase-trans/2009/performance.html">Performance of Neural Networks</a></li> <li><a rel="nofollow" class="external text" href="http://www.msm.cam.ac.uk/phase-trans/2009/review_Bhadeshia_SADM.pdf">Neural Networks and Information</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20090709153828/http://www.msm.cam.ac.uk/phase-trans/2009/review_Bhadeshia_SADM.pdf">Archived</a> 9 July 2009 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSanderson2017" class="citation web cs1">Sanderson G (5 October 2017). <a rel="nofollow" class="external text" href="https://www.youtube.com/watch?v=aircAruvnKk&amp;list=PLZHQObOWTQDNU6R1_67000Dx_ZCJB-3pi">"But what <i>is</i> a Neural Network?"</a>. <i><a href="/wiki/3Blue1Brown" title="3Blue1Brown">3Blue1Brown</a></i>. <a rel="nofollow" class="external text" href="https://ghostarchive.org/varchive/youtube/20211107/aircAruvnKk">Archived</a> from the original on 7 November 2021 – via <a href="/wiki/YouTube" title="YouTube">YouTube</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=3Blue1Brown&amp;rft.atitle=But+what+is+a+Neural+Network%3F&amp;rft.date=2017-10-05&amp;rft.aulast=Sanderson&amp;rft.aufirst=Grant&amp;rft_id=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DaircAruvnKk%26list%3DPLZHQObOWTQDNU6R1_67000Dx_ZCJB-3pi&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3ANeural+network+%28machine+learning%29" class="Z3988"></span></li></ul> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><style data-mw-deduplicate="TemplateStyles:r1236075235">.mw-parser-output .navbox{box-sizing:border-box;border:1px solid #a2a9b1;width:100%;clear:both;font-size:88%;text-align:center;padding:1px;margin:1em auto 0}.mw-parser-output .navbox .navbox{margin-top:0}.mw-parser-output .navbox+.navbox,.mw-parser-output .navbox+.navbox-styles+.navbox{margin-top:-1px}.mw-parser-output .navbox-inner,.mw-parser-output .navbox-subgroup{width:100%}.mw-parser-output .navbox-group,.mw-parser-output .navbox-title,.mw-parser-output .navbox-abovebelow{padding:0.25em 1em;line-height:1.5em;text-align:center}.mw-parser-output .navbox-group{white-space:nowrap;text-align:right}.mw-parser-output .navbox,.mw-parser-output .navbox-subgroup{background-color:#fdfdfd}.mw-parser-output .navbox-list{line-height:1.5em;border-color:#fdfdfd}.mw-parser-output .navbox-list-with-group{text-align:left;border-left-width:2px;border-left-style:solid}.mw-parser-output tr+tr>.navbox-abovebelow,.mw-parser-output tr+tr>.navbox-group,.mw-parser-output tr+tr>.navbox-image,.mw-parser-output tr+tr>.navbox-list{border-top:2px solid 