CINXE.COM

Unsupervised learning - Wikipedia

<!DOCTYPE html> <html class="client-nojs vector-feature-language-in-header-enabled vector-feature-language-in-main-page-header-disabled vector-feature-page-tools-pinned-disabled vector-feature-toc-pinned-clientpref-1 vector-feature-main-menu-pinned-disabled vector-feature-limited-width-clientpref-1 vector-feature-limited-width-content-enabled vector-feature-custom-font-size-clientpref-1 vector-feature-appearance-pinned-clientpref-1 vector-feature-night-mode-enabled skin-theme-clientpref-day vector-sticky-header-enabled vector-toc-available" lang="en" dir="ltr"> <head> <meta charset="UTF-8"> <title>Unsupervised learning - Wikipedia</title> <script>(function(){var className="client-js vector-feature-language-in-header-enabled vector-feature-language-in-main-page-header-disabled vector-feature-page-tools-pinned-disabled vector-feature-toc-pinned-clientpref-1 vector-feature-main-menu-pinned-disabled vector-feature-limited-width-clientpref-1 vector-feature-limited-width-content-enabled vector-feature-custom-font-size-clientpref-1 vector-feature-appearance-pinned-clientpref-1 vector-feature-night-mode-enabled skin-theme-clientpref-day vector-sticky-header-enabled vector-toc-available";var cookie=document.cookie.match(/(?:^|; )enwikimwclientpreferences=([^;]+)/);if(cookie){cookie[1].split('%2C').forEach(function(pref){className=className.replace(new RegExp('(^| )'+pref.replace(/-clientpref-\w+$|[^\w-]+/g,'')+'-clientpref-\\w+( |$)'),'$1'+pref+'$2');});}document.documentElement.className=className;}());RLCONF={"wgBreakFrames":false,"wgSeparatorTransformTable":["",""],"wgDigitTransformTable":["",""],"wgDefaultDateFormat":"dmy","wgMonthNames":["","January","February","March","April","May","June","July","August","September","October","November","December"],"wgRequestId":"c5f9a645-9e26-471c-bcd6-83c7f58718f3","wgCanonicalNamespace":"","wgCanonicalSpecialPageName":false,"wgNamespaceNumber":0,"wgPageName":"Unsupervised_learning","wgTitle":"Unsupervised learning","wgCurRevisionId":1278046684,"wgRevisionId":1278046684,"wgArticleId":233497,"wgIsArticle":true,"wgIsRedirect":false,"wgAction":"view","wgUserName":null,"wgUserGroups":["*"],"wgCategories":["Articles with short description","Short description is different from Wikidata","Unsupervised learning"],"wgPageViewLanguage":"en","wgPageContentLanguage":"en","wgPageContentModel":"wikitext","wgRelevantPageName":"Unsupervised_learning","wgRelevantArticleId":233497,"wgIsProbablyEditable":true,"wgRelevantPageIsProbablyEditable":true,"wgRestrictionEdit":[],"wgRestrictionMove":[],"wgNoticeProject":"wikipedia","wgCiteReferencePreviewsActive":false,"wgFlaggedRevsParams":{"tags":{"status":{"levels":1}}},"wgMediaViewerOnClick":true,"wgMediaViewerEnabledByDefault":true,"wgPopupsFlags":0,"wgVisualEditor":{"pageLanguageCode":"en","pageLanguageDir":"ltr","pageVariantFallbacks":"en"},"wgMFDisplayWikibaseDescriptions":{"search":true,"watchlist":true,"tagline":false,"nearby":true},"wgWMESchemaEditAttemptStepOversample":false,"wgWMEPageLength":30000,"wgEditSubmitButtonLabelPublish":true,"wgULSPosition":"interlanguage","wgULSisCompactLinksEnabled":false,"wgVector2022LanguageInHeader":true,"wgULSisLanguageSelectorEmpty":false,"wgWikibaseItemId":"Q1152135","wgCheckUserClientHintsHeadersJsApi":["brands","architecture","bitness","fullVersionList","mobile","model","platform","platformVersion"],"GEHomepageSuggestedEditsEnableTopics":true,"wgGETopicsMatchModeEnabled":false,"wgGEStructuredTaskRejectionReasonTextInputEnabled":false,"wgGELevelingUpEnabledForUser":false}; RLSTATE={"ext.globalCssJs.user.styles":"ready","site.styles":"ready","user.styles":"ready","ext.globalCssJs.user":"ready","user":"ready","user.options":"loading","ext.cite.styles":"ready","ext.math.styles":"ready","skins.vector.search.codex.styles":"ready","skins.vector.styles":"ready","skins.vector.icons":"ready","jquery.makeCollapsible.styles":"ready","ext.wikimediamessages.styles":"ready","ext.visualEditor.desktopArticleTarget.noscript":"ready","ext.uls.interlanguage":"ready","wikibase.client.init":"ready"};RLPAGEMODULES=["ext.cite.ux-enhancements","mediawiki.page.media","ext.scribunto.logs","site","mediawiki.page.ready","jquery.makeCollapsible","mediawiki.toc","skins.vector.js","ext.centralNotice.geoIP","ext.centralNotice.startUp","ext.gadget.ReferenceTooltips","ext.gadget.switcher","ext.urlShortener.toolbar","ext.centralauth.centralautologin","mmv.bootstrap","ext.popups","ext.visualEditor.desktopArticleTarget.init","ext.visualEditor.targetLoader","ext.echo.centralauth","ext.eventLogging","ext.wikimediaEvents","ext.navigationTiming","ext.uls.interface","ext.cx.eventlogging.campaigns","ext.cx.uls.quick.actions","wikibase.client.vector-2022","ext.checkUser.clientHints","ext.growthExperiments.SuggestedEditSession"];</script> <script>(RLQ=window.RLQ||[]).push(function(){mw.loader.impl(function(){return["user.options@12s5i",function($,jQuery,require,module){mw.user.tokens.set({"patrolToken":"+\\","watchToken":"+\\","csrfToken":"+\\"}); }];});});</script> <link rel="stylesheet" href="/w/load.php?lang=en&amp;modules=ext.cite.styles%7Cext.math.styles%7Cext.uls.interlanguage%7Cext.visualEditor.desktopArticleTarget.noscript%7Cext.wikimediamessages.styles%7Cjquery.makeCollapsible.styles%7Cskins.vector.icons%2Cstyles%7Cskins.vector.search.codex.styles%7Cwikibase.client.init&amp;only=styles&amp;skin=vector-2022"> <script async="" src="/w/load.php?lang=en&amp;modules=startup&amp;only=scripts&amp;raw=1&amp;skin=vector-2022"></script> <meta name="ResourceLoaderDynamicStyles" content=""> <link rel="stylesheet" href="/w/load.php?lang=en&amp;modules=site.styles&amp;only=styles&amp;skin=vector-2022"> <meta name="generator" content="MediaWiki 1.44.0-wmf.20"> <meta name="referrer" content="origin"> <meta name="referrer" content="origin-when-cross-origin"> <meta name="robots" content="max-image-preview:standard"> <meta name="format-detection" content="telephone=no"> <meta name="viewport" content="width=1120"> <meta property="og:title" content="Unsupervised learning - Wikipedia"> <meta property="og:type" content="website"> <link rel="preconnect" href="//upload.wikimedia.org"> <link rel="alternate" media="only screen and (max-width: 640px)" href="//en.m.wikipedia.org/wiki/Unsupervised_learning"> <link rel="alternate" type="application/x-wiki" title="Edit this page" href="/w/index.php?title=Unsupervised_learning&amp;action=edit"> <link rel="apple-touch-icon" href="/static/apple-touch/wikipedia.png"> <link rel="icon" href="/static/favicon/wikipedia.ico"> <link rel="search" type="application/opensearchdescription+xml" href="/w/rest.php/v1/search" title="Wikipedia (en)"> <link rel="EditURI" type="application/rsd+xml" href="//en.wikipedia.org/w/api.php?action=rsd"> <link rel="canonical" href="https://en.wikipedia.org/wiki/Unsupervised_learning"> <link rel="license" href="https://creativecommons.org/licenses/by-sa/4.0/deed.en"> <link rel="alternate" type="application/atom+xml" title="Wikipedia Atom feed" href="/w/index.php?title=Special:RecentChanges&amp;feed=atom"> <link rel="dns-prefetch" href="//meta.wikimedia.org" /> <link rel="dns-prefetch" href="login.wikimedia.org"> </head> <body class="skin--responsive skin-vector skin-vector-search-vue mediawiki ltr sitedir-ltr mw-hide-empty-elt ns-0 ns-subject mw-editable page-Unsupervised_learning rootpage-Unsupervised_learning skin-vector-2022 action-view"><a class="mw-jump-link" href="#bodyContent">Jump to content</a> <div class="vector-header-container"> <header class="vector-header mw-header"> <div class="vector-header-start"> <nav class="vector-main-menu-landmark" aria-label="Site"> <div id="vector-main-menu-dropdown" class="vector-dropdown vector-main-menu-dropdown vector-button-flush-left vector-button-flush-right" title="Main menu" > <input type="checkbox" id="vector-main-menu-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-main-menu-dropdown" class="vector-dropdown-checkbox " aria-label="Main menu" > <label id="vector-main-menu-dropdown-label" for="vector-main-menu-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-menu mw-ui-icon-wikimedia-menu"></span> <span class="vector-dropdown-label-text">Main menu</span> </label> <div class="vector-dropdown-content"> <div id="vector-main-menu-unpinned-container" class="vector-unpinned-container"> <div id="vector-main-menu" class="vector-main-menu vector-pinnable-element"> <div class="vector-pinnable-header vector-main-menu-pinnable-header vector-pinnable-header-unpinned" data-feature-name="main-menu-pinned" data-pinnable-element-id="vector-main-menu" data-pinned-container-id="vector-main-menu-pinned-container" data-unpinned-container-id="vector-main-menu-unpinned-container" > <div class="vector-pinnable-header-label">Main menu</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-main-menu.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-main-menu.unpin">hide</button> </div> <div id="p-navigation" class="vector-menu mw-portlet mw-portlet-navigation" > <div class="vector-menu-heading"> Navigation </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="n-mainpage-description" class="mw-list-item"><a href="/wiki/Main_Page" title="Visit the main page [z]" accesskey="z"><span>Main page</span></a></li><li id="n-contents" class="mw-list-item"><a href="/wiki/Wikipedia:Contents" title="Guides to browsing Wikipedia"><span>Contents</span></a></li><li id="n-currentevents" class="mw-list-item"><a href="/wiki/Portal:Current_events" title="Articles related to current events"><span>Current events</span></a></li><li id="n-randompage" class="mw-list-item"><a href="/wiki/Special:Random" title="Visit a randomly selected article [x]" accesskey="x"><span>Random article</span></a></li><li id="n-aboutsite" class="mw-list-item"><a href="/wiki/Wikipedia:About" title="Learn about Wikipedia and how it works"><span>About Wikipedia</span></a></li><li id="n-contactpage" class="mw-list-item"><a href="//en.wikipedia.org/wiki/Wikipedia:Contact_us" title="How to contact Wikipedia"><span>Contact us</span></a></li> </ul> </div> </div> <div id="p-interaction" class="vector-menu mw-portlet mw-portlet-interaction" > <div class="vector-menu-heading"> Contribute </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="n-help" class="mw-list-item"><a href="/wiki/Help:Contents" title="Guidance on how to use and edit Wikipedia"><span>Help</span></a></li><li id="n-introduction" class="mw-list-item"><a href="/wiki/Help:Introduction" title="Learn how to edit Wikipedia"><span>Learn to edit</span></a></li><li id="n-portal" class="mw-list-item"><a href="/wiki/Wikipedia:Community_portal" title="The hub for editors"><span>Community portal</span></a></li><li id="n-recentchanges" class="mw-list-item"><a href="/wiki/Special:RecentChanges" title="A list of recent changes to Wikipedia [r]" accesskey="r"><span>Recent changes</span></a></li><li id="n-upload" class="mw-list-item"><a href="/wiki/Wikipedia:File_upload_wizard" title="Add images or other media for use on Wikipedia"><span>Upload file</span></a></li><li id="n-specialpages" class="mw-list-item"><a href="/wiki/Special:SpecialPages"><span>Special pages</span></a></li> </ul> </div> </div> </div> </div> </div> </div> </nav> <a href="/wiki/Main_Page" class="mw-logo"> <img class="mw-logo-icon" src="/static/images/icons/wikipedia.png" alt="" aria-hidden="true" height="50" width="50"> <span class="mw-logo-container skin-invert"> <img class="mw-logo-wordmark" alt="Wikipedia" src="/static/images/mobile/copyright/wikipedia-wordmark-en.svg" style="width: 7.5em; height: 1.125em;"> <img class="mw-logo-tagline" alt="The Free Encyclopedia" src="/static/images/mobile/copyright/wikipedia-tagline-en.svg" width="117" height="13" style="width: 7.3125em; height: 0.8125em;"> </span> </a> </div> <div class="vector-header-end"> <div id="p-search" role="search" class="vector-search-box-vue vector-search-box-collapses vector-search-box-show-thumbnail vector-search-box-auto-expand-width vector-search-box"> <a href="/wiki/Special:Search" class="cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only search-toggle" title="Search Wikipedia [f]" accesskey="f"><span class="vector-icon mw-ui-icon-search mw-ui-icon-wikimedia-search"></span> <span>Search</span> </a> <div class="vector-typeahead-search-container"> <div class="cdx-typeahead-search cdx-typeahead-search--show-thumbnail cdx-typeahead-search--auto-expand-width"> <form action="/w/index.php" id="searchform" class="cdx-search-input cdx-search-input--has-end-button"> <div id="simpleSearch" class="cdx-search-input__input-wrapper" data-search-loc="header-moved"> <div class="cdx-text-input cdx-text-input--has-start-icon"> <input class="cdx-text-input__input" type="search" name="search" placeholder="Search Wikipedia" aria-label="Search Wikipedia" autocapitalize="sentences" title="Search Wikipedia [f]" accesskey="f" id="searchInput" > <span class="cdx-text-input__icon cdx-text-input__start-icon"></span> </div> <input type="hidden" name="title" value="Special:Search"> </div> <button class="cdx-button cdx-search-input__end-button">Search</button> </form> </div> </div> </div> <nav class="vector-user-links vector-user-links-wide" aria-label="Personal tools"> <div class="vector-user-links-main"> <div id="p-vector-user-menu-preferences" class="vector-menu mw-portlet emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> <div id="p-vector-user-menu-userpage" class="vector-menu mw-portlet emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> <nav class="vector-appearance-landmark" aria-label="Appearance"> <div id="vector-appearance-dropdown" class="vector-dropdown " title="Change the appearance of the page&#039;s font size, width, and color" > <input type="checkbox" id="vector-appearance-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-appearance-dropdown" class="vector-dropdown-checkbox " aria-label="Appearance" > <label id="vector-appearance-dropdown-label" for="vector-appearance-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-appearance mw-ui-icon-wikimedia-appearance"></span> <span class="vector-dropdown-label-text">Appearance</span> </label> <div class="vector-dropdown-content"> <div id="vector-appearance-unpinned-container" class="vector-unpinned-container"> </div> </div> </div> </nav> <div id="p-vector-user-menu-notifications" class="vector-menu mw-portlet emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> <div id="p-vector-user-menu-overflow" class="vector-menu mw-portlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-sitesupport-2" class="user-links-collapsible-item mw-list-item user-links-collapsible-item"><a data-mw="interface" href="https://donate.wikimedia.org/?wmf_source=donate&amp;wmf_medium=sidebar&amp;wmf_campaign=en.wikipedia.org&amp;uselang=en" class=""><span>Donate</span></a> </li> <li id="pt-createaccount-2" class="user-links-collapsible-item mw-list-item user-links-collapsible-item"><a data-mw="interface" href="/w/index.php?title=Special:CreateAccount&amp;returnto=Unsupervised+learning" title="You are encouraged to create an account and log in; however, it is not mandatory" class=""><span>Create account</span></a> </li> <li id="pt-login-2" class="user-links-collapsible-item mw-list-item user-links-collapsible-item"><a data-mw="interface" href="/w/index.php?title=Special:UserLogin&amp;returnto=Unsupervised+learning" title="You&#039;re encouraged to log in; however, it&#039;s not mandatory. [o]" accesskey="o" class=""><span>Log in</span></a> </li> </ul> </div> </div> </div> <div id="vector-user-links-dropdown" class="vector-dropdown vector-user-menu vector-button-flush-right vector-user-menu-logged-out" title="Log in and more options" > <input type="checkbox" id="vector-user-links-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-user-links-dropdown" class="vector-dropdown-checkbox " aria-label="Personal tools" > <label id="vector-user-links-dropdown-label" for="vector-user-links-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-ellipsis mw-ui-icon-wikimedia-ellipsis"></span> <span class="vector-dropdown-label-text">Personal tools</span> </label> <div class="vector-dropdown-content"> <div id="p-personal" class="vector-menu mw-portlet mw-portlet-personal user-links-collapsible-item" title="User menu" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-sitesupport" class="user-links-collapsible-item mw-list-item"><a href="https://donate.wikimedia.org/?wmf_source=donate&amp;wmf_medium=sidebar&amp;wmf_campaign=en.wikipedia.org&amp;uselang=en"><span>Donate</span></a></li><li id="pt-createaccount" class="user-links-collapsible-item mw-list-item"><a href="/w/index.php?title=Special:CreateAccount&amp;returnto=Unsupervised+learning" title="You are encouraged to create an account and log in; however, it is not mandatory"><span class="vector-icon mw-ui-icon-userAdd mw-ui-icon-wikimedia-userAdd"></span> <span>Create account</span></a></li><li id="pt-login" class="user-links-collapsible-item mw-list-item"><a href="/w/index.php?title=Special:UserLogin&amp;returnto=Unsupervised+learning" title="You&#039;re encouraged to log in; however, it&#039;s not mandatory. [o]" accesskey="o"><span class="vector-icon mw-ui-icon-logIn mw-ui-icon-wikimedia-logIn"></span> <span>Log in</span></a></li> </ul> </div> </div> <div id="p-user-menu-anon-editor" class="vector-menu mw-portlet mw-portlet-user-menu-anon-editor" > <div class="vector-menu-heading"> Pages for logged out editors <a href="/wiki/Help:Introduction" aria-label="Learn more about editing"><span>learn more</span></a> </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-anoncontribs" class="mw-list-item"><a href="/wiki/Special:MyContributions" title="A list of edits made from this IP address [y]" accesskey="y"><span>Contributions</span></a></li><li id="pt-anontalk" class="mw-list-item"><a href="/wiki/Special:MyTalk" title="Discussion about edits from this IP address [n]" accesskey="n"><span>Talk</span></a></li> </ul> </div> </div> </div> </div> </nav> </div> </header> </div> <div class="mw-page-container"> <div class="mw-page-container-inner"> <div class="vector-sitenotice-container"> <div id="siteNotice"><!-- CentralNotice --></div> </div> <div class="vector-column-start"> <div class="vector-main-menu-container"> <div id="mw-navigation"> <nav id="mw-panel" class="vector-main-menu-landmark" aria-label="Site"> <div id="vector-main-menu-pinned-container" class="vector-pinned-container"> </div> </nav> </div> </div> <div class="vector-sticky-pinned-container"> <nav id="mw-panel-toc" aria-label="Contents" data-event-name="ui.sidebar-toc" class="mw-table-of-contents-container vector-toc-landmark"> <div id="vector-toc-pinned-container" class="vector-pinned-container"> <div id="vector-toc" class="vector-toc vector-pinnable-element"> <div class="vector-pinnable-header vector-toc-pinnable-header vector-pinnable-header-pinned" data-feature-name="toc-pinned" data-pinnable-element-id="vector-toc" > <h2 class="vector-pinnable-header-label">Contents</h2> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-toc.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-toc.unpin">hide</button> </div> <ul class="vector-toc-contents" id="mw-panel-toc-list"> <li id="toc-mw-content-text" class="vector-toc-list-item vector-toc-level-1"> <a href="#" class="vector-toc-link"> <div class="vector-toc-text">(Top)</div> </a> </li> <li id="toc-Tasks" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#Tasks"> <div class="vector-toc-text"> <span class="vector-toc-numb">1</span> <span>Tasks</span> </div> </a> <ul id="toc-Tasks-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Neural_network_architectures" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#Neural_network_architectures"> <div class="vector-toc-text"> <span class="vector-toc-numb">2</span> <span>Neural network architectures</span> </div> </a> <button aria-controls="toc-Neural_network_architectures-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Neural network architectures subsection</span> </button> <ul id="toc-Neural_network_architectures-sublist" class="vector-toc-list"> <li id="toc-Training" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Training"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.1</span> <span>Training</span> </div> </a> <ul id="toc-Training-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Energy" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Energy"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.2</span> <span>Energy</span> </div> </a> <ul id="toc-Energy-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Networks" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Networks"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.3</span> <span>Networks</span> </div> </a> <ul id="toc-Networks-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-History" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#History"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.4</span> <span>History</span> </div> </a> <ul id="toc-History-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Specific_Networks" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Specific_Networks"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.5</span> <span>Specific Networks</span> </div> </a> <ul id="toc-Specific_Networks-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Comparison_of_networks" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Comparison_of_networks"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.6</span> <span>Comparison of networks</span> </div> </a> <ul id="toc-Comparison_of_networks-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Hebbian_Learning,_ART,_SOM" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Hebbian_Learning,_ART,_SOM"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.7</span> <span>Hebbian Learning, ART, SOM</span> </div> </a> <ul id="toc-Hebbian_Learning,_ART,_SOM-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Probabilistic_methods" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#Probabilistic_methods"> <div class="vector-toc-text"> <span class="vector-toc-numb">3</span> <span>Probabilistic methods</span> </div> </a> <button aria-controls="toc-Probabilistic_methods-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Probabilistic methods subsection</span> </button> <ul id="toc-Probabilistic_methods-sublist" class="vector-toc-list"> <li id="toc-Approaches" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Approaches"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.1</span> <span>Approaches</span> </div> </a> <ul id="toc-Approaches-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Method_of_moments" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Method_of_moments"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.2</span> <span>Method of moments</span> </div> </a> <ul id="toc-Method_of_moments-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-See_also" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#See_also"> <div class="vector-toc-text"> <span class="vector-toc-numb">4</span> <span>See also</span> </div> </a> <ul id="toc-See_also-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-References" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#References"> <div class="vector-toc-text"> <span class="vector-toc-numb">5</span> <span>References</span> </div> </a> <ul id="toc-References-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Further_reading" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#Further_reading"> <div class="vector-toc-text"> <span class="vector-toc-numb">6</span> <span>Further reading</span> </div> </a> <ul id="toc-Further_reading-sublist" class="vector-toc-list"> </ul> </li> </ul> </div> </div> </nav> </div> </div> <div class="mw-content-container"> <main id="content" class="mw-body"> <header class="mw-body-header vector-page-titlebar"> <nav aria-label="Contents" class="vector-toc-landmark"> <div id="vector-page-titlebar-toc" class="vector-dropdown vector-page-titlebar-toc vector-button-flush-left" title="Table of Contents" > <input type="checkbox" id="vector-page-titlebar-toc-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-titlebar-toc" class="vector-dropdown-checkbox " aria-label="Toggle the table of contents" > <label id="vector-page-titlebar-toc-label" for="vector-page-titlebar-toc-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-listBullet mw-ui-icon-wikimedia-listBullet"></span> <span class="vector-dropdown-label-text">Toggle the table of contents</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-titlebar-toc-unpinned-container" class="vector-unpinned-container"> </div> </div> </div> </nav> <h1 id="firstHeading" class="firstHeading mw-first-heading"><span class="mw-page-title-main">Unsupervised learning</span></h1> <div id="p-lang-btn" class="vector-dropdown mw-portlet mw-portlet-lang" > <input type="checkbox" id="p-lang-btn-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-p-lang-btn" class="vector-dropdown-checkbox mw-interlanguage-selector" aria-label="Go to an article in another language. Available in 31 languages" > <label id="p-lang-btn-label" for="p-lang-btn-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--action-progressive mw-portlet-lang-heading-31" aria-hidden="true" ><span class="vector-icon mw-ui-icon-language-progressive mw-ui-icon-wikimedia-language-progressive"></span> <span class="vector-dropdown-label-text">31 languages</span> </label> <div class="vector-dropdown-content"> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li class="interlanguage-link interwiki-ar mw-list-item"><a href="https://ar.wikipedia.org/wiki/%D8%AA%D8%B9%D9%84%D9%85_%D8%BA%D9%8A%D8%B1_%D9%85%D8%B1%D8%A7%D9%82%D8%A8" title="تعلم غير مراقب – Arabic" lang="ar" hreflang="ar" data-title="تعلم غير مراقب" data-language-autonym="العربية" data-language-local-name="Arabic" class="interlanguage-link-target"><span>العربية</span></a></li><li class="interlanguage-link interwiki-zh-min-nan mw-list-item"><a href="https://zh-min-nan.wikipedia.org/wiki/B%C3%B4-k%C3%A0m-tok_ha%CC%8Dk-si%CC%8Dp" title="Bô-kàm-tok ha̍k-si̍p – Minnan" lang="nan" hreflang="nan" data-title="Bô-kàm-tok ha̍k-si̍p" data-language-autonym="閩南語 / Bân-lâm-gú" data-language-local-name="Minnan" class="interlanguage-link-target"><span>閩南語 / Bân-lâm-gú</span></a></li><li class="interlanguage-link interwiki-ca mw-list-item"><a href="https://ca.wikipedia.org/wiki/Aprenentatge_no_supervisat" title="Aprenentatge no supervisat – Catalan" lang="ca" hreflang="ca" data-title="Aprenentatge no supervisat" data-language-autonym="Català" data-language-local-name="Catalan" class="interlanguage-link-target"><span>Català</span></a></li><li class="interlanguage-link interwiki-cs mw-list-item"><a href="https://cs.wikipedia.org/wiki/U%C4%8Den%C3%AD_bez_u%C4%8Ditele" title="Učení bez učitele – Czech" lang="cs" hreflang="cs" data-title="Učení bez učitele" data-language-autonym="Čeština" data-language-local-name="Czech" class="interlanguage-link-target"><span>Čeština</span></a></li><li class="interlanguage-link interwiki-de mw-list-item"><a href="https://de.wikipedia.org/wiki/Un%C3%BCberwachtes_Lernen" title="Unüberwachtes Lernen – German" lang="de" hreflang="de" data-title="Unüberwachtes Lernen" data-language-autonym="Deutsch" data-language-local-name="German" class="interlanguage-link-target"><span>Deutsch</span></a></li><li class="interlanguage-link interwiki-et mw-list-item"><a href="https://et.wikipedia.org/wiki/Juhendamata_masin%C3%B5pe" title="Juhendamata masinõpe – Estonian" lang="et" hreflang="et" data-title="Juhendamata masinõpe" data-language-autonym="Eesti" data-language-local-name="Estonian" class="interlanguage-link-target"><span>Eesti</span></a></li><li class="interlanguage-link interwiki-el mw-list-item"><a href="https://el.wikipedia.org/wiki/%CE%9C%CE%B7_%CE%B5%CF%80%CE%B9%CE%B2%CE%BB%CE%B5%CF%80%CF%8C%CE%BC%CE%B5%CE%BD%CE%B7_%CE%BC%CE%AC%CE%B8%CE%B7%CF%83%CE%B7" title="Μη επιβλεπόμενη μάθηση – Greek" lang="el" hreflang="el" data-title="Μη επιβλεπόμενη μάθηση" data-language-autonym="Ελληνικά" data-language-local-name="Greek" class="interlanguage-link-target"><span>Ελληνικά</span></a></li><li class="interlanguage-link interwiki-es mw-list-item"><a href="https://es.wikipedia.org/wiki/Aprendizaje_no_supervisado" title="Aprendizaje no supervisado – Spanish" lang="es" hreflang="es" data-title="Aprendizaje no supervisado" data-language-autonym="Español" data-language-local-name="Spanish" class="interlanguage-link-target"><span>Español</span></a></li><li class="interlanguage-link interwiki-fa mw-list-item"><a href="https://fa.wikipedia.org/wiki/%DB%8C%D8%A7%D8%AF%DA%AF%DB%8C%D8%B1%DB%8C_%D8%A8%DB%8C%E2%80%8C%D9%86%D8%B8%D8%A7%D8%B1%D8%AA" title="یادگیری بی‌نظارت – Persian" lang="fa" hreflang="fa" data-title="یادگیری بی‌نظارت" data-language-autonym="فارسی" data-language-local-name="Persian" class="interlanguage-link-target"><span>فارسی</span></a></li><li class="interlanguage-link interwiki-fr mw-list-item"><a href="https://fr.wikipedia.org/wiki/Apprentissage_non_supervis%C3%A9" title="Apprentissage non supervisé – French" lang="fr" hreflang="fr" data-title="Apprentissage non supervisé" data-language-autonym="Français" data-language-local-name="French" class="interlanguage-link-target"><span>Français</span></a></li><li class="interlanguage-link interwiki-ko mw-list-item"><a href="https://ko.wikipedia.org/wiki/%EB%B9%84%EC%A7%80%EB%8F%84_%ED%95%99%EC%8A%B5" title="비지도 학습 – Korean" lang="ko" hreflang="ko" data-title="비지도 학습" data-language-autonym="한국어" data-language-local-name="Korean" class="interlanguage-link-target"><span>한국어</span></a></li><li class="interlanguage-link interwiki-id mw-list-item"><a href="https://id.wikipedia.org/wiki/Pemelajaran_tak_terarah" title="Pemelajaran tak terarah – Indonesian" lang="id" hreflang="id" data-title="Pemelajaran tak terarah" data-language-autonym="Bahasa Indonesia" data-language-local-name="Indonesian" class="interlanguage-link-target"><span>Bahasa Indonesia</span></a></li><li class="interlanguage-link interwiki-zu mw-list-item"><a href="https://zu.wikipedia.org/wiki/Ukufunda_okungaqondisiwe" title="Ukufunda okungaqondisiwe – Zulu" lang="zu" hreflang="zu" data-title="Ukufunda okungaqondisiwe" data-language-autonym="IsiZulu" data-language-local-name="Zulu" class="interlanguage-link-target"><span>IsiZulu</span></a></li><li class="interlanguage-link interwiki-it mw-list-item"><a href="https://it.wikipedia.org/wiki/Apprendimento_non_supervisionato" title="Apprendimento non supervisionato – Italian" lang="it" hreflang="it" data-title="Apprendimento non supervisionato" data-language-autonym="Italiano" data-language-local-name="Italian" class="interlanguage-link-target"><span>Italiano</span></a></li><li class="interlanguage-link interwiki-he mw-list-item"><a href="https://he.wikipedia.org/wiki/%D7%9C%D7%9E%D7%99%D7%93%D7%94_%D7%91%D7%9C%D7%AA%D7%99_%D7%9E%D7%95%D7%A0%D7%97%D7%99%D7%AA" title="למידה בלתי מונחית – Hebrew" lang="he" hreflang="he" data-title="למידה בלתי מונחית" data-language-autonym="עברית" data-language-local-name="Hebrew" class="interlanguage-link-target"><span>עברית</span></a></li><li class="interlanguage-link interwiki-ja mw-list-item"><a href="https://ja.wikipedia.org/wiki/%E6%95%99%E5%B8%AB%E3%81%AA%E3%81%97%E5%AD%A6%E7%BF%92" title="教師なし学習 – Japanese" lang="ja" hreflang="ja" data-title="教師なし学習" data-language-autonym="日本語" data-language-local-name="Japanese" class="interlanguage-link-target"><span>日本語</span></a></li><li class="interlanguage-link interwiki-pl mw-list-item"><a href="https://pl.wikipedia.org/wiki/Uczenie_nienadzorowane" title="Uczenie nienadzorowane – Polish" lang="pl" hreflang="pl" data-title="Uczenie nienadzorowane" data-language-autonym="Polski" data-language-local-name="Polish" class="interlanguage-link-target"><span>Polski</span></a></li><li class="interlanguage-link interwiki-pt mw-list-item"><a href="https://pt.wikipedia.org/wiki/Aprendizagem_n%C3%A3o_supervisionada" title="Aprendizagem não supervisionada – Portuguese" lang="pt" hreflang="pt" data-title="Aprendizagem não supervisionada" data-language-autonym="Português" data-language-local-name="Portuguese" class="interlanguage-link-target"><span>Português</span></a></li><li class="interlanguage-link interwiki-qu mw-list-item"><a href="https://qu.wikipedia.org/wiki/Mana_qhawasqa_yachay" title="Mana qhawasqa yachay – Quechua" lang="qu" hreflang="qu" data-title="Mana qhawasqa yachay" data-language-autonym="Runa Simi" data-language-local-name="Quechua" class="interlanguage-link-target"><span>Runa Simi</span></a></li><li class="interlanguage-link interwiki-ru mw-list-item"><a href="https://ru.wikipedia.org/wiki/%D0%9E%D0%B1%D1%83%D1%87%D0%B5%D0%BD%D0%B8%D0%B5_%D0%B1%D0%B5%D0%B7_%D1%83%D1%87%D0%B8%D1%82%D0%B5%D0%BB%D1%8F" title="Обучение без учителя – Russian" lang="ru" hreflang="ru" data-title="Обучение без учителя" data-language-autonym="Русский" data-language-local-name="Russian" class="interlanguage-link-target"><span>Русский</span></a></li><li class="interlanguage-link interwiki-simple mw-list-item"><a href="https://simple.wikipedia.org/wiki/Unsupervised_learning" title="Unsupervised learning – Simple English" lang="en-simple" hreflang="en-simple" data-title="Unsupervised learning" data-language-autonym="Simple English" data-language-local-name="Simple English" class="interlanguage-link-target"><span>Simple English</span></a></li><li class="interlanguage-link interwiki-ckb mw-list-item"><a href="https://ckb.wikipedia.org/wiki/%D9%81%DB%8E%D8%B1%D8%A8%D9%88%D9%88%D9%86%DB%8C_%DA%86%D8%A7%D9%88%D8%AF%DB%8E%D8%B1%DB%8C%D9%86%DB%95%DA%A9%D8%B1%D8%A7%D9%88" title="فێربوونی چاودێرینەکراو – Central Kurdish" lang="ckb" hreflang="ckb" data-title="فێربوونی چاودێرینەکراو" data-language-autonym="کوردی" data-language-local-name="Central Kurdish" class="interlanguage-link-target"><span>کوردی</span></a></li><li class="interlanguage-link interwiki-sr mw-list-item"><a href="https://sr.wikipedia.org/wiki/U%C4%8Denje_bez_nadzora" title="Učenje bez nadzora – Serbian" lang="sr" hreflang="sr" data-title="Učenje bez nadzora" data-language-autonym="Српски / srpski" data-language-local-name="Serbian" class="interlanguage-link-target"><span>Српски / srpski</span></a></li><li class="interlanguage-link interwiki-fi mw-list-item"><a href="https://fi.wikipedia.org/wiki/Ohjaamaton_oppiminen" title="Ohjaamaton oppiminen – Finnish" lang="fi" hreflang="fi" data-title="Ohjaamaton oppiminen" data-language-autonym="Suomi" data-language-local-name="Finnish" class="interlanguage-link-target"><span>Suomi</span></a></li><li class="interlanguage-link interwiki-tl mw-list-item"><a href="https://tl.wikipedia.org/wiki/Hindi_pinapatnubayang_pagkatuto" title="Hindi pinapatnubayang pagkatuto – Tagalog" lang="tl" hreflang="tl" data-title="Hindi pinapatnubayang pagkatuto" data-language-autonym="Tagalog" data-language-local-name="Tagalog" class="interlanguage-link-target"><span>Tagalog</span></a></li><li class="interlanguage-link interwiki-th mw-list-item"><a href="https://th.wikipedia.org/wiki/%E0%B8%81%E0%B8%B2%E0%B8%A3%E0%B9%80%E0%B8%A3%E0%B8%B5%E0%B8%A2%E0%B8%99%E0%B8%A3%E0%B8%B9%E0%B9%89%E0%B9%81%E0%B8%9A%E0%B8%9A%E0%B9%84%E0%B8%A1%E0%B9%88%E0%B8%A1%E0%B8%B5%E0%B8%9C%E0%B8%B9%E0%B9%89%E0%B8%AA%E0%B8%AD%E0%B8%99" title="การเรียนรู้แบบไม่มีผู้สอน – Thai" lang="th" hreflang="th" data-title="การเรียนรู้แบบไม่มีผู้สอน" data-language-autonym="ไทย" data-language-local-name="Thai" class="interlanguage-link-target"><span>ไทย</span></a></li><li class="interlanguage-link interwiki-tr mw-list-item"><a href="https://tr.wikipedia.org/wiki/G%C3%B6zetimsiz_%C3%B6%C4%9Frenme" title="Gözetimsiz öğrenme – Turkish" lang="tr" hreflang="tr" data-title="Gözetimsiz öğrenme" data-language-autonym="Türkçe" data-language-local-name="Turkish" class="interlanguage-link-target"><span>Türkçe</span></a></li><li class="interlanguage-link interwiki-uk mw-list-item"><a href="https://uk.wikipedia.org/wiki/%D0%9D%D0%B5%D0%BA%D0%B5%D1%80%D0%BE%D0%B2%D0%B0%D0%BD%D0%B5_%D0%BD%D0%B0%D0%B2%D1%87%D0%B0%D0%BD%D0%BD%D1%8F" title="Некероване навчання – Ukrainian" lang="uk" hreflang="uk" data-title="Некероване навчання" data-language-autonym="Українська" data-language-local-name="Ukrainian" class="interlanguage-link-target"><span>Українська</span></a></li><li class="interlanguage-link interwiki-vi mw-list-item"><a href="https://vi.wikipedia.org/wiki/H%E1%BB%8Dc_kh%C3%B4ng_c%C3%B3_gi%C3%A1m_s%C3%A1t" title="Học không có giám sát – Vietnamese" lang="vi" hreflang="vi" data-title="Học không có giám sát" data-language-autonym="Tiếng Việt" data-language-local-name="Vietnamese" class="interlanguage-link-target"><span>Tiếng Việt</span></a></li><li class="interlanguage-link interwiki-zh-yue mw-list-item"><a href="https://zh-yue.wikipedia.org/wiki/%E9%9D%9E%E7%9B%A3%E7%9D%A3%E5%BC%8F%E5%AD%B8%E7%BF%92" title="非監督式學習 – Cantonese" lang="yue" hreflang="yue" data-title="非監督式學習" data-language-autonym="粵語" data-language-local-name="Cantonese" class="interlanguage-link-target"><span>粵語</span></a></li><li class="interlanguage-link interwiki-zh mw-list-item"><a href="https://zh.wikipedia.org/wiki/%E7%84%A1%E7%9B%A3%E7%9D%A3%E5%AD%B8%E7%BF%92" title="無監督學習 – Chinese" lang="zh" hreflang="zh" data-title="無監督學習" data-language-autonym="中文" data-language-local-name="Chinese" class="interlanguage-link-target"><span>中文</span></a></li> </ul> <div class="after-portlet after-portlet-lang"><span class="wb-langlinks-edit wb-langlinks-link"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q1152135#sitelinks-wikipedia" title="Edit interlanguage links" class="wbc-editpage">Edit links</a></span></div> </div> </div> </div> </header> <div class="vector-page-toolbar"> <div class="vector-page-toolbar-container"> <div id="left-navigation"> <nav aria-label="Namespaces"> <div id="p-associated-pages" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-associated-pages" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-nstab-main" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/Unsupervised_learning" title="View the content page [c]" accesskey="c"><span>Article</span></a></li><li id="ca-talk" class="vector-tab-noicon mw-list-item"><a href="/wiki/Talk:Unsupervised_learning" rel="discussion" title="Discuss improvements to the content page [t]" accesskey="t"><span>Talk</span></a></li> </ul> </div> </div> <div id="vector-variants-dropdown" class="vector-dropdown emptyPortlet" > <input type="checkbox" id="vector-variants-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-variants-dropdown" class="vector-dropdown-checkbox " aria-label="Change language variant" > <label id="vector-variants-dropdown-label" for="vector-variants-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">English</span> </label> <div class="vector-dropdown-content"> <div id="p-variants" class="vector-menu mw-portlet mw-portlet-variants emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> </div> </div> </nav> </div> <div id="right-navigation" class="vector-collapsible"> <nav aria-label="Views"> <div id="p-views" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-views" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-view" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/Unsupervised_learning"><span>Read</span></a></li><li id="ca-edit" class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-history" class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=Unsupervised_learning&amp;action=history" title="Past revisions of this page [h]" accesskey="h"><span>View history</span></a></li> </ul> </div> </div> </nav> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-dropdown" class="vector-dropdown vector-page-tools-dropdown" > <input type="checkbox" id="vector-page-tools-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-tools-dropdown" class="vector-dropdown-checkbox " aria-label="Tools" > <label id="vector-page-tools-dropdown-label" for="vector-page-tools-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">Tools</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-tools-unpinned-container" class="vector-unpinned-container"> <div id="vector-page-tools" class="vector-page-tools vector-pinnable-element"> <div class="vector-pinnable-header vector-page-tools-pinnable-header vector-pinnable-header-unpinned" data-feature-name="page-tools-pinned" data-pinnable-element-id="vector-page-tools" data-pinned-container-id="vector-page-tools-pinned-container" data-unpinned-container-id="vector-page-tools-unpinned-container" > <div class="vector-pinnable-header-label">Tools</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-page-tools.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-page-tools.unpin">hide</button> </div> <div id="p-cactions" class="vector-menu mw-portlet mw-portlet-cactions emptyPortlet vector-has-collapsible-items" title="More options" > <div class="vector-menu-heading"> Actions </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-more-view" class="selected vector-more-collapsible-item mw-list-item"><a href="/wiki/Unsupervised_learning"><span>Read</span></a></li><li id="ca-more-edit" class="vector-more-collapsible-item mw-list-item"><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-more-history" class="vector-more-collapsible-item mw-list-item"><a href="/w/index.php?title=Unsupervised_learning&amp;action=history"><span>View history</span></a></li> </ul> </div> </div> <div id="p-tb" class="vector-menu mw-portlet mw-portlet-tb" > <div class="vector-menu-heading"> General </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="t-whatlinkshere" class="mw-list-item"><a href="/wiki/Special:WhatLinksHere/Unsupervised_learning" title="List of all English Wikipedia pages containing links to this page [j]" accesskey="j"><span>What links here</span></a></li><li id="t-recentchangeslinked" class="mw-list-item"><a href="/wiki/Special:RecentChangesLinked/Unsupervised_learning" rel="nofollow" title="Recent changes in pages linked from this page [k]" accesskey="k"><span>Related changes</span></a></li><li id="t-upload" class="mw-list-item"><a href="//en.wikipedia.org/wiki/Wikipedia:File_Upload_Wizard" title="Upload files [u]" accesskey="u"><span>Upload file</span></a></li><li id="t-permalink" class="mw-list-item"><a href="/w/index.php?title=Unsupervised_learning&amp;oldid=1278046684" title="Permanent link to this revision of this page"><span>Permanent link</span></a></li><li id="t-info" class="mw-list-item"><a href="/w/index.php?title=Unsupervised_learning&amp;action=info" title="More information about this page"><span>Page information</span></a></li><li id="t-cite" class="mw-list-item"><a href="/w/index.php?title=Special:CiteThisPage&amp;page=Unsupervised_learning&amp;id=1278046684&amp;wpFormIdentifier=titleform" title="Information on how to cite this page"><span>Cite this page</span></a></li><li id="t-urlshortener" class="mw-list-item"><a href="/w/index.php?title=Special:UrlShortener&amp;url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FUnsupervised_learning"><span>Get shortened URL</span></a></li><li id="t-urlshortener-qrcode" class="mw-list-item"><a href="/w/index.php?title=Special:QrCode&amp;url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FUnsupervised_learning"><span>Download QR code</span></a></li> </ul> </div> </div> <div id="p-coll-print_export" class="vector-menu mw-portlet mw-portlet-coll-print_export" > <div class="vector-menu-heading"> Print/export </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="coll-download-as-rl" class="mw-list-item"><a href="/w/index.php?title=Special:DownloadAsPdf&amp;page=Unsupervised_learning&amp;action=show-download-screen" title="Download this page as a PDF file"><span>Download as PDF</span></a></li><li id="t-print" class="mw-list-item"><a href="/w/index.php?title=Unsupervised_learning&amp;printable=yes" title="Printable version of this page [p]" accesskey="p"><span>Printable version</span></a></li> </ul> </div> </div> <div id="p-wikibase-otherprojects" class="vector-menu mw-portlet mw-portlet-wikibase-otherprojects" > <div class="vector-menu-heading"> In other projects </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="t-wikibase" class="wb-otherproject-link wb-otherproject-wikibase-dataitem mw-list-item"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q1152135" title="Structured data on this page hosted by Wikidata [g]" accesskey="g"><span>Wikidata item</span></a></li> </ul> </div> </div> </div> </div> </div> </div> </nav> </div> </div> </div> <div class="vector-column-end"> <div class="vector-sticky-pinned-container"> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-pinned-container" class="vector-pinned-container"> </div> </nav> <nav class="vector-appearance-landmark" aria-label="Appearance"> <div id="vector-appearance-pinned-container" class="vector-pinned-container"> <div id="vector-appearance" class="vector-appearance vector-pinnable-element"> <div class="vector-pinnable-header vector-appearance-pinnable-header vector-pinnable-header-pinned" data-feature-name="appearance-pinned" data-pinnable-element-id="vector-appearance" data-pinned-container-id="vector-appearance-pinned-container" data-unpinned-container-id="vector-appearance-unpinned-container" > <div class="vector-pinnable-header-label">Appearance</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-appearance.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-appearance.unpin">hide</button> </div> </div> </div> </nav> </div> </div> <div id="bodyContent" class="vector-body" aria-labelledby="firstHeading" data-mw-ve-target-container> <div class="vector-body-before-content"> <div class="mw-indicators"> </div> <div id="siteSub" class="noprint">From Wikipedia, the free encyclopedia</div> </div> <div id="contentSub"><div id="mw-content-subtitle"></div></div> <div id="mw-content-text" class="mw-body-content"><div class="mw-content-ltr mw-parser-output" lang="en" dir="ltr"><div class="shortdescription nomobile noexcerpt noprint searchaux" style="display:none">Paradigm in machine learning that uses no classification labels</div> <style data-mw-deduplicate="TemplateStyles:r1244144826">.mw-parser-output .machine-learning-list-title{background-color:#ddddff}html.skin-theme-clientpref-night .mw-parser-output .machine-learning-list-title{background-color:#222}@media(prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .machine-learning-list-title{background-color:#222}}</style> <style data-mw-deduplicate="TemplateStyles:r1129693374">.mw-parser-output .hlist dl,.mw-parser-output .hlist ol,.mw-parser-output .hlist ul{margin:0;padding:0}.mw-parser-output .hlist dd,.mw-parser-output .hlist dt,.mw-parser-output .hlist li{margin:0;display:inline}.mw-parser-output .hlist.inline,.mw-parser-output .hlist.inline dl,.mw-parser-output .hlist.inline ol,.mw-parser-output .hlist.inline ul,.mw-parser-output .hlist dl dl,.mw-parser-output .hlist dl ol,.mw-parser-output .hlist dl ul,.mw-parser-output .hlist ol dl,.mw-parser-output .hlist ol ol,.mw-parser-output .hlist ol ul,.mw-parser-output .hlist ul dl,.mw-parser-output .hlist ul ol,.mw-parser-output .hlist ul ul{display:inline}.mw-parser-output .hlist .mw-empty-li{display:none}.mw-parser-output .hlist dt::after{content:": "}.mw-parser-output .hlist dd::after,.mw-parser-output .hlist li::after{content:" · ";font-weight:bold}.mw-parser-output .hlist dd:last-child::after,.mw-parser-output .hlist dt:last-child::after,.mw-parser-output .hlist li:last-child::after{content:none}.mw-parser-output .hlist dd dd:first-child::before,.mw-parser-output .hlist dd dt:first-child::before,.mw-parser-output .hlist dd li:first-child::before,.mw-parser-output .hlist dt dd:first-child::before,.mw-parser-output .hlist dt dt:first-child::before,.mw-parser-output .hlist dt li:first-child::before,.mw-parser-output .hlist li dd:first-child::before,.mw-parser-output .hlist li dt:first-child::before,.mw-parser-output .hlist li li:first-child::before{content:" (";font-weight:normal}.mw-parser-output .hlist dd dd:last-child::after,.mw-parser-output .hlist dd dt:last-child::after,.mw-parser-output .hlist dd li:last-child::after,.mw-parser-output .hlist dt dd:last-child::after,.mw-parser-output .hlist dt dt:last-child::after,.mw-parser-output .hlist dt li:last-child::after,.mw-parser-output .hlist li dd:last-child::after,.mw-parser-output .hlist li dt:last-child::after,.mw-parser-output .hlist li li:last-child::after{content:")";font-weight:normal}.mw-parser-output .hlist ol{counter-reset:listitem}.mw-parser-output .hlist ol>li{counter-increment:listitem}.mw-parser-output .hlist ol>li::before{content:" "counter(listitem)"\a0 "}.mw-parser-output .hlist dd ol>li:first-child::before,.mw-parser-output .hlist dt ol>li:first-child::before,.mw-parser-output .hlist li ol>li:first-child::before{content:" ("counter(listitem)"\a0 "}</style><style data-mw-deduplicate="TemplateStyles:r1246091330">.mw-parser-output .sidebar{width:22em;float:right;clear:right;margin:0.5em 0 1em 1em;background:var(--background-color-neutral-subtle,#f8f9fa);border:1px solid var(--border-color-base,#a2a9b1);padding:0.2em;text-align:center;line-height:1.4em;font-size:88%;border-collapse:collapse;display:table}body.skin-minerva .mw-parser-output .sidebar{display:table!important;float:right!important;margin:0.5em 0 1em 1em!important}.mw-parser-output .sidebar-subgroup{width:100%;margin:0;border-spacing:0}.mw-parser-output .sidebar-left{float:left;clear:left;margin:0.5em 1em 1em 0}.mw-parser-output .sidebar-none{float:none;clear:both;margin:0.5em 1em 1em 0}.mw-parser-output .sidebar-outer-title{padding:0 0.4em 0.2em;font-size:125%;line-height:1.2em;font-weight:bold}.mw-parser-output .sidebar-top-image{padding:0.4em}.mw-parser-output .sidebar-top-caption,.mw-parser-output .sidebar-pretitle-with-top-image,.mw-parser-output .sidebar-caption{padding:0.2em 0.4em 0;line-height:1.2em}.mw-parser-output .sidebar-pretitle{padding:0.4em 0.4em 0;line-height:1.2em}.mw-parser-output .sidebar-title,.mw-parser-output .sidebar-title-with-pretitle{padding:0.2em 0.8em;font-size:145%;line-height:1.2em}.mw-parser-output .sidebar-title-with-pretitle{padding:0.1em 0.4em}.mw-parser-output .sidebar-image{padding:0.2em 0.4em 0.4em}.mw-parser-output .sidebar-heading{padding:0.1em 0.4em}.mw-parser-output .sidebar-content{padding:0 0.5em 0.4em}.mw-parser-output .sidebar-content-with-subgroup{padding:0.1em 0.4em 0.2em}.mw-parser-output .sidebar-above,.mw-parser-output .sidebar-below{padding:0.3em 0.8em;font-weight:bold}.mw-parser-output .sidebar-collapse .sidebar-above,.mw-parser-output .sidebar-collapse .sidebar-below{border-top:1px solid #aaa;border-bottom:1px solid #aaa}.mw-parser-output .sidebar-navbar{text-align:right;font-size:115%;padding:0 0.4em 0.4em}.mw-parser-output .sidebar-list-title{padding:0 0.4em;text-align:left;font-weight:bold;line-height:1.6em;font-size:105%}.mw-parser-output .sidebar-list-title-c{padding:0 0.4em;text-align:center;margin:0 3.3em}@media(max-width:640px){body.mediawiki .mw-parser-output .sidebar{width:100%!important;clear:both;float:none!important;margin-left:0!important;margin-right:0!important}}body.skin--responsive .mw-parser-output .sidebar a>img{max-width:none!important}@media screen{html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-list-title,html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle{background:transparent!important}html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle a{color:var(--color-progressive)!important}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-list-title,html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle{background:transparent!important}html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle a{color:var(--color-progressive)!important}}@media print{body.ns-0 .mw-parser-output .sidebar{display:none!important}}</style><style data-mw-deduplicate="TemplateStyles:r886047488">.mw-parser-output .nobold{font-weight:normal}</style><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r886047488" /><table class="sidebar sidebar-collapse nomobile nowraplinks"><tbody><tr><td class="sidebar-pretitle">Part of a series on</td></tr><tr><th class="sidebar-title-with-pretitle"><a href="/wiki/Machine_learning" title="Machine learning">Machine learning</a><br />and <a href="/wiki/Data_mining" title="Data mining">data mining</a></th></tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible machine-learning-list-title"><div class="sidebar-list-title" style="border-top:1px solid #aaa; text-align:center;;color: var(--color-base)">Paradigms</div><div class="sidebar-list-content mw-collapsible-content hlist"> <ul><li><a href="/wiki/Supervised_learning" title="Supervised learning">Supervised learning</a></li> <li><a class="mw-selflink selflink">Unsupervised learning</a></li> <li><a href="/wiki/Semi-supervised_learning" class="mw-redirect" title="Semi-supervised learning">Semi-supervised learning</a></li> <li><a href="/wiki/Self-supervised_learning" title="Self-supervised learning">Self-supervised learning</a></li> <li><a href="/wiki/Reinforcement_learning" title="Reinforcement learning">Reinforcement learning</a></li> <li><a href="/wiki/Meta-learning_(computer_science)" title="Meta-learning (computer science)">Meta-learning</a></li> <li><a href="/wiki/Online_machine_learning" title="Online machine learning">Online learning</a></li> <li><a href="/wiki/Batch_learning" class="mw-redirect" title="Batch learning">Batch learning</a></li> <li><a href="/wiki/Curriculum_learning" title="Curriculum learning">Curriculum learning</a></li> <li><a href="/wiki/Rule-based_machine_learning" title="Rule-based machine learning">Rule-based learning</a></li> <li><a href="/wiki/Neuro-symbolic_AI" title="Neuro-symbolic AI">Neuro-symbolic AI</a></li> <li><a href="/wiki/Neuromorphic_engineering" class="mw-redirect" title="Neuromorphic engineering">Neuromorphic engineering</a></li> <li><a href="/wiki/Quantum_machine_learning" title="Quantum machine learning">Quantum machine learning</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed machine-learning-list-title"><div class="sidebar-list-title" style="border-top:1px solid #aaa; text-align:center;;color: var(--color-base)">Problems</div><div class="sidebar-list-content mw-collapsible-content hlist"> <ul><li><a href="/wiki/Statistical_classification" title="Statistical classification">Classification</a></li> <li><a href="/wiki/Generative_model" title="Generative model">Generative modeling</a></li> <li><a href="/wiki/Regression_analysis" title="Regression analysis">Regression</a></li> <li><a href="/wiki/Cluster_analysis" title="Cluster analysis">Clustering</a></li> <li><a href="/wiki/Dimensionality_reduction" title="Dimensionality reduction">Dimensionality reduction</a></li> <li><a href="/wiki/Density_estimation" title="Density estimation">Density estimation</a></li> <li><a href="/wiki/Anomaly_detection" title="Anomaly detection">Anomaly detection</a></li> <li><a href="/wiki/Data_cleaning" class="mw-redirect" title="Data cleaning">Data cleaning</a></li> <li><a href="/wiki/Automated_machine_learning" title="Automated machine learning">AutoML</a></li> <li><a href="/wiki/Association_rule_learning" title="Association rule learning">Association rules</a></li> <li><a href="/wiki/Semantic_analysis_(machine_learning)" title="Semantic analysis (machine learning)">Semantic analysis</a></li> <li><a href="/wiki/Structured_prediction" title="Structured prediction">Structured prediction</a></li> <li><a href="/wiki/Feature_engineering" title="Feature engineering">Feature engineering</a></li> <li><a href="/wiki/Feature_learning" title="Feature learning">Feature learning</a></li> <li><a href="/wiki/Learning_to_rank" title="Learning to rank">Learning to rank</a></li> <li><a href="/wiki/Grammar_induction" title="Grammar induction">Grammar induction</a></li> <li><a href="/wiki/Ontology_learning" title="Ontology learning">Ontology learning</a></li> <li><a href="/wiki/Multimodal_learning" title="Multimodal learning">Multimodal learning</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed machine-learning-list-title"><div class="sidebar-list-title" style="border-top:1px solid #aaa; text-align:center;;color: var(--color-base)"><div style="display: inline-block; line-height: 1.2em; padding: .1em 0;"><a href="/wiki/Supervised_learning" title="Supervised learning">Supervised learning</a><br /><span class="nobold"><span style="font-size:85%;">(<b><a href="/wiki/Statistical_classification" title="Statistical classification">classification</a></b>&#160;&#8226;&#32;<b><a href="/wiki/Regression_analysis" title="Regression analysis">regression</a></b>)</span></span> </div></div><div class="sidebar-list-content mw-collapsible-content hlist"> <ul><li><a href="/wiki/Apprenticeship_learning" title="Apprenticeship learning">Apprenticeship learning</a></li> <li><a href="/wiki/Decision_tree_learning" title="Decision tree learning">Decision trees</a></li> <li><a href="/wiki/Ensemble_learning" title="Ensemble learning">Ensembles</a> <ul><li><a href="/wiki/Bootstrap_aggregating" title="Bootstrap aggregating">Bagging</a></li> <li><a href="/wiki/Boosting_(machine_learning)" title="Boosting (machine learning)">Boosting</a></li> <li><a href="/wiki/Random_forest" title="Random forest">Random forest</a></li></ul></li> <li><a href="/wiki/K-nearest_neighbors_algorithm" title="K-nearest neighbors algorithm"><i>k</i>-NN</a></li> <li><a href="/wiki/Linear_regression" title="Linear regression">Linear regression</a></li> <li><a href="/wiki/Naive_Bayes_classifier" title="Naive Bayes classifier">Naive Bayes</a></li> <li><a href="/wiki/Artificial_neural_network" class="mw-redirect" title="Artificial neural network">Artificial neural networks</a></li> <li><a href="/wiki/Logistic_regression" title="Logistic regression">Logistic regression</a></li> <li><a href="/wiki/Perceptron" title="Perceptron">Perceptron</a></li> <li><a href="/wiki/Relevance_vector_machine" title="Relevance vector machine">Relevance vector machine (RVM)</a></li> <li><a href="/wiki/Support_vector_machine" title="Support vector machine">Support vector machine (SVM)</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed machine-learning-list-title"><div class="sidebar-list-title" style="border-top:1px solid #aaa; text-align:center;;color: var(--color-base)"><a href="/wiki/Cluster_analysis" title="Cluster analysis">Clustering</a></div><div class="sidebar-list-content mw-collapsible-content hlist"> <ul><li><a href="/wiki/BIRCH" title="BIRCH">BIRCH</a></li> <li><a href="/wiki/CURE_algorithm" title="CURE algorithm">CURE</a></li> <li><a href="/wiki/Hierarchical_clustering" title="Hierarchical clustering">Hierarchical</a></li> <li><a href="/wiki/K-means_clustering" title="K-means clustering"><i>k</i>-means</a></li> <li><a href="/wiki/Fuzzy_clustering" title="Fuzzy clustering">Fuzzy</a></li> <li><a href="/wiki/Expectation%E2%80%93maximization_algorithm" title="Expectation–maximization algorithm">Expectation–maximization (EM)</a></li> <li><br /><a href="/wiki/DBSCAN" title="DBSCAN">DBSCAN</a></li> <li><a href="/wiki/OPTICS_algorithm" title="OPTICS algorithm">OPTICS</a></li> <li><a href="/wiki/Mean_shift" title="Mean shift">Mean shift</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed machine-learning-list-title"><div class="sidebar-list-title" style="border-top:1px solid #aaa; text-align:center;;color: var(--color-base)"><a href="/wiki/Dimensionality_reduction" title="Dimensionality reduction">Dimensionality reduction</a></div><div class="sidebar-list-content mw-collapsible-content hlist"> <ul><li><a href="/wiki/Factor_analysis" title="Factor analysis">Factor analysis</a></li> <li><a href="/wiki/Canonical_correlation" title="Canonical correlation">CCA</a></li> <li><a href="/wiki/Independent_component_analysis" title="Independent component analysis">ICA</a></li> <li><a href="/wiki/Linear_discriminant_analysis" title="Linear discriminant analysis">LDA</a></li> <li><a href="/wiki/Non-negative_matrix_factorization" title="Non-negative matrix factorization">NMF</a></li> <li><a href="/wiki/Principal_component_analysis" title="Principal component analysis">PCA</a></li> <li><a href="/wiki/Proper_generalized_decomposition" title="Proper generalized decomposition">PGD</a></li> <li><a href="/wiki/T-distributed_stochastic_neighbor_embedding" title="T-distributed stochastic neighbor embedding">t-SNE</a></li> <li><a href="/wiki/Sparse_dictionary_learning" title="Sparse dictionary learning">SDL</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed machine-learning-list-title"><div class="sidebar-list-title" style="border-top:1px solid #aaa; text-align:center;;color: var(--color-base)"><a href="/wiki/Structured_prediction" title="Structured prediction">Structured prediction</a></div><div class="sidebar-list-content mw-collapsible-content hlist"> <ul><li><a href="/wiki/Graphical_model" title="Graphical model">Graphical models</a> <ul><li><a href="/wiki/Bayesian_network" title="Bayesian network">Bayes net</a></li> <li><a href="/wiki/Conditional_random_field" title="Conditional random field">Conditional random field</a></li> <li><a href="/wiki/Hidden_Markov_model" title="Hidden Markov model">Hidden Markov</a></li></ul></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed machine-learning-list-title"><div class="sidebar-list-title" style="border-top:1px solid #aaa; text-align:center;;color: var(--color-base)"><a href="/wiki/Anomaly_detection" title="Anomaly detection">Anomaly detection</a></div><div class="sidebar-list-content mw-collapsible-content hlist"> <ul><li><a href="/wiki/Random_sample_consensus" title="Random sample consensus">RANSAC</a></li> <li><a href="/wiki/K-nearest_neighbors_algorithm" title="K-nearest neighbors algorithm"><i>k</i>-NN</a></li> <li><a href="/wiki/Local_outlier_factor" title="Local outlier factor">Local outlier factor</a></li> <li><a href="/wiki/Isolation_forest" title="Isolation forest">Isolation forest</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed machine-learning-list-title"><div class="sidebar-list-title" style="border-top:1px solid #aaa; text-align:center;;color: var(--color-base)"><a href="/wiki/Artificial_neural_network" class="mw-redirect" title="Artificial neural network">Artificial neural network</a></div><div class="sidebar-list-content mw-collapsible-content hlist"> <ul><li><a href="/wiki/Autoencoder" title="Autoencoder">Autoencoder</a></li> <li><a href="/wiki/Deep_learning" title="Deep learning">Deep learning</a></li> <li><a href="/wiki/Feedforward_neural_network" title="Feedforward neural network">Feedforward neural network</a></li> <li><a href="/wiki/Recurrent_neural_network" title="Recurrent neural network">Recurrent neural network</a> <ul><li><a href="/wiki/Long_short-term_memory" title="Long short-term memory">LSTM</a></li> <li><a href="/wiki/Gated_recurrent_unit" title="Gated recurrent unit">GRU</a></li> <li><a href="/wiki/Echo_state_network" title="Echo state network">ESN</a></li> <li><a href="/wiki/Reservoir_computing" title="Reservoir computing">reservoir computing</a></li></ul></li> <li><a href="/wiki/Boltzmann_machine" title="Boltzmann machine">Boltzmann machine</a> <ul><li><a href="/wiki/Restricted_Boltzmann_machine" title="Restricted Boltzmann machine">Restricted</a></li></ul></li> <li><a href="/wiki/Generative_adversarial_network" title="Generative adversarial network">GAN</a></li> <li><a href="/wiki/Diffusion_model" title="Diffusion model">Diffusion model</a></li> <li><a href="/wiki/Self-organizing_map" title="Self-organizing map">SOM</a></li> <li><a href="/wiki/Convolutional_neural_network" title="Convolutional neural network">Convolutional neural network</a> <ul><li><a href="/wiki/U-Net" title="U-Net">U-Net</a></li> <li><a href="/wiki/LeNet" title="LeNet">LeNet</a></li> <li><a href="/wiki/AlexNet" title="AlexNet">AlexNet</a></li> <li><a href="/wiki/DeepDream" title="DeepDream">DeepDream</a></li></ul></li> <li><a href="/wiki/Neural_radiance_field" title="Neural radiance field">Neural radiance field</a></li> <li><a href="/wiki/Transformer_(machine_learning_model)" class="mw-redirect" title="Transformer (machine learning model)">Transformer</a> <ul><li><a href="/wiki/Vision_transformer" title="Vision transformer">Vision</a></li></ul></li> <li><a href="/wiki/Mamba_(deep_learning_architecture)" title="Mamba (deep learning architecture)">Mamba</a></li> <li><a href="/wiki/Spiking_neural_network" title="Spiking neural network">Spiking neural network</a></li> <li><a href="/wiki/Memtransistor" title="Memtransistor">Memtransistor</a></li> <li><a href="/wiki/Electrochemical_RAM" title="Electrochemical RAM">Electrochemical RAM</a> (ECRAM)</li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed machine-learning-list-title"><div class="sidebar-list-title" style="border-top:1px solid #aaa; text-align:center;;color: var(--color-base)"><a href="/wiki/Reinforcement_learning" title="Reinforcement learning">Reinforcement learning</a></div><div class="sidebar-list-content mw-collapsible-content hlist"> <ul><li><a href="/wiki/Q-learning" title="Q-learning">Q-learning</a></li> <li><a href="/wiki/State%E2%80%93action%E2%80%93reward%E2%80%93state%E2%80%93action" title="State–action–reward–state–action">SARSA</a></li> <li><a href="/wiki/Temporal_difference_learning" title="Temporal difference learning">Temporal difference (TD)</a></li> <li><a href="/wiki/Multi-agent_reinforcement_learning" title="Multi-agent reinforcement learning">Multi-agent</a> <ul><li><a href="/wiki/Self-play_(reinforcement_learning_technique)" class="mw-redirect" title="Self-play (reinforcement learning technique)">Self-play</a></li></ul></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed machine-learning-list-title"><div class="sidebar-list-title" style="border-top:1px solid #aaa; text-align:center;;color: var(--color-base)">Learning with humans</div><div class="sidebar-list-content mw-collapsible-content hlist"> <ul><li><a href="/wiki/Active_learning_(machine_learning)" title="Active learning (machine learning)">Active learning</a></li> <li><a href="/wiki/Crowdsourcing" title="Crowdsourcing">Crowdsourcing</a></li> <li><a href="/wiki/Human-in-the-loop" title="Human-in-the-loop">Human-in-the-loop</a></li> <li><a href="/wiki/Reinforcement_learning_from_human_feedback" title="Reinforcement learning from human feedback">RLHF</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed machine-learning-list-title"><div class="sidebar-list-title" style="border-top:1px solid #aaa; text-align:center;;color: var(--color-base)">Model diagnostics</div><div class="sidebar-list-content mw-collapsible-content hlist"> <ul><li><a href="/wiki/Coefficient_of_determination" title="Coefficient of determination">Coefficient of determination</a></li> <li><a href="/wiki/Confusion_matrix" title="Confusion matrix">Confusion matrix</a></li> <li><a href="/wiki/Learning_curve_(machine_learning)" title="Learning curve (machine learning)">Learning curve</a></li> <li><a href="/wiki/Receiver_operating_characteristic" title="Receiver operating characteristic">ROC curve</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed machine-learning-list-title"><div class="sidebar-list-title" style="border-top:1px solid #aaa; text-align:center;;color: var(--color-base)">Mathematical foundations</div><div class="sidebar-list-content mw-collapsible-content hlist"> <ul><li><a href="/wiki/Kernel_machines" class="mw-redirect" title="Kernel machines">Kernel machines</a></li> <li><a href="/wiki/Bias%E2%80%93variance_tradeoff" title="Bias–variance tradeoff">Bias–variance tradeoff</a></li> <li><a href="/wiki/Computational_learning_theory" title="Computational learning theory">Computational learning theory</a></li> <li><a href="/wiki/Empirical_risk_minimization" title="Empirical risk minimization">Empirical risk minimization</a></li> <li><a href="/wiki/Occam_learning" title="Occam learning">Occam learning</a></li> <li><a href="/wiki/Probably_approximately_correct_learning" title="Probably approximately correct learning">PAC learning</a></li> <li><a href="/wiki/Statistical_learning_theory" title="Statistical learning theory">Statistical learning</a></li> <li><a href="/wiki/Vapnik%E2%80%93Chervonenkis_theory" title="Vapnik–Chervonenkis theory">VC theory</a></li> <li><a href="/wiki/Topological_deep_learning" title="Topological deep learning">Topological deep learning</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed machine-learning-list-title"><div class="sidebar-list-title" style="border-top:1px solid #aaa; text-align:center;;color: var(--color-base)">Journals and conferences</div><div class="sidebar-list-content mw-collapsible-content hlist"> <ul><li><a href="/wiki/ECML_PKDD" title="ECML PKDD">ECML PKDD</a></li> <li><a href="/wiki/Conference_on_Neural_Information_Processing_Systems" title="Conference on Neural Information Processing Systems">NeurIPS</a></li> <li><a href="/wiki/International_Conference_on_Machine_Learning" title="International Conference on Machine Learning">ICML</a></li> <li><a href="/wiki/International_Conference_on_Learning_Representations" title="International Conference on Learning Representations">ICLR</a></li> <li><a href="/wiki/International_Joint_Conference_on_Artificial_Intelligence" title="International Joint Conference on Artificial Intelligence">IJCAI</a></li> <li><a href="/wiki/Machine_Learning_(journal)" title="Machine Learning (journal)">ML</a></li> <li><a href="/wiki/Journal_of_Machine_Learning_Research" title="Journal of Machine Learning Research">JMLR</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed machine-learning-list-title"><div class="sidebar-list-title" style="border-top:1px solid #aaa; text-align:center;;color: var(--color-base)">Related articles</div><div class="sidebar-list-content mw-collapsible-content hlist"> <ul><li><a href="/wiki/Glossary_of_artificial_intelligence" title="Glossary of artificial intelligence">Glossary of artificial intelligence</a></li> <li><a href="/wiki/List_of_datasets_for_machine-learning_research" title="List of datasets for machine-learning research">List of datasets for machine-learning research</a> <ul><li><a href="/wiki/List_of_datasets_in_computer_vision_and_image_processing" title="List of datasets in computer vision and image processing">List of datasets in computer vision and image processing</a></li></ul></li> <li><a href="/wiki/Outline_of_machine_learning" title="Outline of machine learning">Outline of machine learning</a></li></ul></div></div></td> </tr><tr><td class="sidebar-navbar"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374" /><style data-mw-deduplicate="TemplateStyles:r1239400231">.mw-parser-output .navbar{display:inline;font-size:88%;font-weight:normal}.mw-parser-output .navbar-collapse{float:left;text-align:left}.mw-parser-output .navbar-boxtext{word-spacing:0}.mw-parser-output .navbar ul{display:inline-block;white-space:nowrap;line-height:inherit}.mw-parser-output .navbar-brackets::before{margin-right:-0.125em;content:"[ "}.mw-parser-output .navbar-brackets::after{margin-left:-0.125em;content:" ]"}.mw-parser-output .navbar li{word-spacing:-0.125em}.mw-parser-output .navbar a>span,.mw-parser-output .navbar a>abbr{text-decoration:inherit}.mw-parser-output .navbar-mini abbr{font-variant:small-caps;border-bottom:none;text-decoration:none;cursor:inherit}.mw-parser-output .navbar-ct-full{font-size:114%;margin:0 7em}.mw-parser-output .navbar-ct-mini{font-size:114%;margin:0 4em}html.skin-theme-clientpref-night .mw-parser-output .navbar li a abbr{color:var(--color-base)!important}@media(prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .navbar li a abbr{color:var(--color-base)!important}}@media print{.mw-parser-output .navbar{display:none!important}}</style><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Machine_learning" title="Template:Machine learning"><abbr title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/wiki/Template_talk:Machine_learning" title="Template talk:Machine learning"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Machine_learning" title="Special:EditPage/Template:Machine learning"><abbr title="Edit this template">e</abbr></a></li></ul></div></td></tr></tbody></table> <p><b>Unsupervised learning</b> is a framework in <a href="/wiki/Machine_learning" title="Machine learning">machine learning</a> where, in contrast to <a href="/wiki/Supervised_learning" title="Supervised learning">supervised learning</a>, algorithms learn patterns exclusively from unlabeled data.<sup id="cite_ref-WeiWu_1-0" class="reference"><a href="#cite_note-WeiWu-1"><span class="cite-bracket">&#91;</span>1<span class="cite-bracket">&#93;</span></a></sup> Other frameworks in the spectrum of supervisions include <a href="/wiki/Weak_supervision" title="Weak supervision">weak- or semi-supervision</a>, where a small portion of the data is tagged, and <a href="/wiki/Self-supervised_learning" title="Self-supervised learning">self-supervision</a>. Some researchers consider self-supervised learning a form of unsupervised learning.<sup id="cite_ref-2" class="reference"><a href="#cite_note-2"><span class="cite-bracket">&#91;</span>2<span class="cite-bracket">&#93;</span></a></sup> </p><p>Conceptually, unsupervised learning divides into the aspects of data, training, algorithm, and downstream applications. Typically, the dataset is harvested cheaply "in the wild", such as massive <a href="/wiki/Text_corpus" title="Text corpus">text corpus</a> obtained by <a href="/wiki/Web_crawler" title="Web crawler">web crawling</a>, with only minor filtering (such as <a href="/wiki/Common_Crawl" title="Common Crawl">Common Crawl</a>). This compares favorably to supervised learning, where the dataset (such as the <a href="/wiki/ImageNet" title="ImageNet">ImageNet1000</a>) is typically constructed manually, which is much more expensive. </p><p>There were algorithms designed specifically for unsupervised learning, such as <a href="/wiki/Cluster_analysis" title="Cluster analysis">clustering algorithms</a> like <a href="/wiki/K-means_clustering" title="K-means clustering">k-means</a>, <a href="/wiki/Dimensionality_reduction" title="Dimensionality reduction">dimensionality reduction</a> techniques like <a href="/wiki/Principal_component_analysis" title="Principal component analysis">principal component analysis (PCA)</a>, <a href="/wiki/Boltzmann_machine" title="Boltzmann machine">Boltzmann machine learning</a>, and <a href="/wiki/Autoencoder" title="Autoencoder">autoencoders</a>. After the rise of deep learning, most large-scale unsupervised learning have been done by training general-purpose neural network architectures by <a href="/wiki/Gradient_descent" title="Gradient descent">gradient descent</a>, adapted to performing unsupervised learning by designing an appropriate training procedure. </p><p>Sometimes a trained model can be used as-is, but more often they are modified for downstream applications. For example, the generative pretraining method trains a model to generate a textual dataset, before finetuning it for other applications, such as text classification.<sup id="cite_ref-gpt1paper_3-0" class="reference"><a href="#cite_note-gpt1paper-3"><span class="cite-bracket">&#91;</span>3<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-4" class="reference"><a href="#cite_note-4"><span class="cite-bracket">&#91;</span>4<span class="cite-bracket">&#93;</span></a></sup> As another example, autoencoders are trained to <a href="/wiki/Feature_learning" title="Feature learning">good features</a>, which can then be used as a module for other models, such as in a <a href="/wiki/Latent_diffusion_model" title="Latent diffusion model">latent diffusion model</a>. </p> <meta property="mw:PageProp/toc" /> <div class="mw-heading mw-heading2"><h2 id="Tasks">Tasks</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=1" title="Edit section: Tasks"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <figure class="mw-halign-left" typeof="mw:File/Thumb"><a href="/wiki/File:Task-guidance.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/9/90/Task-guidance.png/300px-Task-guidance.png" decoding="async" width="300" height="225" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/90/Task-guidance.png/450px-Task-guidance.png 1.5x, //upload.wikimedia.org/wikipedia/commons/9/90/Task-guidance.png 2x" data-file-width="565" data-file-height="424" /></a><figcaption>Tendency for a task to employ supervised vs. unsupervised methods. Task names straddling circle boundaries is intentional. It shows that the classical division of imaginative tasks (left) employing unsupervised methods is blurred in today's learning schemes.</figcaption></figure><p>Tasks are often categorized as <a href="/wiki/Discriminative_model" title="Discriminative model">discriminative</a> (recognition) or <a href="/wiki/Generative_model" title="Generative model">generative</a> (imagination). Often but not always, discriminative tasks use supervised methods and generative tasks use unsupervised (see <a href="/wiki/Venn_diagram" title="Venn diagram">Venn diagram</a>); however, the separation is very hazy. For example, object recognition favors supervised learning but unsupervised learning can also cluster objects into groups. Furthermore, as progress marches onward, some tasks employ both methods, and some tasks swing from one to another. For example, image recognition started off as heavily supervised, but became hybrid by employing unsupervised pre-training, and then moved towards supervision again with the advent of <a href="/wiki/Dilution_(neural_networks)" title="Dilution (neural networks)">dropout</a>, <a href="/wiki/Rectifier_(neural_networks)" title="Rectifier (neural networks)">ReLU</a>, and <a href="/wiki/Learning_rate" title="Learning rate">adaptive learning rates</a>. </p><p>A typical generative task is as follows. At each step, a datapoint is sampled from the dataset, and part of the data is removed, and the model must infer the removed part. This is particularly clear for the <a href="/wiki/Autoencoder" title="Autoencoder">denoising autoencoders</a> and <a href="/wiki/BERT_(language_model)" title="BERT (language model)">BERT</a>. </p> <div class="mw-heading mw-heading2"><h2 id="Neural_network_architectures">Neural network architectures</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=2" title="Edit section: Neural network architectures"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <div class="mw-heading mw-heading3"><h3 id="Training">Training</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=3" title="Edit section: Training"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>During the learning phase, an unsupervised network tries to mimic the data it's given and uses the error in its mimicked output to correct itself (i.e. correct its weights and biases). Sometimes the error is expressed as a low probability that the erroneous output occurs, or it might be expressed as an unstable high energy state in the network. </p><p>In contrast to supervised methods' dominant use of <a href="/wiki/Backpropagation" title="Backpropagation">backpropagation</a>, unsupervised learning also employs other methods including: Hopfield learning rule, Boltzmann learning rule, <a href="/wiki/Contrastive_Divergence" class="mw-redirect" title="Contrastive Divergence">Contrastive Divergence</a>, <a href="/wiki/Wake-sleep_algorithm" title="Wake-sleep algorithm">Wake Sleep</a>, <a href="/wiki/Variational_Inference" class="mw-redirect" title="Variational Inference">Variational Inference</a>, <a href="/wiki/Maximum_Likelihood" class="mw-redirect" title="Maximum Likelihood">Maximum Likelihood</a>, <a href="/wiki/Maximum_A_Posteriori" class="mw-redirect" title="Maximum A Posteriori">Maximum A Posteriori</a>, <a href="/wiki/Gibbs_Sampling" class="mw-redirect" title="Gibbs Sampling">Gibbs Sampling</a>, and backpropagating reconstruction errors or hidden state reparameterizations. See the table below for more details. </p> <div class="mw-heading mw-heading3"><h3 id="Energy">Energy</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=4" title="Edit section: Energy"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>An energy function is a macroscopic measure of a network's activation state. In Boltzmann machines, it plays the role of the Cost function. This analogy with physics is inspired by Ludwig Boltzmann's analysis of a gas' macroscopic energy from the microscopic probabilities of particle motion <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle p\propto e^{-E/kT}}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mi>p</mi> <mo>&#x221d;<!-- ∝ --></mo> <msup> <mi>e</mi> <mrow class="MJX-TeXAtom-ORD"> <mo>&#x2212;<!-- − --></mo> <mi>E</mi> <mrow class="MJX-TeXAtom-ORD"> <mo>/</mo> </mrow> <mi>k</mi> <mi>T</mi> </mrow> </msup> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle p\propto e^{-E/kT}}</annotation> </semantics> </math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/b9696fac92eaa5fede297c17fa8bb086945efd8e" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.671ex; margin-left: -0.089ex; width:11.043ex; height:3.176ex;" alt="{\displaystyle p\propto e^{-E/kT}}" /></span>, where k is the Boltzmann constant and T is temperature. In the <a href="/wiki/Restricted_Boltzmann_machine" title="Restricted Boltzmann machine">RBM</a> network the relation is <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle p=e^{-E}/Z}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mi>p</mi> <mo>=</mo> <msup> <mi>e</mi> <mrow class="MJX-TeXAtom-ORD"> <mo>&#x2212;<!-- − --></mo> <mi>E</mi> </mrow> </msup> <mrow class="MJX-TeXAtom-ORD"> <mo>/</mo> </mrow> <mi>Z</mi> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle p=e^{-E}/Z}</annotation> </semantics> </math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/7c5acd1a54ea1654f16ad3b413693ab78c72319a" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.838ex; margin-left: -0.089ex; width:11.05ex; height:3.176ex;" alt="{\displaystyle p=e^{-E}/Z}" /></span>,<sup id="cite_ref-Hinton2010_5-0" class="reference"><a href="#cite_note-Hinton2010-5"><span class="cite-bracket">&#91;</span>5<span class="cite-bracket">&#93;</span></a></sup> where <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle p}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mi>p</mi> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle p}</annotation> </semantics> </math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/81eac1e205430d1f40810df36a0edffdc367af36" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.671ex; margin-left: -0.089ex; width:1.259ex; height:2.009ex;" alt="{\displaystyle p}" /></span> and <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle E}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mi>E</mi> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle E}</annotation> </semantics> </math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/4232c9de2ee3eec0a9c0a19b15ab92daa6223f9b" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.338ex; width:1.776ex; height:2.176ex;" alt="{\displaystyle E}" /></span> vary over every possible activation pattern and <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle \textstyle {Z=\sum _{\scriptscriptstyle {\text{All Patterns}}}e^{-E({\text{pattern}})}}}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mstyle displaystyle="false" scriptlevel="0"> <mrow class="MJX-TeXAtom-ORD"> <mi>Z</mi> <mo>=</mo> <munder> <mo>&#x2211;<!-- ∑ --></mo> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="false" scriptlevel="2"> <mrow class="MJX-TeXAtom-ORD"> <mtext>All Patterns</mtext> </mrow> </mstyle> </mrow> </munder> <msup> <mi>e</mi> <mrow class="MJX-TeXAtom-ORD"> <mo>&#x2212;<!-- − --></mo> <mi>E</mi> <mo stretchy="false">(</mo> <mrow class="MJX-TeXAtom-ORD"> <mtext>pattern</mtext> </mrow> <mo stretchy="false">)</mo> </mrow> </msup> </mrow> </mstyle> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle \textstyle {Z=\sum _{\scriptscriptstyle {\text{All Patterns}}}e^{-E({\text{pattern}})}}}</annotation> </semantics> </math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/b2d124436452bf4f7c46be05f2a4ec154e266c2d" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -1.005ex; width:25.608ex; height:3.343ex;" alt="{\displaystyle \textstyle {Z=\sum _{\scriptscriptstyle {\text{All Patterns}}}e^{-E({\text{pattern}})}}}" /></span>. To be more precise, <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle p(a)=e^{-E(a)}/Z}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mi>p</mi> <mo stretchy="false">(</mo> <mi>a</mi> <mo stretchy="false">)</mo> <mo>=</mo> <msup> <mi>e</mi> <mrow class="MJX-TeXAtom-ORD"> <mo>&#x2212;<!-- − --></mo> <mi>E</mi> <mo stretchy="false">(</mo> <mi>a</mi> <mo stretchy="false">)</mo> </mrow> </msup> <mrow class="MJX-TeXAtom-ORD"> <mo>/</mo> </mrow> <mi>Z</mi> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle p(a)=e^{-E(a)}/Z}</annotation> </semantics> </math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/749ebbae5c41727f56fbb5fbee29d6f792ccfa4f" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.838ex; margin-left: -0.089ex; width:16.238ex; height:3.343ex;" alt="{\displaystyle p(a)=e^{-E(a)}/Z}" /></span>, where <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle a}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mi>a</mi> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle a}</annotation> </semantics> </math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/ffd2487510aa438433a2579450ab2b3d557e5edc" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.338ex; width:1.23ex; height:1.676ex;" alt="{\displaystyle a}" /></span> is an activation pattern of all neurons (visible and hidden). Hence, some early neural networks bear the name Boltzmann Machine. Paul Smolensky calls <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle -E\,}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mo>&#x2212;<!-- − --></mo> <mi>E</mi> <mspace width="thinmathspace"></mspace> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle -E\,}</annotation> </semantics> </math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/27723660512942ff84ce2d85df2509299c698147" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.505ex; width:3.971ex; height:2.343ex;" alt="{\displaystyle -E\,}" /></span> the <i>Harmony</i>. A network seeks low energy which is high Harmony. </p> <div class="mw-heading mw-heading3"><h3 id="Networks">Networks</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=5" title="Edit section: Networks"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>This table shows connection diagrams of various unsupervised networks, the details of which will be given in the section Comparison of Networks. Circles are neurons and edges between them are connection weights. As network design changes, features are added on to enable new capabilities or removed to make learning faster. For instance, neurons change between deterministic (Hopfield) and stochastic (Boltzmann) to allow robust output, weights are removed within a layer (RBM) to hasten learning, or connections are allowed to become asymmetric (Helmholtz). </p> <table class="wikitable"> <tbody><tr> <th><a href="/wiki/Hopfield_network" title="Hopfield network">Hopfield</a></th> <th><a href="/wiki/Boltzmann_machine" title="Boltzmann machine">Boltzmann</a></th> <th><a href="/wiki/Restricted_Boltzmann_machine" title="Restricted Boltzmann machine">RBM</a></th> <th><a href="/wiki/Stacked_Restricted_Boltzmann_Machine" class="mw-redirect" title="Stacked Restricted Boltzmann Machine">Stacked Boltzmann</a> </th></tr> <tr> <td><figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:Hopfield-net-vector.svg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/4/44/Hopfield-net-vector.svg/220px-Hopfield-net-vector.svg.png" decoding="async" width="220" height="252" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/44/Hopfield-net-vector.svg/330px-Hopfield-net-vector.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/4/44/Hopfield-net-vector.svg/440px-Hopfield-net-vector.svg.png 2x" data-file-width="730" data-file-height="835" /></a><figcaption>A network based on magnetic domains in iron with a single self-connected layer. It can be used as a content addressable memory.</figcaption></figure> </td> <td><figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:Boltzmannexamplev1.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/7/7a/Boltzmannexamplev1.png/220px-Boltzmannexamplev1.png" decoding="async" width="220" height="209" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/7/7a/Boltzmannexamplev1.png/330px-Boltzmannexamplev1.png 1.5x, //upload.wikimedia.org/wikipedia/commons/7/7a/Boltzmannexamplev1.png 2x" data-file-width="433" data-file-height="411" /></a><figcaption>Network is separated into 2 layers (hidden vs. visible), but still using symmetric 2-way weights. Following Boltzmann's thermodynamics, individual probabilities give rise to macroscopic energies.</figcaption></figure> </td> <td><figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:Restricted_Boltzmann_machine.svg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/e/e8/Restricted_Boltzmann_machine.svg/220px-Restricted_Boltzmann_machine.svg.png" decoding="async" width="220" height="234" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/e/e8/Restricted_Boltzmann_machine.svg/330px-Restricted_Boltzmann_machine.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/e/e8/Restricted_Boltzmann_machine.svg/440px-Restricted_Boltzmann_machine.svg.png 2x" data-file-width="331" data-file-height="352" /></a><figcaption>Restricted Boltzmann Machine. This is a Boltzmann machine where lateral connections within a layer are prohibited to make analysis tractable.</figcaption></figure> </td> <td><figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:Stacked-boltzmann.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/c/c9/Stacked-boltzmann.png/220px-Stacked-boltzmann.png" decoding="async" width="220" height="260" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/c/c9/Stacked-boltzmann.png/330px-Stacked-boltzmann.png 1.5x, //upload.wikimedia.org/wikipedia/commons/c/c9/Stacked-boltzmann.png 2x" data-file-width="408" data-file-height="482" /></a><figcaption>This network has multiple RBM's to encode a hierarchy of hidden features. After a single RBM is trained, another blue hidden layer (see left RBM) is added, and the top 2 layers are trained as a red &amp; blue RBM. Thus the middle layers of an RBM acts as hidden or visible, depending on the training phase it is in.</figcaption></figure> </td></tr></tbody></table> <table class="wikitable"> <tbody><tr> <th><a href="/wiki/Helmholtz_machine" title="Helmholtz machine">Helmholtz</a></th> <th><a href="/wiki/Autoencoder" title="Autoencoder">Autoencoder</a></th> <th><a href="/wiki/Variational_autoencoder" title="Variational autoencoder">VAE</a> </th></tr> <tr> <td><figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:Helmholtz_Machine.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/e/e2/Helmholtz_Machine.png/220px-Helmholtz_Machine.png" decoding="async" width="220" height="261" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/e/e2/Helmholtz_Machine.png 1.5x" data-file-width="273" data-file-height="324" /></a><figcaption>Instead of the bidirectional symmetric connection of the stacked Boltzmann machines, we have separate one-way connections to form a loop. It does both generation and discrimination.</figcaption></figure> </td> <td><figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:Autoencoder_schema.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/3/37/Autoencoder_schema.png/220px-Autoencoder_schema.png" decoding="async" width="220" height="200" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/3/37/Autoencoder_schema.png/330px-Autoencoder_schema.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/3/37/Autoencoder_schema.png/440px-Autoencoder_schema.png 2x" data-file-width="841" data-file-height="765" /></a><figcaption>A feed forward network that aims to find a good middle layer representation of its input world. This network is deterministic, so it is not as robust as its successor the VAE.</figcaption></figure> </td> <td><figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:VAE_blocks.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/5/5e/VAE_blocks.png/220px-VAE_blocks.png" decoding="async" width="220" height="200" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/5/5e/VAE_blocks.png/330px-VAE_blocks.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/5/5e/VAE_blocks.png/440px-VAE_blocks.png 2x" data-file-width="841" data-file-height="765" /></a><figcaption>Applies Variational Inference to the Autoencoder. The middle layer is a set of means &amp; variances for Gaussian distributions. The stochastic nature allows for more robust imagination than the deterministic autoencoder.</figcaption></figure> </td></tr></tbody></table> <p>Of the networks bearing people's names, only Hopfield worked directly with neural networks. Boltzmann and Helmholtz came before artificial neural networks, but their work in physics and physiology inspired the analytical methods that were used. </p> <div class="mw-heading mw-heading3"><h3 id="History">History</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=6" title="Edit section: History"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <table class="wikitable"> <tbody><tr> <td>1974</td> <td>Ising magnetic model proposed by <a href="/w/index.php?title=William_A._Little_(physicist)&amp;action=edit&amp;redlink=1" class="new" title="William A. Little (physicist) (page does not exist)">WA Little</a><span class="noprint" style="font-size:85%; font-style: normal;">&#160;&#91;<a href="https://de.wikipedia.org/wiki/William_A._Little" class="extiw" title="de:William A. Little">de</a>&#93;</span> for cognition </td></tr> <tr> <td>1980</td> <td><a href="/wiki/Kunihiko_Fukushima" title="Kunihiko Fukushima">Kunihiko Fukushima</a> introduces the <a href="/wiki/Neocognitron" title="Neocognitron">neocognitron</a>, which is later called a <a href="/wiki/Convolutional_neural_network" title="Convolutional neural network">convolutional neural network</a>. It is mostly used in SL, but deserves a mention here. </td></tr> <tr> <td>1982</td> <td>Ising variant Hopfield net described as <a href="/wiki/Content-addressable_memory" title="Content-addressable memory">CAMs</a> and classifiers by John Hopfield. </td></tr> <tr> <td>1983</td> <td>Ising variant Boltzmann machine with probabilistic neurons described by <a href="/wiki/Geoffrey_Hinton" title="Geoffrey Hinton">Hinton</a> &amp; <a href="/wiki/Terry_Sejnowski" title="Terry Sejnowski">Sejnowski</a> following Sherington &amp; Kirkpatrick's 1975 work. </td></tr> <tr> <td>1986</td> <td><a href="/wiki/Paul_Smolensky" title="Paul Smolensky">Paul Smolensky</a> publishes Harmony Theory, which is an RBM with practically the same Boltzmann energy function. Smolensky did not give a practical training scheme. Hinton did in mid-2000s. </td></tr> <tr> <td>1995</td> <td>Schmidthuber introduces the <a href="/wiki/Long_short-term_memory" title="Long short-term memory">LSTM</a> neuron for languages. </td></tr> <tr> <td>1995</td> <td>Dayan &amp; Hinton introduces Helmholtz machine </td></tr> <tr> <td>2013</td> <td>Kingma, Rezende, &amp; co. introduced Variational Autoencoders as Bayesian graphical probability network, with neural nets as components. </td></tr></tbody></table> <div class="mw-heading mw-heading3"><h3 id="Specific_Networks">Specific Networks</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=7" title="Edit section: Specific Networks"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Here, we highlight some characteristics of select networks. The details of each are given in the comparison table below. </p> <style data-mw-deduplicate="TemplateStyles:r1228772891">.mw-parser-output .glossary dt{margin-top:0.4em}.mw-parser-output .glossary dt+dt{margin-top:-0.2em}.mw-parser-output .glossary .templatequote{margin-top:0;margin-bottom:-0.5em}</style> <dl class="glossary"> <dt id="hopfield_network"><dfn><a href="/wiki/Hopfield_Network" class="mw-redirect" title="Hopfield Network">Hopfield Network</a></dfn></dt> <dd>Ferromagnetism inspired Hopfield networks. A neuron correspond to an iron domain with binary magnetic moments Up and Down, and neural connections correspond to the domain's influence on each other. Symmetric connections enable a global energy formulation. During inference the network updates each state using the standard activation step function. Symmetric weights and the right energy functions guarantees convergence to a stable activation pattern. Asymmetric weights are difficult to analyze. Hopfield nets are used as Content Addressable Memories (CAM).</dd> <dt id="boltzmann_machine"><dfn><a href="/wiki/Boltzmann_Machine" class="mw-redirect" title="Boltzmann Machine">Boltzmann Machine</a></dfn></dt> <dd>These are stochastic Hopfield nets. Their state value is sampled from this <a href="/wiki/Probability_density_function" title="Probability density function">pdf</a> as follows: suppose a binary neuron fires with the Bernoulli probability p(1) = 1/3 and rests with p(0) = 2/3. One samples from it by taking a <i>uniformly</i> distributed random number y, and plugging it into the inverted <a href="/wiki/Cumulative_distribution_function" title="Cumulative distribution function">cumulative distribution function</a>, which in this case is the step function thresholded at 2/3. The inverse function = { 0 if x &lt;= 2/3, 1 if x &gt; 2/3 }.</dd> <dt id="sigmoid_belief_net"><dfn>Sigmoid Belief Net</dfn></dt> <dd>Introduced by Radford Neal in 1992, this network applies ideas from probabilistic graphical models to neural networks. A key difference is that nodes in graphical models have pre-assigned meanings, whereas Belief Net neurons' features are determined after training. The network is a sparsely connected directed acyclic graph composed of binary stochastic neurons. The learning rule comes from Maximum Likelihood on p(X): Δw<sub>ij</sub> <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle \propto }"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mo>&#x221d;<!-- ∝ --></mo> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle \propto }</annotation> </semantics> </math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/0e3a55007ba2f092d6cafe6d33598e0608b81150" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.338ex; width:1.808ex; height:1.676ex;" alt="{\displaystyle \propto }" /></span> s<sub>j</sub> * (s<sub>i</sub> - p<sub>i</sub>), where p<sub>i</sub> = 1 / ( 1 + e<sup>weighted inputs into neuron i</sup> ). s<sub>j</sub>'s are activations from an unbiased sample of the posterior distribution and this is problematic due to the Explaining Away problem raised by Judea Perl. <a href="/wiki/Variational_Bayesian_methods" title="Variational Bayesian methods">Variational Bayesian methods</a> uses a surrogate posterior and blatantly disregard this complexity.</dd> <dt id="deep_belief_network"><dfn><a href="/wiki/Deep_Belief_Network" class="mw-redirect" title="Deep Belief Network">Deep Belief Network</a></dfn></dt> <dd>Introduced by Hinton, this network is a hybrid of RBM and Sigmoid Belief Network. The top 2 layers is an RBM and the second layer downwards form a sigmoid belief network. One trains it by the <a href="/wiki/Stacked_Restricted_Boltzmann_Machine" class="mw-redirect" title="Stacked Restricted Boltzmann Machine">stacked RBM</a> method and then throw away the recognition weights below the top RBM. As of 2009, 3-4 layers seems to be the optimal depth.<sup id="cite_ref-HintonMlss2009_6-0" class="reference"><a href="#cite_note-HintonMlss2009-6"><span class="cite-bracket">&#91;</span>6<span class="cite-bracket">&#93;</span></a></sup></dd> <dt id="helmholtz_machine"><dfn><a href="/wiki/Helmholtz_machine" title="Helmholtz machine">Helmholtz machine</a></dfn></dt> <dd>These are early inspirations for the Variational Auto Encoders. Its 2 networks combined into one—forward weights operates recognition and backward weights implements imagination. It is perhaps the first network to do both. Helmholtz did not work in machine learning but he inspired the view of "statistical inference engine whose function is to infer probable causes of sensory input".<sup id="cite_ref-nc95_7-0" class="reference"><a href="#cite_note-nc95-7"><span class="cite-bracket">&#91;</span>7<span class="cite-bracket">&#93;</span></a></sup> the stochastic binary neuron outputs a probability that its state is 0 or 1. The data input is normally not considered a layer, but in the Helmholtz machine generation mode, the data layer receives input from the middle layer and has separate weights for this purpose, so it is considered a layer. Hence this network has 3 layers.</dd> <dt id="variational_autoencoder"><dfn><a href="/wiki/Variational_autoencoder" title="Variational autoencoder">Variational autoencoder</a></dfn></dt> <dd>These are inspired by Helmholtz machines and combines probability network with neural networks. An Autoencoder is a 3-layer CAM network, where the middle layer is supposed to be some internal representation of input patterns. The encoder neural network is a probability distribution q<sub>φ</sub>(z given x) and the decoder network is p<sub>θ</sub>(x given z). The weights are named phi &amp; theta rather than W and V as in Helmholtz—a cosmetic difference. These 2 networks here can be fully connected, or use another NN scheme.</dd> </dl> <div class="mw-heading mw-heading3"><h3 id="Comparison_of_networks">Comparison of networks</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=8" title="Edit section: Comparison of networks"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <table class="wikitable"> <tbody><tr> <th></th> <th>Hopfield</th> <th>Boltzmann</th> <th>RBM</th> <th>Stacked RBM</th> <th>Helmholtz</th> <th>Autoencoder</th> <th>VAE </th></tr> <tr> <td><b>Usage &amp; notables</b></td> <td>CAM, traveling salesman problem</td> <td>CAM. The freedom of connections makes this network difficult to analyze.</td> <td>pattern recognition. used in MNIST digits and speech.</td> <td>recognition &amp; imagination. trained with unsupervised pre-training and/or supervised fine tuning.</td> <td>imagination, mimicry</td> <td>language: creative writing, translation. vision: enhancing blurry images</td> <td>generate realistic data </td></tr> <tr> <td><b>Neuron</b></td> <td>deterministic binary state. Activation = { 0 (or -1) if x is negative, 1 otherwise }</td> <td>stochastic binary Hopfield neuron</td> <td>← same. (extended to real-valued in mid 2000s)</td> <td>← same</td> <td>← same</td> <td>language: LSTM. vision: local receptive fields. usually real valued relu activation.</td> <td>middle layer neurons encode means &amp; variances for Gaussians. In run mode (inference), the output of the middle layer are sampled values from the Gaussians. </td></tr> <tr> <td><b>Connections</b></td> <td>1-layer with symmetric weights. No self-connections.</td> <td>2-layers. 1-hidden &amp; 1-visible. symmetric weights.</td> <td>← same. <br />no lateral connections within a layer.</td> <td>top layer is undirected, symmetric. other layers are 2-way, asymmetric.</td> <td>3-layers: asymmetric weights. 2 networks combined into 1.</td> <td>3-layers. The input is considered a layer even though it has no inbound weights. recurrent layers for NLP. feedforward convolutions for vision. input &amp; output have the same neuron counts.</td> <td>3-layers: input, encoder, distribution sampler decoder. the sampler is not considered a layer </td></tr> <tr> <td><b>Inference &amp; energy</b></td> <td>Energy is given by Gibbs probability measure&#160;:<span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle E=-{\frac {1}{2}}\sum _{i,j}{w_{ij}{s_{i}}{s_{j}}}+\sum _{i}{\theta _{i}}{s_{i}}}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mi>E</mi> <mo>=</mo> <mo>&#x2212;<!-- − --></mo> <mrow class="MJX-TeXAtom-ORD"> <mfrac> <mn>1</mn> <mn>2</mn> </mfrac> </mrow> <munder> <mo>&#x2211;<!-- ∑ --></mo> <mrow class="MJX-TeXAtom-ORD"> <mi>i</mi> <mo>,</mo> <mi>j</mi> </mrow> </munder> <mrow class="MJX-TeXAtom-ORD"> <msub> <mi>w</mi> <mrow class="MJX-TeXAtom-ORD"> <mi>i</mi> <mi>j</mi> </mrow> </msub> <mrow class="MJX-TeXAtom-ORD"> <msub> <mi>s</mi> <mrow class="MJX-TeXAtom-ORD"> <mi>i</mi> </mrow> </msub> </mrow> <mrow class="MJX-TeXAtom-ORD"> <msub> <mi>s</mi> <mrow class="MJX-TeXAtom-ORD"> <mi>j</mi> </mrow> </msub> </mrow> </mrow> <mo>+</mo> <munder> <mo>&#x2211;<!-- ∑ --></mo> <mrow class="MJX-TeXAtom-ORD"> <mi>i</mi> </mrow> </munder> <mrow class="MJX-TeXAtom-ORD"> <msub> <mi>&#x3b8;<!-- θ --></mi> <mrow class="MJX-TeXAtom-ORD"> <mi>i</mi> </mrow> </msub> </mrow> <mrow class="MJX-TeXAtom-ORD"> <msub> <mi>s</mi> <mrow class="MJX-TeXAtom-ORD"> <mi>i</mi> </mrow> </msub> </mrow> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle E=-{\frac {1}{2}}\sum _{i,j}{w_{ij}{s_{i}}{s_{j}}}+\sum _{i}{\theta _{i}}{s_{i}}}</annotation> </semantics> </math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/76ad57f7823ca1b74ab67b5f3dadbc83d24da7c2" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -3.338ex; width:30.204ex; height:6.676ex;" alt="{\displaystyle E=-{\frac {1}{2}}\sum _{i,j}{w_{ij}{s_{i}}{s_{j}}}+\sum _{i}{\theta _{i}}{s_{i}}}" /></span></td> <td>← same</td> <td>← same</td> <td></td> <td>minimize KL divergence</td> <td>inference is only feed-forward. previous UL networks ran forwards AND backwards</td> <td>minimize error = reconstruction error - KLD </td></tr> <tr> <td><b>Training</b></td> <td>Δw<sub>ij</sub> = s<sub>i</sub>*s<sub>j</sub>, for +1/-1 neuron</td> <td>Δw<sub>ij</sub> = e*(p<sub>ij</sub> - p'<sub>ij</sub>). This is derived from minimizing KLD. e = learning rate, p' = predicted and p = actual distribution. </td> <td>Δw<sub>ij</sub> = e*( &lt; v<sub>i</sub> h<sub>j</sub> &gt;<sub>data</sub> - &lt; v<sub>i</sub> h<sub>j</sub> &gt;<sub>equilibrium</sub> ). This is a form of contrastive divergence w/ Gibbs Sampling. "&lt;&gt;" are expectations.</td> <td>← similar. train 1-layer at a time. approximate equilibrium state with a 3-segment pass. no back propagation.</td> <td>wake-sleep 2 phase training</td> <td>back propagate the reconstruction error</td> <td>reparameterize hidden state for backprop </td></tr> <tr> <td><b>Strength</b></td> <td>resembles physical systems so it inherits their equations</td> <td>← same. hidden neurons act as internal representatation of the external world</td> <td>faster more practical training scheme than Boltzmann machines</td> <td>trains quickly. gives hierarchical layer of features</td> <td>mildly anatomical. analyzable w/ information theory &amp; statistical mechanics</td> <td></td> <td> </td></tr> <tr> <td><b>Weakness</b></td> <td></td> <td>hard to train due to lateral connections</td> <td>equilibrium requires too many iterations</td> <td>integer &amp; real-valued neurons are more complicated.</td> <td></td> <td></td> <td> </td></tr></tbody></table> <div class="mw-heading mw-heading3"><h3 id="Hebbian_Learning,_ART,_SOM"><span id="Hebbian_Learning.2C_ART.2C_SOM"></span>Hebbian Learning, ART, SOM</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=9" title="Edit section: Hebbian Learning, ART, SOM"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The classical example of unsupervised learning in the study of neural networks is <a href="/wiki/Donald_Hebb" class="mw-redirect" title="Donald Hebb">Donald Hebb</a>'s principle, that is, neurons that fire together wire together.<sup id="cite_ref-Buhmann_8-0" class="reference"><a href="#cite_note-Buhmann-8"><span class="cite-bracket">&#91;</span>8<span class="cite-bracket">&#93;</span></a></sup> In <a href="/wiki/Hebbian_learning" class="mw-redirect" title="Hebbian learning">Hebbian learning</a>, the connection is reinforced irrespective of an error, but is exclusively a function of the coincidence between action potentials between the two neurons.<sup id="cite_ref-Comesana_9-0" class="reference"><a href="#cite_note-Comesana-9"><span class="cite-bracket">&#91;</span>9<span class="cite-bracket">&#93;</span></a></sup> A similar version that modifies synaptic weights takes into account the time between the action potentials (<a href="/wiki/Spike-timing-dependent_plasticity" title="Spike-timing-dependent plasticity">spike-timing-dependent plasticity</a> or STDP). Hebbian Learning has been hypothesized to underlie a range of cognitive functions, such as <a href="/wiki/Pattern_recognition" title="Pattern recognition">pattern recognition</a> and experiential learning. </p><p>Among <a href="/wiki/Artificial_neural_network" class="mw-redirect" title="Artificial neural network">neural network</a> models, the <a href="/wiki/Self-organizing_map" title="Self-organizing map">self-organizing map</a> (SOM) and <a href="/wiki/Adaptive_resonance_theory" title="Adaptive resonance theory">adaptive resonance theory</a> (ART) are commonly used in unsupervised learning algorithms. The SOM is a topographic organization in which nearby locations in the map represent inputs with similar properties. The ART model allows the number of clusters to vary with problem size and lets the user control the degree of similarity between members of the same clusters by means of a user-defined constant called the vigilance parameter. ART networks are used for many pattern recognition tasks, such as <a href="/wiki/Automatic_target_recognition" title="Automatic target recognition">automatic target recognition</a> and seismic signal processing.<sup id="cite_ref-Carpenter_10-0" class="reference"><a href="#cite_note-Carpenter-10"><span class="cite-bracket">&#91;</span>10<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Probabilistic_methods">Probabilistic methods</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=10" title="Edit section: Probabilistic methods"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Two of the main methods used in unsupervised learning are <a href="/wiki/Principal_component_analysis" title="Principal component analysis">principal component</a> and <a href="/wiki/Cluster_analysis" title="Cluster analysis">cluster analysis</a>. <a href="/wiki/Cluster_analysis" title="Cluster analysis">Cluster analysis</a> is used in unsupervised learning to group, or segment, datasets with shared attributes in order to extrapolate algorithmic relationships.<sup id="cite_ref-tds-ul_11-0" class="reference"><a href="#cite_note-tds-ul-11"><span class="cite-bracket">&#91;</span>11<span class="cite-bracket">&#93;</span></a></sup> Cluster analysis is a branch of <a href="/wiki/Machine_learning" title="Machine learning">machine learning</a> that groups the data that has not been <a href="/wiki/Labeled_data" title="Labeled data">labelled</a>, classified or categorized. Instead of responding to feedback, cluster analysis identifies commonalities in the data and reacts based on the presence or absence of such commonalities in each new piece of data. This approach helps detect anomalous data points that do not fit into either group. </p><p>A central application of unsupervised learning is in the field of <a href="/wiki/Density_estimation" title="Density estimation">density estimation</a> in <a href="/wiki/Statistics" title="Statistics">statistics</a>,<sup id="cite_ref-JordanBishop2004_12-0" class="reference"><a href="#cite_note-JordanBishop2004-12"><span class="cite-bracket">&#91;</span>12<span class="cite-bracket">&#93;</span></a></sup> though unsupervised learning encompasses many other domains involving summarizing and explaining data features. It can be contrasted with supervised learning by saying that whereas supervised learning intends to infer a <a href="/wiki/Conditional_probability_distribution" title="Conditional probability distribution">conditional probability distribution</a> conditioned on the label of input data; unsupervised learning intends to infer an <a href="/wiki/A_priori_probability" class="mw-redirect" title="A priori probability">a priori probability</a> distribution . </p> <div class="mw-heading mw-heading3"><h3 id="Approaches">Approaches</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=11" title="Edit section: Approaches"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Some of the most common algorithms used in unsupervised learning include: (1) Clustering, (2) Anomaly detection, (3) Approaches for learning latent variable models. Each approach uses several methods as follows: </p> <ul><li><a href="/wiki/Data_clustering" class="mw-redirect" title="Data clustering">Clustering</a> methods include: <a href="/wiki/Hierarchical_clustering" title="Hierarchical clustering">hierarchical clustering</a>,<sup id="cite_ref-Hastie_13-0" class="reference"><a href="#cite_note-Hastie-13"><span class="cite-bracket">&#91;</span>13<span class="cite-bracket">&#93;</span></a></sup> <a href="/wiki/K-means" class="mw-redirect" title="K-means">k-means</a>,<sup id="cite_ref-tds-kmeans_14-0" class="reference"><a href="#cite_note-tds-kmeans-14"><span class="cite-bracket">&#91;</span>14<span class="cite-bracket">&#93;</span></a></sup> <a href="/wiki/Mixture_models" class="mw-redirect" title="Mixture models">mixture models</a>, <a href="/wiki/Model-based_clustering" title="Model-based clustering">model-based clustering</a>, <a href="/wiki/DBSCAN" title="DBSCAN">DBSCAN</a>, and <a href="/wiki/OPTICS_algorithm" title="OPTICS algorithm">OPTICS algorithm</a></li> <li><a href="/wiki/Anomaly_detection" title="Anomaly detection">Anomaly detection</a> methods include: <a href="/wiki/Local_Outlier_Factor" class="mw-redirect" title="Local Outlier Factor">Local Outlier Factor</a>, and <a href="/wiki/Isolation_Forest" class="mw-redirect" title="Isolation Forest">Isolation Forest</a></li> <li>Approaches for learning <a href="/wiki/Latent_variable_model" title="Latent variable model">latent variable models</a> such as <a href="/wiki/Expectation%E2%80%93maximization_algorithm" title="Expectation–maximization algorithm">Expectation–maximization algorithm</a> (EM), <a href="/wiki/Method_of_moments_(statistics)" title="Method of moments (statistics)">Method of moments</a>, and <a href="/wiki/Blind_signal_separation" class="mw-redirect" title="Blind signal separation">Blind signal separation</a> techniques (<a href="/wiki/Principal_component_analysis" title="Principal component analysis">Principal component analysis</a>, <a href="/wiki/Independent_component_analysis" title="Independent component analysis">Independent component analysis</a>, <a href="/wiki/Non-negative_matrix_factorization" title="Non-negative matrix factorization">Non-negative matrix factorization</a>, <a href="/wiki/Singular_value_decomposition" title="Singular value decomposition">Singular value decomposition</a>)</li></ul> <div class="mw-heading mw-heading3"><h3 id="Method_of_moments">Method of moments</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=12" title="Edit section: Method of moments"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>One of the statistical approaches for unsupervised learning is the <a href="/wiki/Method_of_moments_(statistics)" title="Method of moments (statistics)">method of moments</a>. In the method of moments, the unknown parameters (of interest) in the model are related to the moments of one or more random variables, and thus, these unknown parameters can be estimated given the moments. The moments are usually estimated from samples empirically. The basic moments are first and second order moments. For a random vector, the first order moment is the <a href="/wiki/Mean" title="Mean">mean</a> vector, and the second order moment is the <a href="/wiki/Covariance_matrix" title="Covariance matrix">covariance matrix</a> (when the mean is zero). Higher order moments are usually represented using <a href="/wiki/Tensors" class="mw-redirect" title="Tensors">tensors</a> which are the generalization of matrices to higher orders as multi-dimensional arrays. </p><p>In particular, the method of moments is shown to be effective in learning the parameters of <a href="/wiki/Latent_variable_model" title="Latent variable model">latent variable models</a>. Latent variable models are statistical models where in addition to the observed variables, a set of latent variables also exists which is not observed. A highly practical example of latent variable models in machine learning is the <a href="/wiki/Topic_modeling" class="mw-redirect" title="Topic modeling">topic modeling</a> which is a statistical model for generating the words (observed variables) in the document based on the topic (latent variable) of the document. In the topic modeling, the words in the document are generated according to different statistical parameters when the topic of the document is changed. It is shown that method of moments (tensor decomposition techniques) consistently recover the parameters of a large class of latent variable models under some assumptions.<sup id="cite_ref-TensorLVMs_15-0" class="reference"><a href="#cite_note-TensorLVMs-15"><span class="cite-bracket">&#91;</span>15<span class="cite-bracket">&#93;</span></a></sup> </p><p>The <a href="/wiki/Expectation%E2%80%93maximization_algorithm" title="Expectation–maximization algorithm">Expectation–maximization algorithm</a> (EM) is also one of the most practical methods for learning latent variable models. However, it can get stuck in local optima, and it is not guaranteed that the algorithm will converge to the true unknown parameters of the model. In contrast, for the method of moments, the global convergence is guaranteed under some conditions. </p> <div class="mw-heading mw-heading2"><h2 id="See_also">See also</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=13" title="Edit section: See also"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <ul><li><a href="/wiki/Automated_machine_learning" title="Automated machine learning">Automated machine learning</a></li> <li><a href="/wiki/Cluster_analysis" title="Cluster analysis">Cluster analysis</a></li> <li><a href="/wiki/Model-based_clustering" title="Model-based clustering">Model-based clustering</a></li> <li><a href="/wiki/Anomaly_detection" title="Anomaly detection">Anomaly detection</a></li> <li><a href="/wiki/Expectation%E2%80%93maximization_algorithm" title="Expectation–maximization algorithm">Expectation–maximization algorithm</a></li> <li><a href="/wiki/Generative_topographic_map" title="Generative topographic map">Generative topographic map</a></li> <li><a href="/wiki/Meta-learning_(computer_science)" title="Meta-learning (computer science)">Meta-learning (computer science)</a></li> <li><a href="/wiki/Multivariate_analysis" class="mw-redirect" title="Multivariate analysis">Multivariate analysis</a></li> <li><a href="/wiki/Radial_basis_function_network" title="Radial basis function network">Radial basis function network</a></li> <li><a href="/wiki/Weak_supervision" title="Weak supervision">Weak supervision</a></li></ul> <div class="mw-heading mw-heading2"><h2 id="References">References</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=14" title="Edit section: References"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1239543626">.mw-parser-output .reflist{margin-bottom:0.5em;list-style-type:decimal}@media screen{.mw-parser-output .reflist{font-size:90%}}.mw-parser-output .reflist .references{font-size:100%;margin-bottom:0;list-style-type:inherit}.mw-parser-output .reflist-columns-2{column-width:30em}.mw-parser-output .reflist-columns-3{column-width:25em}.mw-parser-output .reflist-columns{margin-top:0.3em}.mw-parser-output .reflist-columns ol{margin-top:0}.mw-parser-output .reflist-columns li{page-break-inside:avoid;break-inside:avoid-column}.mw-parser-output .reflist-upper-alpha{list-style-type:upper-alpha}.mw-parser-output .reflist-upper-roman{list-style-type:upper-roman}.mw-parser-output .reflist-lower-alpha{list-style-type:lower-alpha}.mw-parser-output .reflist-lower-greek{list-style-type:lower-greek}.mw-parser-output .reflist-lower-roman{list-style-type:lower-roman}</style><div class="reflist"> <div class="mw-references-wrap mw-references-columns"><ol class="references"> <li id="cite_note-WeiWu-1"><span class="mw-cite-backlink"><b><a href="#cite_ref-WeiWu_1-0">^</a></b></span> <span class="reference-text"><style data-mw-deduplicate="TemplateStyles:r1238218222">.mw-parser-output cite.citation{font-style:inherit;word-wrap:break-word}.mw-parser-output .citation q{quotes:"\"""\"""'""'"}.mw-parser-output .citation:target{background-color:rgba(0,127,255,0.133)}.mw-parser-output .id-lock-free.id-lock-free a{background:url("//upload.wikimedia.org/wikipedia/commons/6/65/Lock-green.svg")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-limited.id-lock-limited a,.mw-parser-output .id-lock-registration.id-lock-registration a{background:url("//upload.wikimedia.org/wikipedia/commons/d/d6/Lock-gray-alt-2.svg")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-subscription.id-lock-subscription a{background:url("//upload.wikimedia.org/wikipedia/commons/a/aa/Lock-red-alt-2.svg")right 0.1em center/9px no-repeat}.mw-parser-output .cs1-ws-icon a{background:url("//upload.wikimedia.org/wikipedia/commons/4/4c/Wikisource-logo.svg")right 0.1em center/12px no-repeat}body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-free a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-limited a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-registration a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-subscription a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .cs1-ws-icon a{background-size:contain;padding:0 1em 0 0}.mw-parser-output .cs1-code{color:inherit;background:inherit;border:none;padding:inherit}.mw-parser-output .cs1-hidden-error{display:none;color:var(--color-error,#d33)}.mw-parser-output .cs1-visible-error{color:var(--color-error,#d33)}.mw-parser-output .cs1-maint{display:none;color:#085;margin-left:0.3em}.mw-parser-output .cs1-kern-left{padding-left:0.2em}.mw-parser-output .cs1-kern-right{padding-right:0.2em}.mw-parser-output .citation .mw-selflink{font-weight:inherit}@media screen{.mw-parser-output .cs1-format{font-size:95%}html.skin-theme-clientpref-night .mw-parser-output .cs1-maint{color:#18911f}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .cs1-maint{color:#18911f}}</style><cite id="CITEREFWu" class="citation web cs1">Wu, Wei. <a rel="nofollow" class="external text" href="https://na.uni-tuebingen.de/ex/ml_seminar_ss2022/Unsupervised_Learning%20Final.pdf">"Unsupervised Learning"</a> <span class="cs1-format">(PDF)</span>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240414213810/https://na.uni-tuebingen.de/ex/ml_seminar_ss2022/Unsupervised_Learning%20Final.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 14 April 2024<span class="reference-accessdate">. Retrieved <span class="nowrap">26 April</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Unsupervised+Learning&amp;rft.aulast=Wu&amp;rft.aufirst=Wei&amp;rft_id=https%3A%2F%2Fna.uni-tuebingen.de%2Fex%2Fml_seminar_ss2022%2FUnsupervised_Learning%2520Final.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></span> </li> <li id="cite_note-2"><span class="mw-cite-backlink"><b><a href="#cite_ref-2">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFLiuZhangHouMian2021" class="citation journal cs1">Liu, Xiao; Zhang, Fanjin; Hou, Zhenyu; Mian, Li; Wang, Zhaoyu; Zhang, Jing; Tang, Jie (2021). <a rel="nofollow" class="external text" href="https://ieeexplore.ieee.org/document/9462394">"Self-supervised Learning: Generative or Contrastive"</a>. <i>IEEE Transactions on Knowledge and Data Engineering</i>: 1. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2006.08218">2006.08218</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FTKDE.2021.3090866">10.1109/TKDE.2021.3090866</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1041-4347">1041-4347</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=IEEE+Transactions+on+Knowledge+and+Data+Engineering&amp;rft.atitle=Self-supervised+Learning%3A+Generative+or+Contrastive&amp;rft.pages=1&amp;rft.date=2021&amp;rft_id=info%3Aarxiv%2F2006.08218&amp;rft.issn=1041-4347&amp;rft_id=info%3Adoi%2F10.1109%2FTKDE.2021.3090866&amp;rft.aulast=Liu&amp;rft.aufirst=Xiao&amp;rft.au=Zhang%2C+Fanjin&amp;rft.au=Hou%2C+Zhenyu&amp;rft.au=Mian%2C+Li&amp;rft.au=Wang%2C+Zhaoyu&amp;rft.au=Zhang%2C+Jing&amp;rft.au=Tang%2C+Jie&amp;rft_id=https%3A%2F%2Fieeexplore.ieee.org%2Fdocument%2F9462394&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></span> </li> <li id="cite_note-gpt1paper-3"><span class="mw-cite-backlink"><b><a href="#cite_ref-gpt1paper_3-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFRadfordNarasimhanSalimansSutskever2018" class="citation web cs1">Radford, Alec; Narasimhan, Karthik; Salimans, Tim; Sutskever, Ilya (11 June 2018). <a rel="nofollow" class="external text" href="https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf">"Improving Language Understanding by Generative Pre-Training"</a> <span class="cs1-format">(PDF)</span>. <a href="/wiki/OpenAI" title="OpenAI">OpenAI</a>. p.&#160;12. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210126024542/https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 26 January 2021<span class="reference-accessdate">. Retrieved <span class="nowrap">23 January</span> 2021</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Improving+Language+Understanding+by+Generative+Pre-Training&amp;rft.pages=12&amp;rft.pub=OpenAI&amp;rft.date=2018-06-11&amp;rft.aulast=Radford&amp;rft.aufirst=Alec&amp;rft.au=Narasimhan%2C+Karthik&amp;rft.au=Salimans%2C+Tim&amp;rft.au=Sutskever%2C+Ilya&amp;rft_id=https%3A%2F%2Fcdn.openai.com%2Fresearch-covers%2Flanguage-unsupervised%2Flanguage_understanding_paper.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></span> </li> <li id="cite_note-4"><span class="mw-cite-backlink"><b><a href="#cite_ref-4">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFLiWallaceShenLin2020" class="citation journal cs1">Li, Zhuohan; Wallace, Eric; Shen, Sheng; Lin, Kevin; Keutzer, Kurt; Klein, Dan; Gonzalez, Joey (2020-11-21). <a rel="nofollow" class="external text" href="https://proceedings.mlr.press/v119/li20m.html">"Train Big, Then Compress: Rethinking Model Size for Efficient Training and Inference of Transformers"</a>. <i>Proceedings of the 37th International Conference on Machine Learning</i>. PMLR: <span class="nowrap">5958–</span>5968.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Proceedings+of+the+37th+International+Conference+on+Machine+Learning&amp;rft.atitle=Train+Big%2C+Then+Compress%3A+Rethinking+Model+Size+for+Efficient+Training+and+Inference+of+Transformers&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E5958-%3C%2Fspan%3E5968&amp;rft.date=2020-11-21&amp;rft.aulast=Li&amp;rft.aufirst=Zhuohan&amp;rft.au=Wallace%2C+Eric&amp;rft.au=Shen%2C+Sheng&amp;rft.au=Lin%2C+Kevin&amp;rft.au=Keutzer%2C+Kurt&amp;rft.au=Klein%2C+Dan&amp;rft.au=Gonzalez%2C+Joey&amp;rft_id=https%3A%2F%2Fproceedings.mlr.press%2Fv119%2Fli20m.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></span> </li> <li id="cite_note-Hinton2010-5"><span class="mw-cite-backlink"><b><a href="#cite_ref-Hinton2010_5-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHinton2012" class="citation book cs1">Hinton, G. (2012). <a rel="nofollow" class="external text" href="http://www.cs.utoronto.ca/~hinton/absps/guideTR.pdf">"A Practical Guide to Training Restricted Boltzmann Machines"</a> <span class="cs1-format">(PDF)</span>. <i>Neural Networks: Tricks of the Trade</i>. Lecture Notes in Computer Science. Vol.&#160;7700. Springer. pp.&#160;<span class="nowrap">599–</span>619. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-3-642-35289-8_32">10.1007/978-3-642-35289-8_32</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-3-642-35289-8" title="Special:BookSources/978-3-642-35289-8"><bdi>978-3-642-35289-8</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220903215809/http://www.cs.utoronto.ca/~hinton/absps/guideTR.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2022-09-03<span class="reference-accessdate">. Retrieved <span class="nowrap">2022-11-03</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=A+Practical+Guide+to+Training+Restricted+Boltzmann+Machines&amp;rft.btitle=Neural+Networks%3A+Tricks+of+the+Trade&amp;rft.series=Lecture+Notes+in+Computer+Science&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E599-%3C%2Fspan%3E619&amp;rft.pub=Springer&amp;rft.date=2012&amp;rft_id=info%3Adoi%2F10.1007%2F978-3-642-35289-8_32&amp;rft.isbn=978-3-642-35289-8&amp;rft.aulast=Hinton&amp;rft.aufirst=G.&amp;rft_id=http%3A%2F%2Fwww.cs.utoronto.ca%2F~hinton%2Fabsps%2FguideTR.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></span> </li> <li id="cite_note-HintonMlss2009-6"><span class="mw-cite-backlink"><b><a href="#cite_ref-HintonMlss2009_6-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://videolectures.net/mlss09uk_hinton_dbn">"Deep Belief Nets"</a> (video). September 2009. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220308022539/http://videolectures.net/mlss09uk_hinton_dbn/">Archived</a> from the original on 2022-03-08<span class="reference-accessdate">. Retrieved <span class="nowrap">2022-03-27</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Deep+Belief+Nets&amp;rft.date=2009-09&amp;rft_id=https%3A%2F%2Fvideolectures.net%2Fmlss09uk_hinton_dbn&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></span> </li> <li id="cite_note-nc95-7"><span class="mw-cite-backlink"><b><a href="#cite_ref-nc95_7-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFPeterHintonNealZemel1995" class="citation journal cs1"><a href="/wiki/Peter_Dayan" title="Peter Dayan">Peter, Dayan</a>; <a href="/wiki/Geoffrey_Hinton" title="Geoffrey Hinton">Hinton, Geoffrey E.</a>; <a href="/wiki/Radford_M._Neal" title="Radford M. Neal">Neal, Radford M.</a>; <a href="/wiki/Richard_Zemel" title="Richard Zemel">Zemel, Richard S.</a> (1995). "The Helmholtz machine". <i>Neural Computation</i>. <b>7</b> (5): <span class="nowrap">889–</span>904. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1162%2Fneco.1995.7.5.889">10.1162/neco.1995.7.5.889</a>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://hdl.handle.net/21.11116%2F0000-0002-D6D3-E">21.11116/0000-0002-D6D3-E</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a>&#160;<a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/7584891">7584891</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:1890561">1890561</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Neural+Computation&amp;rft.atitle=The+Helmholtz+machine.&amp;rft.volume=7&amp;rft.issue=5&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E889-%3C%2Fspan%3E904&amp;rft.date=1995&amp;rft_id=info%3Ahdl%2F21.11116%2F0000-0002-D6D3-E&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A1890561%23id-name%3DS2CID&amp;rft_id=info%3Apmid%2F7584891&amp;rft_id=info%3Adoi%2F10.1162%2Fneco.1995.7.5.889&amp;rft.aulast=Peter&amp;rft.aufirst=Dayan&amp;rft.au=Hinton%2C+Geoffrey+E.&amp;rft.au=Neal%2C+Radford+M.&amp;rft.au=Zemel%2C+Richard+S.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span> <span style="position:relative; top: -2px;"><span typeof="mw:File"><a href="/wiki/Paywall" title="closed access publication – behind paywall"><img alt="Closed access icon" src="//upload.wikimedia.org/wikipedia/commons/thumb/0/0e/Closed_Access_logo_transparent.svg/9px-Closed_Access_logo_transparent.svg.png" decoding="async" width="9" height="14" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/0/0e/Closed_Access_logo_transparent.svg/14px-Closed_Access_logo_transparent.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/0/0e/Closed_Access_logo_transparent.svg/18px-Closed_Access_logo_transparent.svg.png 2x" data-file-width="640" data-file-height="1000" /></a></span></span></span> </li> <li id="cite_note-Buhmann-8"><span class="mw-cite-backlink"><b><a href="#cite_ref-Buhmann_8-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFBuhmannKuhnel1992" class="citation book cs1">Buhmann, J.; Kuhnel, H. (1992). "Unsupervised and supervised data clustering with competitive neural networks". <i>&#91;Proceedings 1992&#93; IJCNN International Joint Conference on Neural Networks</i>. Vol.&#160;4. IEEE. pp.&#160;<span class="nowrap">796–</span>801. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2Fijcnn.1992.227220">10.1109/ijcnn.1992.227220</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/0780305590" title="Special:BookSources/0780305590"><bdi>0780305590</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:62651220">62651220</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Unsupervised+and+supervised+data+clustering+with+competitive+neural+networks&amp;rft.btitle=%26%2391%3BProceedings+1992%26%2393%3B+IJCNN+International+Joint+Conference+on+Neural+Networks&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E796-%3C%2Fspan%3E801&amp;rft.pub=IEEE&amp;rft.date=1992&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A62651220%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1109%2Fijcnn.1992.227220&amp;rft.isbn=0780305590&amp;rft.aulast=Buhmann&amp;rft.aufirst=J.&amp;rft.au=Kuhnel%2C+H.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></span> </li> <li id="cite_note-Comesana-9"><span class="mw-cite-backlink"><b><a href="#cite_ref-Comesana_9-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFComesaña-CamposBouza-Rodríguez2016" class="citation journal cs1">Comesaña-Campos, Alberto; Bouza-Rodríguez, José Benito (June 2016). "An application of Hebbian learning in the design process decision-making". <i>Journal of Intelligent Manufacturing</i>. <b>27</b> (3): <span class="nowrap">487–</span>506. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs10845-014-0881-z">10.1007/s10845-014-0881-z</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a>&#160;<a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0956-5515">0956-5515</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:207171436">207171436</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Journal+of+Intelligent+Manufacturing&amp;rft.atitle=An+application+of+Hebbian+learning+in+the+design+process+decision-making&amp;rft.volume=27&amp;rft.issue=3&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E487-%3C%2Fspan%3E506&amp;rft.date=2016-06&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A207171436%23id-name%3DS2CID&amp;rft.issn=0956-5515&amp;rft_id=info%3Adoi%2F10.1007%2Fs10845-014-0881-z&amp;rft.aulast=Comesa%C3%B1a-Campos&amp;rft.aufirst=Alberto&amp;rft.au=Bouza-Rodr%C3%ADguez%2C+Jos%C3%A9+Benito&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></span> </li> <li id="cite_note-Carpenter-10"><span class="mw-cite-backlink"><b><a href="#cite_ref-Carpenter_10-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFCarpenter,_G.A.Grossberg,_S.1988" class="citation journal cs1">Carpenter, G.A. &amp; Grossberg, S. (1988). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180516131553/http://www.cns.bu.edu/Profiles/Grossberg/CarGro1988Computer.pdf">"The ART of adaptive pattern recognition by a self-organizing neural network"</a> <span class="cs1-format">(PDF)</span>. <i>Computer</i>. <b>21</b> (3): <span class="nowrap">77–</span>88. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2F2.33">10.1109/2.33</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:14625094">14625094</a>. Archived from <a rel="nofollow" class="external text" href="http://www.cns.bu.edu/Profiles/Grossberg/CarGro1988Computer.pdf">the original</a> <span class="cs1-format">(PDF)</span> on 2018-05-16<span class="reference-accessdate">. Retrieved <span class="nowrap">2013-09-16</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Computer&amp;rft.atitle=The+ART+of+adaptive+pattern+recognition+by+a+self-organizing+neural+network&amp;rft.volume=21&amp;rft.issue=3&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E77-%3C%2Fspan%3E88&amp;rft.date=1988&amp;rft_id=info%3Adoi%2F10.1109%2F2.33&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A14625094%23id-name%3DS2CID&amp;rft.au=Carpenter%2C+G.A.&amp;rft.au=Grossberg%2C+S.&amp;rft_id=http%3A%2F%2Fwww.cns.bu.edu%2FProfiles%2FGrossberg%2FCarGro1988Computer.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></span> </li> <li id="cite_note-tds-ul-11"><span class="mw-cite-backlink"><b><a href="#cite_ref-tds-ul_11-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFRoman2019" class="citation web cs1">Roman, Victor (2019-04-21). <a rel="nofollow" class="external text" href="https://towardsdatascience.com/unsupervised-machine-learning-clustering-analysis-d40f2b34ae7e">"Unsupervised Machine Learning: Clustering Analysis"</a>. <i>Medium</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200821132257/https://towardsdatascience.com/unsupervised-machine-learning-clustering-analysis-d40f2b34ae7e">Archived</a> from the original on 2020-08-21<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-10-01</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Medium&amp;rft.atitle=Unsupervised+Machine+Learning%3A+Clustering+Analysis&amp;rft.date=2019-04-21&amp;rft.aulast=Roman&amp;rft.aufirst=Victor&amp;rft_id=https%3A%2F%2Ftowardsdatascience.com%2Funsupervised-machine-learning-clustering-analysis-d40f2b34ae7e&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></span> </li> <li id="cite_note-JordanBishop2004-12"><span class="mw-cite-backlink"><b><a href="#cite_ref-JordanBishop2004_12-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFJordanBishop2004" class="citation book cs1">Jordan, Michael I.; Bishop, Christopher M. (2004). "7. Intelligent Systems §Neural Networks". In Tucker, Allen B. (ed.). <a rel="nofollow" class="external text" href="https://www.taylorfrancis.com/books/mono/10.1201/9780203494455/computer-science-handbook-allen-tucker"><i>Computer Science Handbook</i></a> (2nd&#160;ed.). Chapman &amp; Hall/CRC Press. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1201%2F9780203494455">10.1201/9780203494455</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/1-58488-360-X" title="Special:BookSources/1-58488-360-X"><bdi>1-58488-360-X</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20221103234201/https://www.taylorfrancis.com/books/mono/10.1201/9780203494455/computer-science-handbook-allen-tucker">Archived</a> from the original on 2022-11-03<span class="reference-accessdate">. Retrieved <span class="nowrap">2022-11-03</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=7.+Intelligent+Systems+%C2%A7Neural+Networks&amp;rft.btitle=Computer+Science+Handbook&amp;rft.edition=2nd&amp;rft.pub=Chapman+%26+Hall%2FCRC+Press&amp;rft.date=2004&amp;rft_id=info%3Adoi%2F10.1201%2F9780203494455&amp;rft.isbn=1-58488-360-X&amp;rft.aulast=Jordan&amp;rft.aufirst=Michael+I.&amp;rft.au=Bishop%2C+Christopher+M.&amp;rft_id=https%3A%2F%2Fwww.taylorfrancis.com%2Fbooks%2Fmono%2F10.1201%2F9780203494455%2Fcomputer-science-handbook-allen-tucker&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></span> </li> <li id="cite_note-Hastie-13"><span class="mw-cite-backlink"><b><a href="#cite_ref-Hastie_13-0">^</a></b></span> <span class="reference-text"><a href="#CITEREFHastieTibshiraniFriedman2009">Hastie, Tibshirani &amp; Friedman 2009</a>, pp.&#160;485–586</span> </li> <li id="cite_note-tds-kmeans-14"><span class="mw-cite-backlink"><b><a href="#cite_ref-tds-kmeans_14-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFGarbade2018" class="citation web cs1">Garbade, Dr Michael J. (2018-09-12). <a rel="nofollow" class="external text" href="https://towardsdatascience.com/understanding-k-means-clustering-in-machine-learning-6a6e67336aa1">"Understanding K-means Clustering in Machine Learning"</a>. <i>Medium</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190528183913/https://towardsdatascience.com/understanding-k-means-clustering-in-machine-learning-6a6e67336aa1">Archived</a> from the original on 2019-05-28<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-10-31</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=Medium&amp;rft.atitle=Understanding+K-means+Clustering+in+Machine+Learning&amp;rft.date=2018-09-12&amp;rft.aulast=Garbade&amp;rft.aufirst=Dr+Michael+J.&amp;rft_id=https%3A%2F%2Ftowardsdatascience.com%2Funderstanding-k-means-clustering-in-machine-learning-6a6e67336aa1&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></span> </li> <li id="cite_note-TensorLVMs-15"><span class="mw-cite-backlink"><b><a href="#cite_ref-TensorLVMs_15-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFAnandkumarGeHsuKakade2014" class="citation journal cs1">Anandkumar, Animashree; Ge, Rong; Hsu, Daniel; Kakade, Sham; Telgarsky, Matus (2014). <a rel="nofollow" class="external text" href="http://www.jmlr.org/papers/volume15/anandkumar14b/anandkumar14b.pdf">"Tensor Decompositions for Learning Latent Variable Models"</a> <span class="cs1-format">(PDF)</span>. <i>Journal of Machine Learning Research</i>. <b>15</b>: <span class="nowrap">2773–</span>2832. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1210.7559">1210.7559</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2012arXiv1210.7559A">2012arXiv1210.7559A</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150320201108/http://jmlr.org/papers/volume15/anandkumar14b/anandkumar14b.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2015-03-20<span class="reference-accessdate">. Retrieved <span class="nowrap">2015-04-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Journal+of+Machine+Learning+Research&amp;rft.atitle=Tensor+Decompositions+for+Learning+Latent+Variable+Models&amp;rft.volume=15&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E2773-%3C%2Fspan%3E2832&amp;rft.date=2014&amp;rft_id=info%3Aarxiv%2F1210.7559&amp;rft_id=info%3Abibcode%2F2012arXiv1210.7559A&amp;rft.aulast=Anandkumar&amp;rft.aufirst=Animashree&amp;rft.au=Ge%2C+Rong&amp;rft.au=Hsu%2C+Daniel&amp;rft.au=Kakade%2C+Sham&amp;rft.au=Telgarsky%2C+Matus&amp;rft_id=http%3A%2F%2Fwww.jmlr.org%2Fpapers%2Fvolume15%2Fanandkumar14b%2Fanandkumar14b.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></span> </li> </ol></div></div> <div class="mw-heading mw-heading2"><h2 id="Further_reading">Further reading</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Unsupervised_learning&amp;action=edit&amp;section=15" title="Edit section: Further reading"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1239549316">.mw-parser-output .refbegin{margin-bottom:0.5em}.mw-parser-output .refbegin-hanging-indents>ul{margin-left:0}.mw-parser-output .refbegin-hanging-indents>ul>li{margin-left:0;padding-left:3.2em;text-indent:-3.2em}.mw-parser-output .refbegin-hanging-indents ul,.mw-parser-output .refbegin-hanging-indents ul li{list-style:none}@media(max-width:720px){.mw-parser-output .refbegin-hanging-indents>ul>li{padding-left:1.6em;text-indent:-1.6em}}.mw-parser-output .refbegin-columns{margin-top:0.3em}.mw-parser-output .refbegin-columns ul{margin-top:0}.mw-parser-output .refbegin-columns li{page-break-inside:avoid;break-inside:avoid-column}@media screen{.mw-parser-output .refbegin{font-size:90%}}</style><div class="refbegin" style=""> <ul><li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFBousquet,_O.von_Luxburg,_U.Raetsch,_G.2004" class="citation book cs1">Bousquet, O.; <a href="/wiki/Ulrike_von_Luxburg" title="Ulrike von Luxburg">von Luxburg, U.</a>; Raetsch, G., eds. (2004). <a rel="nofollow" class="external text" href="https://archive.org/details/springer_10.1007-b100712"><i>Advanced Lectures on Machine Learning</i></a>. Springer. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-3540231226" title="Special:BookSources/978-3540231226"><bdi>978-3540231226</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Advanced+Lectures+on+Machine+Learning&amp;rft.pub=Springer&amp;rft.date=2004&amp;rft.isbn=978-3540231226&amp;rft_id=https%3A%2F%2Farchive.org%2Fdetails%2Fspringer_10.1007-b100712&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFDuda,_Richard_O.Hart,_Peter_E.Stork,_David_G.2001" class="citation book cs1"><a href="/wiki/Richard_O._Duda" title="Richard O. Duda">Duda, Richard O.</a>; <a href="/wiki/Peter_E._Hart" title="Peter E. Hart">Hart, Peter E.</a>; Stork, David G. (2001). "Unsupervised Learning and Clustering". <a href="/wiki/Pattern_classification" class="mw-redirect" title="Pattern classification"><i>Pattern classification</i></a> (2nd&#160;ed.). Wiley. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/0-471-05669-3" title="Special:BookSources/0-471-05669-3"><bdi>0-471-05669-3</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Unsupervised+Learning+and+Clustering&amp;rft.btitle=Pattern+classification&amp;rft.edition=2nd&amp;rft.pub=Wiley&amp;rft.date=2001&amp;rft.isbn=0-471-05669-3&amp;rft.au=Duda%2C+Richard+O.&amp;rft.au=Hart%2C+Peter+E.&amp;rft.au=Stork%2C+David+G.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHastieTibshiraniFriedman2009" class="citation book cs1"><a href="/wiki/Trevor_Hastie" title="Trevor Hastie">Hastie, Trevor</a>; <a href="/wiki/Robert_Tibshirani" title="Robert Tibshirani">Tibshirani, Robert</a>; Friedman, Jerome (2009). <a rel="nofollow" class="external text" href="https://link.springer.com/chapter/10.1007/978-0-387-84858-7_14">"Unsupervised Learning"</a>. <i>The Elements of Statistical Learning: Data mining, Inference, and Prediction</i>. Springer. pp.&#160;<span class="nowrap">485–</span>586. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-0-387-84858-7_14">10.1007/978-0-387-84858-7_14</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-387-84857-0" title="Special:BookSources/978-0-387-84857-0"><bdi>978-0-387-84857-0</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20221103234204/https://link.springer.com/chapter/10.1007/978-0-387-84858-7_14">Archived</a> from the original on 2022-11-03<span class="reference-accessdate">. Retrieved <span class="nowrap">2022-11-03</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=Unsupervised+Learning&amp;rft.btitle=The+Elements+of+Statistical+Learning%3A+Data+mining%2C+Inference%2C+and+Prediction&amp;rft.pages=%3Cspan+class%3D%22nowrap%22%3E485-%3C%2Fspan%3E586&amp;rft.pub=Springer&amp;rft.date=2009&amp;rft_id=info%3Adoi%2F10.1007%2F978-0-387-84858-7_14&amp;rft.isbn=978-0-387-84857-0&amp;rft.aulast=Hastie&amp;rft.aufirst=Trevor&amp;rft.au=Tibshirani%2C+Robert&amp;rft.au=Friedman%2C+Jerome&amp;rft_id=https%3A%2F%2Flink.springer.com%2Fchapter%2F10.1007%2F978-0-387-84858-7_14&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222" /><cite id="CITEREFHintonSejnowski1999" class="citation book cs1"><a href="/wiki/Geoffrey_Hinton" title="Geoffrey Hinton">Hinton, Geoffrey</a>; <a href="/wiki/Terrence_J._Sejnowski" class="mw-redirect" title="Terrence J. Sejnowski">Sejnowski, Terrence J.</a>, eds. (1999). <i>Unsupervised Learning: Foundations of Neural Computation</i>. <a href="/wiki/MIT_Press" title="MIT Press">MIT Press</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/0-262-58168-X" title="Special:BookSources/0-262-58168-X"><bdi>0-262-58168-X</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Unsupervised+Learning%3A+Foundations+of+Neural+Computation&amp;rft.pub=MIT+Press&amp;rft.date=1999&amp;rft.isbn=0-262-58168-X&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AUnsupervised+learning" class="Z3988"></span></li></ul> </div> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374" /><style data-mw-deduplicate="TemplateStyles:r1236075235">.mw-parser-output .navbox{box-sizing:border-box;border:1px solid #a2a9b1;width:100%;clear:both;font-size:88%;text-align:center;padding:1px;margin:1em auto 0}.mw-parser-output .navbox .navbox{margin-top:0}.mw-parser-output .navbox+.navbox,.mw-parser-output .navbox+.navbox-styles+.navbox{margin-top:-1px}.mw-parser-output .navbox-inner,.mw-parser-output .navbox-subgroup{width:100%}.mw-parser-output .navbox-group,.mw-parser-output .navbox-title,.mw-parser-output .navbox-abovebelow{padding:0.25em 1em;line-height:1.5em;text-align:center}.mw-parser-output .navbox-group{white-space:nowrap;text-align:right}.mw-parser-output .navbox,.mw-parser-output .navbox-subgroup{background-color:#fdfdfd}.mw-parser-output .navbox-list{line-height:1.5em;border-color:#fdfdfd}.mw-parser-output .navbox-list-with-group{text-align:left;border-left-width:2px;border-left-style:solid}.mw-parser-output tr+tr>.navbox-abovebelow,.mw-parser-output tr+tr>.navbox-group,.mw-parser-output tr+tr>.navbox-image,.mw-parser-output tr+tr>.navbox-list{border-top:2px solid #fdfdfd}.mw-parser-output .navbox-title{background-color:#ccf}.mw-parser-output .navbox-abovebelow,.mw-parser-output .navbox-group,.mw-parser-output .navbox-subgroup .navbox-title{background-color:#ddf}.mw-parser-output .navbox-subgroup .navbox-group,.mw-parser-output .navbox-subgroup .navbox-abovebelow{background-color:#e6e6ff}.mw-parser-output .navbox-even{background-color:#f7f7f7}.mw-parser-output .navbox-odd{background-color:transparent}.mw-parser-output .navbox .hlist td dl,.mw-parser-output .navbox .hlist td ol,.mw-parser-output .navbox .hlist td ul,.mw-parser-output .navbox td.hlist dl,.mw-parser-output .navbox td.hlist ol,.mw-parser-output .navbox td.hlist ul{padding:0.125em 0}.mw-parser-output .navbox .navbar{display:block;font-size:100%}.mw-parser-output .navbox-title .navbar{float:left;text-align:left;margin-right:0.5em}body.skin--responsive .mw-parser-output .navbox-image img{max-width:none!important}@media print{body.ns-0 .mw-parser-output .navbox{display:none!important}}</style></div><div role="navigation" class="navbox" aria-labelledby="Differentiable_computing254" style="padding:3px"><table class="nowraplinks hlist mw-collapsible autocollapse navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="col" class="navbox-title" colspan="2"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374" /><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1239400231" /><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Differentiable_computing" title="Template:Differentiable computing"><abbr title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/wiki/Template_talk:Differentiable_computing" title="Template talk:Differentiable computing"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Differentiable_computing" title="Special:EditPage/Template:Differentiable computing"><abbr title="Edit this template">e</abbr></a></li></ul></div><div id="Differentiable_computing254" style="font-size:114%;margin:0 4em">Differentiable computing</div></th></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Differentiable_function" title="Differentiable function">General</a></th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><b><a href="/wiki/Differentiable_programming" title="Differentiable programming">Differentiable programming</a></b></li> <li><a href="/wiki/Information_geometry" title="Information geometry">Information geometry</a></li> <li><a href="/wiki/Statistical_manifold" title="Statistical manifold">Statistical manifold</a></li> <li><a href="/wiki/Automatic_differentiation" title="Automatic differentiation">Automatic differentiation</a></li> <li><a href="/wiki/Neuromorphic_computing" title="Neuromorphic computing">Neuromorphic computing</a></li> <li><a href="/wiki/Pattern_recognition" title="Pattern recognition">Pattern recognition</a></li> <li><a href="/wiki/Ricci_calculus" title="Ricci calculus">Ricci calculus</a></li> <li><a href="/wiki/Computational_learning_theory" title="Computational learning theory">Computational learning theory</a></li> <li><a href="/wiki/Inductive_bias" title="Inductive bias">Inductive bias</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Hardware</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Graphcore" title="Graphcore">IPU</a></li> <li><a href="/wiki/Tensor_Processing_Unit" title="Tensor Processing Unit">TPU</a></li> <li><a href="/wiki/Vision_processing_unit" title="Vision processing unit">VPU</a></li> <li><a href="/wiki/Memristor" title="Memristor">Memristor</a></li> <li><a href="/wiki/SpiNNaker" title="SpiNNaker">SpiNNaker</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Software libraries</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/TensorFlow" title="TensorFlow">TensorFlow</a></li> <li><a href="/wiki/PyTorch" title="PyTorch">PyTorch</a></li> <li><a href="/wiki/Keras" title="Keras">Keras</a></li> <li><a href="/wiki/Scikit-learn" title="Scikit-learn">scikit-learn</a></li> <li><a href="/wiki/Theano_(software)" title="Theano (software)">Theano</a></li> <li><a href="/wiki/JAX_(software)" title="JAX (software)">JAX</a></li> <li><a href="/wiki/Flux_(machine-learning_framework)" title="Flux (machine-learning framework)">Flux.jl</a></li> <li><a href="/wiki/MindSpore" title="MindSpore">MindSpore</a></li></ul> </div></td></tr><tr><td class="navbox-abovebelow" colspan="2"><div> <ul><li><span class="noviewer" typeof="mw:File"><a href="/wiki/File:Symbol_portal_class.svg" class="mw-file-description" title="Portal"><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/e/e2/Symbol_portal_class.svg/16px-Symbol_portal_class.svg.png" decoding="async" width="16" height="16" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/e/e2/Symbol_portal_class.svg/23px-Symbol_portal_class.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/e/e2/Symbol_portal_class.svg/31px-Symbol_portal_class.svg.png 2x" data-file-width="180" data-file-height="185" /></a></span> Portals <ul><li><a href="/wiki/Portal:Computer_programming" title="Portal:Computer programming">Computer programming</a></li> <li><a href="/wiki/Portal:Technology" title="Portal:Technology">Technology</a></li></ul></li></ul> </div></td></tr></tbody></table></div> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374" /><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236075235" /></div><div role="navigation" class="navbox authority-control" aria-label="Navbox391" style="padding:3px"><table class="nowraplinks hlist navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Help:Authority_control" title="Help:Authority control">Authority control databases</a>: National <span class="mw-valign-text-top noprint" typeof="mw:File/Frameless"><a href="https://www.wikidata.org/wiki/Q1152135#identifiers" title="Edit this at Wikidata"><img alt="Edit this at Wikidata" src="//upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/10px-OOjs_UI_icon_edit-ltr-progressive.svg.png" decoding="async" width="10" height="10" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/15px-OOjs_UI_icon_edit-ltr-progressive.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/20px-OOjs_UI_icon_edit-ltr-progressive.svg.png 2x" data-file-width="20" data-file-height="20" /></a></span></th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"><ul><li><span class="uid"><a rel="nofollow" class="external text" href="https://d-nb.info/gnd/4580265-8">Germany</a></span></li></ul></div></td></tr></tbody></table></div> <!-- NewPP limit report Parsed by mw‐api‐int.codfw.main‐5b7ccf7b7c‐mvwz8 Cached time: 20250317164112 Cache expiry: 2592000 Reduced expiry: false Complications: [vary‐revision‐sha1, show‐toc] CPU time usage: 0.609 seconds Real time usage: 0.784 seconds Preprocessor visited node count: 2383/1000000 Post‐expand include size: 92422/2097152 bytes Template argument size: 6634/2097152 bytes Highest expansion depth: 12/100 Expensive parser function count: 2/500 Unstrip recursion depth: 1/20 Unstrip post‐expand size: 91178/5000000 bytes Lua time usage: 0.385/10.000 seconds Lua memory usage: 7337722/52428800 bytes Number of Wikibase entities loaded: 1/400 --> <!-- Transclusion expansion time report (%,ms,calls,template) 100.00% 612.675 1 -total 41.12% 251.951 1 Template:Reflist 22.11% 135.491 1 Template:Machine_learning 20.41% 125.024 1 Template:Sidebar_with_collapsible_lists 16.80% 102.946 5 Template:Cite_web 12.13% 74.296 1 Template:Short_description 7.80% 47.801 1 Template:Harvnb 7.72% 47.291 6 Template:Cite_journal 7.35% 45.061 2 Template:Pagetype 6.76% 41.399 1 Template:Differentiable_computing --> <!-- Saved in parser cache with key enwiki:pcache:233497:|#|:idhash:canonical and timestamp 20250317164112 and revision id 1278046684. Rendering was triggered because: api-parse --> </div><!--esi <esi:include src="/esitest-fa8a495983347898/content" /> --><noscript><img src="https://login.wikimedia.org/wiki/Special:CentralAutoLogin/start?useformat=desktop&amp;type=1x1&amp;usesul3=0" alt="" width="1" height="1" style="border: none; position: absolute;"></noscript> <div class="printfooter" data-nosnippet="">Retrieved from "<a dir="ltr" href="https://en.wikipedia.org/w/index.php?title=Unsupervised_learning&amp;oldid=1278046684">https://en.wikipedia.org/w/index.php?title=Unsupervised_learning&amp;oldid=1278046684</a>"</div></div> <div id="catlinks" class="catlinks" data-mw="interface"><div id="mw-normal-catlinks" class="mw-normal-catlinks"><a href="/wiki/Help:Category" title="Help:Category">Category</a>: <ul><li><a href="/wiki/Category:Unsupervised_learning" title="Category:Unsupervised learning">Unsupervised learning</a></li></ul></div><div id="mw-hidden-catlinks" class="mw-hidden-catlinks mw-hidden-cats-hidden">Hidden categories: <ul><li><a href="/wiki/Category:Articles_with_short_description" title="Category:Articles with short description">Articles with short description</a></li><li><a href="/wiki/Category:Short_description_is_different_from_Wikidata" title="Category:Short description is different from Wikidata">Short description is different from Wikidata</a></li></ul></div></div> </div> </main> </div> <div class="mw-footer-container"> <footer id="footer" class="mw-footer" > <ul id="footer-info"> <li id="footer-info-lastmod"> This page was last edited on 28 February 2025, at 05:06<span class="anonymous-show">&#160;(UTC)</span>.</li> <li id="footer-info-copyright">Text is available under the <a href="/wiki/Wikipedia:Text_of_the_Creative_Commons_Attribution-ShareAlike_4.0_International_License" title="Wikipedia:Text of the Creative Commons Attribution-ShareAlike 4.0 International License">Creative Commons Attribution-ShareAlike 4.0 License</a>; additional terms may apply. By using this site, you agree to the <a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Terms_of_Use" class="extiw" title="foundation:Special:MyLanguage/Policy:Terms of Use">Terms of Use</a> and <a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Privacy_policy" class="extiw" title="foundation:Special:MyLanguage/Policy:Privacy policy">Privacy Policy</a>. Wikipedia® is a registered trademark of the <a rel="nofollow" class="external text" href="https://wikimediafoundation.org/">Wikimedia Foundation, Inc.</a>, a non-profit organization.</li> </ul> <ul id="footer-places"> <li id="footer-places-privacy"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Privacy_policy">Privacy policy</a></li> <li id="footer-places-about"><a href="/wiki/Wikipedia:About">About Wikipedia</a></li> <li id="footer-places-disclaimers"><a href="/wiki/Wikipedia:General_disclaimer">Disclaimers</a></li> <li id="footer-places-contact"><a href="//en.wikipedia.org/wiki/Wikipedia:Contact_us">Contact Wikipedia</a></li> <li id="footer-places-wm-codeofconduct"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Universal_Code_of_Conduct">Code of Conduct</a></li> <li id="footer-places-developers"><a href="https://developer.wikimedia.org">Developers</a></li> <li id="footer-places-statslink"><a href="https://stats.wikimedia.org/#/en.wikipedia.org">Statistics</a></li> <li id="footer-places-cookiestatement"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Cookie_statement">Cookie statement</a></li> <li id="footer-places-mobileview"><a href="//en.m.wikipedia.org/w/index.php?title=Unsupervised_learning&amp;mobileaction=toggle_view_mobile" class="noprint stopMobileRedirectToggle">Mobile view</a></li> </ul> <ul id="footer-icons" class="noprint"> <li id="footer-copyrightico"><a href="https://wikimediafoundation.org/" class="cdx-button cdx-button--fake-button cdx-button--size-large cdx-button--fake-button--enabled"><picture><source media="(min-width: 500px)" srcset="/static/images/footer/wikimedia-button.svg" width="84" height="29"><img src="/static/images/footer/wikimedia.svg" width="25" height="25" alt="Wikimedia Foundation" lang="en" loading="lazy"></picture></a></li> <li id="footer-poweredbyico"><a href="https://www.mediawiki.org/" class="cdx-button cdx-button--fake-button cdx-button--size-large cdx-button--fake-button--enabled"><picture><source media="(min-width: 500px)" srcset="/w/resources/assets/poweredby_mediawiki.svg" width="88" height="31"><img src="/w/resources/assets/mediawiki_compact.svg" alt="Powered by MediaWiki" lang="en" width="25" height="25" loading="lazy"></picture></a></li> </ul> </footer> </div> </div> </div> <div class="vector-header-container vector-sticky-header-container"> <div id="vector-sticky-header" class="vector-sticky-header"> <div class="vector-sticky-header-start"> <div class="vector-sticky-header-icon-start vector-button-flush-left vector-button-flush-right" aria-hidden="true"> <button class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-sticky-header-search-toggle" tabindex="-1" data-event-name="ui.vector-sticky-search-form.icon"><span class="vector-icon mw-ui-icon-search mw-ui-icon-wikimedia-search"></span> <span>Search</span> </button> </div> <div role="search" class="vector-search-box-vue vector-search-box-show-thumbnail vector-search-box"> <div class="vector-typeahead-search-container"> <div class="cdx-typeahead-search cdx-typeahead-search--show-thumbnail"> <form action="/w/index.php" id="vector-sticky-search-form" class="cdx-search-input cdx-search-input--has-end-button"> <div class="cdx-search-input__input-wrapper" data-search-loc="header-moved"> <div class="cdx-text-input cdx-text-input--has-start-icon"> <input class="cdx-text-input__input" type="search" name="search" placeholder="Search Wikipedia"> <span class="cdx-text-input__icon cdx-text-input__start-icon"></span> </div> <input type="hidden" name="title" value="Special:Search"> </div> <button class="cdx-button cdx-search-input__end-button">Search</button> </form> </div> </div> </div> <div class="vector-sticky-header-context-bar"> <nav aria-label="Contents" class="vector-toc-landmark"> <div id="vector-sticky-header-toc" class="vector-dropdown mw-portlet mw-portlet-sticky-header-toc vector-sticky-header-toc vector-button-flush-left" > <input type="checkbox" id="vector-sticky-header-toc-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-sticky-header-toc" class="vector-dropdown-checkbox " aria-label="Toggle the table of contents" > <label id="vector-sticky-header-toc-label" for="vector-sticky-header-toc-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-listBullet mw-ui-icon-wikimedia-listBullet"></span> <span class="vector-dropdown-label-text">Toggle the table of contents</span> </label> <div class="vector-dropdown-content"> <div id="vector-sticky-header-toc-unpinned-container" class="vector-unpinned-container"> </div> </div> </div> </nav> <div class="vector-sticky-header-context-bar-primary" aria-hidden="true" ><span class="mw-page-title-main">Unsupervised learning</span></div> </div> </div> <div class="vector-sticky-header-end" aria-hidden="true"> <div class="vector-sticky-header-icons"> <a href="#" class="cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only" id="ca-talk-sticky-header" tabindex="-1" data-event-name="talk-sticky-header"><span class="vector-icon mw-ui-icon-speechBubbles mw-ui-icon-wikimedia-speechBubbles"></span> <span></span> </a> <a href="#" class="cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only" id="ca-subject-sticky-header" tabindex="-1" data-event-name="subject-sticky-header"><span class="vector-icon mw-ui-icon-article mw-ui-icon-wikimedia-article"></span> <span></span> </a> <a href="#" class="cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only" id="ca-history-sticky-header" tabindex="-1" data-event-name="history-sticky-header"><span class="vector-icon mw-ui-icon-wikimedia-history mw-ui-icon-wikimedia-wikimedia-history"></span> <span></span> </a> <a href="#" class="cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only mw-watchlink" id="ca-watchstar-sticky-header" tabindex="-1" data-event-name="watch-sticky-header"><span class="vector-icon mw-ui-icon-wikimedia-star mw-ui-icon-wikimedia-wikimedia-star"></span> <span></span> </a> <a href="#" class="cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only" id="ca-edit-sticky-header" tabindex="-1" data-event-name="wikitext-edit-sticky-header"><span class="vector-icon mw-ui-icon-wikimedia-wikiText mw-ui-icon-wikimedia-wikimedia-wikiText"></span> <span></span> </a> <a href="#" class="cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only" id="ca-ve-edit-sticky-header" tabindex="-1" data-event-name="ve-edit-sticky-header"><span class="vector-icon mw-ui-icon-wikimedia-edit mw-ui-icon-wikimedia-wikimedia-edit"></span> <span></span> </a> <a href="#" class="cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only" id="ca-viewsource-sticky-header" tabindex="-1" data-event-name="ve-edit-protected-sticky-header"><span class="vector-icon mw-ui-icon-wikimedia-editLock mw-ui-icon-wikimedia-wikimedia-editLock"></span> <span></span> </a> </div> <div class="vector-sticky-header-buttons"> <button class="cdx-button cdx-button--weight-quiet mw-interlanguage-selector" id="p-lang-btn-sticky-header" tabindex="-1" data-event-name="ui.dropdown-p-lang-btn-sticky-header"><span class="vector-icon mw-ui-icon-wikimedia-language mw-ui-icon-wikimedia-wikimedia-language"></span> <span>31 languages</span> </button> <a href="#" class="cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--action-progressive" id="ca-addsection-sticky-header" tabindex="-1" data-event-name="addsection-sticky-header"><span class="vector-icon mw-ui-icon-speechBubbleAdd-progressive mw-ui-icon-wikimedia-speechBubbleAdd-progressive"></span> <span>Add topic</span> </a> </div> <div class="vector-sticky-header-icon-end"> <div class="vector-user-links"> </div> </div> </div> </div> </div> <div class="mw-portlet mw-portlet-dock-bottom emptyPortlet" id="p-dock-bottom"> <ul> </ul> </div> <script>(RLQ=window.RLQ||[]).push(function(){mw.config.set({"wgHostname":"mw-web.codfw.main-69755cc569-8n5w6","wgBackendResponseTime":165,"wgPageParseReport":{"limitreport":{"cputime":"0.609","walltime":"0.784","ppvisitednodes":{"value":2383,"limit":1000000},"postexpandincludesize":{"value":92422,"limit":2097152},"templateargumentsize":{"value":6634,"limit":2097152},"expansiondepth":{"value":12,"limit":100},"expensivefunctioncount":{"value":2,"limit":500},"unstrip-depth":{"value":1,"limit":20},"unstrip-size":{"value":91178,"limit":5000000},"entityaccesscount":{"value":1,"limit":400},"timingprofile":["100.00% 612.675 1 -total"," 41.12% 251.951 1 Template:Reflist"," 22.11% 135.491 1 Template:Machine_learning"," 20.41% 125.024 1 Template:Sidebar_with_collapsible_lists"," 16.80% 102.946 5 Template:Cite_web"," 12.13% 74.296 1 Template:Short_description"," 7.80% 47.801 1 Template:Harvnb"," 7.72% 47.291 6 Template:Cite_journal"," 7.35% 45.061 2 Template:Pagetype"," 6.76% 41.399 1 Template:Differentiable_computing"]},"scribunto":{"limitreport-timeusage":{"value":"0.385","limit":"10.000"},"limitreport-memusage":{"value":7337722,"limit":52428800},"limitreport-logs":"anchor_id_list = table#1 {\n [\"CITEREFAnandkumarGeHsuKakade2014\"] = 1,\n [\"CITEREFBousquet,_O.von_Luxburg,_U.Raetsch,_G.2004\"] = 1,\n [\"CITEREFBuhmannKuhnel1992\"] = 1,\n [\"CITEREFCarpenter,_G.A.Grossberg,_S.1988\"] = 1,\n [\"CITEREFComesaña-CamposBouza-Rodríguez2016\"] = 1,\n [\"CITEREFDuda,_Richard_O.Hart,_Peter_E.Stork,_David_G.2001\"] = 1,\n [\"CITEREFGarbade2018\"] = 1,\n [\"CITEREFHastieTibshiraniFriedman2009\"] = 1,\n [\"CITEREFHinton2012\"] = 1,\n [\"CITEREFHintonSejnowski1999\"] = 1,\n [\"CITEREFJordanBishop2004\"] = 1,\n [\"CITEREFLiWallaceShenLin2020\"] = 1,\n [\"CITEREFLiuZhangHouMian2021\"] = 1,\n [\"CITEREFPeterHintonNealZemel1995\"] = 1,\n [\"CITEREFRadfordNarasimhanSalimansSutskever2018\"] = 1,\n [\"CITEREFRoman2019\"] = 1,\n [\"CITEREFWu\"] = 1,\n}\ntemplate_list = table#1 {\n [\"Authority control\"] = 1,\n [\"Cite book\"] = 7,\n [\"Cite journal\"] = 6,\n [\"Cite web\"] = 5,\n [\"Closed access\"] = 1,\n [\"DEFAULTSORT:Unsupervised Learning\"] = 1,\n [\"Defn\"] = 6,\n [\"Differentiable computing\"] = 1,\n [\"Glossary\"] = 1,\n [\"Glossary end\"] = 1,\n [\"Harvnb\"] = 1,\n [\"Ill\"] = 1,\n [\"Machine learning\"] = 1,\n [\"Refbegin\"] = 1,\n [\"Refend\"] = 1,\n [\"Reflist\"] = 1,\n [\"Short description\"] = 1,\n [\"Term\"] = 6,\n}\narticle_whitelist = table#1 {\n}\nciteref_patterns = table#1 {\n}\n"},"cachereport":{"origin":"mw-api-int.codfw.main-5b7ccf7b7c-mvwz8","timestamp":"20250317164112","ttl":2592000,"transientcontent":false}}});});</script> <script type="application/ld+json">{"@context":"https:\/\/schema.org","@type":"Article","name":"Unsupervised learning","url":"https:\/\/en.wikipedia.org\/wiki\/Unsupervised_learning","sameAs":"http:\/\/www.wikidata.org\/entity\/Q1152135","mainEntity":"http:\/\/www.wikidata.org\/entity\/Q1152135","author":{"@type":"Organization","name":"Contributors to Wikimedia projects"},"publisher":{"@type":"Organization","name":"Wikimedia Foundation, Inc.","logo":{"@type":"ImageObject","url":"https:\/\/www.wikimedia.org\/static\/images\/wmf-hor-googpub.png"}},"datePublished":"2003-05-25T06:42:27Z","dateModified":"2025-02-28T05:06:13Z","headline":"machine learning technique"}</script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10