Machine learning - Wikipedia
[o]" accesskey="o"><span class="vector-icon mw-ui-icon-logIn mw-ui-icon-wikimedia-logIn"></span> <span>Log in</span></a></li> </ul> </div> </div> <div id="p-user-menu-anon-editor" class="vector-menu mw-portlet mw-portlet-user-menu-anon-editor" > <div class="vector-menu-heading"> Pages for logged out editors <a href="/wiki/Help:Introduction" aria-label="Learn more about editing"><span>learn more</span></a> </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-anoncontribs" class="mw-list-item"><a href="/wiki/Special:MyContributions" title="A list of edits made from this IP address [y]" accesskey="y"><span>Contributions</span></a></li><li id="pt-anontalk" class="mw-list-item"><a href="/wiki/Special:MyTalk" title="Discussion about edits from this IP address [n]" accesskey="n"><span>Talk</span></a></li> </ul> </div> </div> </div> </div> </nav> </div> </header> </div> <div class="mw-page-container"> <div class="mw-page-container-inner"> <div class="vector-sitenotice-container"> <div id="siteNotice"><!-- CentralNotice --></div> </div> <div class="vector-column-start"> <div class="vector-main-menu-container"> <div id="mw-navigation"> <nav id="mw-panel" class="vector-main-menu-landmark" aria-label="Site"> <div id="vector-main-menu-pinned-container" class="vector-pinned-container"> </div> </nav> </div> </div> <div class="vector-sticky-pinned-container"> <nav id="mw-panel-toc" aria-label="Contents" data-event-name="ui.sidebar-toc" class="mw-table-of-contents-container vector-toc-landmark"> <div id="vector-toc-pinned-container" class="vector-pinned-container"> <div id="vector-toc" class="vector-toc vector-pinnable-element"> <div class="vector-pinnable-header vector-toc-pinnable-header vector-pinnable-header-pinned" data-feature-name="toc-pinned" data-pinnable-element-id="vector-toc" > <h2 class="vector-pinnable-header-label">Contents</h2> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-toc.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-toc.unpin">hide</button> </div> <ul class="vector-toc-contents" id="mw-panel-toc-list"> <li id="toc-mw-content-text" class="vector-toc-list-item vector-toc-level-1"> <a href="#" class="vector-toc-link"> <div class="vector-toc-text">(Top)</div> </a> </li> <li id="toc-History" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#History"> <div class="vector-toc-text"> <span class="vector-toc-numb">1</span> <span>History</span> </div> </a> <ul id="toc-History-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Relationships_to_other_fields" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Relationships_to_other_fields"> <div class="vector-toc-text"> <span class="vector-toc-numb">2</span> <span>Relationships to other fields</span> </div> </a> <button aria-controls="toc-Relationships_to_other_fields-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Relationships to other fields subsection</span> </button> <ul id="toc-Relationships_to_other_fields-sublist" class="vector-toc-list"> <li id="toc-Artificial_intelligence" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Artificial_intelligence"> <div class="vector-toc-text"> <span 
class="vector-toc-numb">2.1</span> <span>Artificial intelligence</span> </div> </a> <ul id="toc-Artificial_intelligence-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Data_compression" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Data_compression"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.2</span> <span>Data compression</span> </div> </a> <ul id="toc-Data_compression-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Data_mining" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Data_mining"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.3</span> <span>Data mining</span> </div> </a> <ul id="toc-Data_mining-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Generalization" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Generalization"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.4</span> <span>Generalization</span> </div> </a> <ul id="toc-Generalization-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Statistics" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Statistics"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.5</span> <span>Statistics</span> </div> </a> <ul id="toc-Statistics-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Statistical_physics" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Statistical_physics"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.6</span> <span>Statistical physics</span> </div> </a> <ul id="toc-Statistical_physics-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Theory" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Theory"> <div class="vector-toc-text"> <span class="vector-toc-numb">3</span> <span>Theory</span> </div> </a> <ul id="toc-Theory-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Approaches" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Approaches"> <div class="vector-toc-text"> <span class="vector-toc-numb">4</span> <span>Approaches</span> </div> </a> <button aria-controls="toc-Approaches-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Approaches subsection</span> </button> <ul id="toc-Approaches-sublist" class="vector-toc-list"> <li id="toc-Supervised_learning" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Supervised_learning"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.1</span> <span>Supervised learning</span> </div> </a> <ul id="toc-Supervised_learning-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Unsupervised_learning" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Unsupervised_learning"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2</span> <span>Unsupervised learning</span> </div> </a> <ul id="toc-Unsupervised_learning-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Semi-supervised_learning" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Semi-supervised_learning"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.3</span> <span>Semi-supervised learning</span> </div> </a> <ul id="toc-Semi-supervised_learning-sublist" 
class="vector-toc-list"> </ul> </li> <li id="toc-Reinforcement_learning" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Reinforcement_learning"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.4</span> <span>Reinforcement learning</span> </div> </a> <ul id="toc-Reinforcement_learning-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Dimensionality_reduction" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Dimensionality_reduction"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.5</span> <span>Dimensionality reduction</span> </div> </a> <ul id="toc-Dimensionality_reduction-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Other_types" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Other_types"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.6</span> <span>Other types</span> </div> </a> <ul id="toc-Other_types-sublist" class="vector-toc-list"> <li id="toc-Self-learning" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Self-learning"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.6.1</span> <span>Self-learning</span> </div> </a> <ul id="toc-Self-learning-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Feature_learning" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Feature_learning"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.6.2</span> <span>Feature learning</span> </div> </a> <ul id="toc-Feature_learning-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Sparse_dictionary_learning" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Sparse_dictionary_learning"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.6.3</span> <span>Sparse dictionary learning</span> </div> </a> <ul id="toc-Sparse_dictionary_learning-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Anomaly_detection" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Anomaly_detection"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.6.4</span> <span>Anomaly detection</span> </div> </a> <ul id="toc-Anomaly_detection-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Robot_learning" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Robot_learning"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.6.5</span> <span>Robot learning</span> </div> </a> <ul id="toc-Robot_learning-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Association_rules" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Association_rules"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.6.6</span> <span>Association rules</span> </div> </a> <ul id="toc-Association_rules-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> </ul> </li> <li id="toc-Models" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Models"> <div class="vector-toc-text"> <span class="vector-toc-numb">5</span> <span>Models</span> </div> </a> <button aria-controls="toc-Models-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Models subsection</span> </button> <ul id="toc-Models-sublist" class="vector-toc-list"> <li id="toc-Artificial_neural_networks" 
class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Artificial_neural_networks"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.1</span> <span>Artificial neural networks</span> </div> </a> <ul id="toc-Artificial_neural_networks-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Decision_trees" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Decision_trees"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.2</span> <span>Decision trees</span> </div> </a> <ul id="toc-Decision_trees-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Support-vector_machines" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Support-vector_machines"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.3</span> <span>Support-vector machines</span> </div> </a> <ul id="toc-Support-vector_machines-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Regression_analysis" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Regression_analysis"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.4</span> <span>Regression analysis</span> </div> </a> <ul id="toc-Regression_analysis-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Bayesian_networks" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Bayesian_networks"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.5</span> <span>Bayesian networks</span> </div> </a> <ul id="toc-Bayesian_networks-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Gaussian_processes" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Gaussian_processes"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.6</span> <span>Gaussian processes</span> </div> </a> <ul id="toc-Gaussian_processes-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Genetic_algorithms" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Genetic_algorithms"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.7</span> <span>Genetic algorithms</span> </div> </a> <ul id="toc-Genetic_algorithms-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Belief_functions" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Belief_functions"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.8</span> <span>Belief functions</span> </div> </a> <ul id="toc-Belief_functions-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Training_models" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Training_models"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.9</span> <span>Training models</span> </div> </a> <ul id="toc-Training_models-sublist" class="vector-toc-list"> <li id="toc-Federated_learning" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Federated_learning"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.9.1</span> <span>Federated learning</span> </div> </a> <ul id="toc-Federated_learning-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> </ul> </li> <li id="toc-Applications" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Applications"> <div class="vector-toc-text"> <span class="vector-toc-numb">6</span> <span>Applications</span> </div> </a> <ul id="toc-Applications-sublist" 
class="vector-toc-list"> </ul> </li> <li id="toc-Limitations" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Limitations"> <div class="vector-toc-text"> <span class="vector-toc-numb">7</span> <span>Limitations</span> </div> </a> <button aria-controls="toc-Limitations-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Limitations subsection</span> </button> <ul id="toc-Limitations-sublist" class="vector-toc-list"> <li id="toc-Bias" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Bias"> <div class="vector-toc-text"> <span class="vector-toc-numb">7.1</span> <span>Bias</span> </div> </a> <ul id="toc-Bias-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Explainability" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Explainability"> <div class="vector-toc-text"> <span class="vector-toc-numb">7.2</span> <span>Explainability</span> </div> </a> <ul id="toc-Explainability-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Overfitting" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Overfitting"> <div class="vector-toc-text"> <span class="vector-toc-numb">7.3</span> <span>Overfitting</span> </div> </a> <ul id="toc-Overfitting-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Other_limitations_and_vulnerabilities" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Other_limitations_and_vulnerabilities"> <div class="vector-toc-text"> <span class="vector-toc-numb">7.4</span> <span>Other limitations and vulnerabilities</span> </div> </a> <ul id="toc-Other_limitations_and_vulnerabilities-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Model_assessments" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Model_assessments"> <div class="vector-toc-text"> <span class="vector-toc-numb">8</span> <span>Model assessments</span> </div> </a> <ul id="toc-Model_assessments-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Ethics" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Ethics"> <div class="vector-toc-text"> <span class="vector-toc-numb">9</span> <span>Ethics</span> </div> </a> <ul id="toc-Ethics-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Hardware" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Hardware"> <div class="vector-toc-text"> <span class="vector-toc-numb">10</span> <span>Hardware</span> </div> </a> <button aria-controls="toc-Hardware-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Hardware subsection</span> </button> <ul id="toc-Hardware-sublist" class="vector-toc-list"> <li id="toc-Neuromorphic_computing" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Neuromorphic_computing"> <div class="vector-toc-text"> <span class="vector-toc-numb">10.1</span> <span>Neuromorphic computing</span> </div> </a> <ul id="toc-Neuromorphic_computing-sublist" class="vector-toc-list"> <li id="toc-physical_neural_networks" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#physical_neural_networks"> <div class="vector-toc-text"> <span class="vector-toc-numb">10.1.1</span> 
<span>physical neural networks</span> </div> </a> <ul id="toc-physical_neural_networks-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Embedded_machine_learning" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Embedded_machine_learning"> <div class="vector-toc-text"> <span class="vector-toc-numb">10.2</span> <span>Embedded machine learning</span> </div> </a> <ul id="toc-Embedded_machine_learning-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Software" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Software"> <div class="vector-toc-text"> <span class="vector-toc-numb">11</span> <span>Software</span> </div> </a> <button aria-controls="toc-Software-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Software subsection</span> </button> <ul id="toc-Software-sublist" class="vector-toc-list"> <li id="toc-Free_and_open-source_software" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Free_and_open-source_software"> <div class="vector-toc-text"> <span class="vector-toc-numb">11.1</span> <span>Free and open-source software</span> </div> </a> <ul id="toc-Free_and_open-source_software-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Proprietary_software_with_free_and_open-source_editions" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Proprietary_software_with_free_and_open-source_editions"> <div class="vector-toc-text"> <span class="vector-toc-numb">11.2</span> <span>Proprietary software with free and open-source editions</span> </div> </a> <ul id="toc-Proprietary_software_with_free_and_open-source_editions-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Proprietary_software" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Proprietary_software"> <div class="vector-toc-text"> <span class="vector-toc-numb">11.3</span> <span>Proprietary software</span> </div> </a> <ul id="toc-Proprietary_software-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Journals" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Journals"> <div class="vector-toc-text"> <span class="vector-toc-numb">12</span> <span>Journals</span> </div> </a> <ul id="toc-Journals-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Conferences" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Conferences"> <div class="vector-toc-text"> <span class="vector-toc-numb">13</span> <span>Conferences</span> </div> </a> <ul id="toc-Conferences-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-See_also" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#See_also"> <div class="vector-toc-text"> <span class="vector-toc-numb">14</span> <span>See also</span> </div> </a> <ul id="toc-See_also-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-References" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#References"> <div class="vector-toc-text"> <span class="vector-toc-numb">15</span> <span>References</span> </div> </a> <ul id="toc-References-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Sources" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Sources"> <div class="vector-toc-text"> <span 
class="vector-toc-numb">16</span> <span>Sources</span> </div> </a> <ul id="toc-Sources-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Further_reading" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Further_reading"> <div class="vector-toc-text"> <span class="vector-toc-numb">17</span> <span>Further reading</span> </div> </a> <ul id="toc-Further_reading-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-External_links" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#External_links"> <div class="vector-toc-text"> <span class="vector-toc-numb">18</span> <span>External links</span> </div> </a> <ul id="toc-External_links-sublist" class="vector-toc-list"> </ul> </li> </ul> </div> </div> </nav> </div> </div> <div class="mw-content-container"> <main id="content" class="mw-body"> <header class="mw-body-header vector-page-titlebar"> <nav aria-label="Contents" class="vector-toc-landmark"> <div id="vector-page-titlebar-toc" class="vector-dropdown vector-page-titlebar-toc vector-button-flush-left" > <input type="checkbox" id="vector-page-titlebar-toc-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-titlebar-toc" class="vector-dropdown-checkbox " aria-label="Toggle the table of contents" > <label id="vector-page-titlebar-toc-label" for="vector-page-titlebar-toc-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-listBullet mw-ui-icon-wikimedia-listBullet"></span> <span class="vector-dropdown-label-text">Toggle the table of contents</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-titlebar-toc-unpinned-container" class="vector-unpinned-container"> </div> </div> </div> </nav> <h1 id="firstHeading" class="firstHeading mw-first-heading"><span class="mw-page-title-main">Machine learning</span></h1> <div id="p-lang-btn" class="vector-dropdown mw-portlet mw-portlet-lang" > <input type="checkbox" id="p-lang-btn-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-p-lang-btn" class="vector-dropdown-checkbox mw-interlanguage-selector" aria-label="Go to an article in another language. 
data-language-autonym="Simple English" data-language-local-name="Simple English" class="interlanguage-link-target"><span>Simple English</span></a></li><li class="interlanguage-link interwiki-sl mw-list-item"><a href="https://sl.wikipedia.org/wiki/Strojno_u%C4%8Denje" title="Strojno učenje – Slovenian" lang="sl" hreflang="sl" data-title="Strojno učenje" data-language-autonym="Slovenščina" data-language-local-name="Slovenian" class="interlanguage-link-target"><span>Slovenščina</span></a></li><li class="interlanguage-link interwiki-ckb mw-list-item"><a href="https://ckb.wikipedia.org/wiki/%D9%81%DB%8E%D8%B1%D8%A8%D9%88%D9%88%D9%86%DB%8C_%D9%85%DB%95%DA%A9%DB%8C%D9%86%DB%95" title="فێربوونی مەکینە – Central Kurdish" lang="ckb" hreflang="ckb" data-title="فێربوونی مەکینە" data-language-autonym="کوردی" data-language-local-name="Central Kurdish" class="interlanguage-link-target"><span>کوردی</span></a></li><li class="interlanguage-link interwiki-sr mw-list-item"><a href="https://sr.wikipedia.org/wiki/%D0%9C%D0%B0%D1%88%D0%B8%D0%BD%D1%81%D0%BA%D0%BE_%D1%83%D1%87%D0%B5%D1%9A%D0%B5" title="Машинско учење – Serbian" lang="sr" hreflang="sr" data-title="Машинско учење" data-language-autonym="Српски / srpski" data-language-local-name="Serbian" class="interlanguage-link-target"><span>Српски / srpski</span></a></li><li class="interlanguage-link interwiki-sh mw-list-item"><a href="https://sh.wikipedia.org/wiki/Ma%C5%A1insko_u%C4%8Denje" title="Mašinsko učenje – Serbo-Croatian" lang="sh" hreflang="sh" data-title="Mašinsko učenje" data-language-autonym="Srpskohrvatski / српскохрватски" data-language-local-name="Serbo-Croatian" class="interlanguage-link-target"><span>Srpskohrvatski / српскохрватски</span></a></li><li class="interlanguage-link interwiki-fi mw-list-item"><a href="https://fi.wikipedia.org/wiki/Koneoppiminen" title="Koneoppiminen – Finnish" lang="fi" hreflang="fi" data-title="Koneoppiminen" data-language-autonym="Suomi" data-language-local-name="Finnish" class="interlanguage-link-target"><span>Suomi</span></a></li><li class="interlanguage-link interwiki-sv mw-list-item"><a href="https://sv.wikipedia.org/wiki/Maskininl%C3%A4rning" title="Maskininlärning – Swedish" lang="sv" hreflang="sv" data-title="Maskininlärning" data-language-autonym="Svenska" data-language-local-name="Swedish" class="interlanguage-link-target"><span>Svenska</span></a></li><li class="interlanguage-link interwiki-tl mw-list-item"><a href="https://tl.wikipedia.org/wiki/Pagkatuto_ng_makina" title="Pagkatuto ng makina – Tagalog" lang="tl" hreflang="tl" data-title="Pagkatuto ng makina" data-language-autonym="Tagalog" data-language-local-name="Tagalog" class="interlanguage-link-target"><span>Tagalog</span></a></li><li class="interlanguage-link interwiki-ta mw-list-item"><a href="https://ta.wikipedia.org/wiki/%E0%AE%87%E0%AE%AF%E0%AE%A8%E0%AF%8D%E0%AE%A4%E0%AE%BF%E0%AE%B0_%E0%AE%95%E0%AE%B1%E0%AF%8D%E0%AE%B1%E0%AE%B2%E0%AF%8D" title="இயந்திர கற்றல் – Tamil" lang="ta" hreflang="ta" data-title="இயந்திர கற்றல்" data-language-autonym="தமிழ்" data-language-local-name="Tamil" class="interlanguage-link-target"><span>தமிழ்</span></a></li><li class="interlanguage-link interwiki-te mw-list-item"><a href="https://te.wikipedia.org/wiki/%E0%B0%AE%E0%B0%B0_%E0%B0%AA%E0%B1%8D%E0%B0%B0%E0%B0%9C%E0%B1%8D%E0%B0%9E" title="మర ప్రజ్ఞ – Telugu" lang="te" hreflang="te" data-title="మర ప్రజ్ఞ" data-language-autonym="తెలుగు" data-language-local-name="Telugu" class="interlanguage-link-target"><span>తెలుగు</span></a></li><li class="interlanguage-link 
interwiki-th mw-list-item"><a href="https://th.wikipedia.org/wiki/%E0%B8%81%E0%B8%B2%E0%B8%A3%E0%B9%80%E0%B8%A3%E0%B8%B5%E0%B8%A2%E0%B8%99%E0%B8%A3%E0%B8%B9%E0%B9%89%E0%B8%82%E0%B8%AD%E0%B8%87%E0%B9%80%E0%B8%84%E0%B8%A3%E0%B8%B7%E0%B9%88%E0%B8%AD%E0%B8%87" title="การเรียนรู้ของเครื่อง – Thai" lang="th" hreflang="th" data-title="การเรียนรู้ของเครื่อง" data-language-autonym="ไทย" data-language-local-name="Thai" class="interlanguage-link-target"><span>ไทย</span></a></li><li class="interlanguage-link interwiki-tr mw-list-item"><a href="https://tr.wikipedia.org/wiki/Makine_%C3%B6%C4%9Frenimi" title="Makine öğrenimi – Turkish" lang="tr" hreflang="tr" data-title="Makine öğrenimi" data-language-autonym="Türkçe" data-language-local-name="Turkish" class="interlanguage-link-target"><span>Türkçe</span></a></li><li class="interlanguage-link interwiki-uk mw-list-item"><a href="https://uk.wikipedia.org/wiki/%D0%9C%D0%B0%D1%88%D0%B8%D0%BD%D0%BD%D0%B5_%D0%BD%D0%B0%D0%B2%D1%87%D0%B0%D0%BD%D0%BD%D1%8F" title="Машинне навчання – Ukrainian" lang="uk" hreflang="uk" data-title="Машинне навчання" data-language-autonym="Українська" data-language-local-name="Ukrainian" class="interlanguage-link-target"><span>Українська</span></a></li><li class="interlanguage-link interwiki-ur mw-list-item"><a href="https://ur.wikipedia.org/wiki/%D9%85%D8%B4%DB%8C%D9%86_%D8%A2%D9%85%D9%88%D8%B2%DB%8C" title="مشین آموزی – Urdu" lang="ur" hreflang="ur" data-title="مشین آموزی" data-language-autonym="اردو" data-language-local-name="Urdu" class="interlanguage-link-target"><span>اردو</span></a></li><li class="interlanguage-link interwiki-ug mw-list-item"><a href="https://ug.wikipedia.org/wiki/%D9%85%D8%A7%D8%B4%D9%86%D9%89%D9%84%D9%89%D9%82_%D8%A6%DB%86%DA%AF%D9%89%D9%86%D9%89%D8%B4" title="ماشنىلىق ئۆگىنىش – Uyghur" lang="ug" hreflang="ug" data-title="ماشنىلىق ئۆگىنىش" data-language-autonym="ئۇيغۇرچە / Uyghurche" data-language-local-name="Uyghur" class="interlanguage-link-target"><span>ئۇيغۇرچە / Uyghurche</span></a></li><li class="interlanguage-link interwiki-vi mw-list-item"><a href="https://vi.wikipedia.org/wiki/H%E1%BB%8Dc_m%C3%A1y" title="Học máy – Vietnamese" lang="vi" hreflang="vi" data-title="Học máy" data-language-autonym="Tiếng Việt" data-language-local-name="Vietnamese" class="interlanguage-link-target"><span>Tiếng Việt</span></a></li><li class="interlanguage-link interwiki-fiu-vro mw-list-item"><a href="https://fiu-vro.wikipedia.org/wiki/Massinoppus" title="Massinoppus – Võro" lang="vro" hreflang="vro" data-title="Massinoppus" data-language-autonym="Võro" data-language-local-name="Võro" class="interlanguage-link-target"><span>Võro</span></a></li><li class="interlanguage-link interwiki-wuu mw-list-item"><a href="https://wuu.wikipedia.org/wiki/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0" title="机器学习 – Wu" lang="wuu" hreflang="wuu" data-title="机器学习" data-language-autonym="吴语" data-language-local-name="Wu" class="interlanguage-link-target"><span>吴语</span></a></li><li class="interlanguage-link interwiki-zh-yue mw-list-item"><a href="https://zh-yue.wikipedia.org/wiki/%E6%A9%9F%E6%A2%B0%E5%AD%B8%E7%BF%92" title="機械學習 – Cantonese" lang="yue" hreflang="yue" data-title="機械學習" data-language-autonym="粵語" data-language-local-name="Cantonese" class="interlanguage-link-target"><span>粵語</span></a></li><li class="interlanguage-link interwiki-zh mw-list-item"><a href="https://zh.wikipedia.org/wiki/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0" title="机器学习 – Chinese" lang="zh" hreflang="zh" data-title="机器学习" data-language-autonym="中文" 
data-language-local-name="Chinese" class="interlanguage-link-target"><span>中文</span></a></li> </ul> <div class="after-portlet after-portlet-lang"><span class="wb-langlinks-edit wb-langlinks-link"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q2539#sitelinks-wikipedia" title="Edit interlanguage links" class="wbc-editpage">Edit links</a></span></div> </div> </div> </div> </header> <div class="vector-page-toolbar"> <div class="vector-page-toolbar-container"> <div id="left-navigation"> <nav aria-label="Namespaces"> <div id="p-associated-pages" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-associated-pages" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-nstab-main" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/Machine_learning" title="View the content page [c]" accesskey="c"><span>Article</span></a></li><li id="ca-talk" class="vector-tab-noicon mw-list-item"><a href="/wiki/Talk:Machine_learning" rel="discussion" title="Discuss improvements to the content page [t]" accesskey="t"><span>Talk</span></a></li> </ul> </div> </div> <div id="vector-variants-dropdown" class="vector-dropdown emptyPortlet" > <input type="checkbox" id="vector-variants-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-variants-dropdown" class="vector-dropdown-checkbox " aria-label="Change language variant" > <label id="vector-variants-dropdown-label" for="vector-variants-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">English</span> </label> <div class="vector-dropdown-content"> <div id="p-variants" class="vector-menu mw-portlet mw-portlet-variants emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> </div> </div> </nav> </div> <div id="right-navigation" class="vector-collapsible"> <nav aria-label="Views"> <div id="p-views" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-views" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-view" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/Machine_learning"><span>Read</span></a></li><li id="ca-edit" class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=Machine_learning&action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-history" class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=Machine_learning&action=history" title="Past revisions of this page [h]" accesskey="h"><span>View history</span></a></li> </ul> </div> </div> </nav> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-dropdown" class="vector-dropdown vector-page-tools-dropdown" > <input type="checkbox" id="vector-page-tools-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-tools-dropdown" class="vector-dropdown-checkbox " aria-label="Tools" > <label id="vector-page-tools-dropdown-label" for="vector-page-tools-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">Tools</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-tools-unpinned-container" class="vector-unpinned-container"> <div id="vector-page-tools" 
class="vector-page-tools vector-pinnable-element"> <div class="vector-pinnable-header vector-page-tools-pinnable-header vector-pinnable-header-unpinned" data-feature-name="page-tools-pinned" data-pinnable-element-id="vector-page-tools" data-pinned-container-id="vector-page-tools-pinned-container" data-unpinned-container-id="vector-page-tools-unpinned-container" > <div class="vector-pinnable-header-label">Tools</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-page-tools.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-page-tools.unpin">hide</button> </div> <div id="p-cactions" class="vector-menu mw-portlet mw-portlet-cactions emptyPortlet vector-has-collapsible-items" title="More options" > <div class="vector-menu-heading"> Actions </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-more-view" class="selected vector-more-collapsible-item mw-list-item"><a href="/wiki/Machine_learning"><span>Read</span></a></li><li id="ca-more-edit" class="vector-more-collapsible-item mw-list-item"><a href="/w/index.php?title=Machine_learning&action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-more-history" class="vector-more-collapsible-item mw-list-item"><a href="/w/index.php?title=Machine_learning&action=history"><span>View history</span></a></li> </ul> </div> </div> <div id="p-tb" class="vector-menu mw-portlet mw-portlet-tb" > <div class="vector-menu-heading"> General </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="t-whatlinkshere" class="mw-list-item"><a href="/wiki/Special:WhatLinksHere/Machine_learning" title="List of all English Wikipedia pages containing links to this page [j]" accesskey="j"><span>What links here</span></a></li><li id="t-recentchangeslinked" class="mw-list-item"><a href="/wiki/Special:RecentChangesLinked/Machine_learning" rel="nofollow" title="Recent changes in pages linked from this page [k]" accesskey="k"><span>Related changes</span></a></li><li id="t-upload" class="mw-list-item"><a href="/wiki/Wikipedia:File_Upload_Wizard" title="Upload files [u]" accesskey="u"><span>Upload file</span></a></li><li id="t-specialpages" class="mw-list-item"><a href="/wiki/Special:SpecialPages" title="A list of all special pages [q]" accesskey="q"><span>Special pages</span></a></li><li id="t-permalink" class="mw-list-item"><a href="/w/index.php?title=Machine_learning&oldid=1259171820" title="Permanent link to this revision of this page"><span>Permanent link</span></a></li><li id="t-info" class="mw-list-item"><a href="/w/index.php?title=Machine_learning&action=info" title="More information about this page"><span>Page information</span></a></li><li id="t-cite" class="mw-list-item"><a href="/w/index.php?title=Special:CiteThisPage&page=Machine_learning&id=1259171820&wpFormIdentifier=titleform" title="Information on how to cite this page"><span>Cite this page</span></a></li><li id="t-urlshortener" class="mw-list-item"><a href="/w/index.php?title=Special:UrlShortener&url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FMachine_learning"><span>Get shortened URL</span></a></li><li id="t-urlshortener-qrcode" class="mw-list-item"><a href="/w/index.php?title=Special:QrCode&url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FMachine_learning"><span>Download QR code</span></a></li> </ul> </div> </div> <div 
id="p-coll-print_export" class="vector-menu mw-portlet mw-portlet-coll-print_export" > <div class="vector-menu-heading"> Print/export </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="coll-download-as-rl" class="mw-list-item"><a href="/w/index.php?title=Special:DownloadAsPdf&page=Machine_learning&action=show-download-screen" title="Download this page as a PDF file"><span>Download as PDF</span></a></li><li id="t-print" class="mw-list-item"><a href="/w/index.php?title=Machine_learning&printable=yes" title="Printable version of this page [p]" accesskey="p"><span>Printable version</span></a></li> </ul> </div> </div> <div id="p-wikibase-otherprojects" class="vector-menu mw-portlet mw-portlet-wikibase-otherprojects" > <div class="vector-menu-heading"> In other projects </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li class="wb-otherproject-link wb-otherproject-commons mw-list-item"><a href="https://commons.wikimedia.org/wiki/Category:Machine_learning" hreflang="en"><span>Wikimedia Commons</span></a></li><li class="wb-otherproject-link wb-otherproject-wikiquote mw-list-item"><a href="https://en.wikiquote.org/wiki/Machine_learning" hreflang="en"><span>Wikiquote</span></a></li><li class="wb-otherproject-link wb-otherproject-wikiversity mw-list-item"><a href="https://en.wikiversity.org/wiki/Machine_learning" hreflang="en"><span>Wikiversity</span></a></li><li id="t-wikibase" class="wb-otherproject-link wb-otherproject-wikibase-dataitem mw-list-item"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q2539" title="Structured data on this page hosted by Wikidata [g]" accesskey="g"><span>Wikidata item</span></a></li> </ul> </div> </div> </div> </div> </div> </div> </nav> </div> </div> </div> <div class="vector-column-end"> <div class="vector-sticky-pinned-container"> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-pinned-container" class="vector-pinned-container"> </div> </nav> <nav class="vector-appearance-landmark" aria-label="Appearance"> <div id="vector-appearance-pinned-container" class="vector-pinned-container"> <div id="vector-appearance" class="vector-appearance vector-pinnable-element"> <div class="vector-pinnable-header vector-appearance-pinnable-header vector-pinnable-header-pinned" data-feature-name="appearance-pinned" data-pinnable-element-id="vector-appearance" data-pinned-container-id="vector-appearance-pinned-container" data-unpinned-container-id="vector-appearance-unpinned-container" > <div class="vector-pinnable-header-label">Appearance</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-appearance.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-appearance.unpin">hide</button> </div> </div> </div> </nav> </div> </div> <div id="bodyContent" class="vector-body" aria-labelledby="firstHeading" data-mw-ve-target-container> <div class="vector-body-before-content"> <div class="mw-indicators"> </div> <div id="siteSub" class="noprint">From Wikipedia, the free encyclopedia</div> </div> <div id="contentSub"><div id="mw-content-subtitle"></div></div> <div id="mw-content-text" class="mw-body-content"><div class="mw-content-ltr mw-parser-output" lang="en" dir="ltr"><div class="shortdescription nomobile noexcerpt noprint searchaux" style="display:none">Study of algorithms that improve 
automatically through experience</div> <style data-mw-deduplicate="TemplateStyles:r1236090951">.mw-parser-output .hatnote{font-style:italic}.mw-parser-output div.hatnote{padding-left:1.6em;margin-bottom:0.5em}.mw-parser-output .hatnote i{font-style:normal}.mw-parser-output .hatnote+link+.hatnote{margin-top:-0.5em}@media print{body.ns-0 .mw-parser-output .hatnote{display:none!important}}</style><div role="note" class="hatnote navigation-not-searchable">For the journal, see <a href="/wiki/Machine_Learning_(journal)" title="Machine Learning (journal)"><i>Machine Learning</i> (journal)</a>.</div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">"Statistical learning" redirects here. For statistical learning in linguistics, see <a href="/wiki/Statistical_learning_in_language_acquisition" title="Statistical learning in language acquisition">statistical learning in language acquisition</a>.</div> <style data-mw-deduplicate="TemplateStyles:r1244144826">.mw-parser-output .machine-learning-list-title{background-color:#ddddff}html.skin-theme-clientpref-night .mw-parser-output .machine-learning-list-title{background-color:#222}@media(prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .machine-learning-list-title{background-color:#222}}</style> <style data-mw-deduplicate="TemplateStyles:r1129693374">.mw-parser-output .hlist dl,.mw-parser-output .hlist ol,.mw-parser-output .hlist ul{margin:0;padding:0}.mw-parser-output .hlist dd,.mw-parser-output .hlist dt,.mw-parser-output .hlist li{margin:0;display:inline}.mw-parser-output .hlist.inline,.mw-parser-output .hlist.inline dl,.mw-parser-output .hlist.inline ol,.mw-parser-output .hlist.inline ul,.mw-parser-output .hlist dl dl,.mw-parser-output .hlist dl ol,.mw-parser-output .hlist dl ul,.mw-parser-output .hlist ol dl,.mw-parser-output .hlist ol ol,.mw-parser-output .hlist ol ul,.mw-parser-output .hlist ul dl,.mw-parser-output .hlist ul ol,.mw-parser-output .hlist ul ul{display:inline}.mw-parser-output .hlist .mw-empty-li{display:none}.mw-parser-output .hlist dt::after{content:": "}.mw-parser-output .hlist dd::after,.mw-parser-output .hlist li::after{content:" · ";font-weight:bold}.mw-parser-output .hlist dd:last-child::after,.mw-parser-output .hlist dt:last-child::after,.mw-parser-output .hlist li:last-child::after{content:none}.mw-parser-output .hlist dd dd:first-child::before,.mw-parser-output .hlist dd dt:first-child::before,.mw-parser-output .hlist dd li:first-child::before,.mw-parser-output .hlist dt dd:first-child::before,.mw-parser-output .hlist dt dt:first-child::before,.mw-parser-output .hlist dt li:first-child::before,.mw-parser-output .hlist li dd:first-child::before,.mw-parser-output .hlist li dt:first-child::before,.mw-parser-output .hlist li li:first-child::before{content:" (";font-weight:normal}.mw-parser-output .hlist dd dd:last-child::after,.mw-parser-output .hlist dd dt:last-child::after,.mw-parser-output .hlist dd li:last-child::after,.mw-parser-output .hlist dt dd:last-child::after,.mw-parser-output .hlist dt dt:last-child::after,.mw-parser-output .hlist dt li:last-child::after,.mw-parser-output .hlist li dd:last-child::after,.mw-parser-output .hlist li dt:last-child::after,.mw-parser-output .hlist li li:last-child::after{content:")";font-weight:normal}.mw-parser-output .hlist ol{counter-reset:listitem}.mw-parser-output .hlist ol>li{counter-increment:listitem}.mw-parser-output .hlist ol>li::before{content:" 
"counter(listitem)"\a0 "}.mw-parser-output .hlist dd ol>li:first-child::before,.mw-parser-output .hlist dt ol>li:first-child::before,.mw-parser-output .hlist li ol>li:first-child::before{content:" ("counter(listitem)"\a0 "}</style><style data-mw-deduplicate="TemplateStyles:r1246091330">.mw-parser-output .sidebar{width:22em;float:right;clear:right;margin:0.5em 0 1em 1em;background:var(--background-color-neutral-subtle,#f8f9fa);border:1px solid var(--border-color-base,#a2a9b1);padding:0.2em;text-align:center;line-height:1.4em;font-size:88%;border-collapse:collapse;display:table}body.skin-minerva .mw-parser-output .sidebar{display:table!important;float:right!important;margin:0.5em 0 1em 1em!important}.mw-parser-output .sidebar-subgroup{width:100%;margin:0;border-spacing:0}.mw-parser-output .sidebar-left{float:left;clear:left;margin:0.5em 1em 1em 0}.mw-parser-output .sidebar-none{float:none;clear:both;margin:0.5em 1em 1em 0}.mw-parser-output .sidebar-outer-title{padding:0 0.4em 0.2em;font-size:125%;line-height:1.2em;font-weight:bold}.mw-parser-output .sidebar-top-image{padding:0.4em}.mw-parser-output .sidebar-top-caption,.mw-parser-output .sidebar-pretitle-with-top-image,.mw-parser-output .sidebar-caption{padding:0.2em 0.4em 0;line-height:1.2em}.mw-parser-output .sidebar-pretitle{padding:0.4em 0.4em 0;line-height:1.2em}.mw-parser-output .sidebar-title,.mw-parser-output .sidebar-title-with-pretitle{padding:0.2em 0.8em;font-size:145%;line-height:1.2em}.mw-parser-output .sidebar-title-with-pretitle{padding:0.1em 0.4em}.mw-parser-output .sidebar-image{padding:0.2em 0.4em 0.4em}.mw-parser-output .sidebar-heading{padding:0.1em 0.4em}.mw-parser-output .sidebar-content{padding:0 0.5em 0.4em}.mw-parser-output .sidebar-content-with-subgroup{padding:0.1em 0.4em 0.2em}.mw-parser-output .sidebar-above,.mw-parser-output .sidebar-below{padding:0.3em 0.8em;font-weight:bold}.mw-parser-output .sidebar-collapse .sidebar-above,.mw-parser-output .sidebar-collapse .sidebar-below{border-top:1px solid #aaa;border-bottom:1px solid #aaa}.mw-parser-output .sidebar-navbar{text-align:right;font-size:115%;padding:0 0.4em 0.4em}.mw-parser-output .sidebar-list-title{padding:0 0.4em;text-align:left;font-weight:bold;line-height:1.6em;font-size:105%}.mw-parser-output .sidebar-list-title-c{padding:0 0.4em;text-align:center;margin:0 3.3em}@media(max-width:640px){body.mediawiki .mw-parser-output .sidebar{width:100%!important;clear:both;float:none!important;margin-left:0!important;margin-right:0!important}}body.skin--responsive .mw-parser-output .sidebar a>img{max-width:none!important}@media screen{html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-list-title,html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle{background:transparent!important}html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle a{color:var(--color-progressive)!important}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-list-title,html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle{background:transparent!important}html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle a{color:var(--color-progressive)!important}}@media print{body.ns-0 .mw-parser-output .sidebar{display:none!important}}</style><style data-mw-deduplicate="TemplateStyles:r886047488">.mw-parser-output 
Part of a series on
Machine learning and data mining

Paradigms: Supervised learning · Unsupervised learning · Semi-supervised learning · Self-supervised learning · Reinforcement learning · Meta-learning · Online learning · Batch learning · Curriculum learning · Rule-based learning · Neuro-symbolic AI · Neuromorphic engineering · Quantum machine learning

Problems: Classification · Generative modeling · Regression · Clustering · Dimensionality reduction · Density estimation · Anomaly detection · Data cleaning · AutoML · Association rules · Semantic analysis · Structured prediction · Feature engineering · Feature learning · Learning to rank · Grammar induction · Ontology learning · Multimodal learning

Supervised learning (classification · regression): Apprenticeship learning · Decision trees · Ensembles (Bagging · Boosting · Random forest) · k-NN · Linear regression · Naive Bayes · Artificial neural networks · Logistic regression · Perceptron · Relevance vector machine (RVM) · Support vector machine (SVM)

Clustering: BIRCH · CURE · Hierarchical · k-means · Fuzzy · Expectation–maximization (EM) · DBSCAN · OPTICS · Mean shift

Dimensionality reduction: Factor analysis · CCA · ICA · LDA · NMF · PCA · PGD · t-SNE · SDL

Structured prediction: Graphical models (Bayes net · Conditional random field · Hidden Markov)

Anomaly detection: RANSAC · k-NN · Local outlier factor · Isolation forest

Artificial neural network: Autoencoder · Deep learning · Feedforward neural network · Recurrent neural network (LSTM · GRU · ESN · reservoir computing) · Boltzmann machine (Restricted) · GAN · Diffusion model · SOM · Convolutional neural network (U-Net · LeNet · AlexNet · DeepDream) · Neural radiance field · Transformer (Vision) · Mamba · Spiking neural network · Memtransistor · Electrochemical RAM (ECRAM)

Reinforcement learning: Q-learning · SARSA · Temporal difference (TD) · Multi-agent (Self-play)

Learning with humans: Active learning · Crowdsourcing · Human-in-the-loop · RLHF

Model diagnostics: Coefficient of determination · Confusion matrix · Learning curve · ROC curve

Mathematical foundations: Kernel machines · Bias–variance tradeoff · Computational learning theory · Empirical risk minimization · Occam learning · PAC learning · Statistical learning · VC theory

Journals and conferences: ECML PKDD · NeurIPS · ICML · ICLR · IJCAI · ML · JMLR

Related articles: Glossary of artificial intelligence · List of datasets for machine-learning research (List of datasets in computer vision and image processing) · Outline of machine learning
rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1246091330"><table class="sidebar sidebar-collapse nomobile nowraplinks hlist"><tbody><tr><td class="sidebar-pretitle">Part of a series on</td></tr><tr><th class="sidebar-title-with-pretitle"><a href="/wiki/Artificial_intelligence" title="Artificial intelligence">Artificial intelligence</a></th></tr><tr><td class="sidebar-image"><figure class="mw-halign-center" typeof="mw:File"><a href="/wiki/File:Dall-e_3_(jan_%2724)_artificial_intelligence_icon.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/6/64/Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png/100px-Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png" decoding="async" width="100" height="100" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/6/64/Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png/150px-Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/64/Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png/200px-Dall-e_3_%28jan_%2724%29_artificial_intelligence_icon.png 2x" data-file-width="820" data-file-height="820" /></a><figcaption></figcaption></figure></td></tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)"><a href="/wiki/Artificial_intelligence#Goals" title="Artificial intelligence">Major goals</a></div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Artificial_general_intelligence" title="Artificial general intelligence">Artificial general intelligence</a></li> <li><a href="/wiki/Intelligent_agent" title="Intelligent agent">Intelligent agent</a></li> <li><a href="/wiki/Recursive_self-improvement" title="Recursive self-improvement">Recursive self-improvement</a></li> <li><a href="/wiki/Automated_planning_and_scheduling" title="Automated planning and scheduling">Planning</a></li> <li><a href="/wiki/Computer_vision" title="Computer vision">Computer vision</a></li> <li><a href="/wiki/General_game_playing" title="General game playing">General game playing</a></li> <li><a href="/wiki/Knowledge_representation_and_reasoning" title="Knowledge representation and reasoning">Knowledge reasoning</a></li> <li><a href="/wiki/Natural_language_processing" title="Natural language processing">Natural language processing</a></li> <li><a href="/wiki/Robotics" title="Robotics">Robotics</a></li> <li><a href="/wiki/AI_safety" title="AI safety">AI safety</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)">Approaches</div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a class="mw-selflink selflink">Machine learning</a></li> <li><a href="/wiki/Symbolic_artificial_intelligence" title="Symbolic artificial intelligence">Symbolic</a></li> <li><a href="/wiki/Deep_learning" title="Deep learning">Deep learning</a></li> <li><a href="/wiki/Bayesian_network" title="Bayesian network">Bayesian networks</a></li> <li><a href="/wiki/Evolutionary_algorithm" title="Evolutionary algorithm">Evolutionary algorithms</a></li> <li><a href="/wiki/Hybrid_intelligent_system" title="Hybrid intelligent system">Hybrid intelligent systems</a></li> <li><a 
href="/wiki/Artificial_intelligence_systems_integration" title="Artificial intelligence systems integration">Systems integration</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)"><a href="/wiki/Applications_of_artificial_intelligence" title="Applications of artificial intelligence">Applications</a></div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Machine_learning_in_bioinformatics" title="Machine learning in bioinformatics">Bioinformatics</a></li> <li><a href="/wiki/Deepfake" title="Deepfake">Deepfake</a></li> <li><a href="/wiki/Machine_learning_in_earth_sciences" title="Machine learning in earth sciences">Earth sciences</a></li> <li><a href="/wiki/Applications_of_artificial_intelligence#Finance" title="Applications of artificial intelligence"> Finance </a></li> <li><a href="/wiki/Generative_artificial_intelligence" title="Generative artificial intelligence">Generative AI</a> <ul><li><a href="/wiki/Artificial_intelligence_art" title="Artificial intelligence art">Art</a></li> <li><a href="/wiki/Generative_audio" title="Generative audio">Audio</a></li> <li><a href="/wiki/Music_and_artificial_intelligence" title="Music and artificial intelligence">Music</a></li></ul></li> <li><a href="/wiki/Artificial_intelligence_in_government" title="Artificial intelligence in government">Government</a></li> <li><a href="/wiki/Artificial_intelligence_in_healthcare" title="Artificial intelligence in healthcare">Healthcare</a> <ul><li><a href="/wiki/Artificial_intelligence_in_mental_health" title="Artificial intelligence in mental health">Mental health</a></li></ul></li> <li><a href="/wiki/Artificial_intelligence_in_industry" title="Artificial intelligence in industry">Industry</a></li> <li><a href="/wiki/Machine_translation" title="Machine translation">Translation</a></li> <li><a href="/wiki/Artificial_intelligence_arms_race" title="Artificial intelligence arms race"> Military </a></li> <li><a href="/wiki/Machine_learning_in_physics" title="Machine learning in physics">Physics</a></li> <li><a href="/wiki/List_of_artificial_intelligence_projects" title="List of artificial intelligence projects">Projects</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)"><a href="/wiki/Philosophy_of_artificial_intelligence" title="Philosophy of artificial intelligence">Philosophy</a></div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Artificial_consciousness" title="Artificial consciousness">Artificial consciousness</a></li> <li><a href="/wiki/Chinese_room" title="Chinese room">Chinese room</a></li> <li><a href="/wiki/Friendly_artificial_intelligence" title="Friendly artificial intelligence">Friendly AI</a></li> <li><a href="/wiki/AI_control_problem" class="mw-redirect" title="AI control problem">Control problem</a>/<a href="/wiki/AI_takeover" title="AI takeover">Takeover</a></li> <li><a href="/wiki/Ethics_of_artificial_intelligence" title="Ethics of artificial intelligence">Ethics</a></li> <li><a href="/wiki/Existential_risk_from_artificial_general_intelligence" class="mw-redirect" title="Existential risk from artificial general intelligence">Existential risk</a></li> <li><a href="/wiki/Regulation_of_artificial_intelligence" title="Regulation of 
artificial intelligence">Regulation</a></li> <li><a href="/wiki/Turing_test" title="Turing test">Turing test</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)"><a href="/wiki/History_of_artificial_intelligence" title="History of artificial intelligence">History</a></div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Timeline_of_artificial_intelligence" title="Timeline of artificial intelligence">Timeline</a></li> <li><a href="/wiki/Progress_in_artificial_intelligence" title="Progress in artificial intelligence">Progress</a></li> <li><a href="/wiki/AI_winter" title="AI winter">AI winter</a></li> <li><a href="/wiki/AI_boom" title="AI boom">AI boom</a></li></ul></div></div></td> </tr><tr><td class="sidebar-content"> <div class="sidebar-list mw-collapsible mw-collapsed"><div class="sidebar-list-title" style="text-align:center;color: var(--color-base)">Glossary</div><div class="sidebar-list-content mw-collapsible-content"> <ul><li><a href="/wiki/Glossary_of_artificial_intelligence" title="Glossary of artificial intelligence">Glossary</a></li></ul></div></div></td> </tr><tr><td class="sidebar-navbar"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1239400231"><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Artificial_intelligence" title="Template:Artificial intelligence"><abbr title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/wiki/Template_talk:Artificial_intelligence" title="Template talk:Artificial intelligence"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Artificial_intelligence" title="Special:EditPage/Template:Artificial intelligence"><abbr title="Edit this template">e</abbr></a></li></ul></div></td></tr></tbody></table> <p><b>Machine learning</b> (<b>ML</b>) is a <a href="/wiki/Field_of_study" class="mw-redirect" title="Field of study">field of study</a> in <a href="/wiki/Artificial_intelligence" title="Artificial intelligence">artificial intelligence</a> concerned with the development and study of <a href="/wiki/Computational_statistics" title="Computational statistics">statistical algorithms</a> that can learn from <a href="/wiki/Data" title="Data">data</a> and <a href="/wiki/Generalize" class="mw-redirect" title="Generalize">generalize</a> to unseen data, and thus perform <a href="/wiki/Task_(computing)" title="Task (computing)">tasks</a> without explicit <a href="/wiki/Machine_code" title="Machine code">instructions</a>.<sup id="cite_ref-1" class="reference"><a href="#cite_note-1"><span class="cite-bracket">[</span>1<span class="cite-bracket">]</span></a></sup> Advances in the field of <a href="/wiki/Deep_learning" title="Deep learning">deep learning</a> have allowed <a href="/wiki/Neural_network_(machine_learning)" title="Neural network (machine learning)">neural networks</a> to surpass many previous approaches in performance.<sup id="cite_ref-ibm_2-0" class="reference"><a href="#cite_note-ibm-2"><span class="cite-bracket">[</span>2<span class="cite-bracket">]</span></a></sup> </p><p>ML finds application in many fields, including <a href="/wiki/Natural_language_processing" title="Natural language processing">natural language processing</a>, <a 
href="/wiki/Computer_vision" title="Computer vision">computer vision</a>, <a href="/wiki/Speech_recognition" title="Speech recognition">speech recognition</a>, <a href="/wiki/Email_filtering" title="Email filtering">email filtering</a>, <a href="/wiki/Agriculture" title="Agriculture">agriculture</a>, and <a href="/wiki/Medicine" title="Medicine">medicine</a>.<sup id="cite_ref-tvt_3-0" class="reference"><a href="#cite_note-tvt-3"><span class="cite-bracket">[</span>3<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-YoosefzadehNajafabadi-2021_4-0" class="reference"><a href="#cite_note-YoosefzadehNajafabadi-2021-4"><span class="cite-bracket">[</span>4<span class="cite-bracket">]</span></a></sup> The application of ML to business problems is known as <a href="/wiki/Predictive_analytics" title="Predictive analytics">predictive analytics</a>. </p><p><a href="/wiki/Statistics" title="Statistics">Statistics</a> and <a href="/wiki/Mathematical_optimization" title="Mathematical optimization">mathematical optimization</a> (mathematical programming) methods comprise the foundations of machine learning. <a href="/wiki/Data_mining" title="Data mining">Data mining</a> is a related field of study, focusing on <a href="/wiki/Exploratory_data_analysis" title="Exploratory data analysis">exploratory data analysis</a> (EDA) via <a href="/wiki/Unsupervised_learning" title="Unsupervised learning">unsupervised learning</a>.<sup id="cite_ref-6" class="reference"><a href="#cite_note-6"><span class="cite-bracket">[</span>6<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Friedman-1998_7-0" class="reference"><a href="#cite_note-Friedman-1998-7"><span class="cite-bracket">[</span>7<span class="cite-bracket">]</span></a></sup> </p><p>From a theoretical viewpoint, <a href="/wiki/Probably_approximately_correct_learning" title="Probably approximately correct learning">probably approximately correct (PAC) learning</a> provides a framework for describing machine learning. 
== History ==
See also: Timeline of machine learning

The term machine learning was coined in 1959 by Arthur Samuel, an IBM employee and pioneer in the fields of computer gaming and artificial intelligence.[8][9] The synonym self-teaching computers was also used in this time period.[10][11]

Although the earliest machine learning model was introduced in the 1950s, when Arthur Samuel invented a program that calculated the winning chance in checkers for each side, the history of machine learning reaches back through decades of effort to study human cognitive processes.[12] In 1949, the Canadian psychologist Donald Hebb published the book The Organization of Behavior, in which he introduced a theoretical neural structure formed by certain interactions among nerve cells.[13] Hebb's model of neurons interacting with one another laid the groundwork for how AI and machine learning algorithms work with nodes, or artificial neurons, which computers use to communicate data.[12] Other researchers who studied human cognitive systems also contributed to modern machine learning technologies, including the logician Walter Pitts and Warren McCulloch, who proposed early mathematical models of neural networks as algorithms that mirror human thought processes.[12]

By the early 1960s, an experimental "learning machine" with punched tape memory, called Cybertron, had been developed by Raytheon Company to analyze sonar signals, electrocardiograms, and speech patterns using rudimentary reinforcement learning. It was repetitively "trained" by a human operator/teacher to recognize patterns and was equipped with a "goof" button that caused it to reevaluate incorrect decisions.[14] A representative book on research into machine learning during the 1960s was Nilsson's book on Learning Machines, dealing mostly with machine learning for pattern classification.[15] Interest related to pattern recognition continued into the 1970s, as described by Duda and Hart in 1973.[16] In 1981 a report was given on using teaching strategies so that an artificial neural network learned to recognize 40 characters (26 letters, 10 digits, and 4 special symbols) from a computer terminal.[17]

Tom M. Mitchell provided a widely quoted, more formal definition of the algorithms studied in the machine learning field: "A computer program is said to learn from experience E with respect to some class of tasks T and performance measure P if its performance at tasks in T, as measured by P, improves with experience E."[18] This definition of the tasks with which machine learning is concerned offers a fundamentally operational definition rather than defining the field in cognitive terms. It follows Alan Turing's proposal in his paper "Computing Machinery and Intelligence", in which the question "Can machines think?" is replaced with the question "Can machines do what we (as thinking entities) can do?".[19]

Modern-day machine learning has two objectives. One is to classify data based on models which have been developed; the other is to make predictions for future outcomes based on these models. A hypothetical algorithm specific to classifying data may use computer vision of moles coupled with supervised learning in order to train it to classify cancerous moles. A machine learning algorithm for stock trading may inform the trader of potential future price movements.[20]
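Mitchell's definition can be made concrete with a small experiment: fix a task T (here, a synthetic binary classification problem), a performance measure P (held-out accuracy), and check that performance improves as the experience E (the number of training examples) grows. This is a minimal sketch assuming scikit-learn and NumPy with synthetic data; the dataset and parameters are illustrative choices, not part of the original text.

```python
# Illustrative sketch of Mitchell's "learn from experience" definition:
# task T = binary classification, performance P = held-out accuracy,
# experience E = number of labelled training examples seen.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

X, y = make_classification(n_samples=2000, n_features=20, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)

for n in [20, 100, 500, 1000]:  # growing experience E
    model = LogisticRegression(max_iter=1000).fit(X_train[:n], y_train[:n])
    p = accuracy_score(y_test, model.predict(X_test))
    print(f"experience E = {n:5d} examples -> performance P = {p:.3f}")
```

On typical runs, the printed accuracy rises as more examples are seen, which is exactly the "improves with experience E" condition in the definition.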
== Relationships to other fields ==

=== Artificial intelligence ===
[Figure: Machine learning as subfield of AI[21]]

As a scientific endeavor, machine learning grew out of the quest for artificial intelligence (AI). In the early days of AI as an academic discipline, some researchers were interested in having machines learn from data. They attempted to approach the problem with various symbolic methods, as well as what were then termed "neural networks"; these were mostly perceptrons and other models that were later found to be reinventions of the generalized linear models of statistics.[22] Probabilistic reasoning was also employed, especially in automated medical diagnosis.[23]: 488

However, an increasing emphasis on the logical, knowledge-based approach caused a rift between AI and machine learning. Probabilistic systems were plagued by theoretical and practical problems of data acquisition and representation.[23]: 488 By 1980, expert systems had come to dominate AI, and statistics was out of favor.[24] Work on symbolic/knowledge-based learning did continue within AI, leading to inductive logic programming (ILP), but the more statistical line of research was now outside the field of AI proper, in pattern recognition and information retrieval.[23]: 708–710, 755 Neural networks research had been abandoned by AI and computer science around the same time. This line, too, was continued outside the AI/CS field, as "connectionism", by researchers from other disciplines including John Hopfield, David Rumelhart, and Geoffrey Hinton. Their main success came in the mid-1980s with the reinvention of backpropagation.[23]: 25

Machine learning (ML), reorganized and recognized as its own field, started to flourish in the 1990s. The field changed its goal from achieving artificial intelligence to tackling solvable problems of a practical nature. It shifted focus away from the symbolic approaches it had inherited from AI, and toward methods and models borrowed from statistics, fuzzy logic, and probability theory.[24]

=== Data compression ===
This section is an excerpt from Data compression § Machine learning.
There is a close connection between machine learning and compression. A system that predicts the posterior probabilities of a sequence given its entire history can be used for optimal data compression (by using arithmetic coding on the output distribution). Conversely, an optimal compressor can be used for prediction (by finding the symbol that compresses best, given the previous history). This equivalence has been used as a justification for using data compression as a benchmark for "general intelligence".[25][26][27]

An alternative view is that compression algorithms implicitly map strings into implicit feature space vectors, and that compression-based similarity measures compute similarity within these feature spaces. For each compressor C(.) an associated vector space ℵ is defined, such that C(.) maps an input string x to the vector norm ||~x||. An exhaustive examination of the feature spaces underlying all compression algorithms is impractical; analyses therefore typically examine three representative lossless compression methods: LZW, LZ77, and PPM.[28]

According to AIXI theory, a connection more directly explained in the Hutter Prize, the best possible compression of x is the smallest possible software that generates x. For example, in that model, a zip file's compressed size includes both the zip file and the unzipping software, since the data cannot be decompressed without both, but there may be an even smaller combined form.

Examples of AI-powered audio/video compression software include NVIDIA Maxine and AIVC.[29] Examples of software that can perform AI-powered image compression include OpenCV, TensorFlow, MATLAB's Image Processing Toolbox (IPT), and High-Fidelity Generative Image Compression.[30]
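To make the idea of compression-based similarity concrete, the following sketch computes the normalized compression distance (NCD) between strings using a general-purpose compressor. It illustrates the general principle rather than the specific feature-space construction cited above; it assumes Python's standard zlib module as the compressor, and the example strings are arbitrary.

```python
# Compression-based similarity: normalized compression distance (NCD).
# NCD(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y)),
# where C(s) is the compressed size of s. Smaller values = more similar.
import zlib

def csize(data: bytes) -> int:
    return len(zlib.compress(data, 9))

def ncd(x: bytes, y: bytes) -> float:
    cx, cy, cxy = csize(x), csize(y), csize(x + y)
    return (cxy - min(cx, cy)) / max(cx, cy)

a = b"the quick brown fox jumps over the lazy dog " * 20
b = b"the quick brown fox leaps over the lazy cat " * 20
c = b"colorless green ideas sleep furiously tonight " * 20

print("NCD(a, b) =", round(ncd(a, b), 3))  # similar texts -> lower distance
print("NCD(a, c) =", round(ncd(a, c), 3))  # dissimilar texts -> higher distance
```

Any lossless compressor could stand in for zlib; the measure only relies on compressed sizes, which is what links it to the prediction-compression equivalence described above.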
In unsupervised machine learning, k-means clustering can be utilized to compress data by grouping similar data points into clusters. This technique simplifies handling extensive datasets that lack predefined labels and finds widespread use in fields such as image compression.[31]

Data compression aims to reduce the size of data files, enhancing storage efficiency and speeding up data transmission. K-means clustering, an unsupervised machine learning algorithm, is employed to partition a dataset into a specified number of clusters, k, each represented by the centroid of its points. This process condenses extensive datasets into a more compact set of representative points. Particularly beneficial in image and signal processing, k-means clustering aids in data reduction by replacing groups of data points with their centroids, thereby preserving the core information of the original data while significantly decreasing the required storage space.[32]

Large language models (LLMs) are also capable of lossless data compression, as demonstrated by DeepMind's research with the Chinchilla 70B model. Developed by DeepMind, Chinchilla 70B effectively compressed data, outperforming conventional methods such as Portable Network Graphics (PNG) for images and Free Lossless Audio Codec (FLAC) for audio. It achieved compression of image and audio data to 43.4% and 16.4% of their original sizes, respectively.[33]
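As a rough illustration of the k-means compression idea described above, the sketch below quantizes an array of RGB values down to k representative colours (the cluster centroids), so each pixel can be stored as a small index plus a shared palette. This is a minimal sketch assuming scikit-learn and NumPy; the random array stands in for real image pixels.

```python
# K-means as lossy compression: replace each data point (pixel) with the
# centroid of its cluster, storing only k centroids plus one index per pixel.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
pixels = rng.integers(0, 256, size=(10_000, 3)).astype(float)  # stand-in image

k = 16                                    # palette size (number of clusters)
km = KMeans(n_clusters=k, n_init=10, random_state=0).fit(pixels)

palette = km.cluster_centers_             # k representative colours
indices = km.labels_                      # one small index per pixel
reconstructed = palette[indices]          # lossy reconstruction

# Storage drops from three values per pixel to one index per pixel plus the palette.
mse = float(np.mean((pixels - reconstructed) ** 2))
print(f"palette: {palette.shape}, indices: {indices.shape}, MSE: {mse:.1f}")
```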
=== Data mining ===

Machine learning and data mining often employ the same methods and overlap significantly, but while machine learning focuses on prediction, based on known properties learned from the training data, data mining focuses on the discovery of (previously) unknown properties in the data (this is the analysis step of knowledge discovery in databases). Data mining uses many machine learning methods, but with different goals; on the other hand, machine learning also employs data mining methods as "unsupervised learning" or as a preprocessing step to improve learner accuracy. Much of the confusion between these two research communities (which do often have separate conferences and separate journals, ECML PKDD being a major exception) comes from the basic assumptions they work with: in machine learning, performance is usually evaluated with respect to the ability to reproduce known knowledge, while in knowledge discovery and data mining (KDD) the key task is the discovery of previously unknown knowledge. Evaluated with respect to known knowledge, an uninformed (unsupervised) method will easily be outperformed by supervised methods, while in a typical KDD task, supervised methods cannot be used due to the unavailability of training data.

Machine learning also has intimate ties to optimization: many learning problems are formulated as minimization of some loss function on a training set of examples. Loss functions express the discrepancy between the predictions of the model being trained and the actual problem instances (for example, in classification, one wants to assign a label to instances, and models are trained to correctly predict the preassigned labels of a set of examples).[34]
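The formulation of learning as loss minimization can be shown in a few lines: below, a linear model is fitted by gradient descent on the mean squared error over a training set. This is a generic NumPy illustration, not a method taken from the article; the data, learning rate, and iteration count are arbitrary.

```python
# Learning as optimization: minimize a loss function over a training set.
# Model: y_hat = X @ w + b; loss: mean squared error; optimizer: gradient descent.
import numpy as np

rng = np.random.default_rng(1)
X = rng.normal(size=(200, 3))
true_w, true_b = np.array([2.0, -1.0, 0.5]), 0.3
y = X @ true_w + true_b + 0.1 * rng.normal(size=200)   # noisy targets

w, b = np.zeros(3), 0.0
lr = 0.1
for step in range(500):
    y_hat = X @ w + b
    err = y_hat - y                      # discrepancy between prediction and target
    loss = np.mean(err ** 2)             # the loss being minimized
    grad_w = 2 * X.T @ err / len(y)      # gradient of the loss w.r.t. w
    grad_b = 2 * err.mean()              # gradient of the loss w.r.t. b
    w -= lr * grad_w
    b -= lr * grad_b

print("learned w:", np.round(w, 2), "learned b:", round(b, 2), "final loss:", round(loss, 4))
```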
=== Generalization ===

Characterizing the generalization of various learning algorithms is an active topic of current research, especially for deep learning algorithms.

=== Statistics ===

Machine learning and statistics are closely related fields in terms of methods, but distinct in their principal goal: statistics draws population inferences from a sample, while machine learning finds generalizable predictive patterns.[35] According to Michael I. Jordan, the ideas of machine learning, from methodological principles to theoretical tools, have had a long pre-history in statistics.[36] He also suggested the term data science as a placeholder to call the overall field.[36]

Conventional statistical analyses require the a priori selection of a model most suitable for the study data set. In addition, only significant or theoretically relevant variables based on previous experience are included for analysis. In contrast, machine learning is not built on a pre-structured model; rather, the data shape the model by detecting underlying patterns. The more variables (input) used to train the model, the more accurate the ultimate model will be.[37]

Leo Breiman distinguished two statistical modeling paradigms: data model and algorithmic model,[38] wherein "algorithmic model" refers, more or less, to machine learning algorithms like Random Forest.

Some statisticians have adopted methods from machine learning, leading to a combined field that they call statistical learning.[39]

=== Statistical physics ===

Analytical and computational techniques derived from the deep-rooted physics of disordered systems can be extended to large-scale problems, including machine learning, e.g., to analyze the weight space of deep neural networks.[40] Statistical physics is thus finding applications in the area of medical diagnostics.[41]
class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main articles: <a href="/wiki/Computational_learning_theory" title="Computational learning theory">Computational learning theory</a> and <a href="/wiki/Statistical_learning_theory" title="Statistical learning theory">Statistical learning theory</a></div> <p>A core objective of a learner is to generalize from its experience.<sup id="cite_ref-bishop2006_5-1" class="reference"><a href="#cite_note-bishop2006-5"><span class="cite-bracket">[</span>5<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Mohri-2012_42-0" class="reference"><a href="#cite_note-Mohri-2012-42"><span class="cite-bracket">[</span>42<span class="cite-bracket">]</span></a></sup> Generalization in this context is the ability of a learning machine to perform accurately on new, unseen examples/tasks after having experienced a learning data set. The training examples come from some generally unknown probability distribution (considered representative of the space of occurrences) and the learner has to build a general model about this space that enables it to produce sufficiently accurate predictions in new cases. </p><p>The computational analysis of machine learning algorithms and their performance is a branch of <a href="/wiki/Theoretical_computer_science" title="Theoretical computer science">theoretical computer science</a> known as <a href="/wiki/Computational_learning_theory" title="Computational learning theory">computational learning theory</a> via the <a href="/wiki/Probably_approximately_correct_learning" title="Probably approximately correct learning">Probably Approximately Correct Learning</a> (PAC) model. Because training sets are finite and the future is uncertain, learning theory usually does not yield guarantees of the performance of algorithms. Instead, probabilistic bounds on the performance are quite common. The <a href="/wiki/Bias%E2%80%93variance_decomposition" class="mw-redirect" title="Bias–variance decomposition">bias–variance decomposition</a> is one way to quantify generalization <a href="/wiki/Errors_and_residuals" title="Errors and residuals">error</a>. </p><p>For the best performance in the context of generalization, the complexity of the hypothesis should match the complexity of the function underlying the data. If the hypothesis is less complex than the function, then the model has under fitted the data. If the complexity of the model is increased in response, then the training error decreases. But if the hypothesis is too complex, then the model is subject to <a href="/wiki/Overfitting" title="Overfitting">overfitting</a> and generalization will be poorer.<sup id="cite_ref-alpaydin_43-0" class="reference"><a href="#cite_note-alpaydin-43"><span class="cite-bracket">[</span>43<span class="cite-bracket">]</span></a></sup> </p><p>In addition to performance bounds, learning theorists study the time complexity and feasibility of learning. In computational learning theory, a computation is considered feasible if it can be done in <a href="/wiki/Time_complexity#Polynomial_time" title="Time complexity">polynomial time</a>. There are two kinds of <a href="/wiki/Time_complexity" title="Time complexity">time complexity</a> results: Positive results show that a certain class of functions can be learned in polynomial time. Negative results show that certain classes cannot be learned in polynomial time. 
</p> <div class="mw-heading mw-heading2"><h2 id="Approaches">Approaches</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=10" title="Edit section: Approaches"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p><span class="anchor" id="Algorithm_types"></span> </p> <figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:Supervised_and_unsupervised_learning.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Supervised_and_unsupervised_learning.png/290px-Supervised_and_unsupervised_learning.png" decoding="async" width="290" height="129" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Supervised_and_unsupervised_learning.png/435px-Supervised_and_unsupervised_learning.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Supervised_and_unsupervised_learning.png/580px-Supervised_and_unsupervised_learning.png 2x" data-file-width="714" data-file-height="317" /></a><figcaption>In supervised learning, the training data is labeled with the expected answers, while in <a href="/wiki/Unsupervised_learning" title="Unsupervised learning">unsupervised learning</a>, the model identifies patterns or structures in unlabeled data.</figcaption></figure> <p>Machine learning approaches are traditionally divided into three broad categories, which correspond to learning paradigms, depending on the nature of the "signal" or "feedback" available to the learning system: </p> <ul><li><a href="/wiki/Supervised_learning" title="Supervised learning">Supervised learning</a>: The computer is presented with example inputs and their desired outputs, given by a "teacher", and the goal is to learn a general rule that <a href="/wiki/Map_(mathematics)" title="Map (mathematics)">maps</a> inputs to outputs.</li> <li><a href="/wiki/Unsupervised_learning" title="Unsupervised learning">Unsupervised learning</a>: No labels are given to the learning algorithm, leaving it on its own to find structure in its input. Unsupervised learning can be a goal in itself (discovering hidden patterns in data) or a means towards an end (<a href="/wiki/Feature_learning" title="Feature learning">feature learning</a>).</li> <li><a href="/wiki/Reinforcement_learning" title="Reinforcement learning">Reinforcement learning</a>: A computer program interacts with a dynamic environment in which it must perform a certain goal (such as <a href="/wiki/Autonomous_car" class="mw-redirect" title="Autonomous car">driving a vehicle</a> or playing a game against an opponent). 
As it navigates its problem space, the program is provided feedback that's analogous to rewards, which it tries to maximize.<sup id="cite_ref-bishop2006_5-2" class="reference"><a href="#cite_note-bishop2006-5"><span class="cite-bracket">[</span>5<span class="cite-bracket">]</span></a></sup></li></ul> <p>Although each algorithm has advantages and limitations, no single algorithm works for all problems.<sup id="cite_ref-44" class="reference"><a href="#cite_note-44"><span class="cite-bracket">[</span>44<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-45" class="reference"><a href="#cite_note-45"><span class="cite-bracket">[</span>45<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-46" class="reference"><a href="#cite_note-46"><span class="cite-bracket">[</span>46<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Supervised_learning">Supervised learning</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=11" title="Edit section: Supervised learning"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Supervised_learning" title="Supervised learning">Supervised learning</a></div> <figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:Svm_max_sep_hyperplane_with_margin.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/2/2a/Svm_max_sep_hyperplane_with_margin.png/220px-Svm_max_sep_hyperplane_with_margin.png" decoding="async" width="220" height="237" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/2/2a/Svm_max_sep_hyperplane_with_margin.png/330px-Svm_max_sep_hyperplane_with_margin.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/2/2a/Svm_max_sep_hyperplane_with_margin.png/440px-Svm_max_sep_hyperplane_with_margin.png 2x" data-file-width="800" data-file-height="862" /></a><figcaption>A <a href="/wiki/Support-vector_machine" class="mw-redirect" title="Support-vector machine">support-vector machine</a> is a supervised learning model that divides the data into regions separated by a <a href="/wiki/Linear_classifier" title="Linear classifier">linear boundary</a>. Here, the linear boundary divides the black circles from the white.</figcaption></figure> <p>Supervised learning algorithms build a mathematical model of a set of data that contains both the inputs and the desired outputs.<sup id="cite_ref-47" class="reference"><a href="#cite_note-47"><span class="cite-bracket">[</span>47<span class="cite-bracket">]</span></a></sup> The data, known as <a href="/wiki/Training_data" class="mw-redirect" title="Training data">training data</a>, consists of a set of training examples. Each training example has one or more inputs and the desired output, also known as a supervisory signal. In the mathematical model, each training example is represented by an <a href="/wiki/Array_data_structure" class="mw-redirect" title="Array data structure">array</a> or vector, sometimes called a <a href="/wiki/Feature_vector" class="mw-redirect" title="Feature vector">feature vector</a>, and the training data is represented by a <a href="/wiki/Matrix_(mathematics)" title="Matrix (mathematics)">matrix</a>. 
Supervised learning algorithms build a mathematical model of a set of data that contains both the inputs and the desired outputs.[47] The data, known as training data, consists of a set of training examples. Each training example has one or more inputs and the desired output, also known as a supervisory signal. In the mathematical model, each training example is represented by an array or vector, sometimes called a feature vector, and the training data is represented by a matrix. Through iterative optimization of an objective function, supervised learning algorithms learn a function that can be used to predict the output associated with new inputs.[48] An optimal function allows the algorithm to correctly determine the output for inputs that were not a part of the training data. An algorithm that improves the accuracy of its outputs or predictions over time is said to have learned to perform that task.[18]

Types of supervised-learning algorithms include active learning, classification and regression.[49] Classification algorithms are used when the outputs are restricted to a limited set of values, and regression algorithms are used when the outputs may have any numerical value within a range. As an example, for a classification algorithm that filters emails, the input would be an incoming email, and the output would be the name of the folder in which to file the email. Examples of regression would be predicting the height of a person or the future temperature.[50]

Similarity learning is an area of supervised machine learning closely related to regression and classification, but the goal is to learn from examples using a similarity function that measures how similar or related two objects are. It has applications in ranking, recommendation systems, visual identity tracking, face verification, and speaker verification.
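As a small companion to the support-vector machine figure above, the sketch below fits a linear SVM to two separable point clouds and predicts the class of new inputs. It is a minimal, assumed example using scikit-learn on synthetic data; nothing about the specific dataset or parameters comes from the article.

```python
# Supervised learning with a linear support-vector machine: the model learns
# a linear boundary from labelled feature vectors, then labels unseen inputs.
import numpy as np
from sklearn.svm import SVC

rng = np.random.default_rng(0)
class_0 = rng.normal(loc=[-2.0, -2.0], scale=0.8, size=(50, 2))   # one cluster of points
class_1 = rng.normal(loc=[+2.0, +2.0], scale=0.8, size=(50, 2))   # the other cluster
X_train = np.vstack([class_0, class_1])       # feature vectors (training matrix)
y_train = np.array([0] * 50 + [1] * 50)       # supervisory signal (labels)

clf = SVC(kernel="linear").fit(X_train, y_train)

X_new = np.array([[-1.5, -2.5], [2.5, 1.0], [0.1, 0.2]])
print("predicted labels:", clf.predict(X_new))
print("boundary normal w:", clf.coef_[0], "bias b:", clf.intercept_[0])
```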
</p> <div class="mw-heading mw-heading3"><h3 id="Unsupervised_learning">Unsupervised learning</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=12" title="Edit section: Unsupervised learning"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Unsupervised_learning" title="Unsupervised learning">Unsupervised learning</a></div><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">See also: <a href="/wiki/Cluster_analysis" title="Cluster analysis">Cluster analysis</a></div> <p>Unsupervised learning algorithms find structures in data that has not been labeled, classified or categorized. Instead of responding to feedback, unsupervised learning algorithms identify commonalities in the data and react based on the presence or absence of such commonalities in each new piece of data. Central applications of unsupervised machine learning include clustering, <a href="/wiki/Dimensionality_reduction" title="Dimensionality reduction">dimensionality reduction</a>,<sup id="cite_ref-Friedman-1998_7-1" class="reference"><a href="#cite_note-Friedman-1998-7"><span class="cite-bracket">[</span>7<span class="cite-bracket">]</span></a></sup> and <a href="/wiki/Density_estimation" title="Density estimation">density estimation</a>.<sup id="cite_ref-JordanBishop2004_51-0" class="reference"><a href="#cite_note-JordanBishop2004-51"><span class="cite-bracket">[</span>51<span class="cite-bracket">]</span></a></sup> Unsupervised learning algorithms also streamlined the process of identifying large <a href="/wiki/Indel" title="Indel">indel</a> based <a href="/wiki/Haplotype" title="Haplotype">haplotypes</a> of a gene of interest from <a href="/wiki/Pan-genome" title="Pan-genome">pan-genome</a>.<sup id="cite_ref-52" class="reference"><a href="#cite_note-52"><span class="cite-bracket">[</span>52<span class="cite-bracket">]</span></a></sup> </p> <figure typeof="mw:File/Thumb"><a href="/wiki/File:CLIPS.jpg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/0/02/CLIPS.jpg/542px-CLIPS.jpg" decoding="async" width="542" height="129" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/0/02/CLIPS.jpg/813px-CLIPS.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/0/02/CLIPS.jpg/1084px-CLIPS.jpg 2x" data-file-width="3866" data-file-height="921" /></a><figcaption>Clustering via Large Indel Permuted Slopes, CLIPS,<sup id="cite_ref-53" class="reference"><a href="#cite_note-53"><span class="cite-bracket">[</span>53<span class="cite-bracket">]</span></a></sup> turns the alignment image into a learning regression problem. The varied slope (<i>b</i>) estimates between each pair of DNA segments enables to identify segments sharing the same set of indels.</figcaption></figure> <p>Cluster analysis is the assignment of a set of observations into subsets (called <i>clusters</i>) so that observations within the same cluster are similar according to one or more predesignated criteria, while observations drawn from different clusters are dissimilar. 
Different clustering techniques make different assumptions on the structure of the data, often defined by some <i>similarity metric</i> and evaluated, for example, by <i>internal compactness</i>, or the similarity between members of the same cluster, and <i>separation</i>, the difference between clusters. Other methods are based on <i>estimated density</i> and <i>graph connectivity</i>. </p><p>A special type of unsupervised learning called, <a href="/wiki/Self-supervised_learning" title="Self-supervised learning">self-supervised learning</a> involves training a model by generating the supervisory signal from the data itself.<sup id="cite_ref-54" class="reference"><a href="#cite_note-54"><span class="cite-bracket">[</span>54<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-55" class="reference"><a href="#cite_note-55"><span class="cite-bracket">[</span>55<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Semi-supervised_learning">Semi-supervised learning</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=13" title="Edit section: Semi-supervised learning"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Semi-supervised_learning" class="mw-redirect" title="Semi-supervised learning">Semi-supervised learning</a></div> <p>Semi-supervised learning falls between <a href="/wiki/Unsupervised_learning" title="Unsupervised learning">unsupervised learning</a> (without any labeled training data) and <a href="/wiki/Supervised_learning" title="Supervised learning">supervised learning</a> (with completely labeled training data). Some of the training examples are missing training labels, yet many machine-learning researchers have found that unlabeled data, when used in conjunction with a small amount of labeled data, can produce a considerable improvement in learning accuracy. 
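The remark above that different clustering techniques make different assumptions can be seen directly by running a compactness-based method and a density/connectivity-based method on the same data. The sketch below contrasts k-means with DBSCAN on a synthetic "two moons" dataset; it assumes scikit-learn, and the dataset and parameters are illustrative choices only.

```python
# Different clustering assumptions in practice: k-means favours compact,
# roughly spherical clusters, while DBSCAN groups points by local density
# and connectivity.
from sklearn.datasets import make_moons
from sklearn.cluster import KMeans, DBSCAN

X, _ = make_moons(n_samples=400, noise=0.06, random_state=0)

kmeans_labels = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(X)
dbscan_labels = DBSCAN(eps=0.2, min_samples=5).fit_predict(X)

# k-means tends to cut each crescent in half (its compactness assumption
# fails here), whereas DBSCAN usually recovers the two crescent shapes.
print("k-means cluster sizes:", [int((kmeans_labels == c).sum()) for c in set(kmeans_labels)])
print("DBSCAN cluster sizes: ", [int((dbscan_labels == c).sum()) for c in set(dbscan_labels)])
```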
=== Semi-supervised learning ===
Main article: Semi-supervised learning

Semi-supervised learning falls between unsupervised learning (without any labeled training data) and supervised learning (with completely labeled training data). Some of the training examples are missing training labels, yet many machine-learning researchers have found that unlabeled data, when used in conjunction with a small amount of labeled data, can produce a considerable improvement in learning accuracy.

In weakly supervised learning, the training labels are noisy, limited, or imprecise; however, these labels are often cheaper to obtain, resulting in larger effective training sets.[56]
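One common way to exploit unlabeled data, self-training, can illustrate the semi-supervised idea: a model fitted on the few labeled examples assigns provisional labels to unlabeled points it is confident about, and is then refitted on the enlarged set. This is a generic, assumed sketch (scikit-learn, synthetic data, a single self-training round), not a method prescribed by the article.

```python
# Semi-supervised learning via one round of self-training: train on a few
# labeled points, pseudo-label the unlabeled points the model is confident
# about, then retrain on the enlarged training set.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=1000, n_features=10, random_state=0)
perm = np.random.default_rng(0).permutation(1000)
labeled, unlabeled, test = perm[:30], perm[30:700], perm[700:]

clf = LogisticRegression(max_iter=1000).fit(X[labeled], y[labeled])
print("labeled-only test accuracy:", round(clf.score(X[test], y[test]), 3))

proba = clf.predict_proba(X[unlabeled])
confident = proba.max(axis=1) > 0.9                 # keep only confident pseudo-labels
X_aug = np.vstack([X[labeled], X[unlabeled[confident]]])
y_aug = np.concatenate([y[labeled], proba[confident].argmax(axis=1)])

clf2 = LogisticRegression(max_iter=1000).fit(X_aug, y_aug)
print("self-training test accuracy: ", round(clf2.score(X[test], y[test]), 3))
```

Whether the second score improves depends on how reliable the pseudo-labels are, which is exactly the gamble semi-supervised methods make.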
Reinforcement learning algorithms are used in autonomous vehicles or in learning to play a game against a human opponent. </p> <div class="mw-heading mw-heading3"><h3 id="Dimensionality_reduction">Dimensionality reduction</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=15" title="Edit section: Dimensionality reduction"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p><a href="/wiki/Dimensionality_reduction" title="Dimensionality reduction">Dimensionality reduction</a> is a process of reducing the number of random variables under consideration by obtaining a set of principal variables.<sup id="cite_ref-58" class="reference"><a href="#cite_note-58"><span class="cite-bracket">[</span>58<span class="cite-bracket">]</span></a></sup> In other words, it is a process of reducing the dimension of the <a href="/wiki/Feature_(machine_learning)" title="Feature (machine learning)">feature</a> set, also called the "number of features". Most of the dimensionality reduction techniques can be considered as either feature elimination or <a href="/wiki/Feature_extraction" class="mw-redirect" title="Feature extraction">extraction</a>. One of the popular methods of dimensionality reduction is <a href="/wiki/Principal_component_analysis" title="Principal component analysis">principal component analysis</a> (PCA). PCA involves changing higher-dimensional data (e.g., 3D) to a smaller space (e.g., 2D). The <a href="/wiki/Manifold_hypothesis" title="Manifold hypothesis">manifold hypothesis</a> proposes that high-dimensional data sets lie along low-dimensional <a href="/wiki/Manifold" title="Manifold">manifolds</a>, and many dimensionality reduction techniques make this assumption, leading to the area of <a href="/wiki/Manifold_learning" class="mw-redirect" title="Manifold learning">manifold learning</a> and <a href="/wiki/Manifold_regularization" title="Manifold regularization">manifold regularization</a>. </p> <div class="mw-heading mw-heading3"><h3 id="Other_types">Other types</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=16" title="Edit section: Other types"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Other approaches have been developed which do not fit neatly into this three-fold categorization, and sometimes more than one is used by the same machine learning system. 
Examples include <a href="/wiki/Topic_modeling" class="mw-redirect" title="Topic modeling">topic modeling</a> and <a href="/wiki/Meta-learning_(computer_science)" title="Meta-learning (computer science)">meta-learning</a>.<sup id="cite_ref-59" class="reference"><a href="#cite_note-59"><span class="cite-bracket">[</span>59<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading4"><h4 id="Self-learning">Self-learning</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=17" title="Edit section: Self-learning"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Self-learning, as a machine learning paradigm, was introduced in 1982 along with a neural network capable of self-learning, named <i>crossbar adaptive array</i> (CAA).<sup id="cite_ref-60" class="reference"><a href="#cite_note-60"><span class="cite-bracket">[</span>60<span class="cite-bracket">]</span></a></sup> It is learning with no external rewards and no external teacher advice. The CAA self-learning algorithm computes, in a crossbar fashion, both decisions about actions and emotions (feelings) about consequence situations. The system is driven by the interaction between cognition and emotion.<sup id="cite_ref-61" class="reference"><a href="#cite_note-61"><span class="cite-bracket">[</span>61<span class="cite-bracket">]</span></a></sup> The self-learning algorithm updates a memory matrix W = ||w(a,s)|| such that in each iteration it executes the following machine learning routine: </p> <ol><li>in situation <i>s</i> perform action <i>a</i></li> <li>receive a consequence situation <i>s'</i></li> <li>compute emotion of being in the consequence situation <i>v(s')</i></li> <li>update crossbar memory <i>w'(a,s) = w(a,s) + v(s')</i></li></ol> <p>It is a system with only one input (the situation) and only one output (the action, or behavior, <i>a</i>). There is neither a separate reinforcement input nor an advice input from the environment. The backpropagated value (secondary reinforcement) is the emotion toward the consequence situation. The CAA exists in two environments: one is the behavioral environment where it behaves, and the other is the genetic environment, wherefrom it initially and only once receives initial emotions about situations to be encountered in the behavioral environment. 
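</p>
<p>The sketch below is a direct, illustrative transcription of this four-step routine into code; the environment dynamics and the genome-derived emotion values are placeholders invented for the example:</p>
<pre>
# Illustrative transcription of the crossbar self-learning routine listed above.
# The behavioural environment and the emotion values v(s) are placeholders
# made up for this sketch.
import numpy as np

n_situations, n_actions = 4, 3
W = np.zeros((n_actions, n_situations))          # crossbar memory w(a, s)
v = np.array([0.0, -1.0, 0.5, 1.0])              # initial emotions (from the "genome")

def consequence(s, a):
    # placeholder behavioural environment: next situation depends on s and a
    return (s + a) % n_situations

s = 0
for iteration in range(100):
    a = int(W[:, s].argmax())                    # 1. in situation s perform action a
    s_next = consequence(s, a)                   # 2. receive consequence situation s'
    emotion = v[s_next]                          # 3. compute emotion v(s') of being in s'
    W[a, s] = W[a, s] + emotion                  # 4. update crossbar memory w'(a,s) = w(a,s) + v(s')
    s = s_next
</pre>
<p>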
After receiving the genome (species) vector from the genetic environment, the CAA learns a goal-seeking behavior, in an environment that contains both desirable and undesirable situations.<sup id="cite_ref-62" class="reference"><a href="#cite_note-62"><span class="cite-bracket">[</span>62<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading4"><h4 id="Feature_learning">Feature learning</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=18" title="Edit section: Feature learning"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Feature_learning" title="Feature learning">Feature learning</a></div> <p>Several learning algorithms aim at discovering better representations of the inputs provided during training.<sup id="cite_ref-pami_63-0" class="reference"><a href="#cite_note-pami-63"><span class="cite-bracket">[</span>63<span class="cite-bracket">]</span></a></sup> Classic examples include <a href="/wiki/Principal_component_analysis" title="Principal component analysis">principal component analysis</a> and cluster analysis. Feature learning algorithms, also called representation learning algorithms, often attempt to preserve the information in their input but also transform it in a way that makes it useful, often as a pre-processing step before performing classification or predictions. This technique allows reconstruction of the inputs coming from the unknown data-generating distribution, while not being necessarily faithful to configurations that are implausible under that distribution. This replaces manual <a href="/wiki/Feature_engineering" title="Feature engineering">feature engineering</a>, and allows a machine to both learn the features and use them to perform a specific task. </p><p>Feature learning can be either supervised or unsupervised. In supervised feature learning, features are learned using labeled input data. Examples include <a href="/wiki/Artificial_neural_network" class="mw-redirect" title="Artificial neural network">artificial neural networks</a>, <a href="/wiki/Multilayer_perceptron" title="Multilayer perceptron">multilayer perceptrons</a>, and supervised <a href="/wiki/Dictionary_learning" class="mw-redirect" title="Dictionary learning">dictionary learning</a>. In unsupervised feature learning, features are learned with unlabeled input data. 
Examples include dictionary learning, <a href="/wiki/Independent_component_analysis" title="Independent component analysis">independent component analysis</a>, <a href="/wiki/Autoencoder" title="Autoencoder">autoencoders</a>, <a href="/wiki/Matrix_decomposition" title="Matrix decomposition">matrix factorization</a><sup id="cite_ref-64" class="reference"><a href="#cite_note-64"><span class="cite-bracket">[</span>64<span class="cite-bracket">]</span></a></sup> and various forms of <a href="/wiki/Cluster_analysis" title="Cluster analysis">clustering</a>.<sup id="cite_ref-coates2011_65-0" class="reference"><a href="#cite_note-coates2011-65"><span class="cite-bracket">[</span>65<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-66" class="reference"><a href="#cite_note-66"><span class="cite-bracket">[</span>66<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-jurafsky_67-0" class="reference"><a href="#cite_note-jurafsky-67"><span class="cite-bracket">[</span>67<span class="cite-bracket">]</span></a></sup> </p><p><a href="/wiki/Manifold_learning" class="mw-redirect" title="Manifold learning">Manifold learning</a> algorithms attempt to do so under the constraint that the learned representation is low-dimensional. <a href="/wiki/Sparse_coding" class="mw-redirect" title="Sparse coding">Sparse coding</a> algorithms attempt to do so under the constraint that the learned representation is sparse, meaning that the mathematical model has many zeros. <a href="/wiki/Multilinear_subspace_learning" title="Multilinear subspace learning">Multilinear subspace learning</a> algorithms aim to learn low-dimensional representations directly from <a href="/wiki/Tensor" title="Tensor">tensor</a> representations for multidimensional data, without reshaping them into higher-dimensional vectors.<sup id="cite_ref-68" class="reference"><a href="#cite_note-68"><span class="cite-bracket">[</span>68<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Deep_learning" title="Deep learning">Deep learning</a> algorithms discover multiple levels of representation, or a hierarchy of features, with higher-level, more abstract features defined in terms of (or generating) lower-level features. It has been argued that an intelligent machine is one that learns a representation that disentangles the underlying factors of variation that explain the observed data.<sup id="cite_ref-69" class="reference"><a href="#cite_note-69"><span class="cite-bracket">[</span>69<span class="cite-bracket">]</span></a></sup> </p><p>Feature learning is motivated by the fact that machine learning tasks such as classification often require input that is mathematically and computationally convenient to process. However, real-world data such as images, video, and sensory data has not yielded to attempts to algorithmically define specific features. An alternative is to discover such features or representations through examination, without relying on explicit algorithms. 
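</p>
<p>The following sketch illustrates unsupervised feature learning used as a pre-processing step, assuming the scikit-learn library and its bundled digits data (neither is prescribed by the article's sources): principal component analysis learns a compact representation of the raw pixels, and a classifier is then trained on the learned features.</p>
<pre>
# Sketch of unsupervised feature learning as a pre-processing step
# (assumes scikit-learn).  PCA learns a compact representation of the raw
# pixels; a classifier is then trained on the learned features.
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline

X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

model = make_pipeline(
    PCA(n_components=20),                 # learn 20 features from 64 raw pixels
    LogisticRegression(max_iter=1000),    # classify using the learned features
)
model.fit(X_train, y_train)
print("test accuracy:", model.score(X_test, y_test))
</pre>
<p>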
</p> <div class="mw-heading mw-heading4"><h4 id="Sparse_dictionary_learning">Sparse dictionary learning</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=19" title="Edit section: Sparse dictionary learning"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Sparse_dictionary_learning" title="Sparse dictionary learning">Sparse dictionary learning</a></div> <p>Sparse dictionary learning is a feature learning method where a training example is represented as a linear combination of <a href="/wiki/Basis_function" title="Basis function">basis functions</a> and assumed to be a <a href="/wiki/Sparse_matrix" title="Sparse matrix">sparse matrix</a>. The method is <a href="/wiki/Strongly_NP-hard" class="mw-redirect" title="Strongly NP-hard">strongly NP-hard</a> and difficult to solve approximately.<sup id="cite_ref-70" class="reference"><a href="#cite_note-70"><span class="cite-bracket">[</span>70<span class="cite-bracket">]</span></a></sup> A popular <a href="/wiki/Heuristic" title="Heuristic">heuristic</a> method for sparse dictionary learning is the <a href="/wiki/K-SVD" title="K-SVD"><i>k</i>-SVD</a> algorithm. Sparse dictionary learning has been applied in several contexts. In classification, the problem is to determine the class to which a previously unseen training example belongs. For a dictionary where each class has already been built, a new training example is associated with the class that is best sparsely represented by the corresponding dictionary. Sparse dictionary learning has also been applied in <a href="/wiki/Image_de-noising" class="mw-redirect" title="Image de-noising">image de-noising</a>. The key idea is that a clean image patch can be sparsely represented by an image dictionary, but the noise cannot.<sup id="cite_ref-71" class="reference"><a href="#cite_note-71"><span class="cite-bracket">[</span>71<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading4"><h4 id="Anomaly_detection">Anomaly detection</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=20" title="Edit section: Anomaly detection"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Anomaly_detection" title="Anomaly detection">Anomaly detection</a></div> <p>In <a href="/wiki/Data_mining" title="Data mining">data mining</a>, anomaly detection, also known as outlier detection, is the identification of rare items, events or observations which raise suspicions by differing significantly from the majority of the data.<sup id="cite_ref-Zimek-2017_72-0" class="reference"><a href="#cite_note-Zimek-2017-72"><span class="cite-bracket">[</span>72<span class="cite-bracket">]</span></a></sup> Typically, the anomalous items represent an issue such as <a href="/wiki/Bank_fraud" title="Bank fraud">bank fraud</a>, a structural defect, medical problems or errors in a text. 
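</p>
<p>As a small, illustrative example of unsupervised outlier detection (assuming the scikit-learn library and synthetic data made up for the purpose), an isolation forest can flag the points that differ most strongly from the bulk of the data:</p>
<pre>
# Illustrative unsupervised anomaly detection (assumes scikit-learn).
# Most points are "normal"; a few injected points differ strongly and
# should be flagged as outliers.
import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(0)
normal = rng.normal(loc=0.0, scale=1.0, size=(300, 2))
outliers = rng.uniform(low=6.0, high=9.0, size=(5, 2))
X = np.vstack([normal, outliers])

detector = IsolationForest(contamination=0.02, random_state=0).fit(X)
labels = detector.predict(X)            # +1 = normal, -1 = anomaly
print("flagged as anomalous:", np.where(labels == -1)[0])
</pre>
<p>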
Anomalies are referred to as <a href="/wiki/Outlier" title="Outlier">outliers</a>, novelties, noise, deviations and exceptions.<sup id="cite_ref-73" class="reference"><a href="#cite_note-73"><span class="cite-bracket">[</span>73<span class="cite-bracket">]</span></a></sup> </p><p>In particular, in the context of abuse and network intrusion detection, the interesting objects are often not rare objects, but unexpected bursts of inactivity. This pattern does not adhere to the common statistical definition of an outlier as a rare object. Many outlier detection methods (in particular, unsupervised algorithms) will fail on such data unless aggregated appropriately. Instead, a cluster analysis algorithm may be able to detect the micro-clusters formed by these patterns.<sup id="cite_ref-74" class="reference"><a href="#cite_note-74"><span class="cite-bracket">[</span>74<span class="cite-bracket">]</span></a></sup> </p><p>Three broad categories of anomaly detection techniques exist.<sup id="cite_ref-ChandolaSurvey_75-0" class="reference"><a href="#cite_note-ChandolaSurvey-75"><span class="cite-bracket">[</span>75<span class="cite-bracket">]</span></a></sup> Unsupervised anomaly detection techniques detect anomalies in an unlabeled test data set under the assumption that the majority of the instances in the data set are normal, by looking for instances that seem to fit the least to the remainder of the data set. Supervised anomaly detection techniques require a data set that has been labeled as "normal" and "abnormal" and involves training a classifier (the key difference from many other statistical classification problems is the inherently unbalanced nature of outlier detection). Semi-supervised anomaly detection techniques construct a model representing normal behavior from a given normal training data set and then test the likelihood of a test instance to be generated by the model. </p> <div class="mw-heading mw-heading4"><h4 id="Robot_learning">Robot learning</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=21" title="Edit section: Robot learning"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p><a href="/wiki/Robot_learning" title="Robot learning">Robot learning</a> is inspired by a multitude of machine learning methods, starting from supervised learning, reinforcement learning,<sup id="cite_ref-76" class="reference"><a href="#cite_note-76"><span class="cite-bracket">[</span>76<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-77" class="reference"><a href="#cite_note-77"><span class="cite-bracket">[</span>77<span class="cite-bracket">]</span></a></sup> and finally <a href="/wiki/Meta-learning_(computer_science)" title="Meta-learning (computer science)">meta-learning</a> (e.g. MAML). 
</p> <div class="mw-heading mw-heading4"><h4 id="Association_rules">Association rules</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=22" title="Edit section: Association rules"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Association_rule_learning" title="Association rule learning">Association rule learning</a></div><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">See also: <a href="/wiki/Inductive_logic_programming" title="Inductive logic programming">Inductive logic programming</a></div> <p>Association rule learning is a <a href="/wiki/Rule-based_machine_learning" title="Rule-based machine learning">rule-based machine learning</a> method for discovering relationships between variables in large databases. It is intended to identify strong rules discovered in databases using some measure of "interestingness".<sup id="cite_ref-piatetsky_78-0" class="reference"><a href="#cite_note-piatetsky-78"><span class="cite-bracket">[</span>78<span class="cite-bracket">]</span></a></sup> </p><p>Rule-based machine learning is a general term for any machine learning method that identifies, learns, or evolves "rules" to store, manipulate or apply knowledge. The defining characteristic of a rule-based machine learning algorithm is the identification and utilization of a set of relational rules that collectively represent the knowledge captured by the system. This is in contrast to other machine learning algorithms that commonly identify a singular model that can be universally applied to any instance in order to make a prediction.<sup id="cite_ref-79" class="reference"><a href="#cite_note-79"><span class="cite-bracket">[</span>79<span class="cite-bracket">]</span></a></sup> Rule-based machine learning approaches include <a href="/wiki/Learning_classifier_system" title="Learning classifier system">learning classifier systems</a>, association rule learning, and <a href="/wiki/Artificial_immune_system" title="Artificial immune system">artificial immune systems</a>. 
</p><p>Based on the concept of strong rules, <a href="/wiki/Rakesh_Agrawal_(computer_scientist)" title="Rakesh Agrawal (computer scientist)">Rakesh Agrawal</a>, <a href="/wiki/Tomasz_Imieli%C5%84ski" title="Tomasz Imieliński">Tomasz Imieliński</a> and Arun Swami introduced association rules for discovering regularities between products in large-scale transaction data recorded by <a href="/wiki/Point-of-sale" class="mw-redirect" title="Point-of-sale">point-of-sale</a> (POS) systems in supermarkets.<sup id="cite_ref-mining_80-0" class="reference"><a href="#cite_note-mining-80"><span class="cite-bracket">[</span>80<span class="cite-bracket">]</span></a></sup> For example, the rule <span class="mwe-math-element"><span class="mwe-math-mathml-inline mwe-math-mathml-a11y" style="display: none;"><math xmlns="http://www.w3.org/1998/Math/MathML" alttext="{\displaystyle \{\mathrm {onions,potatoes} \}\Rightarrow \{\mathrm {burger} \}}"> <semantics> <mrow class="MJX-TeXAtom-ORD"> <mstyle displaystyle="true" scriptlevel="0"> <mo fence="false" stretchy="false">{</mo> <mrow class="MJX-TeXAtom-ORD"> <mi mathvariant="normal">o</mi> <mi mathvariant="normal">n</mi> <mi mathvariant="normal">i</mi> <mi mathvariant="normal">o</mi> <mi mathvariant="normal">n</mi> <mi mathvariant="normal">s</mi> <mo>,</mo> <mi mathvariant="normal">p</mi> <mi mathvariant="normal">o</mi> <mi mathvariant="normal">t</mi> <mi mathvariant="normal">a</mi> <mi mathvariant="normal">t</mi> <mi mathvariant="normal">o</mi> <mi mathvariant="normal">e</mi> <mi mathvariant="normal">s</mi> </mrow> <mo fence="false" stretchy="false">}</mo> <mo stretchy="false">⇒<!-- ⇒ --></mo> <mo fence="false" stretchy="false">{</mo> <mrow class="MJX-TeXAtom-ORD"> <mi mathvariant="normal">b</mi> <mi mathvariant="normal">u</mi> <mi mathvariant="normal">r</mi> <mi mathvariant="normal">g</mi> <mi mathvariant="normal">e</mi> <mi mathvariant="normal">r</mi> </mrow> <mo fence="false" stretchy="false">}</mo> </mstyle> </mrow> <annotation encoding="application/x-tex">{\displaystyle \{\mathrm {onions,potatoes} \}\Rightarrow \{\mathrm {burger} \}}</annotation> </semantics> </math></span><img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/2e6daa2c8e553e87e411d6e0ec66ae596c3c9381" class="mwe-math-fallback-image-inline mw-invert skin-invert" aria-hidden="true" style="vertical-align: -0.838ex; width:30.912ex; height:2.843ex;" alt="{\displaystyle \{\mathrm {onions,potatoes} \}\Rightarrow \{\mathrm {burger} \}}"></span> found in the sales data of a supermarket would indicate that if a customer buys onions and potatoes together, they are likely to also buy hamburger meat. Such information can be used as the basis for decisions about marketing activities such as promotional <a href="/wiki/Pricing" title="Pricing">pricing</a> or <a href="/wiki/Product_placement" title="Product placement">product placements</a>. In addition to <a href="/wiki/Market_basket_analysis" class="mw-redirect" title="Market basket analysis">market basket analysis</a>, association rules are employed today in application areas including <a href="/wiki/Web_usage_mining" class="mw-redirect" title="Web usage mining">Web usage mining</a>, <a href="/wiki/Intrusion_detection" class="mw-redirect" title="Intrusion detection">intrusion detection</a>, <a href="/wiki/Continuous_production" title="Continuous production">continuous production</a>, and <a href="/wiki/Bioinformatics" title="Bioinformatics">bioinformatics</a>. 
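</p>
<p>The support and confidence of the example rule above can be computed directly from a list of transactions; the sketch below uses a handful of made-up point-of-sale baskets purely for illustration:</p>
<pre>
# Support and confidence for the rule {onions, potatoes} =&gt; {burger},
# computed over a small made-up list of point-of-sale transactions.
transactions = [
    {"onions", "potatoes", "burger"},
    {"onions", "potatoes", "burger", "beer"},
    {"onions", "potatoes"},
    {"milk", "bread"},
    {"burger", "beer"},
]
antecedent, consequent = {"onions", "potatoes"}, {"burger"}

n = len(transactions)
both = sum(1 for t in transactions if (antecedent | consequent).issubset(t))
ante = sum(1 for t in transactions if antecedent.issubset(t))

support = both / n                 # how often the whole rule occurs
confidence = both / ante           # how often the consequent follows the antecedent
print(f"support={support:.2f}, confidence={confidence:.2f}")
</pre>
<p>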
In contrast with <a href="/wiki/Sequence_mining" class="mw-redirect" title="Sequence mining">sequence mining</a>, association rule learning typically does not consider the order of items either within a transaction or across transactions. </p><p>Learning classifier systems (LCS) are a family of rule-based machine learning algorithms that combine a discovery component, typically a <a href="/wiki/Genetic_algorithm" title="Genetic algorithm">genetic algorithm</a>, with a learning component, performing either <a href="/wiki/Supervised_learning" title="Supervised learning">supervised learning</a>, <a href="/wiki/Reinforcement_learning" title="Reinforcement learning">reinforcement learning</a>, or <a href="/wiki/Unsupervised_learning" title="Unsupervised learning">unsupervised learning</a>. They seek to identify a set of context-dependent rules that collectively store and apply knowledge in a <a href="/wiki/Piecewise" class="mw-redirect" title="Piecewise">piecewise</a> manner in order to make predictions.<sup id="cite_ref-81" class="reference"><a href="#cite_note-81"><span class="cite-bracket">[</span>81<span class="cite-bracket">]</span></a></sup> </p><p><a href="/wiki/Inductive_logic_programming" title="Inductive logic programming">Inductive logic programming</a> (ILP) is an approach to rule learning using <a href="/wiki/Logic_programming" title="Logic programming">logic programming</a> as a uniform representation for input examples, background knowledge, and hypotheses. Given an encoding of the known background knowledge and a set of examples represented as a logical database of facts, an ILP system will derive a hypothesized logic program that <a href="/wiki/Entailment" class="mw-redirect" title="Entailment">entails</a> all positive and no negative examples. <a href="/wiki/Inductive_programming" title="Inductive programming">Inductive programming</a> is a related field that considers any kind of programming language for representing hypotheses (and not only logic programming), such as <a href="/wiki/Functional_programming" title="Functional programming">functional programs</a>. </p><p>Inductive logic programming is particularly useful in <a href="/wiki/Bioinformatics" title="Bioinformatics">bioinformatics</a> and <a href="/wiki/Natural_language_processing" title="Natural language processing">natural language processing</a>. 
<a href="/wiki/Gordon_Plotkin" title="Gordon Plotkin">Gordon Plotkin</a> and <a href="/wiki/Ehud_Shapiro" title="Ehud Shapiro">Ehud Shapiro</a> laid the initial theoretical foundation for inductive machine learning in a logical setting.<sup id="cite_ref-82" class="reference"><a href="#cite_note-82"><span class="cite-bracket">[</span>82<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-83" class="reference"><a href="#cite_note-83"><span class="cite-bracket">[</span>83<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-84" class="reference"><a href="#cite_note-84"><span class="cite-bracket">[</span>84<span class="cite-bracket">]</span></a></sup> Shapiro built their first implementation (Model Inference System) in 1981: a Prolog program that inductively inferred logic programs from positive and negative examples.<sup id="cite_ref-85" class="reference"><a href="#cite_note-85"><span class="cite-bracket">[</span>85<span class="cite-bracket">]</span></a></sup> The term <i>inductive</i> here refers to <a href="/wiki/Inductive_reasoning" title="Inductive reasoning">philosophical</a> induction, suggesting a theory to explain observed facts, rather than <a href="/wiki/Mathematical_induction" title="Mathematical induction">mathematical induction</a>, proving a property for all members of a well-ordered set. </p> <div class="mw-heading mw-heading2"><h2 id="Models">Models</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=23" title="Edit section: Models"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>A <b><style data-mw-deduplicate="TemplateStyles:r1238216509">.mw-parser-output .vanchor>:target~.vanchor-text{background-color:#b1d2ff}@media screen{html.skin-theme-clientpref-night .mw-parser-output .vanchor>:target~.vanchor-text{background-color:#0f4dc9}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .vanchor>:target~.vanchor-text{background-color:#0f4dc9}}</style><span class="vanchor"><span id="machine_learning_model"></span><span class="vanchor-text">machine learning model</span></span></b> is a type of <a href="/wiki/Mathematical_model" title="Mathematical model">mathematical model</a> that, after being "trained" on a given dataset, can be used to make predictions or classifications on new data. During training, a learning algorithm iteratively adjusts the model's internal parameters to minimize errors in its predictions.<sup id="cite_ref-86" class="reference"><a href="#cite_note-86"><span class="cite-bracket">[</span>86<span class="cite-bracket">]</span></a></sup> By extension, the term "model" can refer to several levels of specificity, from a general class of models and their associated learning algorithms to a fully trained model with all its internal parameters tuned.<sup id="cite_ref-87" class="reference"><a href="#cite_note-87"><span class="cite-bracket">[</span>87<span class="cite-bracket">]</span></a></sup> </p><p>Various types of models have been used and researched for machine learning systems, picking the best model for a task is called <a href="/wiki/Model_selection" title="Model selection">model selection</a>. 
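</p>
<p>In practice, model selection is often carried out by comparing candidate models with cross-validation; the sketch below assumes the scikit-learn library and its bundled Iris data, both chosen only for illustration:</p>
<pre>
# Simple model selection sketch (assumes scikit-learn): candidate models are
# compared by cross-validated accuracy and the best one is kept.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
candidates = {
    "logistic regression": LogisticRegression(max_iter=1000),
    "decision tree": DecisionTreeClassifier(random_state=0),
    "k-nearest neighbours": KNeighborsClassifier(n_neighbors=5),
}
scores = {name: cross_val_score(m, X, y, cv=5).mean() for name, m in candidates.items()}
best = max(scores, key=scores.get)
print(scores)
print("selected model:", best)
</pre>
<p>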
</p> <div class="mw-heading mw-heading3"><h3 id="Artificial_neural_networks">Artificial neural networks</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=24" title="Edit section: Artificial neural networks"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Artificial_neural_network" class="mw-redirect" title="Artificial neural network">Artificial neural network</a></div><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">See also: <a href="/wiki/Deep_learning" title="Deep learning">Deep learning</a></div> <figure typeof="mw:File/Thumb"><a href="/wiki/File:Colored_neural_network.svg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/4/46/Colored_neural_network.svg/300px-Colored_neural_network.svg.png" decoding="async" width="300" height="361" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/46/Colored_neural_network.svg/450px-Colored_neural_network.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/4/46/Colored_neural_network.svg/600px-Colored_neural_network.svg.png 2x" data-file-width="296" data-file-height="356" /></a><figcaption>An artificial neural network is an interconnected group of nodes, akin to the vast network of <a href="/wiki/Neuron" title="Neuron">neurons</a> in a <a href="/wiki/Brain" title="Brain">brain</a>. Here, each circular node represents an <a href="/wiki/Artificial_neuron" title="Artificial neuron">artificial neuron</a> and an arrow represents a connection from the output of one artificial neuron to the input of another.</figcaption></figure> <p>Artificial neural networks (ANNs), or <a href="/wiki/Connectionism" title="Connectionism">connectionist</a> systems, are computing systems vaguely inspired by the <a href="/wiki/Biological_neural_network" class="mw-redirect" title="Biological neural network">biological neural networks</a> that constitute animal <a href="/wiki/Brain" title="Brain">brains</a>. Such systems "learn" to perform tasks by considering examples, generally without being programmed with any task-specific rules. </p><p>An ANN is a model based on a collection of connected units or nodes called "<a href="/wiki/Artificial_neuron" title="Artificial neuron">artificial neurons</a>", which loosely model the <a href="/wiki/Neuron" title="Neuron">neurons</a> in a biological brain. Each connection, like the <a href="/wiki/Synapse" title="Synapse">synapses</a> in a biological brain, can transmit information, a "signal", from one artificial neuron to another. An artificial neuron that receives a signal can process it and then signal additional artificial neurons connected to it. In common ANN implementations, the signal at a connection between artificial neurons is a <a href="/wiki/Real_number" title="Real number">real number</a>, and the output of each artificial neuron is computed by some non-linear function of the sum of its inputs. The connections between artificial neurons are called "edges". Artificial neurons and edges typically have a <a href="/wiki/Weight_(mathematics)" class="mw-redirect" title="Weight (mathematics)">weight</a> that adjusts as learning proceeds. 
The weight increases or decreases the strength of the signal at a connection. Artificial neurons may have a threshold such that the signal is only sent if the aggregate signal crosses that threshold. Typically, artificial neurons are aggregated into layers. Different layers may perform different kinds of transformations on their inputs. Signals travel from the first layer (the input layer) to the last layer (the output layer), possibly after traversing the layers multiple times. </p><p>The original goal of the ANN approach was to solve problems in the same way that a <a href="/wiki/Human_brain" title="Human brain">human brain</a> would. However, over time, attention moved to performing specific tasks, leading to deviations from <a href="/wiki/Biology" title="Biology">biology</a>. Artificial neural networks have been used on a variety of tasks, including <a href="/wiki/Computer_vision" title="Computer vision">computer vision</a>, <a href="/wiki/Speech_recognition" title="Speech recognition">speech recognition</a>, <a href="/wiki/Machine_translation" title="Machine translation">machine translation</a>, <a href="/wiki/Social_network" title="Social network">social network</a> filtering, <a href="/wiki/General_game_playing" title="General game playing">playing board and video games</a> and <a href="/wiki/Medical_diagnosis" title="Medical diagnosis">medical diagnosis</a>. </p><p><a href="/wiki/Deep_learning" title="Deep learning">Deep learning</a> consists of multiple hidden layers in an artificial neural network. This approach tries to model the way the human brain processes light and sound into vision and hearing. Some successful applications of deep learning are computer vision and speech recognition.<sup id="cite_ref-88" class="reference"><a href="#cite_note-88"><span class="cite-bracket">[</span>88<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Decision_trees">Decision trees</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=25" title="Edit section: Decision trees"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Decision_tree_learning" title="Decision tree learning">Decision tree learning</a></div> <figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:Decision_Tree.jpg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/e/eb/Decision_Tree.jpg/220px-Decision_Tree.jpg" decoding="async" width="220" height="228" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/e/eb/Decision_Tree.jpg/330px-Decision_Tree.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/e/eb/Decision_Tree.jpg/440px-Decision_Tree.jpg 2x" data-file-width="457" data-file-height="473" /></a><figcaption>A decision tree showing survival probability of passengers on the <a href="/wiki/Titanic" title="Titanic">Titanic</a></figcaption></figure> <p>Decision tree learning uses a <a href="/wiki/Decision_tree" title="Decision tree">decision tree</a> as a <a href="/wiki/Predictive_modeling" class="mw-redirect" title="Predictive modeling">predictive model</a> to go from observations about an item (represented in the branches) to conclusions about the item's target value (represented in the leaves). 
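</p>
<p>A small, illustrative example (assuming the scikit-learn library and its bundled Iris data rather than the Titanic data of the figure) fits a shallow classification tree and prints the learned branches and leaves:</p>
<pre>
# Decision tree sketch (assumes scikit-learn).  The printed rules show the
# branches (feature tests) and leaves (class labels) described above.
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier, export_text

data = load_iris()
tree = DecisionTreeClassifier(max_depth=2, random_state=0)
tree.fit(data.data, data.target)

print(export_text(tree, feature_names=list(data.feature_names)))
</pre>
<p>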
It is one of the predictive modeling approaches used in statistics, data mining, and machine learning. Tree models where the target variable can take a discrete set of values are called classification trees; in these tree structures, <a href="/wiki/Leaf_node" class="mw-redirect" title="Leaf node">leaves</a> represent class labels, and branches represent <a href="/wiki/Logical_conjunction" title="Logical conjunction">conjunctions</a> of features that lead to those class labels. Decision trees where the target variable can take continuous values (typically <a href="/wiki/Real_numbers" class="mw-redirect" title="Real numbers">real numbers</a>) are called regression trees. In decision analysis, a decision tree can be used to visually and explicitly represent decisions and <a href="/wiki/Decision_making" class="mw-redirect" title="Decision making">decision making</a>. In data mining, a decision tree describes data, but the resulting classification tree can be an input for decision-making. </p> <div class="mw-heading mw-heading3"><h3 id="Support-vector_machines">Support-vector machines</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=26" title="Edit section: Support-vector machines"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Support-vector_machine" class="mw-redirect" title="Support-vector machine">Support-vector machine</a></div> <p>Support-vector machines (SVMs), also known as support-vector networks, are a set of related <a href="/wiki/Supervised_learning" title="Supervised learning">supervised learning</a> methods used for classification and regression. Given a set of training examples, each marked as belonging to one of two categories, an SVM training algorithm builds a model that predicts whether a new example falls into one category.<sup id="cite_ref-CorinnaCortes_89-0" class="reference"><a href="#cite_note-CorinnaCortes-89"><span class="cite-bracket">[</span>89<span class="cite-bracket">]</span></a></sup> An SVM training algorithm is a non-<a href="/wiki/Probabilistic_classification" title="Probabilistic classification">probabilistic</a>, <a href="/wiki/Binary_classifier" class="mw-redirect" title="Binary classifier">binary</a>, <a href="/wiki/Linear_classifier" title="Linear classifier">linear classifier</a>, although methods such as <a href="/wiki/Platt_scaling" title="Platt scaling">Platt scaling</a> exist to use SVM in a probabilistic classification setting. In addition to performing linear classification, SVMs can efficiently perform a non-linear classification using what is called the <a href="/wiki/Kernel_trick" class="mw-redirect" title="Kernel trick">kernel trick</a>, implicitly mapping their inputs into high-dimensional feature spaces. 
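</p>
<p>The effect of the kernel trick can be illustrated on a toy dataset in which the two classes lie on concentric circles; the sketch below assumes the scikit-learn library and synthetic data generated only for this example:</p>
<pre>
# Support-vector machine sketch (assumes scikit-learn).  The two classes lie
# on concentric circles, so no straight line separates them; the RBF kernel
# implicitly maps the inputs to a space where they become separable.
from sklearn.datasets import make_circles
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

X, y = make_circles(n_samples=400, factor=0.4, noise=0.1, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

linear_svm = SVC(kernel="linear").fit(X_train, y_train)
kernel_svm = SVC(kernel="rbf").fit(X_train, y_train)

print("linear kernel accuracy:", linear_svm.score(X_test, y_test))
print("RBF kernel accuracy:   ", kernel_svm.score(X_test, y_test))
</pre>
<p>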
</p> <div class="mw-heading mw-heading3"><h3 id="Regression_analysis">Regression analysis</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=27" title="Edit section: Regression analysis"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Regression_analysis" title="Regression analysis">Regression analysis</a></div> <figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:Linear_regression.svg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/3/3a/Linear_regression.svg/290px-Linear_regression.svg.png" decoding="async" width="290" height="191" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/3/3a/Linear_regression.svg/435px-Linear_regression.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/3/3a/Linear_regression.svg/580px-Linear_regression.svg.png 2x" data-file-width="438" data-file-height="289" /></a><figcaption>Illustration of linear regression on a data set</figcaption></figure> <p>Regression analysis encompasses a large variety of statistical methods to estimate the relationship between input variables and their associated features. Its most common form is <a href="/wiki/Linear_regression" title="Linear regression">linear regression</a>, where a single line is drawn to best fit the given data according to a mathematical criterion such as <a href="/wiki/Ordinary_least_squares" title="Ordinary least squares">ordinary least squares</a>. The latter is often extended by <a href="/wiki/Regularization_(mathematics)" title="Regularization (mathematics)">regularization</a> methods to mitigate overfitting and bias, as in <a href="/wiki/Ridge_regression" title="Ridge regression">ridge regression</a>. When dealing with non-linear problems, go-to models include <a href="/wiki/Polynomial_regression" title="Polynomial regression">polynomial regression</a> (for example, used for trendline fitting in Microsoft Excel<sup id="cite_ref-90" class="reference"><a href="#cite_note-90"><span class="cite-bracket">[</span>90<span class="cite-bracket">]</span></a></sup>), <a href="/wiki/Logistic_regression" title="Logistic regression">logistic regression</a> (often used in <a href="/wiki/Statistical_classification" title="Statistical classification">statistical classification</a>) or even <a href="/wiki/Kernel_regression" title="Kernel regression">kernel regression</a>, which introduces non-linearity by taking advantage of the <a href="/wiki/Kernel_trick" class="mw-redirect" title="Kernel trick">kernel trick</a> to implicitly map input variables to higher-dimensional space. 
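</p>
<p>A minimal sketch of ordinary least squares and its ridge-regularized variant, written with NumPy on noisy synthetic data invented for illustration, shows both closed-form fits:</p>
<pre>
# Regression sketch with NumPy: ordinary least squares and a ridge-regularized
# variant, fitted to noisy synthetic data (values are made up for illustration).
import numpy as np

rng = np.random.RandomState(0)
x = rng.uniform(0, 10, size=100)
y = 3.0 * x + 2.0 + rng.normal(scale=2.0, size=100)   # true line: y = 3x + 2

X = np.column_stack([x, np.ones_like(x)])             # add an intercept column

# ordinary least squares: minimise ||Xw - y||^2
w_ols, *_ = np.linalg.lstsq(X, y, rcond=None)

# ridge regression: minimise ||Xw - y||^2 + lambda * ||w||^2
lam = 1.0
w_ridge = np.linalg.solve(X.T @ X + lam * np.eye(2), X.T @ y)

print("OLS slope/intercept:  ", w_ols)
print("ridge slope/intercept:", w_ridge)
</pre>
<p>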
</p> <div class="mw-heading mw-heading3"><h3 id="Bayesian_networks">Bayesian networks</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=28" title="Edit section: Bayesian networks"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Bayesian_network" title="Bayesian network">Bayesian network</a></div> <figure class="mw-default-size mw-halign-right" typeof="mw:File/Thumb"><a href="/wiki/File:SimpleBayesNetNodes.svg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/f/fd/SimpleBayesNetNodes.svg/220px-SimpleBayesNetNodes.svg.png" decoding="async" width="220" height="114" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/f/fd/SimpleBayesNetNodes.svg/330px-SimpleBayesNetNodes.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/f/fd/SimpleBayesNetNodes.svg/440px-SimpleBayesNetNodes.svg.png 2x" data-file-width="246" data-file-height="128" /></a><figcaption>A simple Bayesian network. Rain influences whether the sprinkler is activated, and both rain and the sprinkler influence whether the grass is wet.</figcaption></figure> <p>A Bayesian network, belief network, or directed acyclic graphical model is a probabilistic <a href="/wiki/Graphical_model" title="Graphical model">graphical model</a> that represents a set of <a href="/wiki/Random_variables" class="mw-redirect" title="Random variables">random variables</a> and their <a href="/wiki/Conditional_independence" title="Conditional independence">conditional independence</a> with a <a href="/wiki/Directed_acyclic_graph" title="Directed acyclic graph">directed acyclic graph</a> (DAG). For example, a Bayesian network could represent the probabilistic relationships between diseases and symptoms. Given symptoms, the network can be used to compute the probabilities of the presence of various diseases. Efficient algorithms exist that perform <a href="/wiki/Bayesian_inference" title="Bayesian inference">inference</a> and learning. Bayesian networks that model sequences of variables, like <a href="/wiki/Speech_recognition" title="Speech recognition">speech signals</a> or <a href="/wiki/Peptide_sequence" class="mw-redirect" title="Peptide sequence">protein sequences</a>, are called <a href="/wiki/Dynamic_Bayesian_network" title="Dynamic Bayesian network">dynamic Bayesian networks</a>. Generalizations of Bayesian networks that can represent and solve decision problems under uncertainty are called <a href="/wiki/Influence_diagram" title="Influence diagram">influence diagrams</a>. 
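</p>
<p>Inference in the small network of the figure can be sketched by direct enumeration; all conditional probabilities below are made up for illustration, and the code computes the probability of rain given that the grass is wet:</p>
<pre>
# Inference-by-enumeration sketch for the rain/sprinkler/wet-grass network in
# the figure.  All conditional probabilities below are made up for illustration.
P_rain = {True: 0.2, False: 0.8}
P_sprinkler = {True: {True: 0.01, False: 0.99},   # P(sprinkler | rain)
               False: {True: 0.40, False: 0.60}}
P_wet = {(True, True): 0.99, (True, False): 0.80,  # P(wet | sprinkler, rain)
         (False, True): 0.90, (False, False): 0.00}

def joint(rain, sprinkler, wet):
    p = P_rain[rain] * P_sprinkler[rain][sprinkler]
    p_wet = P_wet[(sprinkler, rain)]
    return p * (p_wet if wet else 1.0 - p_wet)

# P(rain | grass is wet) = P(rain, wet) / P(wet)
num = sum(joint(True, s, True) for s in (True, False))
den = sum(joint(r, s, True) for r in (True, False) for s in (True, False))
print("P(rain | grass wet) =", num / den)
</pre>
<p>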
</p> <div class="mw-heading mw-heading3"><h3 id="Gaussian_processes">Gaussian processes</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=29" title="Edit section: Gaussian processes"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Gaussian_processes" class="mw-redirect" title="Gaussian processes">Gaussian processes</a></div> <figure class="mw-default-size mw-halign-right" typeof="mw:File/Thumb"><a href="/wiki/File:Regressions_sine_demo.svg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/a/a7/Regressions_sine_demo.svg/220px-Regressions_sine_demo.svg.png" decoding="async" width="220" height="110" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/a/a7/Regressions_sine_demo.svg/330px-Regressions_sine_demo.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/a/a7/Regressions_sine_demo.svg/440px-Regressions_sine_demo.svg.png 2x" data-file-width="900" data-file-height="450" /></a><figcaption>An example of Gaussian Process Regression (prediction) compared with other regression models<sup id="cite_ref-91" class="reference"><a href="#cite_note-91"><span class="cite-bracket">[</span>91<span class="cite-bracket">]</span></a></sup></figcaption></figure> <p>A Gaussian process is a <a href="/wiki/Stochastic_process" title="Stochastic process">stochastic process</a> in which every finite collection of the random variables in the process has a <a href="/wiki/Multivariate_normal_distribution" title="Multivariate normal distribution">multivariate normal distribution</a>, and it relies on a pre-defined <a href="/wiki/Covariance_function" title="Covariance function">covariance function</a>, or kernel, that models how pairs of points relate to each other depending on their locations. </p><p>Given a set of observed points, or input–output examples, the distribution of the (unobserved) output of a new point as a function of its input data can be directly computed by looking at the observed points and the covariances between those points and the new, unobserved point. </p><p>Gaussian processes are popular surrogate models in <a href="/wiki/Bayesian_optimization" title="Bayesian optimization">Bayesian optimization</a> used to perform <a href="/wiki/Hyperparameter_optimization" title="Hyperparameter optimization">hyperparameter optimization</a>. 
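</p>
<p>A brief NumPy sketch of Gaussian process regression, using a squared-exponential covariance function and one-dimensional data invented for illustration, computes the prediction at new points in exactly this way, from the covariances between observed and new points:</p>
<pre>
# Gaussian process regression sketch with NumPy: the prediction at new points
# is computed from the covariances between observed and new points.
# Kernel choice and data are made up for illustration.
import numpy as np

def rbf(a, b, length=1.0):
    # squared-exponential covariance between every pair of points in a and b
    d = a.reshape(-1, 1) - b.reshape(1, -1)
    return np.exp(-0.5 * (d / length) ** 2)

x_obs = np.array([0.0, 1.0, 2.5, 4.0])
y_obs = np.sin(x_obs)
x_new = np.linspace(0.0, 4.0, 9)

noise = 1e-6
K = rbf(x_obs, x_obs) + noise * np.eye(len(x_obs))   # covariance of observations
K_star = rbf(x_new, x_obs)                            # covariance new vs observed

mean = K_star @ np.linalg.solve(K, y_obs)             # posterior mean at x_new
cov = rbf(x_new, x_new) - K_star @ np.linalg.solve(K, K_star.T)   # posterior uncertainty
print("posterior mean:", mean)
print("posterior variance:", np.diag(cov))
</pre>
<p>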
</p> <div class="mw-heading mw-heading3"><h3 id="Genetic_algorithms">Genetic algorithms</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=30" title="Edit section: Genetic algorithms"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Genetic_algorithm" title="Genetic algorithm">Genetic algorithm</a></div> <p>A genetic algorithm (GA) is a <a href="/wiki/Search_algorithm" title="Search algorithm">search algorithm</a> and <a href="/wiki/Heuristic_(computer_science)" title="Heuristic (computer science)">heuristic</a> technique that mimics the process of <a href="/wiki/Natural_selection" title="Natural selection">natural selection</a>, using methods such as <a href="/wiki/Mutation_(genetic_algorithm)" title="Mutation (genetic algorithm)">mutation</a> and <a href="/wiki/Crossover_(genetic_algorithm)" title="Crossover (genetic algorithm)">crossover</a> to generate new <a href="/wiki/Chromosome_(genetic_algorithm)" title="Chromosome (genetic algorithm)">genotypes</a> in the hope of finding good solutions to a given problem. In machine learning, genetic algorithms were used in the 1980s and 1990s.<sup id="cite_ref-92" class="reference"><a href="#cite_note-92"><span class="cite-bracket">[</span>92<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-93" class="reference"><a href="#cite_note-93"><span class="cite-bracket">[</span>93<span class="cite-bracket">]</span></a></sup> Conversely, machine learning techniques have been used to improve the performance of genetic and <a href="/wiki/Evolutionary_algorithm" title="Evolutionary algorithm">evolutionary algorithms</a>.<sup id="cite_ref-94" class="reference"><a href="#cite_note-94"><span class="cite-bracket">[</span>94<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Belief_functions">Belief functions</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=31" title="Edit section: Belief functions"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Dempster%E2%80%93Shafer_theory" title="Dempster–Shafer theory">Dempster–Shafer theory</a></div> <p>The theory of belief functions, also referred to as evidence theory or Dempster–Shafer theory, is a general framework for reasoning with uncertainty, with understood connections to other frameworks such as <a href="/wiki/Probability" title="Probability">probability</a>, <a href="/wiki/Possibility_theory" title="Possibility theory">possibility</a> and <a href="/wiki/Imprecise_probability" title="Imprecise probability">imprecise probability theories</a>. 
These theoretical frameworks can be thought of as a kind of learner and have some analogous properties of how evidence is combined (e.g., Dempster's rule of combination), just as a <a href="/wiki/Probability_mass_function" title="Probability mass function">pmf</a>-based Bayesian approach<sup class="noprint Inline-Template" style="margin-left:0.1em; white-space:nowrap;">[<i><a href="/wiki/Wikipedia:Please_clarify" title="Wikipedia:Please clarify"><span title="The text near this tag may need clarification or removal of jargon. (January 2024)">clarification needed</span></a></i>]</sup> would combine probabilities. However, there are many caveats to these belief functions when compared to Bayesian approaches in order to incorporate ignorance and <a href="/wiki/Uncertainty_quantification" title="Uncertainty quantification">uncertainty quantification</a>. These belief function approaches that are implemented within the machine learning domain typically leverage a fusion approach of various <a href="/wiki/Ensemble_methods" class="mw-redirect" title="Ensemble methods">ensemble methods</a> to better handle the learner's <a href="/wiki/Decision_boundary" title="Decision boundary">decision boundary</a>, low samples, and ambiguous class issues that standard machine learning approaches tend to have difficulty resolving.<sup id="cite_ref-YoosefzadehNajafabadi-2021_4-1" class="reference"><a href="#cite_note-YoosefzadehNajafabadi-2021-4"><span class="cite-bracket">[</span>4<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Kohavi_9-1" class="reference"><a href="#cite_note-Kohavi-9"><span class="cite-bracket">[</span>9<span class="cite-bracket">]</span></a></sup> However, the computational complexity of these algorithms is dependent on the number of propositions (classes) and can lead to a much higher computation time when compared to other machine learning approaches. </p> <div class="mw-heading mw-heading3"><h3 id="Training_models">Training models</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=32" title="Edit section: Training models"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Typically, machine learning models require a large quantity of reliable data to perform accurate predictions. When training a machine learning model, machine learning engineers need to target and collect a large and representative <a href="/wiki/Sample_(statistics)" class="mw-redirect" title="Sample (statistics)">sample</a> of data. Data from the training set can be as varied as a <a href="/wiki/Corpus_of_text" class="mw-redirect" title="Corpus of text">corpus of text</a>, a collection of images, <a href="/wiki/Sensor" title="Sensor">sensor</a> data, and data collected from individual users of a service. <a href="/wiki/Overfitting" title="Overfitting">Overfitting</a> is something to watch out for when training a machine learning model. Trained models derived from biased or non-evaluated data can result in skewed or undesired predictions. Biased models may result in detrimental outcomes, thereby furthering the negative impacts on society or objectives. <a href="/wiki/Algorithmic_bias" title="Algorithmic bias">Algorithmic bias</a> is a potential result of data not being fully prepared for training. Machine learning ethics is becoming a field of study and is notably becoming integrated within machine learning engineering teams. 
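</p>
<p>A routine guard against overfitting is to hold part of the data out of training and compare performance on the two splits; the sketch below assumes the scikit-learn library and its bundled digits data, chosen only for illustration:</p>
<pre>
# Sketch of a basic check for overfitting (assumes scikit-learn): keep part
# of the data out of training and compare performance on both splits.
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

model = DecisionTreeClassifier(random_state=0).fit(X_train, y_train)  # unconstrained tree
print("training accuracy:", model.score(X_train, y_train))   # typically near 1.0
print("test accuracy:    ", model.score(X_test, y_test))     # noticeably lower -> overfitting
</pre>
<p>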
</p> <div class="mw-heading mw-heading4"><h4 id="Federated_learning">Federated learning</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=33" title="Edit section: Federated learning"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Federated_learning" title="Federated learning">Federated learning</a></div> <p>Federated learning is an adapted form of <a href="/wiki/Distributed_artificial_intelligence" title="Distributed artificial intelligence">distributed artificial intelligence</a> to training machine learning models that decentralizes the training process, allowing for users' privacy to be maintained by not needing to send their data to a centralized server. This also increases efficiency by decentralizing the training process to many devices. For example, <a href="/wiki/Gboard" title="Gboard">Gboard</a> uses federated machine learning to train search query prediction models on users' mobile phones without having to send individual searches back to <a href="/wiki/Google" title="Google">Google</a>.<sup id="cite_ref-95" class="reference"><a href="#cite_note-95"><span class="cite-bracket">[</span>95<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Applications">Applications</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=34" title="Edit section: Applications"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>There are many applications for machine learning, including: </p> <style data-mw-deduplicate="TemplateStyles:r1184024115">.mw-parser-output .div-col{margin-top:0.3em;column-width:30em}.mw-parser-output .div-col-small{font-size:90%}.mw-parser-output .div-col-rules{column-rule:1px solid #aaa}.mw-parser-output .div-col dl,.mw-parser-output .div-col ol,.mw-parser-output .div-col ul{margin-top:0}.mw-parser-output .div-col li,.mw-parser-output .div-col dd{page-break-inside:avoid;break-inside:avoid-column}</style><div class="div-col" style="column-width: 21em;"> <ul><li><a href="/wiki/Precision_agriculture" title="Precision agriculture">Agriculture</a></li> <li><a href="/wiki/Computational_anatomy" title="Computational anatomy">Anatomy</a></li> <li><a href="/wiki/Adaptive_website" title="Adaptive website">Adaptive website</a></li> <li><a href="/wiki/Affective_computing" title="Affective computing">Affective computing</a></li> <li><a href="/wiki/Astroinformatics" title="Astroinformatics">Astronomy</a></li> <li><a href="/wiki/Automated_decision-making" title="Automated decision-making">Automated decision-making</a></li> <li><a href="/wiki/Banking" class="mw-redirect" title="Banking">Banking</a></li> <li><a href="/wiki/Behaviorism" title="Behaviorism">Behaviorism</a></li> <li><a href="/wiki/Bioinformatics" title="Bioinformatics">Bioinformatics</a></li> <li><a href="/wiki/Brain%E2%80%93computer_interface" title="Brain–computer interface">Brain–machine interfaces</a></li> <li><a href="/wiki/Cheminformatics" title="Cheminformatics">Cheminformatics</a></li> <li><a href="/wiki/Citizen_Science" class="mw-redirect" title="Citizen Science">Citizen Science</a></li> <li><a href="/wiki/Climate_Science" class="mw-redirect" title="Climate 
Science">Climate Science</a></li> <li><a href="/wiki/Network_simulation" title="Network simulation">Computer networks</a></li> <li><a href="/wiki/Computer_vision" title="Computer vision">Computer vision</a></li> <li><a href="/wiki/Credit-card_fraud" class="mw-redirect" title="Credit-card fraud">Credit-card fraud</a> detection</li> <li><a href="/wiki/Data_quality" title="Data quality">Data quality</a></li> <li><a href="/wiki/DNA_sequence" class="mw-redirect" title="DNA sequence">DNA sequence</a> classification</li> <li><a href="/wiki/Computational_economics" title="Computational economics">Economics</a></li> <li><a href="/wiki/Financial_market" title="Financial market">Financial market</a> analysis<sup id="cite_ref-96" class="reference"><a href="#cite_note-96"><span class="cite-bracket">[</span>96<span class="cite-bracket">]</span></a></sup></li> <li><a href="/wiki/General_game_playing" title="General game playing">General game playing</a></li> <li><a href="/wiki/Handwriting_recognition" title="Handwriting recognition">Handwriting recognition</a></li> <li><a href="/wiki/Artificial_intelligence_in_healthcare" title="Artificial intelligence in healthcare">Healthcare</a></li> <li><a href="/wiki/Information_retrieval" title="Information retrieval">Information retrieval</a></li> <li><a href="/wiki/Insurance" title="Insurance">Insurance</a></li> <li><a href="/wiki/Internet_fraud" title="Internet fraud">Internet fraud</a> detection</li> <li><a href="/wiki/Knowledge_graph_embedding" title="Knowledge graph embedding">Knowledge graph embedding</a></li> <li><a href="/wiki/Computational_linguistics" title="Computational linguistics">Linguistics</a></li> <li><a href="/wiki/Machine_learning_control" title="Machine learning control">Machine learning control</a></li> <li><a href="/wiki/Machine_perception" title="Machine perception">Machine perception</a></li> <li><a href="/wiki/Machine_translation" title="Machine translation">Machine translation</a></li> <li><a href="/wiki/Marketing" title="Marketing">Marketing</a></li> <li><a href="/wiki/Automated_medical_diagnosis" class="mw-redirect" title="Automated medical diagnosis">Medical diagnosis</a></li> <li><a href="/wiki/Natural_language_processing" title="Natural language processing">Natural language processing</a></li> <li><a href="/wiki/Natural-language_understanding" class="mw-redirect" title="Natural-language understanding">Natural language understanding</a></li> <li><a href="/wiki/Online_advertising" title="Online advertising">Online advertising</a></li> <li><a href="/wiki/Mathematical_optimization" title="Mathematical optimization">Optimization</a></li> <li><a href="/wiki/Recommender_system" title="Recommender system">Recommender systems</a></li> <li><a href="/wiki/Robot_locomotion" title="Robot locomotion">Robot locomotion</a></li> <li><a href="/wiki/Search_engines" class="mw-redirect" title="Search engines">Search engines</a></li> <li><a href="/wiki/Sentiment_analysis" title="Sentiment analysis">Sentiment analysis</a></li> <li><a href="/wiki/Sequence_mining" class="mw-redirect" title="Sequence mining">Sequence mining</a></li> <li><a href="/wiki/Software_engineering" title="Software engineering">Software engineering</a></li> <li><a href="/wiki/Speech_recognition" title="Speech recognition">Speech recognition</a></li> <li><a href="/wiki/Structural_health_monitoring" title="Structural health monitoring">Structural health monitoring</a></li> <li><a href="/wiki/Syntactic_pattern_recognition" title="Syntactic pattern recognition">Syntactic pattern 
<p>In 2006, the media-services provider <a href="/wiki/Netflix">Netflix</a> held the first "<a href="/wiki/Netflix_Prize">Netflix Prize</a>" competition to find a program to better predict user preferences and improve the accuracy of its existing Cinematch movie recommendation algorithm by at least 10%. A joint team made up of researchers from <a href="/wiki/AT%26T_Labs">AT&T Labs</a>-Research, in collaboration with the teams Big Chaos and Pragmatic Theory, built an <a href="/wiki/Ensemble_Averaging">ensemble model</a> to win the Grand Prize in 2009 for $1 million.<sup>[98]</sup> Shortly after the prize was awarded, Netflix realized that viewers' ratings were not the best indicators of their viewing patterns ("everything is a recommendation"), and it changed its recommendation engine accordingly.<sup>[99]</sup> In 2010, The Wall Street Journal wrote about the firm Rebellion Research and its use of machine learning to predict the financial crisis.<sup>[100]</sup> In 2012, <a href="/wiki/Vinod_Khosla">Vinod Khosla</a>, co-founder of <a href="/wiki/Sun_Microsystems">Sun Microsystems</a>, predicted that 80% of medical doctors' jobs would be lost over the next two decades to automated machine learning medical diagnostic software.<sup>[101]</sup> In 2014, it was reported that a machine learning algorithm had been applied in the field of art history to study fine art paintings, and that it may have revealed previously unrecognized influences among artists.<sup>[102]</sup> In 2019, <a href="/wiki/Springer_Nature">Springer Nature</a> published the first research book created using machine learning.<sup>[103]</sup> In 2020, machine learning technology was used to help make diagnoses and aid researchers in developing a cure for COVID-19.<sup>[104]</sup> Machine learning has recently been applied to predict the pro-environmental behavior of travelers,<sup>[105]</sup> and to optimize smartphones' performance and thermal behavior based on the user's interaction with the phone.<sup>[106]</sup><sup>[107]</sup><sup>[108]</sup> When applied correctly, machine learning algorithms (MLAs) can utilize a wide range of company characteristics to predict stock returns without <a href="/wiki/Overfitting">overfitting</a>. By employing effective feature engineering and combining forecasts, MLAs can generate results that far surpass those obtained from basic linear techniques like <a href="/wiki/Ordinary_least_squares">OLS</a>.<sup>[109]</sup> </p>
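<p>A rough sketch of that comparison on synthetic data (assuming scikit-learn is available; the features, target, and models below are illustrative stand-ins for real firm characteristics and returns, not a replication of the cited study) contrasts an OLS baseline with two nonlinear learners whose forecasts are combined by simple averaging:</p>
<pre>
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

# synthetic "firm characteristics" with a nonlinear relation to next-period returns
rng = np.random.default_rng(42)
X = rng.normal(size=(2000, 10))
y = np.sin(X[:, 0]) * X[:, 1] + 0.1 * rng.normal(size=2000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

ols = LinearRegression().fit(X_train, y_train)

# two different learners; averaging their forecasts is a simple form of forecast combination
gb = GradientBoostingRegressor(random_state=0).fit(X_train, y_train)
rf = RandomForestRegressor(random_state=0).fit(X_train, y_train)
combined = (gb.predict(X_test) + rf.predict(X_test)) / 2

print("OLS R^2:     ", r2_score(y_test, ols.predict(X_test)))
print("Combined R^2:", r2_score(y_test, combined))
</pre>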
class="cite-bracket">]</span></a></sup> Machine learning was recently applied to predict the pro-environmental behavior of travelers.<sup id="cite_ref-105" class="reference"><a href="#cite_note-105"><span class="cite-bracket">[</span>105<span class="cite-bracket">]</span></a></sup> Recently, machine learning technology was also applied to optimize smartphone's performance and thermal behavior based on the user's interaction with the phone.<sup id="cite_ref-106" class="reference"><a href="#cite_note-106"><span class="cite-bracket">[</span>106<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-107" class="reference"><a href="#cite_note-107"><span class="cite-bracket">[</span>107<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-108" class="reference"><a href="#cite_note-108"><span class="cite-bracket">[</span>108<span class="cite-bracket">]</span></a></sup> When applied correctly, machine learning algorithms (MLAs) can utilize a wide range of company characteristics to predict stock returns without <a href="/wiki/Overfitting" title="Overfitting">overfitting</a>. By employing effective feature engineering and combining forecasts, MLAs can generate results that far surpass those obtained from basic linear techniques like <a href="/wiki/Ordinary_least_squares" title="Ordinary least squares">OLS</a>.<sup id="cite_ref-109" class="reference"><a href="#cite_note-109"><span class="cite-bracket">[</span>109<span class="cite-bracket">]</span></a></sup> </p><p>Recent advancements in machine learning have extended into the field of quantum chemistry, where novel algorithms now enable the prediction of solvent effects on chemical reactions, thereby offering new tools for chemists to tailor experimental conditions for optimal outcomes.<sup id="cite_ref-110" class="reference"><a href="#cite_note-110"><span class="cite-bracket">[</span>110<span class="cite-bracket">]</span></a></sup> </p><p>Machine Learning is becoming a useful tool to investigate and predict evacuation decision making in large scale and small scale disasters. 
<h2 id="Limitations">Limitations</h2> <p>Although machine learning has been transformative in some fields, machine-learning programs often fail to deliver expected results.<sup>[116]</sup><sup>[117]</sup><sup>[118]</sup> Reasons for this are numerous: lack of (suitable) data, lack of access to the data, data bias, privacy problems, badly chosen tasks and algorithms, wrong tools and people, lack of resources, and evaluation problems.<sup>[119]</sup> </p><p>The "<a href="/wiki/Black_box">black box</a>" problem poses yet another significant challenge. "Black box" refers to a situation where the algorithm or the process of producing an output is entirely opaque, meaning that even the coders of the algorithm cannot audit the pattern that the machine extracted from the data.<sup>[120]</sup> The House of Lords Select Committee claimed that an "intelligence system" that could have a "substantial impact on an individual's life" would not be considered acceptable unless it provided "a full and satisfactory explanation for the decisions" it makes.<sup>[120]</sup> </p>
<p>In 2018, a self-driving car from <a href="/wiki/Uber">Uber</a> failed to detect a pedestrian, who was killed after a collision.<sup>[121]</sup> Attempts to use machine learning in healthcare with the <a href="/wiki/Watson_(computer)">IBM Watson</a> system failed to deliver even after years of time and billions of dollars invested.<sup>[122]</sup><sup>[123]</sup> Microsoft's <a href="/wiki/Bing_Chat">Bing Chat</a> chatbot has been reported to produce hostile and offensive responses to its users.<sup>[124]</sup> </p><p>Machine learning has been used as a strategy to update the evidence related to systematic reviews and to address the increased reviewer burden caused by the growth of the biomedical literature. While it has improved with training sets, it has not yet developed sufficiently to reduce the workload burden without limiting the necessary sensitivity for the research findings themselves.<sup>[125]</sup> </p> <h3 id="Bias">Bias</h3> <div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Algorithmic_bias">Algorithmic bias</a></div> <p>Different machine learning approaches can suffer from different data biases. A machine learning system trained specifically on current customers may not be able to predict the needs of new customer groups that are not represented in the training data. When trained on human-made data, machine learning is likely to pick up the constitutional and unconscious biases already present in society.<sup>[126]</sup> </p>
<p>Language models learned from data have been shown to contain human-like biases.<sup>[127]</sup><sup>[128]</sup> In an experiment carried out by <a href="/wiki/ProPublica">ProPublica</a>, an <a href="/wiki/Investigative_journalism">investigative journalism</a> organization, a machine learning algorithm used to assess recidivism risk among prisoners falsely flagged "black defendants high risk twice as often as white defendants."<sup>[129]</sup> In 2015, Google Photos tagged a couple of black people as gorillas, which caused controversy. The gorilla label was subsequently removed, and as of 2023 the system still could not recognize gorillas.<sup>[130]</sup> Similar issues with recognizing non-white people have been found in many other systems.<sup>[131]</sup> In 2016, Microsoft tested <a href="/wiki/Tay_(chatbot)">Tay</a>, a <a href="/wiki/Chatbot">chatbot</a> that learned from Twitter, and it quickly picked up racist and sexist language.<sup>[132]</sup> </p><p>Because of such challenges, the effective use of machine learning may take longer to be adopted in other domains.<sup>[133]</sup> Concern for <a href="/wiki/Fairness_(machine_learning)">fairness</a> in machine learning, that is, reducing bias in machine learning and propelling its use for human good, is increasingly expressed by artificial intelligence scientists, including <a href="/wiki/Fei-Fei_Li">Fei-Fei Li</a>, who reminds engineers that "[t]here's nothing artificial about AI. It's inspired by people, it's created by people, and—most importantly—it impacts people. It is a powerful tool we are only just beginning to understand, and that is a profound responsibility."<sup>[134]</sup> </p>
<h3 id="Explainability">Explainability</h3> <div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Explainable_artificial_intelligence">Explainable artificial intelligence</a></div> <p>Explainable AI (XAI), or Interpretable AI, or Explainable Machine Learning (XML), is artificial intelligence (AI) in which humans can understand the decisions or predictions made by the AI.<sup>[135]</sup> It contrasts with the "black box" concept in machine learning, where even the designers of an AI cannot explain why it arrived at a specific decision.<sup>[136]</sup> By refining the mental models of users of AI-powered systems and dismantling their misconceptions, XAI promises to help users perform more effectively. XAI may be an implementation of the social right to explanation. </p> <h3 id="Overfitting">Overfitting</h3> <div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Overfitting">Overfitting</a></div> <figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:Overfitted_Data.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/6/68/Overfitted_Data.png/220px-Overfitted_Data.png" width="220" height="149" /></a><figcaption>The blue line could be an example of overfitting a linear function due to random noise.</figcaption></figure> <p>Settling on a bad, overly complex theory gerrymandered to fit all the past training data is known as overfitting. Many systems attempt to reduce overfitting by rewarding a theory in accordance with how well it fits the data but penalizing it in accordance with how complex it is.<sup>[137]</sup> </p>
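<p>One common concrete form of this trade-off is a regularized objective that adds a complexity penalty to the data-fit term, as in ridge regression (a minimal NumPy sketch; the polynomial features and penalty weight below are illustrative choices, not taken from the cited source):</p>
<pre>
import numpy as np

def ridge_fit(X, y, lam):
    """Minimize  ||y - Xw||^2 + lam * ||w||^2 : data fit plus a complexity penalty."""
    d = X.shape[1]
    return np.linalg.solve(X.T @ X + lam * np.eye(d), X.T @ y)

# noisy samples of a simple linear trend, expanded into a high-degree polynomial basis
rng = np.random.default_rng(1)
x = rng.uniform(-1, 1, 30)
y = 2 * x + 0.3 * rng.normal(size=30)
X = np.vander(x, 12)                      # 12 polynomial features: flexible enough to overfit

w_unpenalized = ridge_fit(X, y, lam=0.0)  # chases the noise (overfitting)
w_penalized = ridge_fit(X, y, lam=1.0)    # the penalty shrinks the wiggly high-order terms
print(np.round(w_unpenalized, 2))
print(np.round(w_penalized, 2))
</pre>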
<h3 id="Other_limitations_and_vulnerabilities">Other limitations and vulnerabilities</h3> <p>Learners can also disappoint by "learning the wrong lesson". A toy example is that an image classifier trained only on pictures of brown horses and black cats might conclude that all brown patches are likely to be horses.<sup>[138]</sup> A real-world example is that, unlike humans, current image classifiers often do not primarily make judgments from the spatial relationships between components of the picture; instead, they learn relationships between pixels that humans are oblivious to but that still correlate with images of certain types of real objects. Modifying these patterns on a legitimate image can result in "adversarial" images that the system misclassifies.<sup>[139]</sup><sup>[140]</sup> </p><p>Adversarial vulnerabilities can also arise in nonlinear systems, or from non-pattern perturbations. For some systems, it is possible to change the output by changing only a single adversarially chosen pixel.<sup>[141]</sup> Machine learning models are often vulnerable to manipulation and/or evasion via <a href="/wiki/Adversarial_machine_learning">adversarial machine learning</a>.<sup>[142]</sup> </p><p>Researchers have demonstrated how <a href="/wiki/Backdoor_(computing)">backdoors</a> can be placed undetectably into machine learning classifiers (e.g., models that categorize posts as "spam" or highly visible "not spam") that are often developed and/or trained by third parties. Such a party can then change the classification of any input, including in cases for which a form of <a href="/wiki/Algorithmic_transparency">data/software transparency</a> is provided, possibly including <a href="/wiki/White-box_testing">white-box access</a>.<sup>[143]</sup><sup>[144]</sup><sup>[145]</sup> </p>
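<p>A minimal sketch of how such a perturbation can be constructed against a differentiable classifier (a fast-gradient-sign-style step on a toy linear model; the weights, image, and step size are illustrative assumptions, not an attack on any particular deployed system):</p>
<pre>
import numpy as np

rng = np.random.default_rng(0)
w = rng.normal(size=784)                     # weights of a toy linear "image" classifier
x = rng.uniform(0, 1, 784)                   # a legitimate 28x28 image, flattened

def predict(v):
    return 1.0 / (1.0 + np.exp(-(v @ w)))

y = 1.0 if predict(x) >= 0.5 else 0.0        # the label the model assigns to the clean image
grad_x = (predict(x) - y) * w                # gradient of the log-loss with respect to the pixels

# fast-gradient-sign-style step: a tiny, visually negligible change to every pixel
epsilon = 0.05
x_adv = np.clip(x + epsilon * np.sign(grad_x), 0, 1)

print("clean score:      ", predict(x))
print("adversarial score:", predict(x_adv))
</pre>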
<h2 id="Model_assessments">Model assessments</h2> <p>Classification of machine learning models can be validated by accuracy estimation techniques like the <a href="/wiki/Test_set">holdout</a> method, which splits the data into a training and a test set (conventionally, 2/3 of the data for training and 1/3 for testing) and evaluates the performance of the trained model on the test set. In comparison, the K-fold <a href="/wiki/Cross-validation_(statistics)">cross-validation</a> method randomly partitions the data into K subsets; K experiments are then performed, each using one subset for evaluation and the remaining K-1 subsets for training the model. In addition to the holdout and cross-validation methods, the <a href="/wiki/Bootstrapping_(statistics)">bootstrap</a>, which samples n instances with replacement from the dataset, can be used to assess model accuracy.<sup>[146]</sup> </p><p>In addition to overall accuracy, investigators frequently report <a href="/wiki/Sensitivity_and_specificity">sensitivity and specificity</a>, meaning the true positive rate (TPR) and true negative rate (TNR) respectively. Similarly, investigators sometimes report the <a href="/wiki/False_positive_rate">false positive rate</a> (FPR) as well as the <a href="/wiki/False_negative_rate">false negative rate</a> (FNR). However, these rates are ratios that fail to reveal their numerators and denominators. The <a href="/wiki/Receiver_operating_characteristic">receiver operating characteristic</a> (ROC) curve, along with the accompanying area under the ROC curve (AUC), offers additional tools for classification model assessment. A higher AUC is associated with a better-performing model.<sup>[147]</sup> </p>
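<p>These assessment techniques can be sketched with scikit-learn (the bundled breast-cancer dataset, the logistic-regression pipeline, and the choice of ten folds are illustrative assumptions; the 2/3 training and 1/3 test split follows the convention described above):</p>
<pre>
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import confusion_matrix, roc_auc_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, y = load_breast_cancer(return_X_y=True)
model = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))

# holdout: conventional 2/3 training, 1/3 test split
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=1/3, random_state=0)
model.fit(X_tr, y_tr)

# sensitivity (TPR) and specificity (TNR) from the confusion matrix
tn, fp, fn, tp = confusion_matrix(y_te, model.predict(X_te)).ravel()
print("sensitivity:", tp / (tp + fn), " specificity:", tn / (tn + fp))

# area under the ROC curve, computed from predicted probabilities
print("AUC:", roc_auc_score(y_te, model.predict_proba(X_te)[:, 1]))

# K-fold cross-validation: K experiments, each holding out one of K subsets
print("10-fold accuracy:", cross_val_score(model, X, y, cv=10).mean())

# bootstrap: train on n instances sampled with replacement, test on the left-out rest
idx = np.random.default_rng(0).integers(0, len(y), len(y))
oob = np.setdiff1d(np.arange(len(y)), idx)
print("bootstrap accuracy:", model.fit(X[idx], y[idx]).score(X[oob], y[oob]))
</pre>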
<h2 id="Ethics">Ethics</h2> <div role="note" class="hatnote navigation-not-searchable">See also: <a href="/wiki/AI_alignment">AI alignment</a>, <a href="/wiki/Toronto_Declaration">Toronto Declaration</a>, and <a href="/wiki/Ethics_of_artificial_intelligence">Ethics of artificial intelligence</a></div> <p>Machine learning poses a host of <a href="/wiki/Machine_ethics">ethical questions</a>. Systems that are trained on datasets collected with biases may exhibit these biases upon use (<a href="/wiki/Algorithmic_bias">algorithmic bias</a>), thus digitizing cultural prejudices.<sup>[148]</sup> For example, in 1988, the UK's <a href="/wiki/Commission_for_Racial_Equality">Commission for Racial Equality</a> found that <a href="/wiki/St_George%27s,_University_of_London">St. George's Medical School</a> had been using a computer program trained from the data of previous admissions staff, and that this program had denied nearly 60 candidates who were found either to be women or to have non-European-sounding names.<sup>[126]</sup> Using job hiring data from a firm with racist hiring policies may lead to a machine learning system duplicating the bias by scoring job applicants by similarity to previous successful applicants.<sup>[149]</sup><sup>[150]</sup> Another example is predictive policing company <a href="/wiki/Geolitica">Geolitica</a>'s predictive algorithm, which resulted in "disproportionately high levels of over-policing in low-income and minority communities" after being trained with historical crime data.<sup>[129]</sup> </p><p>While responsible <a href="/wiki/Data_collection">collection of data</a> and documentation of the algorithmic rules used by a system are considered a critical part of machine learning, some researchers blame the lack of participation and representation of minority populations in the field of AI for machine learning's vulnerability to biases.<sup>[151]</sup> In fact, according to research carried out by the Computing Research Association (CRA) in 2021, "female faculty merely make up 16.1%" of all faculty members who focus on AI among several universities around the world.<sup>[152]</sup> Furthermore, among the group of "new U.S. resident AI PhD graduates," 45% identified as white, 22.4% as Asian, 3.2% as Hispanic, and 2.4% as African American, which further demonstrates a lack of diversity in the field of AI.<sup>[152]</sup> </p>
href="#cite_note-151"><span class="cite-bracket">[</span>151<span class="cite-bracket">]</span></a></sup> In fact, according to research carried out by the Computing Research Association (CRA) in 2021, "female faculty merely make up 16.1%" of all faculty members who focus on AI among several universities around the world.<sup id="cite_ref-Zhang_152-0" class="reference"><a href="#cite_note-Zhang-152"><span class="cite-bracket">[</span>152<span class="cite-bracket">]</span></a></sup> Furthermore, among the group of "new U.S. resident AI PhD graduates," 45% identified as white, 22.4% as Asian, 3.2% as Hispanic, and 2.4% as African American, which further demonstrates a lack of diversity in the field of AI.<sup id="cite_ref-Zhang_152-1" class="reference"><a href="#cite_note-Zhang-152"><span class="cite-bracket">[</span>152<span class="cite-bracket">]</span></a></sup> </p><p>AI can be well-equipped to make decisions in technical fields, which rely heavily on data and historical information. These decisions rely on objectivity and logical reasoning.<sup id="cite_ref-153" class="reference"><a href="#cite_note-153"><span class="cite-bracket">[</span>153<span class="cite-bracket">]</span></a></sup> Because human languages contain biases, machines trained on language <i><a href="/wiki/Text_corpus" title="Text corpus">corpora</a></i> will necessarily also learn these biases.<sup id="cite_ref-154" class="reference"><a href="#cite_note-154"><span class="cite-bracket">[</span>154<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-155" class="reference"><a href="#cite_note-155"><span class="cite-bracket">[</span>155<span class="cite-bracket">]</span></a></sup> </p><p>Other forms of ethical challenges, not related to personal biases, are seen in health care. There are concerns among health care professionals that these systems might not be designed in the public's interest but as income-generating machines.<sup id="cite_ref-156" class="reference"><a href="#cite_note-156"><span class="cite-bracket">[</span>156<span class="cite-bracket">]</span></a></sup> This is especially true in the United States where there is a long-standing ethical dilemma of improving health care, but also increasing profits. For example, the algorithms could be designed to provide patients with unnecessary tests or medication in which the algorithm's proprietary owners hold stakes. 
<h2 id="Hardware">Hardware</h2> <p>Since the 2010s, advances in both machine learning algorithms and computer hardware have led to more efficient methods for training <a href="/wiki/Deep_neural_network">deep neural networks</a> (a particular narrow subdomain of machine learning) that contain many layers of nonlinear hidden units.<sup>[158]</sup> By 2019, graphics processing units (<a href="/wiki/GPU">GPUs</a>), often with AI-specific enhancements, had displaced CPUs as the dominant method of training large-scale commercial cloud AI.<sup>[159]</sup> <a href="/wiki/OpenAI">OpenAI</a> estimated the hardware compute used in the largest deep learning projects from <a href="/wiki/AlexNet">AlexNet</a> (2012) to <a href="/wiki/AlphaZero">AlphaZero</a> (2017) and found a 300,000-fold increase in the amount of compute required, with a doubling-time trendline of 3.4 months.<sup>[160]</sup><sup>[161]</sup> </p> <h3 id="Neuromorphic_computing">Neuromorphic computing</h3> <p><a href="/wiki/Neuromorphic_computing">Neuromorphic computing</a> refers to a class of computing systems designed to emulate the structure and functionality of biological neural networks. These systems may be implemented through software-based simulations on conventional hardware or through specialized hardware architectures.<sup>[162]</sup> </p>
<h4 id="physical_neural_networks">Physical neural networks</h4> <p>A <a href="/wiki/Physical_neural_network">physical neural network</a> is a specific type of neuromorphic hardware that relies on electrically adjustable materials, such as memristors, to emulate the function of <a href="/wiki/Chemical_synapse">neural synapses</a>. The term "physical neural network" highlights the use of physical hardware for computation, as opposed to software-based implementations. It broadly refers to artificial neural networks that use materials with adjustable resistance to replicate neural synapses.<sup>[163]</sup><sup>[164]</sup> </p> <h3 id="Embedded_machine_learning">Embedded machine learning</h3> <p>Embedded machine learning is a sub-field of machine learning where models are deployed on <a href="/wiki/Embedded_systems">embedded systems</a> with limited computing resources, such as <a href="/wiki/Wearable_computer">wearable computers</a>, <a href="/wiki/Edge_device">edge devices</a> and <a href="/wiki/Microcontrollers">microcontrollers</a>.<sup>[165]</sup><sup>[166]</sup><sup>[167]</sup> Running models directly on these devices eliminates the need to transfer and store data on cloud servers for further processing, thereby reducing the risk of data breaches, privacy leaks and theft of intellectual property, personal data and business secrets. Embedded machine learning can be achieved through various techniques, such as <a href="/wiki/Hardware_acceleration">hardware acceleration</a>,<sup>[168]</sup><sup>[169]</sup> <a href="/wiki/Approximate_computing">approximate computing</a>,<sup>[170]</sup> and model optimization.<sup>[171]</sup><sup>[172]</sup> Common optimization techniques include <a href="/wiki/Pruning_(artificial_neural_network)">pruning</a>, quantization, <a href="/wiki/Knowledge_distillation">knowledge distillation</a>, low-rank factorization, network architecture search, and parameter sharing. </p>
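<p>Two of those optimization techniques, magnitude pruning and 8-bit quantization, can be sketched on a single weight matrix (plain NumPy, purely illustrative; the layer size, sparsity level, and scaling scheme are assumptions rather than any particular framework's implementation):</p>
<pre>
import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(128, 128)).astype(np.float32)     # one dense layer's weights

# magnitude pruning: zero out the 80% of weights with the smallest absolute value
threshold = np.quantile(np.abs(W), 0.8)
W_pruned = np.where(np.abs(W) >= threshold, W, 0.0)

# 8-bit quantization: store int8 values plus one float scale factor per tensor
scale = np.abs(W_pruned).max() / 127.0
W_int8 = np.round(W_pruned / scale).astype(np.int8)    # what the microcontroller keeps
W_dequant = W_int8.astype(np.float32) * scale           # reconstructed at inference time

print("nonzero weights:", int((W_pruned != 0).sum()), "of", W.size)
print("max reconstruction error:", float(np.abs(W_pruned - W_dequant).max()))
</pre>
<p>Storing a sparse int8 tensor and a single scale factor in place of dense 32-bit floats is what makes such models fit within the memory and power budgets of microcontrollers, at the cost of a small, bounded reconstruction error.</p>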
<h2 id="Software">Software</h2> <p><a href="/wiki/Software_suite">Software suites</a> containing a variety of machine learning algorithms include the following: </p> <h3 id="Free_and_open-source_software">Free and open-source software<span class="anchor" id="Open-source_software"></span></h3> <div class="div-col" style="column-width: 18em;"> <ul><li><a href="/wiki/Caffe_(software)">Caffe</a></li> <li><a href="/wiki/Deeplearning4j">Deeplearning4j</a></li> <li><a href="/wiki/DeepSpeed">DeepSpeed</a></li> <li><a href="/wiki/ELKI">ELKI</a></li> <li><a href="/wiki/Google_JAX">Google JAX</a></li> <li><a href="/wiki/Infer.NET">Infer.NET</a></li> <li><a href="/wiki/Keras">Keras</a></li> <li><a href="/wiki/Kubeflow">Kubeflow</a></li> <li><a href="/wiki/LightGBM">LightGBM</a></li> <li><a href="/wiki/Apache_Mahout">Mahout</a></li> <li><a href="/wiki/Mallet_(software_project)">Mallet</a></li> <li><a href="/wiki/Microsoft_Cognitive_Toolkit">Microsoft Cognitive Toolkit</a></li> <li><a href="/wiki/ML.NET">ML.NET</a></li> <li><a href="/wiki/Mlpack">mlpack</a></li> <li><a href="/wiki/MXNet">MXNet</a></li> <li><a href="/wiki/OpenNN">OpenNN</a></li> <li><a href="/wiki/Orange_(software)">Orange</a></li> <li><a href="/wiki/Pandas_(software)">pandas</a></li> <li><a href="/wiki/ROOT">ROOT</a> (TMVA with ROOT)</li> <li><a href="/wiki/Scikit-learn">scikit-learn</a></li> <li><a href="/wiki/Shogun_(toolbox)">Shogun</a></li> <li><a href="/wiki/Apache_Spark#MLlib_Machine_Learning_Library">Spark MLlib</a></li> <li><a href="/wiki/Apache_SystemML">SystemML</a></li> <li><a href="/wiki/TensorFlow">TensorFlow</a></li> <li><a href="/wiki/Torch_(machine_learning)">Torch</a> / <a href="/wiki/PyTorch">PyTorch</a></li> <li><a href="/wiki/Weka_(machine_learning)">Weka</a> / <a href="/wiki/MOA_(Massive_Online_Analysis)">MOA</a></li> <li><a href="/wiki/XGBoost">XGBoost</a></li> <li><a href="/wiki/Yooreeka">Yooreeka</a></li></ul> </div>
<li><a href="/wiki/Mlpack" title="Mlpack">mlpack</a></li> <li><a href="/wiki/MXNet" class="mw-redirect" title="MXNet">MXNet</a></li> <li><a href="/wiki/OpenNN" title="OpenNN">OpenNN</a></li> <li><a href="/wiki/Orange_(software)" title="Orange (software)">Orange</a></li> <li><a href="/wiki/Pandas_(software)" title="Pandas (software)">pandas (software)</a></li> <li><a href="/wiki/ROOT" title="ROOT">ROOT</a> (TMVA with ROOT)</li> <li><a href="/wiki/Scikit-learn" title="Scikit-learn">scikit-learn</a></li> <li><a href="/wiki/Shogun_(toolbox)" title="Shogun (toolbox)">Shogun</a></li> <li><a href="/wiki/Apache_Spark#MLlib_Machine_Learning_Library" title="Apache Spark">Spark MLlib</a></li> <li><a href="/wiki/Apache_SystemML" class="mw-redirect" title="Apache SystemML">SystemML</a></li> <li><a href="/wiki/TensorFlow" title="TensorFlow">TensorFlow</a></li> <li><a href="/wiki/Torch_(machine_learning)" title="Torch (machine learning)">Torch</a> / <a href="/wiki/PyTorch" title="PyTorch">PyTorch</a></li> <li><a href="/wiki/Weka_(machine_learning)" class="mw-redirect" title="Weka (machine learning)">Weka</a> / <a href="/wiki/MOA_(Massive_Online_Analysis)" class="mw-redirect" title="MOA (Massive Online Analysis)">MOA</a></li> <li><a href="/wiki/XGBoost" title="XGBoost">XGBoost</a></li> <li><a href="/wiki/Yooreeka" title="Yooreeka">Yooreeka</a></li></ul> </div> <div class="mw-heading mw-heading3"><h3 id="Proprietary_software_with_free_and_open-source_editions">Proprietary software with free and open-source editions</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=48" title="Edit section: Proprietary software with free and open-source editions"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <ul><li><a href="/wiki/KNIME" title="KNIME">KNIME</a></li> <li><a href="/wiki/RapidMiner" title="RapidMiner">RapidMiner</a></li></ul> <div class="mw-heading mw-heading3"><h3 id="Proprietary_software">Proprietary software</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=49" title="Edit section: Proprietary software"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1184024115"><div class="div-col" style="column-width: 18em;"> <ul><li><a href="/wiki/Amazon_Machine_Learning" class="mw-redirect" title="Amazon Machine Learning">Amazon Machine Learning</a></li> <li><a href="/wiki/Angoss" title="Angoss">Angoss</a> KnowledgeSTUDIO</li> <li><a href="/wiki/Azure_Machine_Learning" class="mw-redirect" title="Azure Machine Learning">Azure Machine Learning</a></li> <li><a href="/wiki/IBM_Watson_Studio" title="IBM Watson Studio">IBM Watson Studio</a></li> <li><a href="/wiki/Google_Cloud_Platform#Cloud_AI" title="Google Cloud Platform">Google Cloud Vertex AI</a></li> <li><a href="/wiki/Google_APIs" title="Google APIs">Google Prediction API</a></li> <li><a href="/wiki/SPSS_Modeler" title="SPSS Modeler">IBM SPSS Modeler</a></li> <li><a href="/wiki/KXEN_Inc." 
title="KXEN Inc.">KXEN Modeler</a></li> <li><a href="/wiki/LIONsolver" title="LIONsolver">LIONsolver</a></li> <li><a href="/wiki/Mathematica" class="mw-redirect" title="Mathematica">Mathematica</a></li> <li><a href="/wiki/MATLAB" title="MATLAB">MATLAB</a></li> <li><a href="/wiki/Neural_Designer" title="Neural Designer">Neural Designer</a></li> <li><a href="/wiki/NeuroSolutions" title="NeuroSolutions">NeuroSolutions</a></li> <li><a href="/wiki/Oracle_Data_Mining" title="Oracle Data Mining">Oracle Data Mining</a></li> <li><a href="/wiki/Oracle_Cloud#Platform_as_a_Service_(PaaS)" title="Oracle Cloud">Oracle AI Platform Cloud Service</a></li> <li><a href="/wiki/PolyAnalyst" title="PolyAnalyst">PolyAnalyst</a></li> <li><a href="/wiki/RCASE" class="mw-redirect" title="RCASE">RCASE</a></li> <li><a href="/wiki/SAS_(software)#Components" title="SAS (software)">SAS Enterprise Miner</a></li> <li><a href="/wiki/SequenceL" title="SequenceL">SequenceL</a></li> <li><a href="/wiki/Splunk" title="Splunk">Splunk</a></li> <li><a href="/wiki/STATISTICA" class="mw-redirect" title="STATISTICA">STATISTICA</a> Data Miner</li></ul> </div> <div class="mw-heading mw-heading2"><h2 id="Journals">Journals</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=50" title="Edit section: Journals"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <ul><li><a href="/wiki/Journal_of_Machine_Learning_Research" title="Journal of Machine Learning Research">Journal of Machine Learning Research</a></li> <li><a href="/wiki/Machine_Learning_(journal)" title="Machine Learning (journal)">Machine Learning</a></li> <li><a href="/wiki/Nature_Machine_Intelligence" title="Nature Machine Intelligence">Nature Machine Intelligence</a></li> <li><a href="/wiki/Neural_Computation_(journal)" title="Neural Computation (journal)">Neural Computation</a></li> <li><a href="/wiki/IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence" title="IEEE Transactions on Pattern Analysis and Machine Intelligence">IEEE Transactions on Pattern Analysis and Machine Intelligence</a></li></ul> <div class="mw-heading mw-heading2"><h2 id="Conferences">Conferences</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=51" title="Edit section: Conferences"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <ul><li><a href="/wiki/AAAI_Conference_on_Artificial_Intelligence" title="AAAI Conference on Artificial Intelligence">AAAI Conference on Artificial Intelligence</a></li> <li><a href="/wiki/Association_for_Computational_Linguistics" title="Association for Computational Linguistics">Association for Computational Linguistics (<b>ACL</b>)</a></li> <li><a href="/wiki/European_Conference_on_Machine_Learning_and_Principles_and_Practice_of_Knowledge_Discovery_in_Databases" class="mw-redirect" title="European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases">European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (<b>ECML PKDD</b>)</a></li> <li><a href="/wiki/International_Conference_on_Computational_Intelligence_Methods_for_Bioinformatics_and_Biostatistics" title="International Conference on Computational Intelligence Methods for Bioinformatics and Biostatistics">International Conference on Computational Intelligence Methods for Bioinformatics 
<h2 id="See_also">See also</h2> <ul><li><a href="/wiki/Automated_machine_learning">Automated machine learning</a> – Process of automating the application of machine learning</li> <li><a href="/wiki/Big_data">Big data</a> – Extremely large or complex datasets</li> <li><a href="/wiki/Deep_learning">Deep learning</a> – Branch of ML concerned with <a href="/wiki/Artificial_neural_network">artificial neural networks</a></li> <li><a href="/wiki/Differentiable_programming">Differentiable programming</a> – Programming paradigm</li> <li><a href="/wiki/List_of_datasets_for_machine-learning_research">List of datasets for machine-learning research</a></li> <li><a href="/wiki/M-theory_(learning_framework)">M-theory (learning framework)</a></li></ul> <h2 id="References">References</h2>
<div class="reflist reflist-columns references-column-width" style="column-width: 30em;"> <ol class="references"> <li id="cite_note-1"><span class="reference-text">The definition "without being explicitly programmed" is often attributed to <a href="/wiki/Arthur_Samuel_(computer_scientist)">Arthur Samuel</a>, who coined the term "machine learning" in 1959, but the phrase is not found verbatim in this publication, and may be a <a href="/wiki/Paraphrase">paraphrase</a> that appeared later. Confer "Paraphrasing Arthur Samuel (1959), the question is: How can computers learn to solve problems without being explicitly programmed?" in <cite id="CITEREFKozaBennettAndreKeane1996" class="citation conference cs1">Koza, John R.; Bennett, Forrest H.; Andre, David; Keane, Martin A. (1996). "Automated Design of Both the Topology and Sizing of Analog Electrical Circuits Using Genetic Programming". <i>Artificial Intelligence in Design '96</i>. Dordrecht, Netherlands: Springer Netherlands. pp. 151–170. <a href="/wiki/Doi_(identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-94-009-0279-4_9">10.1007/978-94-009-0279-4_9</a>. <a href="/wiki/ISBN_(identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-94-010-6610-5">978-94-010-6610-5</a>.</cite></span> </li> <li id="cite_note-ibm-2"><span class="reference-text"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.ibm.com/topics/machine-learning">"What is Machine Learning?"</a>. <i>IBM</i>. 22 September 2021. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20231227153910/https://www.ibm.com/topics/machine-learning">Archived</a> from the original on 2023-12-27. Retrieved 2023-06-27.</cite></span> </li> <li id="cite_note-tvt-3"><span class="reference-text"><cite id="CITEREFHuNiuCarrascoLennox2020" class="citation journal cs1">Hu, Junyan; Niu, Hanlin; Carrasco, Joaquin; Lennox, Barry; Arvin, Farshad (2020). <a rel="nofollow" class="external text" href="https://doi.org/10.1109%2Ftvt.2020.3034800">"Voronoi-Based Multi-Robot Autonomous Exploration in Unknown Environments via Deep Reinforcement Learning"</a>. <i>IEEE Transactions on Vehicular Technology</i>. <b>69</b> (12): 14413–14423. <a href="/wiki/Doi_(identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2Ftvt.2020.3034800">10.1109/tvt.2020.3034800</a>. <a href="/wiki/ISSN_(identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0018-9545">0018-9545</a>. <a href="/wiki/S2CID_(identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:228989788">228989788</a>.</cite></span> </li> <li id="cite_note-YoosefzadehNajafabadi-2021-4"><span class="reference-text"><cite id="CITEREFYoosefzadeh-NajafabadiHughTulpanSulik2021" class="citation journal cs1">Yoosefzadeh-Najafabadi, Mohsen; Hugh, Earl; Tulpan, Dan; Sulik, John; Eskandari, Milad (2021). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7835636">"Application of Machine Learning Algorithms in Plant Breeding: Predicting Yield From Hyperspectral Reflectance in Soybean?"</a>. <i>Front. Plant Sci</i>. <b>11</b>: 624273. <a href="/wiki/Doi_(identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.3389%2Ffpls.2020.624273">10.3389/fpls.2020.624273</a>. <a href="/wiki/PMC_(identifier)">PMC</a> <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7835636">7835636</a>. <a href="/wiki/PMID_(identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/33510761">33510761</a>.</cite></span> </li> <li id="cite_note-bishop2006-5"><span class="reference-text"><cite id="CITEREFBishop2006" class="citation cs2"><a href="/wiki/Christopher_M._Bishop">Bishop, C. M.</a> (2006), <i>Pattern Recognition and Machine Learning</i>, Springer, <a href="/wiki/ISBN_(identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-387-31073-2">978-0-387-31073-2</a></cite></span> </li> <li id="cite_note-6"><span class="reference-text">Machine learning and pattern recognition "can be viewed as two facets of the same field".<sup>[5]</sup><sup class="reference nowrap"><span title="Page / location: vii">: vii </span></sup></span> </li> <li id="cite_note-Friedman-1998-7"><span class="reference-text"><cite id="CITEREFFriedman1998" class="citation journal cs1"><a href="/wiki/Jerome_H._Friedman">Friedman, Jerome H.</a> (1998). "Data Mining and Statistics: What's the connection?". <i>Computing Science and Statistics</i>. <b>29</b> (1): 3–9.</cite></span> </li> <li id="cite_note-Samuel-8"><span class="reference-text"><cite id="CITEREFSamuel1959" class="citation journal cs1">Samuel, Arthur (1959). "Some Studies in Machine Learning Using the Game of Checkers". <i>IBM Journal of Research and Development</i>. <b>3</b> (3): 210–229. <a href="/wiki/CiteSeerX_(identifier)">CiteSeerX</a> <a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.368.2254">10.1.1.368.2254</a>. <a href="/wiki/Doi_(identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1147%2Frd.33.0210">10.1147/rd.33.0210</a>. <a href="/wiki/S2CID_(identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:2126705">2126705</a>.</cite></span> </li> <li id="cite_note-Kohavi-9"><span class="reference-text">R. Kohavi and F. Provost, "Glossary of terms", Machine Learning, vol. 30, no. 2–3, pp. 271–274, 1998.</span> </li> <li id="cite_note-cyberthreat-10"><span class="reference-text"><cite id="CITEREFGerovitch2015" class="citation news cs1">Gerovitch, Slava (9 April 2015). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210922175839/https://nautil.us/issue/23/Dominoes/how-the-computer-got-its-revenge-on-the-soviet-union">"How the Computer Got Its Revenge on the Soviet Union"</a>. <i>Nautilus</i>. Archived from <a rel="nofollow" class="external text" href="https://nautil.us/issue/23/dominoes/how-the-computer-got-its-revenge-on-the-soviet-union">the original</a> on 22 September 2021<span class="reference-accessdate">. 
Retrieved <span class="nowrap">19 September</span> 2021</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nautilus&rft.atitle=How+the+Computer+Got+Its+Revenge+on+the+Soviet+Union&rft.date=2015-04-09&rft.aulast=Gerovitch&rft.aufirst=Slava&rft_id=https%3A%2F%2Fnautil.us%2Fissue%2F23%2Fdominoes%2Fhow-the-computer-got-its-revenge-on-the-soviet-union&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-11"><span class="mw-cite-backlink"><b><a href="#cite_ref-11">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLindsay1964" class="citation journal cs1">Lindsay, Richard P. (1 September 1964). <a rel="nofollow" class="external text" href="https://journals.sagepub.com/doi/10.1177/106591296401700364">"The Impact of Automation On Public Administration"</a>. <i>Western Political Quarterly</i>. <b>17</b> (3): 78–81. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1177%2F106591296401700364">10.1177/106591296401700364</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0043-4078">0043-4078</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:154021253">154021253</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20211006190841/https://journals.sagepub.com/doi/10.1177/106591296401700364">Archived</a> from the original on 6 October 2021<span class="reference-accessdate">. Retrieved <span class="nowrap">6 October</span> 2021</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Western+Political+Quarterly&rft.atitle=The+Impact+of+Automation+On+Public+Administration&rft.volume=17&rft.issue=3&rft.pages=78-81&rft.date=1964-09-01&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A154021253%23id-name%3DS2CID&rft.issn=0043-4078&rft_id=info%3Adoi%2F10.1177%2F106591296401700364&rft.aulast=Lindsay&rft.aufirst=Richard+P.&rft_id=https%3A%2F%2Fjournals.sagepub.com%2Fdoi%2F10.1177%2F106591296401700364&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-WhatIs-12"><span class="mw-cite-backlink">^ <a href="#cite_ref-WhatIs_12-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-WhatIs_12-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-WhatIs_12-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.techtarget.com/whatis/A-Timeline-of-Machine-Learning-History">"History and Evolution of Machine Learning: A Timeline"</a>. <i>WhatIs</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20231208220935/https://www.techtarget.com/whatis/A-Timeline-of-Machine-Learning-History">Archived</a> from the original on 2023-12-08<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2023-12-08</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=WhatIs&rft.atitle=History+and+Evolution+of+Machine+Learning%3A+A+Timeline&rft_id=https%3A%2F%2Fwww.techtarget.com%2Fwhatis%2FA-Timeline-of-Machine-Learning-History&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-13"><span class="mw-cite-backlink"><b><a href="#cite_ref-13">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMilner1993" class="citation journal cs1">Milner, Peter M. (1993). <a rel="nofollow" class="external text" href="https://www.jstor.org/stable/24941344">"The Mind and Donald O. Hebb"</a>. <i>Scientific American</i>. <b>268</b> (1): 124–129. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/1993SciAm.268a.124M">1993SciAm.268a.124M</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fscientificamerican0193-124">10.1038/scientificamerican0193-124</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0036-8733">0036-8733</a>. <a href="/wiki/JSTOR_(identifier)" class="mw-redirect" title="JSTOR (identifier)">JSTOR</a> <a rel="nofollow" class="external text" href="https://www.jstor.org/stable/24941344">24941344</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/8418480">8418480</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20231220163326/https://www.jstor.org/stable/24941344">Archived</a> from the original on 2023-12-20<span class="reference-accessdate">. Retrieved <span class="nowrap">2023-12-09</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Scientific+American&rft.atitle=The+Mind+and+Donald+O.+Hebb&rft.volume=268&rft.issue=1&rft.pages=124-129&rft.date=1993&rft_id=https%3A%2F%2Fwww.jstor.org%2Fstable%2F24941344%23id-name%3DJSTOR&rft_id=info%3Abibcode%2F1993SciAm.268a.124M&rft_id=info%3Apmid%2F8418480&rft_id=info%3Adoi%2F10.1038%2Fscientificamerican0193-124&rft.issn=0036-8733&rft.aulast=Milner&rft.aufirst=Peter+M.&rft_id=https%3A%2F%2Fwww.jstor.org%2Fstable%2F24941344&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-14"><span class="mw-cite-backlink"><b><a href="#cite_ref-14">^</a></b></span> <span class="reference-text">"Science: The Goof Button", <a href="/wiki/Time_(magazine)" title="Time (magazine)">Time (magazine)</a>, 18 August 1961.</span> </li> <li id="cite_note-15"><span class="mw-cite-backlink"><b><a href="#cite_ref-15">^</a></b></span> <span class="reference-text">Nilsson N. Learning Machines, McGraw Hill, 1965.</span> </li> <li id="cite_note-16"><span class="mw-cite-backlink"><b><a href="#cite_ref-16">^</a></b></span> <span class="reference-text">Duda, R., Hart P. 
Pattern Recognition and Scene Analysis, Wiley Interscience, 1973</span> </li> <li id="cite_note-17"><span class="mw-cite-backlink"><b><a href="#cite_ref-17">^</a></b></span> <span class="reference-text">S. Bozinovski "Teaching space: A representation concept for adaptive pattern classification" COINS Technical Report No. 81-28, Computer and Information Science Department, University of Massachusetts at Amherst, MA, 1981. <a rel="nofollow" class="external free" href="https://web.cs.umass.edu/publication/docs/1981/UM-CS-1981-028.pdf">https://web.cs.umass.edu/publication/docs/1981/UM-CS-1981-028.pdf</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210225070218/https://web.cs.umass.edu/publication/docs/1981/UM-CS-1981-028.pdf">Archived</a> 2021-02-25 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a></span> </li> <li id="cite_note-Mitchell-1997-18"><span class="mw-cite-backlink">^ <a href="#cite_ref-Mitchell-1997_18-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Mitchell-1997_18-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMitchell,_T.1997" class="citation book cs1">Mitchell, T. (1997). <i>Machine Learning</i>. McGraw Hill. p. 2. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-07-042807-2" title="Special:BookSources/978-0-07-042807-2"><bdi>978-0-07-042807-2</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Machine+Learning&rft.pages=2&rft.pub=McGraw+Hill&rft.date=1997&rft.isbn=978-0-07-042807-2&rft.au=Mitchell%2C+T.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-19"><span class="mw-cite-backlink"><b><a href="#cite_ref-19">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHarnad2008" class="citation cs2"><a href="/wiki/Stevan_Harnad" title="Stevan Harnad">Harnad, Stevan</a> (2008), <a rel="nofollow" class="external text" href="https://web.archive.org/web/20120309113922/http://eprints.ecs.soton.ac.uk/12954/">"The Annotation Game: On Turing (1950) on Computing, Machinery, and Intelligence"</a>, in Epstein, Robert; Peters, Grace (eds.), <i>The Turing Test Sourcebook: Philosophical and Methodological Issues in the Quest for the Thinking Computer</i>, Kluwer, pp. 
23–66, <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9781402067082" title="Special:BookSources/9781402067082"><bdi>9781402067082</bdi></a>, archived from <a rel="nofollow" class="external text" href="http://eprints.ecs.soton.ac.uk/12954/">the original</a> on 2012-03-09<span class="reference-accessdate">, retrieved <span class="nowrap">2012-12-11</span></span></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=The+Annotation+Game%3A+On+Turing+%281950%29+on+Computing%2C+Machinery%2C+and+Intelligence&rft.btitle=The+Turing+Test+Sourcebook%3A+Philosophical+and+Methodological+Issues+in+the+Quest+for+the+Thinking+Computer&rft.pages=23-66&rft.pub=Kluwer&rft.date=2008&rft.isbn=9781402067082&rft.aulast=Harnad&rft.aufirst=Stevan&rft_id=http%3A%2F%2Feprints.ecs.soton.ac.uk%2F12954%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-20"><span class="mw-cite-backlink"><b><a href="#cite_ref-20">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://edzion.com/2020/12/09/introduction-to-ai-part-1/">"Introduction to AI Part 1"</a>. <i>Edzion</i>. 2020-12-08. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210218005157/https://edzion.com/2020/12/09/introduction-to-ai-part-1/">Archived</a> from the original on 2021-02-18<span class="reference-accessdate">. Retrieved <span class="nowrap">2020-12-09</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Edzion&rft.atitle=Introduction+to+AI+Part+1&rft.date=2020-12-08&rft_id=https%3A%2F%2Fedzion.com%2F2020%2F12%2F09%2Fintroduction-to-ai-part-1%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-journalimcms.org-21"><span class="mw-cite-backlink"><b><a href="#cite_ref-journalimcms.org_21-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSindhuNivedhaPrakash2020" class="citation journal cs1">Sindhu V, Nivedha S, Prakash M (February 2020). <a rel="nofollow" class="external text" href="https://doi.org/10.26782%2Fjmcms.spl.7%2F2020.02.00006">"An Empirical Science Research on Bioinformatics in Machine Learning"</a>. <i>Journal of Mechanics of Continua and Mathematical Sciences</i> (7). 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.26782%2Fjmcms.spl.7%2F2020.02.00006">10.26782/jmcms.spl.7/2020.02.00006</a></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Journal+of+Mechanics+of+Continua+and+Mathematical+Sciences&rft.atitle=An+Empirical+Science+Research+on+Bioinformatics+in+Machine+Learning&rft.issue=7&rft.date=2020-02&rft_id=info%3Adoi%2F10.26782%2Fjmcms.spl.7%2F2020.02.00006&rft.aulast=Sindhu&rft.aufirst=V&rft.au=Nivedha%2C+S&rft.au=Prakash%2C+M&rft_id=https%3A%2F%2Fdoi.org%2F10.26782%252Fjmcms.spl.7%252F2020.02.00006&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-22"><span class="mw-cite-backlink"><b><a href="#cite_ref-22">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSarle1994" class="citation book cs1">Sarle, Warren S. (1994). "Neural Networks and statistical models". <i>SUGI 19: proceedings of the Nineteenth Annual SAS Users Group International Conference</i>. SAS Institute. pp. 1538–50. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9781555446116" title="Special:BookSources/9781555446116"><bdi>9781555446116</bdi></a>. <a href="/wiki/OCLC_(identifier)" class="mw-redirect" title="OCLC (identifier)">OCLC</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/oclc/35546178">35546178</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Neural+Networks+and+statistical+models&rft.btitle=SUGI+19%3A+proceedings+of+the+Nineteenth+Annual+SAS+Users+Group+International+Conference&rft.pages=1538-50&rft.pub=SAS+Institute&rft.date=1994&rft_id=info%3Aoclcnum%2F35546178&rft.isbn=9781555446116&rft.aulast=Sarle&rft.aufirst=Warren+S.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-aima-23"><span class="mw-cite-backlink">^ <a href="#cite_ref-aima_23-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-aima_23-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-aima_23-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-aima_23-3"><sup><i><b>d</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRussellNorvig2003" class="citation book cs1"><a href="/wiki/Stuart_J._Russell" title="Stuart J. Russell">Russell, Stuart</a>; <a href="/wiki/Peter_Norvig" title="Peter Norvig">Norvig, Peter</a> (2003) [1995]. <i><a href="/wiki/Artificial_Intelligence:_A_Modern_Approach" title="Artificial Intelligence: A Modern Approach">Artificial Intelligence: A Modern Approach</a></i> (2nd ed.). Prentice Hall. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0137903955" title="Special:BookSources/978-0137903955"><bdi>978-0137903955</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Artificial+Intelligence%3A+A+Modern+Approach&rft.edition=2nd&rft.pub=Prentice+Hall&rft.date=2003&rft.isbn=978-0137903955&rft.aulast=Russell&rft.aufirst=Stuart&rft.au=Norvig%2C+Peter&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-changing-24"><span class="mw-cite-backlink">^ <a href="#cite_ref-changing_24-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-changing_24-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLangley2011" class="citation journal cs1">Langley, Pat (2011). <a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs10994-011-5242-y">"The changing science of machine learning"</a>. <i><a href="/wiki/Machine_Learning_(journal)" title="Machine Learning (journal)">Machine Learning</a></i>. <b>82</b> (3): 275–9. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs10994-011-5242-y">10.1007/s10994-011-5242-y</a></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Machine+Learning&rft.atitle=The+changing+science+of+machine+learning&rft.volume=82&rft.issue=3&rft.pages=275-9&rft.date=2011&rft_id=info%3Adoi%2F10.1007%2Fs10994-011-5242-y&rft.aulast=Langley&rft.aufirst=Pat&rft_id=https%3A%2F%2Fdoi.org%2F10.1007%252Fs10994-011-5242-y&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-Data_compression_Mahoney-25"><span class="mw-cite-backlink"><b><a href="#cite_ref-Data_compression_Mahoney_25-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMahoney" class="citation web cs1">Mahoney, Matt. <a rel="nofollow" class="external text" href="http://cs.fit.edu/~mmahoney/compression/rationale.html">"Rationale for a Large Text Compression Benchmark"</a>. Florida Institute of Technology<span class="reference-accessdate">. Retrieved <span class="nowrap">5 March</span> 2013</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Rationale+for+a+Large+Text+Compression+Benchmark&rft.pub=Florida+Institute+of+Technology&rft.aulast=Mahoney&rft.aufirst=Matt&rft_id=http%3A%2F%2Fcs.fit.edu%2F~mmahoney%2Fcompression%2Frationale.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-Data_compression_Market_Efficiency-26"><span class="mw-cite-backlink"><b><a href="#cite_ref-Data_compression_Market_Efficiency_26-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFShmilovici_A.Kahiri_Y.Ben-Gal_I.Hauser_S.2009" class="citation journal cs1">Shmilovici A.; Kahiri Y.; Ben-Gal I.; Hauser S. (2009). 
<a rel="nofollow" class="external text" href="http://www.eng.tau.ac.il/~bengal/28.pdf">"Measuring the Efficiency of the Intraday Forex Market with a Universal Data Compression Algorithm"</a> <span class="cs1-format">(PDF)</span>. <i>Computational Economics</i>. <b>33</b> (2): 131–154. <a href="/wiki/CiteSeerX_(identifier)" class="mw-redirect" title="CiteSeerX (identifier)">CiteSeerX</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.627.3751">10.1.1.627.3751</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs10614-008-9153-3">10.1007/s10614-008-9153-3</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:17234503">17234503</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20090709143601/http://www.eng.tau.ac.il/~bengal/28.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2009-07-09.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Computational+Economics&rft.atitle=Measuring+the+Efficiency+of+the+Intraday+Forex+Market+with+a+Universal+Data+Compression+Algorithm&rft.volume=33&rft.issue=2&rft.pages=131-154&rft.date=2009&rft_id=https%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.627.3751%23id-name%3DCiteSeerX&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A17234503%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1007%2Fs10614-008-9153-3&rft.au=Shmilovici+A.&rft.au=Kahiri+Y.&rft.au=Ben-Gal+I.&rft.au=Hauser+S.&rft_id=http%3A%2F%2Fwww.eng.tau.ac.il%2F~bengal%2F28.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-Data_compression_Ben-Gal-27"><span class="mw-cite-backlink"><b><a href="#cite_ref-Data_compression_Ben-Gal_27-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFI._Ben-Gal2008" class="citation journal cs1">I. Ben-Gal (2008). <a rel="nofollow" class="external text" href="http://www.eng.tau.ac.il/~bengal/Journal%20Paper.pdf">"On the Use of Data Compression Measures to Analyze Robust Designs"</a> <span class="cs1-format">(PDF)</span>. <i>IEEE Transactions on Reliability</i>. <b>54</b> (3): 381–388. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FTR.2005.853280">10.1109/TR.2005.853280</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:9376086">9376086</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Transactions+on+Reliability&rft.atitle=On+the+Use+of+Data+Compression+Measures+to+Analyze+Robust+Designs&rft.volume=54&rft.issue=3&rft.pages=381-388&rft.date=2008&rft_id=info%3Adoi%2F10.1109%2FTR.2005.853280&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A9376086%23id-name%3DS2CID&rft.au=I.+Ben-Gal&rft_id=http%3A%2F%2Fwww.eng.tau.ac.il%2F~bengal%2FJournal%2520Paper.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-Data_compression_ScullyBrodley-28"><span class="mw-cite-backlink"><b><a href="#cite_ref-Data_compression_ScullyBrodley_28-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFD._ScullyCarla_E._Brodley2006" class="citation book cs1">D. Scully; <a href="/wiki/Carla_Brodley" title="Carla Brodley">Carla E. Brodley</a> (2006). "Compression and Machine Learning: A New Perspective on Feature Space Vectors". <i>Data Compression Conference (DCC'06)</i>. p. 332. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FDCC.2006.13">10.1109/DCC.2006.13</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-7695-2545-8" title="Special:BookSources/0-7695-2545-8"><bdi>0-7695-2545-8</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:12311412">12311412</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Compression+and+Machine+Learning%3A+A+New+Perspective+on+Feature+Space+Vectors&rft.btitle=Data+Compression+Conference+%28DCC%2706%29&rft.pages=332&rft.date=2006&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A12311412%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2FDCC.2006.13&rft.isbn=0-7695-2545-8&rft.au=D.+Scully&rft.au=Carla+E.+Brodley&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-29"><span class="mw-cite-backlink"><b><a href="#cite_ref-29">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGary_Adcock2023" class="citation web cs1">Gary Adcock (January 5, 2023). <a rel="nofollow" class="external text" href="https://massive.io/file-transfer/what-is-ai-video-compression/">"What Is AI Video Compression?"</a>. <i>massive.io</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">6 April</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=massive.io&rft.atitle=What+Is+AI+Video+Compression%3F&rft.date=2023-01-05&rft.au=Gary+Adcock&rft_id=https%3A%2F%2Fmassive.io%2Ffile-transfer%2Fwhat-is-ai-video-compression%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-30"><span class="mw-cite-backlink"><b><a href="#cite_ref-30">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMentzerTodericiTschannenAgustsson2020" class="citation arxiv cs1">Mentzer, Fabian; Toderici, George; Tschannen, Michael; Agustsson, Eirikur (2020). "High-Fidelity Generative Image Compression". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2006.09965">2006.09965</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/eess.IV">eess.IV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=High-Fidelity+Generative+Image+Compression&rft.date=2020&rft_id=info%3Aarxiv%2F2006.09965&rft.aulast=Mentzer&rft.aufirst=Fabian&rft.au=Toderici%2C+George&rft.au=Tschannen%2C+Michael&rft.au=Agustsson%2C+Eirikur&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-31"><span class="mw-cite-backlink"><b><a href="#cite_ref-31">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.ibm.com/topics/unsupervised-learning">"What is Unsupervised Learning? | IBM"</a>. <i>www.ibm.com</i>. 23 September 2021<span class="reference-accessdate">. Retrieved <span class="nowrap">2024-02-05</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=www.ibm.com&rft.atitle=What+is+Unsupervised+Learning%3F+%7C+IBM&rft.date=2021-09-23&rft_id=https%3A%2F%2Fwww.ibm.com%2Ftopics%2Funsupervised-learning&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-32"><span class="mw-cite-backlink"><b><a href="#cite_ref-32">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://blog.research.google/2023/05/differentially-private-clustering-for.html">"Differentially private clustering for large-scale datasets"</a>. <i>blog.research.google</i>. 2023-05-25<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2024-03-16</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=blog.research.google&rft.atitle=Differentially+private+clustering+for+large-scale+datasets&rft.date=2023-05-25&rft_id=https%3A%2F%2Fblog.research.google%2F2023%2F05%2Fdifferentially-private-clustering-for.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-33"><span class="mw-cite-backlink"><b><a href="#cite_ref-33">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFEdwards2023" class="citation web cs1">Edwards, Benj (2023-09-28). <a rel="nofollow" class="external text" href="https://arstechnica.com/information-technology/2023/09/ai-language-models-can-exceed-png-and-flac-in-lossless-compression-says-study/">"AI language models can exceed PNG and FLAC in lossless compression, says study"</a>. <i>Ars Technica</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2024-03-07</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Ars+Technica&rft.atitle=AI+language+models+can+exceed+PNG+and+FLAC+in+lossless+compression%2C+says+study&rft.date=2023-09-28&rft.aulast=Edwards&rft.aufirst=Benj&rft_id=https%3A%2F%2Farstechnica.com%2Finformation-technology%2F2023%2F09%2Fai-language-models-can-exceed-png-and-flac-in-lossless-compression-says-study%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-34"><span class="mw-cite-backlink"><b><a href="#cite_ref-34">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLe_RouxBengioFitzgibbon2012" class="citation encyclopaedia cs1">Le Roux, Nicolas; Bengio, Yoshua; Fitzgibbon, Andrew (2012). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=JPQx7s2L1A8C&q=%22Improving+First+and+Second-Order+Methods+by+Modeling+Uncertainty&pg=PA403">"Improving First and Second-Order Methods by Modeling Uncertainty"</a>. In Sra, Suvrit; Nowozin, Sebastian; Wright, Stephen J. (eds.). <i>Optimization for Machine Learning</i>. MIT Press. p. 404. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9780262016469" title="Special:BookSources/9780262016469"><bdi>9780262016469</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230117053335/https://books.google.com/books?id=JPQx7s2L1A8C&q=%22Improving+First+and+Second-Order+Methods+by+Modeling+Uncertainty&pg=PA403">Archived</a> from the original on 2023-01-17<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2020-11-12</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Improving+First+and+Second-Order+Methods+by+Modeling+Uncertainty&rft.btitle=Optimization+for+Machine+Learning&rft.pages=404&rft.pub=MIT+Press&rft.date=2012&rft.isbn=9780262016469&rft.aulast=Le+Roux&rft.aufirst=Nicolas&rft.au=Bengio%2C+Yoshua&rft.au=Fitzgibbon%2C+Andrew&rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3DJPQx7s2L1A8C%26q%3D%2522Improving%2BFirst%2Band%2BSecond-Order%2BMethods%2Bby%2BModeling%2BUncertainty%26pg%3DPA403&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-35"><span class="mw-cite-backlink"><b><a href="#cite_ref-35">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBzdokAltmanKrzywinski2018" class="citation journal cs1">Bzdok, Danilo; <a href="/wiki/Naomi_Altman" title="Naomi Altman">Altman, Naomi</a>; Krzywinski, Martin (2018). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6082636">"Statistics versus Machine Learning"</a>. <i><a href="/wiki/Nature_Methods" title="Nature Methods">Nature Methods</a></i>. <b>15</b> (4): 233–234. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fnmeth.4642">10.1038/nmeth.4642</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6082636">6082636</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/30100822">30100822</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature+Methods&rft.atitle=Statistics+versus+Machine+Learning&rft.volume=15&rft.issue=4&rft.pages=233-234&rft.date=2018&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC6082636%23id-name%3DPMC&rft_id=info%3Apmid%2F30100822&rft_id=info%3Adoi%2F10.1038%2Fnmeth.4642&rft.aulast=Bzdok&rft.aufirst=Danilo&rft.au=Altman%2C+Naomi&rft.au=Krzywinski%2C+Martin&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC6082636&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-mi_jordan_ama-36"><span class="mw-cite-backlink">^ <a href="#cite_ref-mi_jordan_ama_36-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-mi_jordan_ama_36-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMichael_I._Jordan2014" class="citation web cs1"><a href="/wiki/Michael_I._Jordan" title="Michael I. Jordan">Michael I. Jordan</a> (2014-09-10). <a rel="nofollow" class="external text" href="https://www.reddit.com/r/MachineLearning/comments/2fxi6v/ama_michael_i_jordan/ckelmtt?context=3">"statistics and machine learning"</a>. reddit. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20171018192328/https://www.reddit.com/r/MachineLearning/comments/2fxi6v/ama_michael_i_jordan/ckelmtt/?context=3">Archived</a> from the original on 2017-10-18<span class="reference-accessdate">. Retrieved <span class="nowrap">2014-10-01</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=statistics+and+machine+learning&rft.pub=reddit&rft.date=2014-09-10&rft.au=Michael+I.+Jordan&rft_id=https%3A%2F%2Fwww.reddit.com%2Fr%2FMachineLearning%2Fcomments%2F2fxi6v%2Fama_michael_i_jordan%2Fckelmtt%3Fcontext%3D3&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-37"><span class="mw-cite-backlink"><b><a href="#cite_ref-37">^</a></b></span> <span class="reference-text">Hung et al. Algorithms to Measure Surgeon Performance and Anticipate Clinical Outcomes in Robotic Surgery. JAMA Surg. 2018</span> </li> <li id="cite_note-Cornell-University-Library-2001-38"><span class="mw-cite-backlink"><b><a href="#cite_ref-Cornell-University-Library-2001_38-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCornell_University_Library2001" class="citation journal cs1">Cornell University Library (August 2001). <a rel="nofollow" class="external text" href="http://projecteuclid.org/download/pdf_1/euclid.ss/1009213726">"Breiman: Statistical Modeling: The Two Cultures (with comments and a rejoinder by the author)"</a>. <i>Statistical Science</i>. <b>16</b> (3). <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1214%2Fss%2F1009213726">10.1214/ss/1009213726</a></span>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:62729017">62729017</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170626042637/http://projecteuclid.org/download/pdf_1/euclid.ss/1009213726">Archived</a> from the original on 26 June 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">8 August</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Statistical+Science&rft.atitle=Breiman%3A+Statistical+Modeling%3A+The+Two+Cultures+%28with+comments+and+a+rejoinder+by+the+author%29&rft.volume=16&rft.issue=3&rft.date=2001-08&rft_id=info%3Adoi%2F10.1214%2Fss%2F1009213726&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A62729017%23id-name%3DS2CID&rft.au=Cornell+University+Library&rft_id=http%3A%2F%2Fprojecteuclid.org%2Fdownload%2Fpdf_1%2Feuclid.ss%2F1009213726&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-islr-39"><span class="mw-cite-backlink"><b><a href="#cite_ref-islr_39-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGareth_JamesDaniela_WittenTrevor_HastieRobert_Tibshirani2013" class="citation book cs1">Gareth James; Daniela Witten; Trevor Hastie; Robert Tibshirani (2013). 
<a rel="nofollow" class="external text" href="http://www-bcf.usc.edu/~gareth/ISL/"><i>An Introduction to Statistical Learning</i></a>. Springer. p. vii. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190623150237/http://www-bcf.usc.edu/~gareth/ISL/">Archived</a> from the original on 2019-06-23<span class="reference-accessdate">. Retrieved <span class="nowrap">2014-10-25</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=An+Introduction+to+Statistical+Learning&rft.pages=vii&rft.pub=Springer&rft.date=2013&rft.au=Gareth+James&rft.au=Daniela+Witten&rft.au=Trevor+Hastie&rft.au=Robert+Tibshirani&rft_id=http%3A%2F%2Fwww-bcf.usc.edu%2F~gareth%2FISL%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-SP_1-40"><span class="mw-cite-backlink"><b><a href="#cite_ref-SP_1_40-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRamezanpour,_A.Beam,_A.L.Chen,_J.H.Mashaghi,_A.2020" class="citation journal cs1">Ramezanpour, A.; Beam, A.L.; Chen, J.H.; Mashaghi, A. (17 November 2020). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7699346">"Statistical Physics for Medical Diagnostics: Learning, Inference, and Optimization Algorithms"</a>. <i>Diagnostics</i>. <b>10</b> (11): 972. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Fdiagnostics10110972">10.3390/diagnostics10110972</a></span>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7699346">7699346</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/33228143">33228143</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Diagnostics&rft.atitle=Statistical+Physics+for+Medical+Diagnostics%3A+Learning%2C+Inference%2C+and+Optimization+Algorithms&rft.volume=10&rft.issue=11&rft.pages=972&rft.date=2020-11-17&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7699346%23id-name%3DPMC&rft_id=info%3Apmid%2F33228143&rft_id=info%3Adoi%2F10.3390%2Fdiagnostics10110972&rft.au=Ramezanpour%2C+A.&rft.au=Beam%2C+A.L.&rft.au=Chen%2C+J.H.&rft.au=Mashaghi%2C+A.&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7699346&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-SP_2-41"><span class="mw-cite-backlink"><b><a href="#cite_ref-SP_2_41-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMashaghi,_A.Ramezanpour,_A.2018" class="citation journal cs1">Mashaghi, A.; Ramezanpour, A. (16 March 2018). "Statistical physics of medical diagnostics: Study of a probabilistic model". <i><a href="/wiki/Physical_Review_E" title="Physical Review E">Physical Review E</a></i>. <b>97</b> (3–1): 032118. 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1803.10019">1803.10019</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2018PhRvE..97c2118M">2018PhRvE..97c2118M</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1103%2FPhysRevE.97.032118">10.1103/PhysRevE.97.032118</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/29776109">29776109</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:4955393">4955393</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Physical+Review+E&rft.atitle=Statistical+physics+of+medical+diagnostics%3A+Study+of+a+probabilistic+model&rft.volume=97&rft.issue=3%E2%80%931&rft.pages=032118&rft.date=2018-03-16&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A4955393%23id-name%3DS2CID&rft_id=info%3Abibcode%2F2018PhRvE..97c2118M&rft_id=info%3Aarxiv%2F1803.10019&rft_id=info%3Apmid%2F29776109&rft_id=info%3Adoi%2F10.1103%2FPhysRevE.97.032118&rft.au=Mashaghi%2C+A.&rft.au=Ramezanpour%2C+A.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-Mohri-2012-42"><span class="mw-cite-backlink"><b><a href="#cite_ref-Mohri-2012_42-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMohriRostamizadehTalwalkar2012" class="citation book cs1"><a href="/wiki/Mehryar_Mohri" title="Mehryar Mohri">Mohri, Mehryar</a>; Rostamizadeh, Afshin; Talwalkar, Ameet (2012). <i>Foundations of Machine Learning</i>. US, Massachusetts: MIT Press. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9780262018258" title="Special:BookSources/9780262018258"><bdi>9780262018258</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Foundations+of+Machine+Learning&rft.place=US%2C+Massachusetts&rft.pub=MIT+Press&rft.date=2012&rft.isbn=9780262018258&rft.aulast=Mohri&rft.aufirst=Mehryar&rft.au=Rostamizadeh%2C+Afshin&rft.au=Talwalkar%2C+Ameet&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-alpaydin-43"><span class="mw-cite-backlink"><b><a href="#cite_ref-alpaydin_43-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAlpaydin,_Ethem2010" class="citation book cs1">Alpaydin, Ethem (2010). <span class="id-lock-registration" title="Free registration required"><a rel="nofollow" class="external text" href="https://archive.org/details/introductiontoma00alpa_0"><i>Introduction to Machine Learning</i></a></span>. London: The MIT Press. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-262-01243-0" title="Special:BookSources/978-0-262-01243-0"><bdi>978-0-262-01243-0</bdi></a><span class="reference-accessdate">. Retrieved <span class="nowrap">4 February</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Introduction+to+Machine+Learning&rft.place=London&rft.pub=The+MIT+Press&rft.date=2010&rft.isbn=978-0-262-01243-0&rft.au=Alpaydin%2C+Ethem&rft_id=https%3A%2F%2Farchive.org%2Fdetails%2Fintroductiontoma00alpa_0&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-44"><span class="mw-cite-backlink"><b><a href="#cite_ref-44">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFJordanMitchell2015" class="citation journal cs1">Jordan, M. I.; Mitchell, T. M. (17 July 2015). "Machine learning: Trends, perspectives, and prospects". <i>Science</i>. <b>349</b> (6245): 255–260. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2015Sci...349..255J">2015Sci...349..255J</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1126%2Fscience.aaa8415">10.1126/science.aaa8415</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/26185243">26185243</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:677218">677218</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Science&rft.atitle=Machine+learning%3A+Trends%2C+perspectives%2C+and+prospects&rft.volume=349&rft.issue=6245&rft.pages=255-260&rft.date=2015-07-17&rft_id=info%3Adoi%2F10.1126%2Fscience.aaa8415&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A677218%23id-name%3DS2CID&rft_id=info%3Apmid%2F26185243&rft_id=info%3Abibcode%2F2015Sci...349..255J&rft.aulast=Jordan&rft.aufirst=M.+I.&rft.au=Mitchell%2C+T.+M.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-45"><span class="mw-cite-backlink"><b><a href="#cite_ref-45">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFEl_NaqaMurphy2015" class="citation book cs1">El Naqa, Issam; Murphy, Martin J. (2015). "What is Machine Learning?". <i>Machine Learning in Radiation Oncology</i>. pp. 3–11. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-3-319-18305-3_1">10.1007/978-3-319-18305-3_1</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3-319-18304-6" title="Special:BookSources/978-3-319-18304-6"><bdi>978-3-319-18304-6</bdi></a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:178586107">178586107</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=What+is+Machine+Learning%3F&rft.btitle=Machine+Learning+in+Radiation+Oncology&rft.pages=3-11&rft.date=2015&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A178586107%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1007%2F978-3-319-18305-3_1&rft.isbn=978-3-319-18304-6&rft.aulast=El+Naqa&rft.aufirst=Issam&rft.au=Murphy%2C+Martin+J.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-46"><span class="mw-cite-backlink"><b><a href="#cite_ref-46">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFOkolieSavageOgbagaGunes2022" class="citation journal cs1">Okolie, Jude A.; Savage, Shauna; Ogbaga, Chukwuma C.; Gunes, Burcu (June 2022). <a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.totert.2022.100001">"Assessing the potential of machine learning methods to study the removal of pharmaceuticals from wastewater using biochar or activated carbon"</a>. <i>Total Environment Research Themes</i>. <b>1–2</b>: 100001. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2022TERT....100001O">2022TERT....100001O</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.totert.2022.100001">10.1016/j.totert.2022.100001</a></span>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:249022386">249022386</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Total+Environment+Research+Themes&rft.atitle=Assessing+the+potential+of+machine+learning+methods+to+study+the+removal+of+pharmaceuticals+from+wastewater+using+biochar+or+activated+carbon&rft.volume=1%E2%80%932&rft.pages=100001&rft.date=2022-06&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A249022386%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1016%2Fj.totert.2022.100001&rft_id=info%3Abibcode%2F2022TERT....100001O&rft.aulast=Okolie&rft.aufirst=Jude+A.&rft.au=Savage%2C+Shauna&rft.au=Ogbaga%2C+Chukwuma+C.&rft.au=Gunes%2C+Burcu&rft_id=https%3A%2F%2Fdoi.org%2F10.1016%252Fj.totert.2022.100001&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-47"><span class="mw-cite-backlink"><b><a href="#cite_ref-47">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRussellNorvig2010" class="citation book cs1">Russell, Stuart J.; Norvig, Peter (2010). <a href="/wiki/Artificial_Intelligence:_A_Modern_Approach" title="Artificial Intelligence: A Modern Approach"><i>Artificial Intelligence: A Modern Approach</i></a> (Third ed.). Prentice Hall. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9780136042594" title="Special:BookSources/9780136042594"><bdi>9780136042594</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Artificial+Intelligence%3A+A+Modern+Approach&rft.edition=Third&rft.pub=Prentice+Hall&rft.date=2010&rft.isbn=9780136042594&rft.aulast=Russell&rft.aufirst=Stuart+J.&rft.au=Norvig%2C+Peter&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-48"><span class="mw-cite-backlink"><b><a href="#cite_ref-48">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMohriRostamizadehTalwalkar2012" class="citation book cs1">Mohri, Mehryar; Rostamizadeh, Afshin; Talwalkar, Ameet (2012). <i>Foundations of Machine Learning</i>. The MIT Press. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9780262018258" title="Special:BookSources/9780262018258"><bdi>9780262018258</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Foundations+of+Machine+Learning&rft.pub=The+MIT+Press&rft.date=2012&rft.isbn=9780262018258&rft.aulast=Mohri&rft.aufirst=Mehryar&rft.au=Rostamizadeh%2C+Afshin&rft.au=Talwalkar%2C+Ameet&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-Alpaydin-2010-49"><span class="mw-cite-backlink"><b><a href="#cite_ref-Alpaydin-2010_49-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAlpaydin2010" class="citation book cs1">Alpaydin, Ethem (2010). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=7f5bBAAAQBAJ"><i>Introduction to Machine Learning</i></a>. MIT Press. p. 9. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-262-01243-0" title="Special:BookSources/978-0-262-01243-0"><bdi>978-0-262-01243-0</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230117053338/https://books.google.com/books?id=7f5bBAAAQBAJ">Archived</a> from the original on 2023-01-17<span class="reference-accessdate">. Retrieved <span class="nowrap">2018-11-25</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Introduction+to+Machine+Learning&rft.pages=9&rft.pub=MIT+Press&rft.date=2010&rft.isbn=978-0-262-01243-0&rft.aulast=Alpaydin&rft.aufirst=Ethem&rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3D7f5bBAAAQBAJ&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-50"><span class="mw-cite-backlink"><b><a href="#cite_ref-50">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.cs.cornell.edu/courses/cs4780/2022sp/notes/LectureNotes02.html">"Lecture 2 Notes: Supervised Learning"</a>. <i>www.cs.cornell.edu</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">2024-07-01</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=www.cs.cornell.edu&rft.atitle=Lecture+2+Notes%3A+Supervised+Learning&rft_id=https%3A%2F%2Fwww.cs.cornell.edu%2Fcourses%2Fcs4780%2F2022sp%2Fnotes%2FLectureNotes02.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-JordanBishop2004-51"><span class="mw-cite-backlink"><b><a href="#cite_ref-JordanBishop2004_51-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFJordanBishop2004" class="citation book cs1">Jordan, Michael I.; Bishop, Christopher M. (2004). "Neural Networks". In Allen B. Tucker (ed.). <i>Computer Science Handbook, Second Edition (Section VII: Intelligent Systems)</i>. Boca Raton, Florida: Chapman & Hall/CRC Press LLC. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-58488-360-9" title="Special:BookSources/978-1-58488-360-9"><bdi>978-1-58488-360-9</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Neural+Networks&rft.btitle=Computer+Science+Handbook%2C+Second+Edition+%28Section+VII%3A+Intelligent+Systems%29&rft.place=Boca+Raton%2C+Florida&rft.pub=Chapman+%26+Hall%2FCRC+Press+LLC&rft.date=2004&rft.isbn=978-1-58488-360-9&rft.aulast=Jordan&rft.aufirst=Michael+I.&rft.au=Bishop%2C+Christopher+M.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-52"><span class="mw-cite-backlink"><b><a href="#cite_ref-52">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFZhangHuangTibbs-CortesVanous2023" class="citation journal cs1">Zhang, Bosen; Huang, Haiyan; Tibbs-Cortes, Laura E.; Vanous, Adam; Zhang, Zhiwu; Sanguinet, Karen; Garland-Campbell, Kimberly A.; Yu, Jianming; Li, Xianran (2023). <a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.molp.2023.05.005">"Streamline unsupervised machine learning to survey and graph indel-based haplotypes from pan-genomes"</a>. <i>Molecular Plant</i>. <b>16</b> (6): 975–978. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.molp.2023.05.005">10.1016/j.molp.2023.05.005</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/37202927">37202927</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Molecular+Plant&rft.atitle=Streamline+unsupervised+machine+learning+to+survey+and+graph+indel-based+haplotypes+from+pan-genomes&rft.volume=16&rft.issue=6&rft.pages=975-978&rft.date=2023&rft_id=info%3Adoi%2F10.1016%2Fj.molp.2023.05.005&rft_id=info%3Apmid%2F37202927&rft.aulast=Zhang&rft.aufirst=Bosen&rft.au=Huang%2C+Haiyan&rft.au=Tibbs-Cortes%2C+Laura+E.&rft.au=Vanous%2C+Adam&rft.au=Zhang%2C+Zhiwu&rft.au=Sanguinet%2C+Karen&rft.au=Garland-Campbell%2C+Kimberly+A.&rft.au=Yu%2C+Jianming&rft.au=Li%2C+Xianran&rft_id=https%3A%2F%2Fdoi.org%2F10.1016%252Fj.molp.2023.05.005&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-53"><span class="mw-cite-backlink"><b><a href="#cite_ref-53">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFZhangHuangTibbs-CortesVanous2023" class="citation report cs1">Zhang, Bosen; Huang, Haiyan; Tibbs-Cortes, Laura E.; Vanous, Adam; Zhang, Zhiwu; Sanguinet, Karen; Garland-Campbell, Kimberly A.; Yu, Jianming; Li, Xianran (2023-02-13). Streamline unsupervised machine learning to survey and graph indel-based haplotypes from pan-genomes (Report). <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1101%2F2023.02.11.527743">10.1101/2023.02.11.527743</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=report&rft.btitle=Streamline+unsupervised+machine+learning+to+survey+and+graph+indel-based+haplotypes+from+pan-genomes&rft.date=2023-02-13&rft_id=info%3Adoi%2F10.1101%2F2023.02.11.527743&rft.aulast=Zhang&rft.aufirst=Bosen&rft.au=Huang%2C+Haiyan&rft.au=Tibbs-Cortes%2C+Laura+E.&rft.au=Vanous%2C+Adam&rft.au=Zhang%2C+Zhiwu&rft.au=Sanguinet%2C+Karen&rft.au=Garland-Campbell%2C+Kimberly+A.&rft.au=Yu%2C+Jianming&rft.au=Li%2C+Xianran&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-54"><span class="mw-cite-backlink"><b><a href="#cite_ref-54">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMisraMaaten2020" class="citation conference cs1">Misra, Ishan; Maaten, Laurens van der (2020). <a rel="nofollow" class="external text" href="https://openaccess.thecvf.com/content_CVPR_2020/html/Misra_Self-Supervised_Learning_of_Pretext-Invariant_Representations_CVPR_2020_paper.html"><i>Self-Supervised Learning of Pretext-Invariant Representations</i></a>. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). Seattle, WA, USA: <a href="/wiki/Institute_of_Electrical_and_Electronics_Engineers" title="Institute of Electrical and Electronics Engineers">IEEE</a>. pp. 6707–6717. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1912.01991">1912.01991</a></span>. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FCVPR42600.2020.00674">10.1109/CVPR42600.2020.00674</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=conference&rft.btitle=Self-Supervised+Learning+of+Pretext-Invariant+Representations&rft.place=Seattle%2C+WA%2C+USA&rft.pages=6707-6717&rft.pub=IEEE&rft.date=2020&rft_id=info%3Aarxiv%2F1912.01991&rft_id=info%3Adoi%2F10.1109%2FCVPR42600.2020.00674&rft.aulast=Misra&rft.aufirst=Ishan&rft.au=Maaten%2C+Laurens+van+der&rft_id=https%3A%2F%2Fopenaccess.thecvf.com%2Fcontent_CVPR_2020%2Fhtml%2FMisra_Self-Supervised_Learning_of_Pretext-Invariant_Representations_CVPR_2020_paper.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-55"><span class="mw-cite-backlink"><b><a href="#cite_ref-55">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFJaiswalBabuZadehBanerjee2021" class="citation journal cs1">Jaiswal, Ashish; Babu, Ashwin Ramesh; Zadeh, Mohammad Zaki; Banerjee, Debapriya; Makedon, Fillia (March 2021). <a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Ftechnologies9010002">"A Survey on Contrastive Self-Supervised Learning"</a>. <i>Technologies</i>. <b>9</b> (1): 2. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2011.00362">2011.00362</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Ftechnologies9010002">10.3390/technologies9010002</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2227-7080">2227-7080</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Technologies&rft.atitle=A+Survey+on+Contrastive+Self-Supervised+Learning&rft.volume=9&rft.issue=1&rft.pages=2&rft.date=2021-03&rft_id=info%3Aarxiv%2F2011.00362&rft.issn=2227-7080&rft_id=info%3Adoi%2F10.3390%2Ftechnologies9010002&rft.aulast=Jaiswal&rft.aufirst=Ashish&rft.au=Babu%2C+Ashwin+Ramesh&rft.au=Zadeh%2C+Mohammad+Zaki&rft.au=Banerjee%2C+Debapriya&rft.au=Makedon%2C+Fillia&rft_id=https%3A%2F%2Fdoi.org%2F10.3390%252Ftechnologies9010002&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-56"><span class="mw-cite-backlink"><b><a href="#cite_ref-56">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAlex_RatnerStephen_BachParoma_VarmaChris" class="citation web cs1">Alex Ratner; Stephen Bach; Paroma Varma; Chris. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190606043931/https://hazyresearch.github.io/snorkel/blog/ws_blog_post.html">"Weak Supervision: The New Programming Paradigm for Machine Learning"</a>. <i>hazyresearch.github.io</i>. referencing work by many other members of Hazy Research. 
Archived from <a rel="nofollow" class="external text" href="https://hazyresearch.github.io/snorkel/blog/ws_blog_post.html">the original</a> on 2019-06-06<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-06-06</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=hazyresearch.github.io&rft.atitle=Weak+Supervision%3A+The+New+Programming+Paradigm+for+Machine+Learning&rft.au=Alex+Ratner&rft.au=Stephen+Bach&rft.au=Paroma+Varma&rft.au=Chris&rft_id=https%3A%2F%2Fhazyresearch.github.io%2Fsnorkel%2Fblog%2Fws_blog_post.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-57"><span class="mw-cite-backlink"><b><a href="#cite_ref-57">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFvan_Otterlo,_M.Wiering,_M.2012" class="citation book cs1">van Otterlo, M.; Wiering, M. (2012). "Reinforcement Learning and Markov Decision Processes". <i>Reinforcement Learning</i>. Adaptation, Learning, and Optimization. Vol. 12. pp. 3–42. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-3-642-27645-3_1">10.1007/978-3-642-27645-3_1</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3-642-27644-6" title="Special:BookSources/978-3-642-27644-6"><bdi>978-3-642-27644-6</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Reinforcement+Learning+and+Markov+Decision+Processes&rft.btitle=Reinforcement+Learning&rft.series=Adaptation%2C+Learning%2C+and+Optimization&rft.pages=3-42&rft.date=2012&rft_id=info%3Adoi%2F10.1007%2F978-3-642-27645-3_1&rft.isbn=978-3-642-27644-6&rft.au=van+Otterlo%2C+M.&rft.au=Wiering%2C+M.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-58"><span class="mw-cite-backlink"><b><a href="#cite_ref-58">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRoweisSaul2000" class="citation journal cs1">Roweis, Sam T.; Saul, Lawrence K. (22 Dec 2000). <a rel="nofollow" class="external text" href="https://science.sciencemag.org/content/290/5500/2323">"Nonlinear Dimensionality Reduction by Locally Linear Embedding"</a>. <i>Science</i>. <b>290</b> (5500): 2323–2326. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2000Sci...290.2323R">2000Sci...290.2323R</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1126%2Fscience.290.5500.2323">10.1126/science.290.5500.2323</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/11125150">11125150</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:5987139">5987139</a>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20210815021528/https://science.sciencemag.org/content/290/5500/2323">Archived</a> from the original on 15 August 2021<span class="reference-accessdate">. Retrieved <span class="nowrap">17 July</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Science&rft.atitle=Nonlinear+Dimensionality+Reduction+by+Locally+Linear+Embedding&rft.volume=290&rft.issue=5500&rft.pages=2323-2326&rft.date=2000-12-22&rft_id=info%3Adoi%2F10.1126%2Fscience.290.5500.2323&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A5987139%23id-name%3DS2CID&rft_id=info%3Apmid%2F11125150&rft_id=info%3Abibcode%2F2000Sci...290.2323R&rft.aulast=Roweis&rft.aufirst=Sam+T.&rft.au=Saul%2C+Lawrence+K.&rft_id=https%3A%2F%2Fscience.sciencemag.org%2Fcontent%2F290%2F5500%2F2323&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-59"><span class="mw-cite-backlink"><b><a href="#cite_ref-59">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFPavel_BrazdilChristophe_Giraud_CarrierCarlos_SoaresRicardo_Vilalta2009" class="citation book cs1">Pavel Brazdil; Christophe Giraud Carrier; Carlos Soares; Ricardo Vilalta (2009). <i>Metalearning: Applications to Data Mining</i> (Fourth ed.). <a href="/wiki/Springer_Science%2BBusiness_Media" title="Springer Science+Business Media">Springer Science+Business Media</a>. pp. 10–14, <i>passim</i>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3540732624" title="Special:BookSources/978-3540732624"><bdi>978-3540732624</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Metalearning%3A+Applications+to+Data+Mining&rft.pages=10-14%2C+%27%27passim%27%27&rft.edition=Fourth&rft.pub=Springer+Science%2BBusiness+Media&rft.date=2009&rft.isbn=978-3540732624&rft.au=Pavel+Brazdil&rft.au=Christophe+Giraud+Carrier&rft.au=Carlos+Soares&rft.au=Ricardo+Vilalta&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-60"><span class="mw-cite-backlink"><b><a href="#cite_ref-60">^</a></b></span> <span class="reference-text">Bozinovski, S. (1982). "A self-learning system using secondary reinforcement". In Trappl, Robert (ed.). Cybernetics and Systems Research: Proceedings of the Sixth European Meeting on Cybernetics and Systems Research. North-Holland. pp. 397–402. <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-444-86488-8" title="Special:BookSources/978-0-444-86488-8">978-0-444-86488-8</a>.</span> </li> <li id="cite_note-61"><span class="mw-cite-backlink"><b><a href="#cite_ref-61">^</a></b></span> <span class="reference-text">Bozinovski, Stevo (2014) "Modeling mechanisms of cognition-emotion interaction in artificial neural networks, since 1981." Procedia Computer Science p. 255-263</span> </li> <li id="cite_note-62"><span class="mw-cite-backlink"><b><a href="#cite_ref-62">^</a></b></span> <span class="reference-text">Bozinovski, S. (2001) "Self-learning agents: A connectionist theory of emotion based on crossbar value judgment." 
Cybernetics and Systems 32(6) 637–667.</span> </li> <li id="cite_note-pami-63"><span class="mw-cite-backlink"><b><a href="#cite_ref-pami_63-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFY._BengioA._CourvilleP._Vincent2013" class="citation journal cs1">Y. Bengio; A. Courville; P. Vincent (2013). "Representation Learning: A Review and New Perspectives". <i>IEEE Transactions on Pattern Analysis and Machine Intelligence</i>. <b>35</b> (8): 1798–1828. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1206.5538">1206.5538</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2Ftpami.2013.50">10.1109/tpami.2013.50</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/23787338">23787338</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:393948">393948</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Transactions+on+Pattern+Analysis+and+Machine+Intelligence&rft.atitle=Representation+Learning%3A+A+Review+and+New+Perspectives&rft.volume=35&rft.issue=8&rft.pages=1798-1828&rft.date=2013&rft_id=info%3Aarxiv%2F1206.5538&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A393948%23id-name%3DS2CID&rft_id=info%3Apmid%2F23787338&rft_id=info%3Adoi%2F10.1109%2Ftpami.2013.50&rft.au=Y.+Bengio&rft.au=A.+Courville&rft.au=P.+Vincent&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-64"><span class="mw-cite-backlink"><b><a href="#cite_ref-64">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFNathan_SrebroJason_D._M._RennieTommi_S._Jaakkola2004" class="citation conference cs1">Nathan Srebro; Jason D. M. Rennie; Tommi S. Jaakkola (2004). <i>Maximum-Margin Matrix Factorization</i>. <a href="/wiki/Conference_on_Neural_Information_Processing_Systems" title="Conference on Neural Information Processing Systems">NIPS</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=conference&rft.btitle=Maximum-Margin+Matrix+Factorization&rft.date=2004&rft.au=Nathan+Srebro&rft.au=Jason+D.+M.+Rennie&rft.au=Tommi+S.+Jaakkola&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-coates2011-65"><span class="mw-cite-backlink"><b><a href="#cite_ref-coates2011_65-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCoatesLeeNg2011" class="citation conference cs1">Coates, Adam; Lee, Honglak; Ng, Andrew Y. (2011). 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20170813153615/http://machinelearning.wustl.edu/mlpapers/paper_files/AISTATS2011_CoatesNL11.pdf"><i>An analysis of single-layer networks in unsupervised feature learning</i></a> <span class="cs1-format">(PDF)</span>. Int'l Conf. on AI and Statistics (AISTATS). Archived from <a rel="nofollow" class="external text" href="http://machinelearning.wustl.edu/mlpapers/paper_files/AISTATS2011_CoatesNL11.pdf">the original</a> <span class="cs1-format">(PDF)</span> on 2017-08-13<span class="reference-accessdate">. Retrieved <span class="nowrap">2018-11-25</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=conference&rft.btitle=An+analysis+of+single-layer+networks+in+unsupervised+feature+learning&rft.date=2011&rft.aulast=Coates&rft.aufirst=Adam&rft.au=Lee%2C+Honglak&rft.au=Ng%2C+Andrew+Y.&rft_id=http%3A%2F%2Fmachinelearning.wustl.edu%2Fmlpapers%2Fpaper_files%2FAISTATS2011_CoatesNL11.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-66"><span class="mw-cite-backlink"><b><a href="#cite_ref-66">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCsurkaDanceFanWillamowski2004" class="citation conference cs1">Csurka, Gabriella; Dance, Christopher C.; Fan, Lixin; Willamowski, Jutta; Bray, Cédric (2004). <a rel="nofollow" class="external text" href="https://www.cs.cmu.edu/~efros/courses/LBMV07/Papers/csurka-eccv-04.pdf"><i>Visual categorization with bags of keypoints</i></a> <span class="cs1-format">(PDF)</span>. ECCV Workshop on Statistical Learning in Computer Vision. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190713040210/http://www.cs.cmu.edu/~efros/courses/LBMV07/Papers/csurka-eccv-04.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2019-07-13<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-08-29</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=conference&rft.btitle=Visual+categorization+with+bags+of+keypoints&rft.date=2004&rft.aulast=Csurka&rft.aufirst=Gabriella&rft.au=Dance%2C+Christopher+C.&rft.au=Fan%2C+Lixin&rft.au=Willamowski%2C+Jutta&rft.au=Bray%2C+C%C3%A9dric&rft_id=https%3A%2F%2Fwww.cs.cmu.edu%2F~efros%2Fcourses%2FLBMV07%2FPapers%2Fcsurka-eccv-04.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-jurafsky-67"><span class="mw-cite-backlink"><b><a href="#cite_ref-jurafsky_67-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDaniel_JurafskyJames_H._Martin2009" class="citation book cs1">Daniel Jurafsky; James H. Martin (2009). <i>Speech and Language Processing</i>. Pearson Education International. pp. 
145–146.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Speech+and+Language+Processing&rft.pages=145-146&rft.pub=Pearson+Education+International&rft.date=2009&rft.au=Daniel+Jurafsky&rft.au=James+H.+Martin&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-68"><span class="mw-cite-backlink"><b><a href="#cite_ref-68">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLuPlataniotisVenetsanopoulos2011" class="citation journal cs1">Lu, Haiping; Plataniotis, K.N.; Venetsanopoulos, A.N. (2011). <a rel="nofollow" class="external text" href="http://www.dsp.utoronto.ca/~haiping/Publication/SurveyMSL_PR2011.pdf">"A Survey of Multilinear Subspace Learning for Tensor Data"</a> <span class="cs1-format">(PDF)</span>. <i>Pattern Recognition</i>. <b>44</b> (7): 1540–1551. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2011PatRe..44.1540L">2011PatRe..44.1540L</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.patcog.2011.01.004">10.1016/j.patcog.2011.01.004</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190710225429/http://www.dsp.utoronto.ca/~haiping/Publication/SurveyMSL_PR2011.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2019-07-10<span class="reference-accessdate">. Retrieved <span class="nowrap">2015-09-04</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Pattern+Recognition&rft.atitle=A+Survey+of+Multilinear+Subspace+Learning+for+Tensor+Data&rft.volume=44&rft.issue=7&rft.pages=1540-1551&rft.date=2011&rft_id=info%3Adoi%2F10.1016%2Fj.patcog.2011.01.004&rft_id=info%3Abibcode%2F2011PatRe..44.1540L&rft.aulast=Lu&rft.aufirst=Haiping&rft.au=Plataniotis%2C+K.N.&rft.au=Venetsanopoulos%2C+A.N.&rft_id=http%3A%2F%2Fwww.dsp.utoronto.ca%2F~haiping%2FPublication%2FSurveyMSL_PR2011.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-69"><span class="mw-cite-backlink"><b><a href="#cite_ref-69">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFYoshua_Bengio2009" class="citation book cs1"><a href="/wiki/Yoshua_Bengio" title="Yoshua Bengio">Yoshua Bengio</a> (2009). <a rel="nofollow" class="external text" href="https://books.google.com/books?id=cq5ewg7FniMC&pg=PA3"><i>Learning Deep Architectures for AI</i></a>. Now Publishers Inc. pp. 1–3. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-60198-294-0" title="Special:BookSources/978-1-60198-294-0"><bdi>978-1-60198-294-0</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230117053339/https://books.google.com/books?id=cq5ewg7FniMC&pg=PA3">Archived</a> from the original on 2023-01-17<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2016-02-15</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Learning+Deep+Architectures+for+AI&rft.pages=1-3&rft.pub=Now+Publishers+Inc.&rft.date=2009&rft.isbn=978-1-60198-294-0&rft.au=Yoshua+Bengio&rft_id=https%3A%2F%2Fbooks.google.com%2Fbooks%3Fid%3Dcq5ewg7FniMC%26pg%3DPA3&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-70"><span class="mw-cite-backlink"><b><a href="#cite_ref-70">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFTillmann2015" class="citation journal cs1">Tillmann, A. M. (2015). "On the Computational Intractability of Exact and Approximate Dictionary Learning". <i>IEEE Signal Processing Letters</i>. <b>22</b> (1): 45–49. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1405.6664">1405.6664</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2015ISPL...22...45T">2015ISPL...22...45T</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FLSP.2014.2345761">10.1109/LSP.2014.2345761</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:13342762">13342762</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Signal+Processing+Letters&rft.atitle=On+the+Computational+Intractability+of+Exact+and+Approximate+Dictionary+Learning&rft.volume=22&rft.issue=1&rft.pages=45-49&rft.date=2015&rft_id=info%3Aarxiv%2F1405.6664&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A13342762%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2FLSP.2014.2345761&rft_id=info%3Abibcode%2F2015ISPL...22...45T&rft.aulast=Tillmann&rft.aufirst=A.+M.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-71"><span class="mw-cite-backlink"><b><a href="#cite_ref-71">^</a></b></span> <span class="reference-text"><a href="/wiki/Michal_Aharon" title="Michal Aharon">Aharon, M</a>, M Elad, and A Bruckstein. 2006. "<a rel="nofollow" class="external text" href="http://sites.fas.harvard.edu/~cs278/papers/ksvd.pdf">K-SVD: An Algorithm for Designing Overcomplete Dictionaries for Sparse Representation</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20181123142158/http://sites.fas.harvard.edu/~cs278/papers/ksvd.pdf">Archived</a> 2018-11-23 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>." 
Signal Processing, IEEE Transactions on 54 (11): 4311–4322</span> </li> <li id="cite_note-Zimek-2017-72"><span class="mw-cite-backlink"><b><a href="#cite_ref-Zimek-2017_72-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFZimekSchubert2017" class="citation cs2">Zimek, Arthur; Schubert, Erich (2017), "Outlier Detection", <i>Encyclopedia of Database Systems</i>, Springer New York, pp. 1–5, <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-1-4899-7993-3_80719-1">10.1007/978-1-4899-7993-3_80719-1</a>, <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9781489979933" title="Special:BookSources/9781489979933"><bdi>9781489979933</bdi></a></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Outlier+Detection&rft.btitle=Encyclopedia+of+Database+Systems&rft.pages=1-5&rft.pub=Springer+New+York&rft.date=2017&rft_id=info%3Adoi%2F10.1007%2F978-1-4899-7993-3_80719-1&rft.isbn=9781489979933&rft.aulast=Zimek&rft.aufirst=Arthur&rft.au=Schubert%2C+Erich&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-73"><span class="mw-cite-backlink"><b><a href="#cite_ref-73">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHodgeAustin2004" class="citation journal cs1">Hodge, V. J.; Austin, J. (2004). <a rel="nofollow" class="external text" href="http://eprints.whiterose.ac.uk/767/1/hodgevj4.pdf">"A Survey of Outlier Detection Methodologies"</a> <span class="cs1-format">(PDF)</span>. <i>Artificial Intelligence Review</i>. <b>22</b> (2): 85–126. <a href="/wiki/CiteSeerX_(identifier)" class="mw-redirect" title="CiteSeerX (identifier)">CiteSeerX</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.318.4023">10.1.1.318.4023</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs10462-004-4304-y">10.1007/s10462-004-4304-y</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:59941878">59941878</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150622042146/http://eprints.whiterose.ac.uk/767/1/hodgevj4.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2015-06-22<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2018-11-25</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Artificial+Intelligence+Review&rft.atitle=A+Survey+of+Outlier+Detection+Methodologies&rft.volume=22&rft.issue=2&rft.pages=85-126&rft.date=2004&rft_id=https%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.318.4023%23id-name%3DCiteSeerX&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A59941878%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1007%2Fs10462-004-4304-y&rft.aulast=Hodge&rft.aufirst=V.+J.&rft.au=Austin%2C+J.&rft_id=http%3A%2F%2Feprints.whiterose.ac.uk%2F767%2F1%2Fhodgevj4.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-74"><span class="mw-cite-backlink"><b><a href="#cite_ref-74">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDokasErtozKumarLazarevic2002" class="citation journal cs1">Dokas, Paul; Ertoz, Levent; Kumar, Vipin; Lazarevic, Aleksandar; Srivastava, Jaideep; Tan, Pang-Ning (2002). <a rel="nofollow" class="external text" href="https://www-users.cse.umn.edu/~lazar027/MINDS/papers/nsf_ngdm_2002.pdf">"Data mining for network intrusion detection"</a> <span class="cs1-format">(PDF)</span>. <i>Proceedings NSF Workshop on Next Generation Data Mining</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150923211542/http://www.csee.umbc.edu/~kolari1/Mining/ngdm/dokas.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2015-09-23<span class="reference-accessdate">. Retrieved <span class="nowrap">2023-03-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Proceedings+NSF+Workshop+on+Next+Generation+Data+Mining&rft.atitle=Data+mining+for+network+intrusion+detection&rft.date=2002&rft.aulast=Dokas&rft.aufirst=Paul&rft.au=Ertoz%2C+Levent&rft.au=Kumar%2C+Vipin&rft.au=Lazarevic%2C+Aleksandar&rft.au=Srivastava%2C+Jaideep&rft.au=Tan%2C+Pang-Ning&rft_id=https%3A%2F%2Fwww-users.cse.umn.edu%2F~lazar027%2FMINDS%2Fpapers%2Fnsf_ngdm_2002.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-ChandolaSurvey-75"><span class="mw-cite-backlink"><b><a href="#cite_ref-ChandolaSurvey_75-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFChandolaBanerjeeKumar2009" class="citation journal cs1">Chandola, V.; Banerjee, A.; Kumar, V. (2009). "Anomaly detection: A survey". <i><a href="/wiki/ACM_Computing_Surveys" title="ACM Computing Surveys">ACM Computing Surveys</a></i>. <b>41</b> (3): 1–58. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F1541880.1541882">10.1145/1541880.1541882</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:207172599">207172599</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=ACM+Computing+Surveys&rft.atitle=Anomaly+detection%3A+A+survey&rft.volume=41&rft.issue=3&rft.pages=1-58&rft.date=2009&rft_id=info%3Adoi%2F10.1145%2F1541880.1541882&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A207172599%23id-name%3DS2CID&rft.aulast=Chandola&rft.aufirst=V.&rft.au=Banerjee%2C+A.&rft.au=Kumar%2C+V.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-76"><span class="mw-cite-backlink"><b><a href="#cite_ref-76">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFleerMoringenKlatzkyRitter2020" class="citation journal cs1">Fleer, S.; Moringen, A.; Klatzky, R. L.; Ritter, H. (2020). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6940144">"Learning efficient haptic shape exploration with a rigid tactile sensor array, S. Fleer, A. Moringen, R. Klatzky, H. Ritter"</a>. <i>PLOS ONE</i>. <b>15</b> (1): e0226880. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1902.07501">1902.07501</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1371%2Fjournal.pone.0226880">10.1371/journal.pone.0226880</a></span>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6940144">6940144</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/31896135">31896135</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=PLOS+ONE&rft.atitle=Learning+efficient+haptic+shape+exploration+with+a+rigid+tactile+sensor+array%2C+S.+Fleer%2C+A.+Moringen%2C+R.+Klatzky%2C+H.+Ritter&rft.volume=15&rft.issue=1&rft.pages=e0226880&rft.date=2020&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC6940144%23id-name%3DPMC&rft_id=info%3Apmid%2F31896135&rft_id=info%3Aarxiv%2F1902.07501&rft_id=info%3Adoi%2F10.1371%2Fjournal.pone.0226880&rft.aulast=Fleer&rft.aufirst=S.&rft.au=Moringen%2C+A.&rft.au=Klatzky%2C+R.+L.&rft.au=Ritter%2C+H.&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC6940144&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-77"><span class="mw-cite-backlink"><b><a href="#cite_ref-77">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMoringenFleerWalckRitter2020" class="citation cs2">Moringen, Alexandra; Fleer, Sascha; Walck, Guillaume; Ritter, Helge (2020), Nisky, Ilana; Hartcher-O'Brien, Jess; Wiertlewski, Michaël; Smeets, Jeroen (eds.), "Attention-Based Robot Learning of Haptic Interaction", <i>Haptics: Science, Technology, Applications</i>, Lecture Notes in Computer Science, vol. 12272, Cham: Springer International Publishing, pp. 462–470, <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-3-030-58147-3_51">10.1007/978-3-030-58147-3_51</a></span>, <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3-030-58146-6" title="Special:BookSources/978-3-030-58146-6"><bdi>978-3-030-58146-6</bdi></a>, <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:220069113">220069113</a></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Haptics%3A+Science%2C+Technology%2C+Applications&rft.atitle=Attention-Based+Robot+Learning+of+Haptic+Interaction&rft.volume=12272&rft.pages=462-470&rft.date=2020&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A220069113%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1007%2F978-3-030-58147-3_51&rft.isbn=978-3-030-58146-6&rft.aulast=Moringen&rft.aufirst=Alexandra&rft.au=Fleer%2C+Sascha&rft.au=Walck%2C+Guillaume&rft.au=Ritter%2C+Helge&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-piatetsky-78"><span class="mw-cite-backlink"><b><a href="#cite_ref-piatetsky_78-0">^</a></b></span> <span class="reference-text">Piatetsky-Shapiro, Gregory (1991), <i>Discovery, analysis, and presentation of strong rules</i>, in Piatetsky-Shapiro, Gregory; and Frawley, William J.; eds., <i>Knowledge Discovery in Databases</i>, AAAI/MIT Press, Cambridge, MA.</span> </li> <li id="cite_note-79"><span class="mw-cite-backlink"><b><a href="#cite_ref-79">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" 
href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBasselGlaabMarquezHoldsworth2011" class="citation journal cs1">Bassel, George W.; Glaab, Enrico; Marquez, Julietta; Holdsworth, Michael J.; Bacardit, Jaume (2011-09-01). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3203449">"Functional Network Construction in Arabidopsis Using Rule-Based Machine Learning on Large-Scale Data Sets"</a>. <i>The Plant Cell</i>. <b>23</b> (9): 3101–3116. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2011PlanC..23.3101B">2011PlanC..23.3101B</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1105%2Ftpc.111.088153">10.1105/tpc.111.088153</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1532-298X">1532-298X</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3203449">3203449</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/21896882">21896882</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+Plant+Cell&rft.atitle=Functional+Network+Construction+in+Arabidopsis+Using+Rule-Based+Machine+Learning+on+Large-Scale+Data+Sets&rft.volume=23&rft.issue=9&rft.pages=3101-3116&rft.date=2011-09-01&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC3203449%23id-name%3DPMC&rft_id=info%3Abibcode%2F2011PlanC..23.3101B&rft_id=info%3Apmid%2F21896882&rft_id=info%3Adoi%2F10.1105%2Ftpc.111.088153&rft.issn=1532-298X&rft.aulast=Bassel&rft.aufirst=George+W.&rft.au=Glaab%2C+Enrico&rft.au=Marquez%2C+Julietta&rft.au=Holdsworth%2C+Michael+J.&rft.au=Bacardit%2C+Jaume&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC3203449&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-mining-80"><span class="mw-cite-backlink"><b><a href="#cite_ref-mining_80-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAgrawalImielińskiSwami1993" class="citation book cs1">Agrawal, R.; Imieliński, T.; Swami, A. (1993). "Mining association rules between sets of items in large databases". <i>Proceedings of the 1993 ACM SIGMOD international conference on Management of data - SIGMOD '93</i>. p. 207. <a href="/wiki/CiteSeerX_(identifier)" class="mw-redirect" title="CiteSeerX (identifier)">CiteSeerX</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.40.6984">10.1.1.40.6984</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F170035.170072">10.1145/170035.170072</a>. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0897915922" title="Special:BookSources/978-0897915922"><bdi>978-0897915922</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:490415">490415</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Mining+association+rules+between+sets+of+items+in+large+databases&rft.btitle=Proceedings+of+the+1993+ACM+SIGMOD+international+conference+on+Management+of+data+-+SIGMOD+%2793&rft.pages=207&rft.date=1993&rft_id=https%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.40.6984%23id-name%3DCiteSeerX&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A490415%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1145%2F170035.170072&rft.isbn=978-0897915922&rft.aulast=Agrawal&rft.aufirst=R.&rft.au=Imieli%C5%84ski%2C+T.&rft.au=Swami%2C+A.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-81"><span class="mw-cite-backlink"><b><a href="#cite_ref-81">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFUrbanowiczMoore2009" class="citation journal cs1">Urbanowicz, Ryan J.; Moore, Jason H. (2009-09-22). <a rel="nofollow" class="external text" href="https://doi.org/10.1155%2F2009%2F736398">"Learning Classifier Systems: A Complete Introduction, Review, and Roadmap"</a>. <i>Journal of Artificial Evolution and Applications</i>. <b>2009</b>: 1–25. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1155%2F2009%2F736398">10.1155/2009/736398</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1687-6229">1687-6229</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Journal+of+Artificial+Evolution+and+Applications&rft.atitle=Learning+Classifier+Systems%3A+A+Complete+Introduction%2C+Review%2C+and+Roadmap&rft.volume=2009&rft.pages=1-25&rft.date=2009-09-22&rft_id=info%3Adoi%2F10.1155%2F2009%2F736398&rft.issn=1687-6229&rft.aulast=Urbanowicz&rft.aufirst=Ryan+J.&rft.au=Moore%2C+Jason+H.&rft_id=https%3A%2F%2Fdoi.org%2F10.1155%252F2009%252F736398&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-82"><span class="mw-cite-backlink"><b><a href="#cite_ref-82">^</a></b></span> <span class="reference-text">Plotkin G.D. 
<a rel="nofollow" class="external text" href="https://www.era.lib.ed.ac.uk/bitstream/handle/1842/6656/Plotkin1972.pdf;sequence=1">Automatic Methods of Inductive Inference</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171222051034/https://www.era.lib.ed.ac.uk/bitstream/handle/1842/6656/Plotkin1972.pdf;sequence=1">Archived</a> 2017-12-22 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>, PhD thesis, University of Edinburgh, 1970.</span> </li> <li id="cite_note-83"><span class="mw-cite-backlink"><b><a href="#cite_ref-83">^</a></b></span> <span class="reference-text">Shapiro, Ehud Y. <a rel="nofollow" class="external text" href="http://ftp.cs.yale.edu/publications/techreports/tr192.pdf">Inductive inference of theories from facts</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210821071609/http://ftp.cs.yale.edu/publications/techreports/tr192.pdf">Archived</a> 2021-08-21 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>, Research Report 192, Yale University, Department of Computer Science, 1981. Reprinted in J.-L. Lassez, G. Plotkin (Eds.), Computational Logic, The MIT Press, Cambridge, MA, 1991, pp. 199–254.</span> </li> <li id="cite_note-84"><span class="mw-cite-backlink"><b><a href="#cite_ref-84">^</a></b></span> <span class="reference-text">Shapiro, Ehud Y. (1983). <i>Algorithmic program debugging</i>. Cambridge, Mass: MIT Press. <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-262-19218-7" title="Special:BookSources/0-262-19218-7">0-262-19218-7</a></span> </li> <li id="cite_note-85"><span class="mw-cite-backlink"><b><a href="#cite_ref-85">^</a></b></span> <span class="reference-text">Shapiro, Ehud Y. "<a rel="nofollow" class="external text" href="http://dl.acm.org/citation.cfm?id=1623364">The model inference system</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230406011006/https://dl.acm.org/citation.cfm?id=1623364">Archived</a> 2023-04-06 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>." Proceedings of the 7th international joint conference on Artificial intelligence-Volume 2. Morgan Kaufmann Publishers Inc., 1981.</span> </li> <li id="cite_note-86"><span class="mw-cite-backlink"><b><a href="#cite_ref-86">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBurkov2019" class="citation book cs1">Burkov, Andriy (2019). <i>The hundred-page machine learning book</i>. Polen: Andriy Burkov. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-9995795-0-0" title="Special:BookSources/978-1-9995795-0-0"><bdi>978-1-9995795-0-0</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=The+hundred-page+machine+learning+book&rft.place=Polen&rft.pub=Andriy+Burkov&rft.date=2019&rft.isbn=978-1-9995795-0-0&rft.aulast=Burkov&rft.aufirst=Andriy&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-87"><span class="mw-cite-backlink"><b><a href="#cite_ref-87">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRussellNorvig2021" class="citation book cs1">Russell, Stuart J.; Norvig, Peter (2021). <i>Artificial intelligence: a modern approach</i>. Pearson series in artificial intelligence (Fourth ed.). Hoboken: Pearson. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-13-461099-3" title="Special:BookSources/978-0-13-461099-3"><bdi>978-0-13-461099-3</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Artificial+intelligence%3A+a+modern+approach&rft.place=Hoboken&rft.series=Pearson+series+in+artificial+intelligence&rft.edition=Fourth&rft.pub=Pearson&rft.date=2021&rft.isbn=978-0-13-461099-3&rft.aulast=Russell&rft.aufirst=Stuart+J.&rft.au=Norvig%2C+Peter&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-88"><span class="mw-cite-backlink"><b><a href="#cite_ref-88">^</a></b></span> <span class="reference-text">Honglak Lee, Roger Grosse, Rajesh Ranganath, Andrew Y. Ng. "<a rel="nofollow" class="external text" href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.149.802&rep=rep1&type=pdf">Convolutional Deep Belief Networks for Scalable Unsupervised Learning of Hierarchical Representations</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171018182235/http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.149.802&rep=rep1&type=pdf">Archived</a> 2017-10-18 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>" Proceedings of the 26th Annual International Conference on Machine Learning, 2009.</span> </li> <li id="cite_note-CorinnaCortes-89"><span class="mw-cite-backlink"><b><a href="#cite_ref-CorinnaCortes_89-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCortesVapnik1995" class="citation journal cs1"><a href="/wiki/Corinna_Cortes" title="Corinna Cortes">Cortes, Corinna</a>; Vapnik, Vladimir N. (1995). <a rel="nofollow" class="external text" href="https://doi.org/10.1007%2FBF00994018">"Support-vector networks"</a>. <i><a href="/wiki/Machine_Learning_(journal)" title="Machine Learning (journal)">Machine Learning</a></i>. <b>20</b> (3): 273–297. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1007%2FBF00994018">10.1007/BF00994018</a></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Machine+Learning&rft.atitle=Support-vector+networks&rft.volume=20&rft.issue=3&rft.pages=273-297&rft.date=1995&rft_id=info%3Adoi%2F10.1007%2FBF00994018&rft.aulast=Cortes&rft.aufirst=Corinna&rft.au=Vapnik%2C+Vladimir+N.&rft_id=https%3A%2F%2Fdoi.org%2F10.1007%252FBF00994018&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-90"><span class="mw-cite-backlink"><b><a href="#cite_ref-90">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFStevenson" class="citation web cs1">Stevenson, Christopher. <a rel="nofollow" class="external text" href="https://facultystaff.richmond.edu/~cstevens/301/Excel4.html">"Tutorial: Polynomial Regression in Excel"</a>. <i>facultystaff.richmond.edu</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20130602200850/https://facultystaff.richmond.edu/~cstevens/301/Excel4.html">Archived</a> from the original on 2 June 2013<span class="reference-accessdate">. Retrieved <span class="nowrap">22 January</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=facultystaff.richmond.edu&rft.atitle=Tutorial%3A+Polynomial+Regression+in+Excel&rft.aulast=Stevenson&rft.aufirst=Christopher&rft_id=https%3A%2F%2Ffacultystaff.richmond.edu%2F~cstevens%2F301%2FExcel4.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-91"><span class="mw-cite-backlink"><b><a href="#cite_ref-91">^</a></b></span> <span class="reference-text">The documentation for <a href="/wiki/Scikit-learn" title="Scikit-learn">scikit-learn</a> also has similar <a rel="nofollow" class="external text" href="http://scikit-learn.org/stable/auto_examples/gaussian_process/plot_compare_gpr_krr.html">examples</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20221102184805/https://scikit-learn.org/stable/auto_examples/gaussian_process/plot_compare_gpr_krr.html">Archived</a> 2022-11-02 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>.</span> </li> <li id="cite_note-92"><span class="mw-cite-backlink"><b><a href="#cite_ref-92">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGoldbergHolland1988" class="citation journal cs1">Goldberg, David E.; Holland, John H. (1988). <a rel="nofollow" class="external text" href="https://deepblue.lib.umich.edu/bitstream/2027.42/46947/1/10994_2005_Article_422926.pdf">"Genetic algorithms and machine learning"</a> <span class="cs1-format">(PDF)</span>. <i><a href="/wiki/Machine_Learning_(journal)" title="Machine Learning (journal)">Machine Learning</a></i>. <b>3</b> (2): 95–99. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fbf00113892">10.1007/bf00113892</a></span>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:35506513">35506513</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20110516025803/http://deepblue.lib.umich.edu/bitstream/2027.42/46947/1/10994_2005_Article_422926.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2011-05-16<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-09-03</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Machine+Learning&rft.atitle=Genetic+algorithms+and+machine+learning&rft.volume=3&rft.issue=2&rft.pages=95-99&rft.date=1988&rft_id=info%3Adoi%2F10.1007%2Fbf00113892&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A35506513%23id-name%3DS2CID&rft.aulast=Goldberg&rft.aufirst=David+E.&rft.au=Holland%2C+John+H.&rft_id=https%3A%2F%2Fdeepblue.lib.umich.edu%2Fbitstream%2F2027.42%2F46947%2F1%2F10994_2005_Article_422926.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-93"><span class="mw-cite-backlink"><b><a href="#cite_ref-93">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMichieSpiegelhalterTaylor1994" class="citation journal cs1">Michie, D.; Spiegelhalter, D. J.; Taylor, C. C. (1994). "Machine Learning, Neural and Statistical Classification". <i>Ellis Horwood Series in Artificial Intelligence</i>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/1994mlns.book.....M">1994mlns.book.....M</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Ellis+Horwood+Series+in+Artificial+Intelligence&rft.atitle=Machine+Learning%2C+Neural+and+Statistical+Classification&rft.date=1994&rft_id=info%3Abibcode%2F1994mlns.book.....M&rft.aulast=Michie&rft.aufirst=D.&rft.au=Spiegelhalter%2C+D.+J.&rft.au=Taylor%2C+C.+C.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-94"><span class="mw-cite-backlink"><b><a href="#cite_ref-94">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFZhangZhanLinChen2011" class="citation journal cs1">Zhang, Jun; Zhan, Zhi-hui; Lin, Ying; Chen, Ni; Gong, Yue-jiao; Zhong, Jing-hui; Chung, Henry S.H.; Li, Yun; Shi, Yu-hui (2011). "Evolutionary Computation Meets Machine Learning: A Survey". <i>Computational Intelligence Magazine</i>. <b>6</b> (4): 68–75. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2Fmci.2011.942584">10.1109/mci.2011.942584</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:6760276">6760276</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Computational+Intelligence+Magazine&rft.atitle=Evolutionary+Computation+Meets+Machine+Learning%3A+A+Survey&rft.volume=6&rft.issue=4&rft.pages=68-75&rft.date=2011&rft_id=info%3Adoi%2F10.1109%2Fmci.2011.942584&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A6760276%23id-name%3DS2CID&rft.aulast=Zhang&rft.aufirst=Jun&rft.au=Zhan%2C+Zhi-hui&rft.au=Lin%2C+Ying&rft.au=Chen%2C+Ni&rft.au=Gong%2C+Yue-jiao&rft.au=Zhong%2C+Jing-hui&rft.au=Chung%2C+Henry+S.H.&rft.au=Li%2C+Yun&rft.au=Shi%2C+Yu-hui&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-95"><span class="mw-cite-backlink"><b><a href="#cite_ref-95">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://ai.googleblog.com/2017/04/federated-learning-collaborative.html">"Federated Learning: Collaborative Machine Learning without Centralized Training Data"</a>. <i>Google AI Blog</i>. 6 April 2017. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190607054623/https://ai.googleblog.com/2017/04/federated-learning-collaborative.html">Archived</a> from the original on 2019-06-07<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-06-08</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Google+AI+Blog&rft.atitle=Federated+Learning%3A+Collaborative+Machine+Learning+without+Centralized+Training+Data&rft.date=2017-04-06&rft_id=http%3A%2F%2Fai.googleblog.com%2F2017%2F04%2Ffederated-learning-collaborative.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-96"><span class="mw-cite-backlink"><b><a href="#cite_ref-96">^</a></b></span> <span class="reference-text">Machine learning is included in the <a href="/wiki/Chartered_Financial_Analyst_(CFA)#Curriculum" class="mw-redirect" title="Chartered Financial Analyst (CFA)">CFA Curriculum</a> (discussion is top-down); see: <a rel="nofollow" class="external text" href="https://www.cfainstitute.org/-/media/documents/study-session/2020-l2-ss3.ashx">Kathleen DeRose and Christophe Le Lanno (2020). "Machine Learning"</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200113085425/https://www.cfainstitute.org/-/media/documents/study-session/2020-l2-ss3.ashx">Archived</a> 2020-01-13 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>.</span> </li> <li id="cite_note-97"><span class="mw-cite-backlink"><b><a href="#cite_ref-97">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFIvanenkoSmolikWantaMidura2023" class="citation journal cs1">Ivanenko, Mikhail; Smolik, Waldemar T.; Wanta, Damian; Midura, Mateusz; Wróblewski, Przemysław; Hou, Xiaohan; Yan, Xiaoheng (2023). 
<a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10538128">"Image Reconstruction Using Supervised Learning in Wearable Electrical Impedance Tomography of the Thorax"</a>. <i>Sensors</i>. <b>23</b> (18): 7774. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2023Senso..23.7774I">2023Senso..23.7774I</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Fs23187774">10.3390/s23187774</a></span>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10538128">10538128</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/37765831">37765831</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Sensors&rft.atitle=Image+Reconstruction+Using+Supervised+Learning+in+Wearable+Electrical+Impedance+Tomography+of+the+Thorax&rft.volume=23&rft.issue=18&rft.pages=7774&rft.date=2023&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC10538128%23id-name%3DPMC&rft_id=info%3Apmid%2F37765831&rft_id=info%3Adoi%2F10.3390%2Fs23187774&rft_id=info%3Abibcode%2F2023Senso..23.7774I&rft.aulast=Ivanenko&rft.aufirst=Mikhail&rft.au=Smolik%2C+Waldemar+T.&rft.au=Wanta%2C+Damian&rft.au=Midura%2C+Mateusz&rft.au=Wr%C3%B3blewski%2C+Przemys%C5%82aw&rft.au=Hou%2C+Xiaohan&rft.au=Yan%2C+Xiaoheng&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC10538128&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-98"><span class="mw-cite-backlink"><b><a href="#cite_ref-98">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20151110062742/http://www2.research.att.com/~volinsky/netflix/">"BelKor Home Page"</a> research.att.com</span> </li> <li id="cite_note-99"><span class="mw-cite-backlink"><b><a href="#cite_ref-99">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20160531002916/http://techblog.netflix.com/2012/04/netflix-recommendations-beyond-5-stars.html">"The Netflix Tech Blog: Netflix Recommendations: Beyond the 5 stars (Part 1)"</a>. 2012-04-06. Archived from <a rel="nofollow" class="external text" href="http://techblog.netflix.com/2012/04/netflix-recommendations-beyond-5-stars.html">the original</a> on 31 May 2016<span class="reference-accessdate">. 
Retrieved <span class="nowrap">8 August</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=The+Netflix+Tech+Blog%3A+Netflix+Recommendations%3A+Beyond+the+5+stars+%28Part+1%29&rft.date=2012-04-06&rft_id=http%3A%2F%2Ftechblog.netflix.com%2F2012%2F04%2Fnetflix-recommendations-beyond-5-stars.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-100"><span class="mw-cite-backlink"><b><a href="#cite_ref-100">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFScott_Patterson2010" class="citation web cs1">Scott Patterson (13 July 2010). <a rel="nofollow" class="external text" href="https://www.wsj.com/articles/SB10001424052748703834604575365310813948080">"Letting the Machines Decide"</a>. <a href="/wiki/The_Wall_Street_Journal" title="The Wall Street Journal">The Wall Street Journal</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180624151019/https://www.wsj.com/articles/SB10001424052748703834604575365310813948080">Archived</a> from the original on 24 June 2018<span class="reference-accessdate">. Retrieved <span class="nowrap">24 June</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Letting+the+Machines+Decide&rft.pub=The+Wall+Street+Journal&rft.date=2010-07-13&rft.au=Scott+Patterson&rft_id=https%3A%2F%2Fwww.wsj.com%2Farticles%2FSB10001424052748703834604575365310813948080&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-101"><span class="mw-cite-backlink"><b><a href="#cite_ref-101">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFVinod_Khosla2012" class="citation web cs1">Vinod Khosla (January 10, 2012). <a rel="nofollow" class="external text" href="https://techcrunch.com/2012/01/10/doctors-or-algorithms/">"Do We Need Doctors or Algorithms?"</a>. Tech Crunch. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180618175811/https://techcrunch.com/2012/01/10/doctors-or-algorithms/">Archived</a> from the original on June 18, 2018<span class="reference-accessdate">. 
Retrieved <span class="nowrap">October 20,</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Do+We+Need+Doctors+or+Algorithms%3F&rft.pub=Tech+Crunch&rft.date=2012-01-10&rft.au=Vinod+Khosla&rft_id=https%3A%2F%2Ftechcrunch.com%2F2012%2F01%2F10%2Fdoctors-or-algorithms%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-102"><span class="mw-cite-backlink"><b><a href="#cite_ref-102">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="https://medium.com/the-physics-arxiv-blog/when-a-machine-learning-algorithm-studied-fine-art-paintings-it-saw-things-art-historians-had-never-b8e4e7bf7d3e">When A Machine Learning Algorithm Studied Fine Art Paintings, It Saw Things Art Historians Had Never Noticed</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160604072143/https://medium.com/the-physics-arxiv-blog/when-a-machine-learning-algorithm-studied-fine-art-paintings-it-saw-things-art-historians-had-never-b8e4e7bf7d3e">Archived</a> 2016-06-04 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>, <i>The Physics at <a href="/wiki/ArXiv" title="ArXiv">ArXiv</a> blog</i></span> </li> <li id="cite_note-103"><span class="mw-cite-backlink"><b><a href="#cite_ref-103">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFVincent2019" class="citation web cs1">Vincent, James (2019-04-10). <a rel="nofollow" class="external text" href="https://www.theverge.com/2019/4/10/18304558/ai-writing-academic-research-book-springer-nature-artificial-intelligence">"The first AI-generated textbook shows what robot writers are actually good at"</a>. <i>The Verge</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190505200409/https://www.theverge.com/2019/4/10/18304558/ai-writing-academic-research-book-springer-nature-artificial-intelligence">Archived</a> from the original on 2019-05-05<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-05-05</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=The+Verge&rft.atitle=The+first+AI-generated+textbook+shows+what+robot+writers+are+actually+good+at&rft.date=2019-04-10&rft.aulast=Vincent&rft.aufirst=James&rft_id=https%3A%2F%2Fwww.theverge.com%2F2019%2F4%2F10%2F18304558%2Fai-writing-academic-research-book-springer-nature-artificial-intelligence&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-104"><span class="mw-cite-backlink"><b><a href="#cite_ref-104">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFVaishyaJavaidKhanHaleem2020" class="citation journal cs1">Vaishya, Raju; Javaid, Mohd; Khan, Ibrahim Haleem; Haleem, Abid (July 1, 2020). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7195043">"Artificial Intelligence (AI) applications for COVID-19 pandemic"</a>. <i>Diabetes & Metabolic Syndrome: Clinical Research & Reviews</i>. <b>14</b> (4): 337–339. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.dsx.2020.04.012">10.1016/j.dsx.2020.04.012</a></span>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7195043">7195043</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/32305024">32305024</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Diabetes+%26+Metabolic+Syndrome%3A+Clinical+Research+%26+Reviews&rft.atitle=Artificial+Intelligence+%28AI%29+applications+for+COVID-19+pandemic&rft.volume=14&rft.issue=4&rft.pages=337-339&rft.date=2020-07-01&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7195043%23id-name%3DPMC&rft_id=info%3Apmid%2F32305024&rft_id=info%3Adoi%2F10.1016%2Fj.dsx.2020.04.012&rft.aulast=Vaishya&rft.aufirst=Raju&rft.au=Javaid%2C+Mohd&rft.au=Khan%2C+Ibrahim+Haleem&rft.au=Haleem%2C+Abid&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7195043&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-105"><span class="mw-cite-backlink"><b><a href="#cite_ref-105">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRezapouraghdamAkhshikRamkissoon2021" class="citation journal cs1">Rezapouraghdam, Hamed; Akhshik, Arash; Ramkissoon, Haywantee (March 10, 2021). <a rel="nofollow" class="external text" href="https://doi.org/10.1080%2F09669582.2021.1887878">"Application of machine learning to predict visitors' green behavior in marine protected areas: evidence from Cyprus"</a>. <i>Journal of Sustainable Tourism</i>. <b>31</b> (11): 2479–2505. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1080%2F09669582.2021.1887878">10.1080/09669582.2021.1887878</a></span>. 
<a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://hdl.handle.net/10037%2F24073">10037/24073</a></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Journal+of+Sustainable+Tourism&rft.atitle=Application+of+machine+learning+to+predict+visitors%27+green+behavior+in+marine+protected+areas%3A+evidence+from+Cyprus&rft.volume=31&rft.issue=11&rft.pages=2479-2505&rft.date=2021-03-10&rft_id=info%3Ahdl%2F10037%2F24073&rft_id=info%3Adoi%2F10.1080%2F09669582.2021.1887878&rft.aulast=Rezapouraghdam&rft.aufirst=Hamed&rft.au=Akhshik%2C+Arash&rft.au=Ramkissoon%2C+Haywantee&rft_id=https%3A%2F%2Fdoi.org%2F10.1080%252F09669582.2021.1887878&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-106"><span class="mw-cite-backlink"><b><a href="#cite_ref-106">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDeySinghWangMcDonald-Maier2020" class="citation book cs1">Dey, Somdip; Singh, Amit Kumar; Wang, Xiaohang; McDonald-Maier, Klaus (2020-06-15). <a rel="nofollow" class="external text" href="https://ieeexplore.ieee.org/document/9116294">"User Interaction Aware Reinforcement Learning for Power and Thermal Efficiency of CPU-GPU Mobile MPSoCs"</a>. <a rel="nofollow" class="external text" href="http://repository.essex.ac.uk/27546/1/User%20Interaction%20Aware%20Reinforcement%20Learning.pdf"><i>2020 Design, Automation & Test in Europe Conference & Exhibition (DATE)</i></a> <span class="cs1-format">(PDF)</span>. pp. 1728–1733. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.23919%2FDATE48585.2020.9116294">10.23919/DATE48585.2020.9116294</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3-9819263-4-7" title="Special:BookSources/978-3-9819263-4-7"><bdi>978-3-9819263-4-7</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:219858480">219858480</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20211213192526/https://ieeexplore.ieee.org/document/9116294/">Archived</a> from the original on 2021-12-13<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2022-01-20</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=User+Interaction+Aware+Reinforcement+Learning+for+Power+and+Thermal+Efficiency+of+CPU-GPU+Mobile+MPSoCs&rft.btitle=2020+Design%2C+Automation+%26+Test+in+Europe+Conference+%26+Exhibition+%28DATE%29&rft.pages=1728-1733&rft.date=2020-06-15&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A219858480%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.23919%2FDATE48585.2020.9116294&rft.isbn=978-3-9819263-4-7&rft.aulast=Dey&rft.aufirst=Somdip&rft.au=Singh%2C+Amit+Kumar&rft.au=Wang%2C+Xiaohang&rft.au=McDonald-Maier%2C+Klaus&rft_id=https%3A%2F%2Fieeexplore.ieee.org%2Fdocument%2F9116294&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-107"><span class="mw-cite-backlink"><b><a href="#cite_ref-107">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFQuested" class="citation news cs1">Quested, Tony. <a rel="nofollow" class="external text" href="https://www.businessweekly.co.uk/news/academia-research/smartphones-get-smarter-essex-innovation">"Smartphones get smarter with Essex innovation"</a>. <i>Business Weekly</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210624200126/https://www.businessweekly.co.uk/news/academia-research/smartphones-get-smarter-essex-innovation">Archived</a> from the original on 2021-06-24<span class="reference-accessdate">. Retrieved <span class="nowrap">2021-06-17</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Business+Weekly&rft.atitle=Smartphones+get+smarter+with+Essex+innovation&rft.aulast=Quested&rft.aufirst=Tony&rft_id=https%3A%2F%2Fwww.businessweekly.co.uk%2Fnews%2Facademia-research%2Fsmartphones-get-smarter-essex-innovation&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-108"><span class="mw-cite-backlink"><b><a href="#cite_ref-108">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWilliams2020" class="citation news cs1">Williams, Rhiannon (2020-07-21). <a rel="nofollow" class="external text" href="https://inews.co.uk/news/technology/future-smartphones-prolong-battery-life-monitoring-behaviour-558689">"Future smartphones 'will prolong their own battery life by monitoring owners' behaviour'<span class="cs1-kern-right"></span>"</a>. <i><a href="/wiki/I_(newspaper)" title="I (newspaper)">i</a></i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210624201153/https://inews.co.uk/news/technology/future-smartphones-prolong-battery-life-monitoring-behaviour-558689">Archived</a> from the original on 2021-06-24<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2021-06-17</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=i&rft.atitle=Future+smartphones+%27will+prolong+their+own+battery+life+by+monitoring+owners%27+behaviour%27&rft.date=2020-07-21&rft.aulast=Williams&rft.aufirst=Rhiannon&rft_id=https%3A%2F%2Finews.co.uk%2Fnews%2Ftechnology%2Ffuture-smartphones-prolong-battery-life-monitoring-behaviour-558689&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-109"><span class="mw-cite-backlink"><b><a href="#cite_ref-109">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRasekhschaffeJones2019" class="citation journal cs1">Rasekhschaffe, Keywan Christian; Jones, Robert C. (2019-07-01). <a rel="nofollow" class="external text" href="https://www.tandfonline.com/doi/full/10.1080/0015198X.2019.1596678">"Machine Learning for Stock Selection"</a>. <i>Financial Analysts Journal</i>. <b>75</b> (3): 70–88. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1080%2F0015198X.2019.1596678">10.1080/0015198X.2019.1596678</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0015-198X">0015-198X</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:108312507">108312507</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20231126160605/https://www.tandfonline.com/doi/full/10.1080/0015198X.2019.1596678">Archived</a> from the original on 2023-11-26<span class="reference-accessdate">. Retrieved <span class="nowrap">2023-11-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Financial+Analysts+Journal&rft.atitle=Machine+Learning+for+Stock+Selection&rft.volume=75&rft.issue=3&rft.pages=70-88&rft.date=2019-07-01&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A108312507%23id-name%3DS2CID&rft.issn=0015-198X&rft_id=info%3Adoi%2F10.1080%2F0015198X.2019.1596678&rft.aulast=Rasekhschaffe&rft.aufirst=Keywan+Christian&rft.au=Jones%2C+Robert+C.&rft_id=https%3A%2F%2Fwww.tandfonline.com%2Fdoi%2Ffull%2F10.1080%2F0015198X.2019.1596678&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-110"><span class="mw-cite-backlink"><b><a href="#cite_ref-110">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFChungGreen2024" class="citation journal cs1">Chung, Yunsie; Green, William H. (2024). <a rel="nofollow" class="external text" href="https://xlink.rsc.org/?DOI=D3SC05353A">"Machine learning from quantum chemistry to predict experimental solvent effects on reaction rates"</a>. <i>Chemical Science</i>. <b>15</b> (7): 2410–2424. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1039%2FD3SC05353A">10.1039/D3SC05353A</a>. 
<a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2041-6520">2041-6520</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10866337">10866337</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/38362410">38362410</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519121547/https://pubs.rsc.org/en/content/articlelanding/2024/sc/d3sc05353a">Archived</a> from the original on 2024-05-19<span class="reference-accessdate">. Retrieved <span class="nowrap">2024-04-21</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Chemical+Science&rft.atitle=Machine+learning+from+quantum+chemistry+to+predict+experimental+solvent+effects+on+reaction+rates&rft.volume=15&rft.issue=7&rft.pages=2410-2424&rft.date=2024&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC10866337%23id-name%3DPMC&rft.issn=2041-6520&rft_id=info%3Apmid%2F38362410&rft_id=info%3Adoi%2F10.1039%2FD3SC05353A&rft.aulast=Chung&rft.aufirst=Yunsie&rft.au=Green%2C+William+H.&rft_id=https%3A%2F%2Fxlink.rsc.org%2F%3FDOI%3DD3SC05353A&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-111"><span class="mw-cite-backlink"><b><a href="#cite_ref-111">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSunHuangZhao2024" class="citation journal cs1">Sun, Yuran; Huang, Shih-Kai; Zhao, Xilei (2024-02-01). <a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs13753-024-00541-1">"Predicting Hurricane Evacuation Decisions with Interpretable Machine Learning Methods"</a>. <i>International Journal of Disaster Risk Science</i>. <b>15</b> (1): 134–148. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2303.06557">2303.06557</a></span>. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2024IJDRS..15..134S">2024IJDRS..15..134S</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs13753-024-00541-1">10.1007/s13753-024-00541-1</a></span>. 
<a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2192-6395">2192-6395</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=International+Journal+of+Disaster+Risk+Science&rft.atitle=Predicting+Hurricane+Evacuation+Decisions+with+Interpretable+Machine+Learning+Methods&rft.volume=15&rft.issue=1&rft.pages=134-148&rft.date=2024-02-01&rft_id=info%3Aarxiv%2F2303.06557&rft.issn=2192-6395&rft_id=info%3Adoi%2F10.1007%2Fs13753-024-00541-1&rft_id=info%3Abibcode%2F2024IJDRS..15..134S&rft.aulast=Sun&rft.aufirst=Yuran&rft.au=Huang%2C+Shih-Kai&rft.au=Zhao%2C+Xilei&rft_id=https%3A%2F%2Fdoi.org%2F10.1007%252Fs13753-024-00541-1&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-112"><span class="mw-cite-backlink"><b><a href="#cite_ref-112">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSunZhaoLovreglioKuligowski2024" class="citation cs2">Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (2024-01-01), Naser, M. Z. (ed.), <a rel="nofollow" class="external text" href="https://www.sciencedirect.com/science/article/pii/B9780128240731000149">"8 - AI for large-scale evacuation modeling: promises and challenges"</a>, <i>Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure</i>, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-12-824073-1" title="Special:BookSources/978-0-12-824073-1"><bdi>978-0-12-824073-1</bdi></a>, <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519121547/https://www.sciencedirect.com/science/article/abs/pii/B9780128240731000149">archived</a> from the original on 2024-05-19<span class="reference-accessdate">, retrieved <span class="nowrap">2024-05-19</span></span></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Interpretable+Machine+Learning+for+the+Analysis%2C+Design%2C+Assessment%2C+and+Informed+Decision+Making+for+Civil+Infrastructure&rft.atitle=8+-+AI+for+large-scale+evacuation+modeling%3A+promises+and+challenges&rft.pages=185-204&rft.date=2024-01-01&rft.isbn=978-0-12-824073-1&rft.aulast=Sun&rft.aufirst=Yuran&rft.au=Zhao%2C+Xilei&rft.au=Lovreglio%2C+Ruggiero&rft.au=Kuligowski%2C+Erica&rft_id=https%3A%2F%2Fwww.sciencedirect.com%2Fscience%2Farticle%2Fpii%2FB9780128240731000149&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-113"><span class="mw-cite-backlink"><b><a href="#cite_ref-113">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFXuLovreglioKuligowskiCova2023" class="citation journal cs1">Xu, Ningzhe; Lovreglio, Ruggiero; Kuligowski, Erica D.; Cova, Thomas J.; Nilsson, Daniel; Zhao, Xilei (2023-03-01). <a rel="nofollow" class="external text" href="https://doi.org/10.1007/s10694-023-01363-1">"Predicting and Assessing Wildfire Evacuation Decision-Making Using Machine Learning: Findings from the 2019 Kincade Fire"</a>. 
<i>Fire Technology</i>. <b>59</b> (2): 793–825. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2Fs10694-023-01363-1">10.1007/s10694-023-01363-1</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1572-8099">1572-8099</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519121534/https://link.springer.com/article/10.1007/s10694-023-01363-1">Archived</a> from the original on 2024-05-19<span class="reference-accessdate">. Retrieved <span class="nowrap">2024-05-19</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Fire+Technology&rft.atitle=Predicting+and+Assessing+Wildfire+Evacuation+Decision-Making+Using+Machine+Learning%3A+Findings+from+the+2019+Kincade+Fire&rft.volume=59&rft.issue=2&rft.pages=793-825&rft.date=2023-03-01&rft_id=info%3Adoi%2F10.1007%2Fs10694-023-01363-1&rft.issn=1572-8099&rft.aulast=Xu&rft.aufirst=Ningzhe&rft.au=Lovreglio%2C+Ruggiero&rft.au=Kuligowski%2C+Erica+D.&rft.au=Cova%2C+Thomas+J.&rft.au=Nilsson%2C+Daniel&rft.au=Zhao%2C+Xilei&rft_id=https%3A%2F%2Fdoi.org%2F10.1007%2Fs10694-023-01363-1&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-114"><span class="mw-cite-backlink"><b><a href="#cite_ref-114">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWangShiGohQian2019" class="citation journal cs1">Wang, Ke; Shi, Xiupeng; Goh, Algena Pei Xuan; Qian, Shunzhi (2019-06-01). <a rel="nofollow" class="external text" href="https://www.sciencedirect.com/science/article/pii/S037971121830376X">"A machine learning based study on pedestrian movement dynamics under emergency evacuation"</a>. <i>Fire Safety Journal</i>. <b>106</b>: 163–176. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2019FirSJ.106..163W">2019FirSJ.106..163W</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.firesaf.2019.04.008">10.1016/j.firesaf.2019.04.008</a>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://hdl.handle.net/10356%2F143390">10356/143390</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0379-7112">0379-7112</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519121539/https://www.sciencedirect.com/science/article/abs/pii/S037971121830376X">Archived</a> from the original on 2024-05-19<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2024-05-19</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Fire+Safety+Journal&rft.atitle=A+machine+learning+based+study+on+pedestrian+movement+dynamics+under+emergency+evacuation&rft.volume=106&rft.pages=163-176&rft.date=2019-06-01&rft_id=info%3Ahdl%2F10356%2F143390&rft.issn=0379-7112&rft_id=info%3Adoi%2F10.1016%2Fj.firesaf.2019.04.008&rft_id=info%3Abibcode%2F2019FirSJ.106..163W&rft.aulast=Wang&rft.aufirst=Ke&rft.au=Shi%2C+Xiupeng&rft.au=Goh%2C+Algena+Pei+Xuan&rft.au=Qian%2C+Shunzhi&rft_id=https%3A%2F%2Fwww.sciencedirect.com%2Fscience%2Farticle%2Fpii%2FS037971121830376X&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-115"><span class="mw-cite-backlink"><b><a href="#cite_ref-115">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFZhaoLovreglioNilsson2020" class="citation journal cs1">Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (2020-05-01). <a rel="nofollow" class="external text" href="https://www.sciencedirect.com/science/article/pii/S0926580519313184">"Modelling and interpreting pre-evacuation decision-making using machine learning"</a>. <i>Automation in Construction</i>. <b>113</b>: 103140. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.autcon.2020.103140">10.1016/j.autcon.2020.103140</a>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<a rel="nofollow" class="external text" href="https://hdl.handle.net/10179%2F17315">10179/17315</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0926-5805">0926-5805</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519121548/https://www.sciencedirect.com/science/article/abs/pii/S0926580519313184">Archived</a> from the original on 2024-05-19<span class="reference-accessdate">. Retrieved <span class="nowrap">2024-05-19</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Automation+in+Construction&rft.atitle=Modelling+and+interpreting+pre-evacuation+decision-making+using+machine+learning&rft.volume=113&rft.pages=103140&rft.date=2020-05-01&rft_id=info%3Ahdl%2F10179%2F17315&rft.issn=0926-5805&rft_id=info%3Adoi%2F10.1016%2Fj.autcon.2020.103140&rft.aulast=Zhao&rft.aufirst=Xilei&rft.au=Lovreglio%2C+Ruggiero&rft.au=Nilsson%2C+Daniel&rft_id=https%3A%2F%2Fwww.sciencedirect.com%2Fscience%2Farticle%2Fpii%2FS0926580519313184&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-116"><span class="mw-cite-backlink"><b><a href="#cite_ref-116">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20170320225010/https://www.bloomberg.com/news/articles/2016-11-10/why-machine-learning-models-often-fail-to-learn-quicktake-q-a">"Why Machine Learning Models Often Fail to Learn: QuickTake Q&A"</a>. <i>Bloomberg.com</i>. 2016-11-10. 
Archived from <a rel="nofollow" class="external text" href="https://www.bloomberg.com/news/articles/2016-11-10/why-machine-learning-models-often-fail-to-learn-quicktake-q-a">the original</a> on 2017-03-20<span class="reference-accessdate">. Retrieved <span class="nowrap">2017-04-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Bloomberg.com&rft.atitle=Why+Machine+Learning+Models+Often+Fail+to+Learn%3A+QuickTake+Q%26A&rft.date=2016-11-10&rft_id=https%3A%2F%2Fwww.bloomberg.com%2Fnews%2Farticles%2F2016-11-10%2Fwhy-machine-learning-models-often-fail-to-learn-quicktake-q-a&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-117"><span class="mw-cite-backlink"><b><a href="#cite_ref-117">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://hbr.org/2017/04/the-first-wave-of-corporate-ai-is-doomed-to-fail">"The First Wave of Corporate AI Is Doomed to Fail"</a>. <i>Harvard Business Review</i>. 2017-04-18. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180821032004/https://hbr.org/2017/04/the-first-wave-of-corporate-ai-is-doomed-to-fail">Archived</a> from the original on 2018-08-21<span class="reference-accessdate">. Retrieved <span class="nowrap">2018-08-20</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Harvard+Business+Review&rft.atitle=The+First+Wave+of+Corporate+AI+Is+Doomed+to+Fail&rft.date=2017-04-18&rft_id=https%3A%2F%2Fhbr.org%2F2017%2F04%2Fthe-first-wave-of-corporate-ai-is-doomed-to-fail&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-118"><span class="mw-cite-backlink"><b><a href="#cite_ref-118">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://venturebeat.com/2016/09/17/why-the-a-i-euphoria-is-doomed-to-fail/">"Why the A.I. euphoria is doomed to fail"</a>. <i>VentureBeat</i>. 2016-09-18. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180819124138/https://venturebeat.com/2016/09/17/why-the-a-i-euphoria-is-doomed-to-fail/">Archived</a> from the original on 2018-08-19<span class="reference-accessdate">. Retrieved <span class="nowrap">2018-08-20</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=VentureBeat&rft.atitle=Why+the+A.I.+euphoria+is+doomed+to+fail&rft.date=2016-09-18&rft_id=https%3A%2F%2Fventurebeat.com%2F2016%2F09%2F17%2Fwhy-the-a-i-euphoria-is-doomed-to-fail%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-119"><span class="mw-cite-backlink"><b><a href="#cite_ref-119">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.kdnuggets.com/2018/07/why-machine-learning-project-fail.html">"9 Reasons why your machine learning project will fail"</a>. <i>www.kdnuggets.com</i>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20180821031802/https://www.kdnuggets.com/2018/07/why-machine-learning-project-fail.html">Archived</a> from the original on 2018-08-21<span class="reference-accessdate">. Retrieved <span class="nowrap">2018-08-20</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=www.kdnuggets.com&rft.atitle=9+Reasons+why+your+machine+learning+project+will+fail&rft_id=https%3A%2F%2Fwww.kdnuggets.com%2F2018%2F07%2Fwhy-machine-learning-project-fail.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-Babuta-2018-120"><span class="mw-cite-backlink">^ <a href="#cite_ref-Babuta-2018_120-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Babuta-2018_120-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBabutaOswaldRinik2018" class="citation report cs1">Babuta, Alexander; Oswald, Marion; Rinik, Christine (2018). <a rel="nofollow" class="external text" href="https://www.jstor.org/stable/resrep37375.8">Transparency and Intelligibility</a> (Report). Royal United Services Institute (RUSI). pp. 17–22. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20231209002929/https://www.jstor.org/stable/resrep37375.8">Archived</a> from the original on 2023-12-09<span class="reference-accessdate">. Retrieved <span class="nowrap">2023-12-09</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=report&rft.btitle=Transparency+and+Intelligibility&rft.pages=17-22&rft.pub=Royal+United+Services+Institute+%28RUSI%29&rft.date=2018&rft.aulast=Babuta&rft.aufirst=Alexander&rft.au=Oswald%2C+Marion&rft.au=Rinik%2C+Christine&rft_id=https%3A%2F%2Fwww.jstor.org%2Fstable%2Fresrep37375.8&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-121"><span class="mw-cite-backlink"><b><a href="#cite_ref-121">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.economist.com/the-economist-explains/2018/05/29/why-ubers-self-driving-car-killed-a-pedestrian">"Why Uber's self-driving car killed a pedestrian"</a>. <i>The Economist</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180821031818/https://www.economist.com/the-economist-explains/2018/05/29/why-ubers-self-driving-car-killed-a-pedestrian">Archived</a> from the original on 2018-08-21<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2018-08-20</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+Economist&rft.atitle=Why+Uber%27s+self-driving+car+killed+a+pedestrian&rft_id=https%3A%2F%2Fwww.economist.com%2Fthe-economist-explains%2F2018%2F05%2F29%2Fwhy-ubers-self-driving-car-killed-a-pedestrian&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-122"><span class="mw-cite-backlink"><b><a href="#cite_ref-122">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.statnews.com/2018/07/25/ibm-watson-recommended-unsafe-incorrect-treatments/">"IBM's Watson recommended 'unsafe and incorrect' cancer treatments – STAT"</a>. <i>STAT</i>. 2018-07-25. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180821062616/https://www.statnews.com/2018/07/25/ibm-watson-recommended-unsafe-incorrect-treatments/">Archived</a> from the original on 2018-08-21<span class="reference-accessdate">. Retrieved <span class="nowrap">2018-08-21</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=STAT&rft.atitle=IBM%27s+Watson+recommended+%27unsafe+and+incorrect%27+cancer+treatments+%E2%80%93+STAT&rft.date=2018-07-25&rft_id=https%3A%2F%2Fwww.statnews.com%2F2018%2F07%2F25%2Fibm-watson-recommended-unsafe-incorrect-treatments%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-123"><span class="mw-cite-backlink"><b><a href="#cite_ref-123">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHernandezGreenwald2018" class="citation news cs1">Hernandez, Daniela; Greenwald, Ted (2018-08-11). <a rel="nofollow" class="external text" href="https://www.wsj.com/articles/ibm-bet-billions-that-watson-could-improve-cancer-treatment-it-hasnt-worked-1533961147">"IBM Has a Watson Dilemma"</a>. <i><a href="/wiki/The_Wall_Street_Journal" title="The Wall Street Journal">The Wall Street Journal</a></i>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0099-9660">0099-9660</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180821031906/https://www.wsj.com/articles/ibm-bet-billions-that-watson-could-improve-cancer-treatment-it-hasnt-worked-1533961147">Archived</a> from the original on 2018-08-21<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2018-08-21</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+Wall+Street+Journal&rft.atitle=IBM+Has+a+Watson+Dilemma&rft.date=2018-08-11&rft.issn=0099-9660&rft.aulast=Hernandez&rft.aufirst=Daniela&rft.au=Greenwald%2C+Ted&rft_id=https%3A%2F%2Fwww.wsj.com%2Farticles%2Fibm-bet-billions-that-watson-could-improve-cancer-treatment-it-hasnt-worked-1533961147&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-124"><span class="mw-cite-backlink"><b><a href="#cite_ref-124">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAllyn2023" class="citation web cs1">Allyn, Bobby (Feb 27, 2023). <a rel="nofollow" class="external text" href="https://www.npr.org/2023/02/27/1159630243/how-microsofts-experiment-in-artificial-intelligence-tech-backfired">"How Microsoft's experiment in artificial intelligence tech backfired"</a>. <i>National Public Radio</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20231208234056/https://www.npr.org/2023/02/27/1159630243/how-microsofts-experiment-in-artificial-intelligence-tech-backfired">Archived</a> from the original on December 8, 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">Dec 8,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=National+Public+Radio&rft.atitle=How+Microsoft%27s+experiment+in+artificial+intelligence+tech+backfired&rft.date=2023-02-27&rft.aulast=Allyn&rft.aufirst=Bobby&rft_id=https%3A%2F%2Fwww.npr.org%2F2023%2F02%2F27%2F1159630243%2Fhow-microsofts-experiment-in-artificial-intelligence-tech-backfired&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-125"><span class="mw-cite-backlink"><b><a href="#cite_ref-125">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFReddyPatelWeyrichFenton2020" class="citation journal cs1">Reddy, Shivani M.; Patel, Sheila; Weyrich, Meghan; Fenton, Joshua; Viswanathan, Meera (2020). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7574591">"Comparison of a traditional systematic review approach with review-of-reviews and semi-automation as strategies to update the evidence"</a>. <i>Systematic Reviews</i>. <b>9</b> (1): 243. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1186%2Fs13643-020-01450-2">10.1186/s13643-020-01450-2</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2046-4053">2046-4053</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7574591">7574591</a></span>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/33076975">33076975</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Systematic+Reviews&rft.atitle=Comparison+of+a+traditional+systematic+review+approach+with+review-of-reviews+and+semi-automation+as+strategies+to+update+the+evidence&rft.volume=9&rft.issue=1&rft.pages=243&rft.date=2020&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7574591%23id-name%3DPMC&rft.issn=2046-4053&rft_id=info%3Apmid%2F33076975&rft_id=info%3Adoi%2F10.1186%2Fs13643-020-01450-2&rft.aulast=Reddy&rft.aufirst=Shivani+M.&rft.au=Patel%2C+Sheila&rft.au=Weyrich%2C+Meghan&rft.au=Fenton%2C+Joshua&rft.au=Viswanathan%2C+Meera&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC7574591&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-Garcia-2016-126"><span class="mw-cite-backlink">^ <a href="#cite_ref-Garcia-2016_126-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Garcia-2016_126-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGarcia2016" class="citation journal cs1">Garcia, Megan (2016). "Racist in the Machine". <i>World Policy Journal</i>. <b>33</b> (4): 111–117. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1215%2F07402775-3813015">10.1215/07402775-3813015</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0740-2775">0740-2775</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:151595343">151595343</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=World+Policy+Journal&rft.atitle=Racist+in+the+Machine&rft.volume=33&rft.issue=4&rft.pages=111-117&rft.date=2016&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A151595343%23id-name%3DS2CID&rft.issn=0740-2775&rft_id=info%3Adoi%2F10.1215%2F07402775-3813015&rft.aulast=Garcia&rft.aufirst=Megan&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-127"><span class="mw-cite-backlink"><b><a href="#cite_ref-127">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCaliskanBrysonNarayanan2017" class="citation journal cs1">Caliskan, Aylin; Bryson, Joanna J.; Narayanan, Arvind (2017-04-14). "Semantics derived automatically from language corpora contain human-like biases". <i>Science</i>. <b>356</b> (6334): 183–186. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1608.07187">1608.07187</a></span>. 
<a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/2017Sci...356..183C">2017Sci...356..183C</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1126%2Fscience.aal4230">10.1126/science.aal4230</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0036-8075">0036-8075</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/28408601">28408601</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:23163324">23163324</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Science&rft.atitle=Semantics+derived+automatically+from+language+corpora+contain+human-like+biases&rft.volume=356&rft.issue=6334&rft.pages=183-186&rft.date=2017-04-14&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A23163324%23id-name%3DS2CID&rft_id=info%3Abibcode%2F2017Sci...356..183C&rft_id=info%3Aarxiv%2F1608.07187&rft.issn=0036-8075&rft_id=info%3Adoi%2F10.1126%2Fscience.aal4230&rft_id=info%3Apmid%2F28408601&rft.aulast=Caliskan&rft.aufirst=Aylin&rft.au=Bryson%2C+Joanna+J.&rft.au=Narayanan%2C+Arvind&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-128"><span class="mw-cite-backlink"><b><a href="#cite_ref-128">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWangDasgupta2016" class="citation cs2">Wang, Xinan; Dasgupta, Sanjoy (2016), Lee, D. D.; Sugiyama, M.; Luxburg, U. V.; Guyon, I. (eds.), <a rel="nofollow" class="external text" href="http://papers.nips.cc/paper/6227-an-algorithm-for-l1-nearest-neighbor-search-via-monotonic-embedding.pdf">"An algorithm for L1 nearest neighbor search via monotonic embedding"</a> <span class="cs1-format">(PDF)</span>, <i>Advances in Neural Information Processing Systems 29</i>, Curran Associates, Inc., pp. 
983–991, <a rel="nofollow" class="external text" href="https://web.archive.org/web/20170407051313/http://papers.nips.cc/paper/6227-an-algorithm-for-l1-nearest-neighbor-search-via-monotonic-embedding.pdf">archived</a> <span class="cs1-format">(PDF)</span> from the original on 2017-04-07<span class="reference-accessdate">, retrieved <span class="nowrap">2018-08-20</span></span></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Advances+in+Neural+Information+Processing+Systems+29&rft.atitle=An+algorithm+for+L1+nearest+neighbor+search+via+monotonic+embedding&rft.pages=983-991&rft.date=2016&rft.aulast=Wang&rft.aufirst=Xinan&rft.au=Dasgupta%2C+Sanjoy&rft_id=http%3A%2F%2Fpapers.nips.cc%2Fpaper%2F6227-an-algorithm-for-l1-nearest-neighbor-search-via-monotonic-embedding.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-Silva-2018-129"><span class="mw-cite-backlink">^ <a href="#cite_ref-Silva-2018_129-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Silva-2018_129-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSilvaKenney2018" class="citation journal cs1">Silva, Selena; Kenney, Martin (2018). <a rel="nofollow" class="external text" href="https://brie.berkeley.edu/sites/default/files/brie_wp_2018-3.pdf">"Algorithms, Platforms, and Ethnic Bias: An Integrative Essay"</a> <span class="cs1-format">(PDF)</span>. <i>Phylon</i>. <b>55</b> (1 & 2): 9–37. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0031-8906">0031-8906</a>. <a href="/wiki/JSTOR_(identifier)" class="mw-redirect" title="JSTOR (identifier)">JSTOR</a> <a rel="nofollow" class="external text" href="https://www.jstor.org/stable/26545017">26545017</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240127200319/https://brie.berkeley.edu/sites/default/files/brie_wp_2018-3.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on Jan 27, 2024.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Phylon&rft.atitle=Algorithms%2C+Platforms%2C+and+Ethnic+Bias%3A+An+Integrative+Essay&rft.volume=55&rft.issue=1+%26+2&rft.pages=9-37&rft.date=2018&rft_id=https%3A%2F%2Fwww.jstor.org%2Fstable%2F26545017%23id-name%3DJSTOR&rft.issn=0031-8906&rft.aulast=Silva&rft.aufirst=Selena&rft.au=Kenney%2C+Martin&rft_id=https%3A%2F%2Fbrie.berkeley.edu%2Fsites%2Fdefault%2Ffiles%2Fbrie_wp_2018-3.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-130"><span class="mw-cite-backlink"><b><a href="#cite_ref-130">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFVincent2018" class="citation news cs1">Vincent, James (Jan 12, 2018). <a rel="nofollow" class="external text" href="https://www.theverge.com/2018/1/12/16882408/google-racist-gorillas-photo-recognition-algorithm-ai">"Google 'fixed' its racist algorithm by removing gorillas from its image-labeling tech"</a>. <i>The Verge</i>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20180821031830/https://www.theverge.com/2018/1/12/16882408/google-racist-gorillas-photo-recognition-algorithm-ai">Archived</a> from the original on 2018-08-21<span class="reference-accessdate">. Retrieved <span class="nowrap">2018-08-20</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+Verge&rft.atitle=Google+%27fixed%27+its+racist+algorithm+by+removing+gorillas+from+its+image-labeling+tech&rft.date=2018-01-12&rft.aulast=Vincent&rft.aufirst=James&rft_id=https%3A%2F%2Fwww.theverge.com%2F2018%2F1%2F12%2F16882408%2Fgoogle-racist-gorillas-photo-recognition-algorithm-ai&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-131"><span class="mw-cite-backlink"><b><a href="#cite_ref-131">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCrawford2016" class="citation news cs1">Crawford, Kate (25 June 2016). <span class="id-lock-subscription" title="Paid subscription required"><a rel="nofollow" class="external text" href="https://www.nytimes.com/2016/06/26/opinion/sunday/artificial-intelligences-white-guy-problem.html">"Opinion | Artificial Intelligence's White Guy Problem"</a></span>. <i><a href="/wiki/New_York_Times" class="mw-redirect" title="New York Times">New York Times</a></i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210114220619/https://www.nytimes.com/2016/06/26/opinion/sunday/artificial-intelligences-white-guy-problem.html">Archived</a> from the original on 2021-01-14<span class="reference-accessdate">. Retrieved <span class="nowrap">2018-08-20</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=New+York+Times&rft.atitle=Opinion+%7C+Artificial+Intelligence%27s+White+Guy+Problem&rft.date=2016-06-25&rft.aulast=Crawford&rft.aufirst=Kate&rft_id=https%3A%2F%2Fwww.nytimes.com%2F2016%2F06%2F26%2Fopinion%2Fsunday%2Fartificial-intelligences-white-guy-problem.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-132"><span class="mw-cite-backlink"><b><a href="#cite_ref-132">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMetz2016" class="citation news cs1">Metz, Rachel (March 24, 2016). <span class="id-lock-limited" title="Free access subject to limited trial, subscription normally required"><a rel="nofollow" class="external text" href="https://www.technologyreview.com/s/601111/why-microsoft-accidentally-unleashed-a-neo-nazi-sexbot/">"Why Microsoft Accidentally Unleashed a Neo-Nazi Sexbot"</a></span>. <i>MIT Technology Review</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20181109023754/https://www.technologyreview.com/s/601111/why-microsoft-accidentally-unleashed-a-neo-nazi-sexbot/">Archived</a> from the original on 2018-11-09<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2018-08-20</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=MIT+Technology+Review&rft.atitle=Why+Microsoft+Accidentally+Unleashed+a+Neo-Nazi+Sexbot&rft.date=2016-03-24&rft.aulast=Metz&rft.aufirst=Rachel&rft_id=https%3A%2F%2Fwww.technologyreview.com%2Fs%2F601111%2Fwhy-microsoft-accidentally-unleashed-a-neo-nazi-sexbot%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-133"><span class="mw-cite-backlink"><b><a href="#cite_ref-133">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSimonite2017" class="citation news cs1">Simonite, Tom (March 30, 2017). <a rel="nofollow" class="external text" href="https://www.technologyreview.com/s/603944/microsoft-ai-isnt-yet-adaptable-enough-to-help-businesses/">"Microsoft: AI Isn't Yet Adaptable Enough to Help Businesses"</a>. <i>MIT Technology Review</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20181109022820/https://www.technologyreview.com/s/603944/microsoft-ai-isnt-yet-adaptable-enough-to-help-businesses/">Archived</a> from the original on 2018-11-09<span class="reference-accessdate">. Retrieved <span class="nowrap">2018-08-20</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=MIT+Technology+Review&rft.atitle=Microsoft%3A+AI+Isn%27t+Yet+Adaptable+Enough+to+Help+Businesses&rft.date=2017-03-30&rft.aulast=Simonite&rft.aufirst=Tom&rft_id=https%3A%2F%2Fwww.technologyreview.com%2Fs%2F603944%2Fmicrosoft-ai-isnt-yet-adaptable-enough-to-help-businesses%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-134"><span class="mw-cite-backlink"><b><a href="#cite_ref-134">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHempel2018" class="citation news cs1">Hempel, Jessi (2018-11-13). <a rel="nofollow" class="external text" href="https://www.wired.com/story/fei-fei-li-artificial-intelligence-humanity/">"Fei-Fei Li's Quest to Make Machines Better for Humanity"</a>. <i>Wired</i>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1059-1028">1059-1028</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20201214095220/https://www.wired.com/story/fei-fei-li-artificial-intelligence-humanity/">Archived</a> from the original on 2020-12-14<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2019-02-17</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Wired&rft.atitle=Fei-Fei+Li%27s+Quest+to+Make+Machines+Better+for+Humanity&rft.date=2018-11-13&rft.issn=1059-1028&rft.aulast=Hempel&rft.aufirst=Jessi&rft_id=https%3A%2F%2Fwww.wired.com%2Fstory%2Ffei-fei-li-artificial-intelligence-humanity%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-135"><span class="mw-cite-backlink"><b><a href="#cite_ref-135">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRudin2019" class="citation journal cs1">Rudin, Cynthia (2019). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9122117">"Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead"</a>. <i>Nature Machine Intelligence</i>. <b>1</b> (5): 206–215. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fs42256-019-0048-x">10.1038/s42256-019-0048-x</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9122117">9122117</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/35603010">35603010</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature+Machine+Intelligence&rft.atitle=Stop+explaining+black+box+machine+learning+models+for+high+stakes+decisions+and+use+interpretable+models+instead&rft.volume=1&rft.issue=5&rft.pages=206-215&rft.date=2019&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC9122117%23id-name%3DPMC&rft_id=info%3Apmid%2F35603010&rft_id=info%3Adoi%2F10.1038%2Fs42256-019-0048-x&rft.aulast=Rudin&rft.aufirst=Cynthia&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC9122117&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-136"><span class="mw-cite-backlink"><b><a href="#cite_ref-136">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHuZhangBohrerLiu2023" class="citation journal cs1">Hu, Tongxi; Zhang, Xuesong; Bohrer, Gil; Liu, Yanlan; Zhou, Yuyu; Martin, Jay; LI, Yang; Zhao, Kaiguang (2023). <a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.agrformet.2023.109458">"Crop yield prediction via explainable AI and interpretable machine learning: Dangers of black box models for evaluating climate change impacts on crop yield"</a>. <i>Agricultural and Forest Meteorology</i>. <b>336</b>: 109458. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fj.agrformet.2023.109458">10.1016/j.agrformet.2023.109458</a></span>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:258552400">258552400</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Agricultural+and+Forest+Meteorology&rft.atitle=Crop+yield+prediction+via+explainable+AI+and+interpretable+machine+learning%3A+Dangers+of+black+box+models+for+evaluating+climate+change+impacts+on+crop+yield&rft.volume=336&rft.pages=109458&rft.date=2023&rft_id=info%3Adoi%2F10.1016%2Fj.agrformet.2023.109458&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A258552400%23id-name%3DS2CID&rft.aulast=Hu&rft.aufirst=Tongxi&rft.au=Zhang%2C+Xuesong&rft.au=Bohrer%2C+Gil&rft.au=Liu%2C+Yanlan&rft.au=Zhou%2C+Yuyu&rft.au=Martin%2C+Jay&rft.au=LI%2C+Yang&rft.au=Zhao%2C+Kaiguang&rft_id=https%3A%2F%2Fdoi.org%2F10.1016%252Fj.agrformet.2023.109458&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-FOOTNOTEDomingos2015Chapter_6,_Chapter_7-137"><span class="mw-cite-backlink"><b><a href="#cite_ref-FOOTNOTEDomingos2015Chapter_6,_Chapter_7_137-0">^</a></b></span> <span class="reference-text"><a href="#CITEREFDomingos2015">Domingos 2015</a>, Chapter 6, Chapter 7.</span> </li> <li id="cite_note-FOOTNOTEDomingos2015286-138"><span class="mw-cite-backlink"><b><a href="#cite_ref-FOOTNOTEDomingos2015286_138-0">^</a></b></span> <span class="reference-text"><a href="#CITEREFDomingos2015">Domingos 2015</a>, p. 286.</span> </li> <li id="cite_note-139"><span class="mw-cite-backlink"><b><a href="#cite_ref-139">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.bbc.com/news/technology-41845878">"Single pixel change fools AI programs"</a>. <i>BBC News</i>. 3 November 2017. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180322011306/http://www.bbc.com/news/technology-41845878">Archived</a> from the original on 22 March 2018<span class="reference-accessdate">. Retrieved <span class="nowrap">12 March</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=BBC+News&rft.atitle=Single+pixel+change+fools+AI+programs&rft.date=2017-11-03&rft_id=https%3A%2F%2Fwww.bbc.com%2Fnews%2Ftechnology-41845878&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-140"><span class="mw-cite-backlink"><b><a href="#cite_ref-140">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.wired.com/story/ai-has-a-hallucination-problem-thats-proving-tough-to-fix/">"AI Has a Hallucination Problem That's Proving Tough to Fix"</a>. <i>WIRED</i>. 2018. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180312024533/https://www.wired.com/story/ai-has-a-hallucination-problem-thats-proving-tough-to-fix/">Archived</a> from the original on 12 March 2018<span class="reference-accessdate">. 
Retrieved <span class="nowrap">12 March</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=WIRED&rft.atitle=AI+Has+a+Hallucination+Problem+That%27s+Proving+Tough+to+Fix&rft.date=2018&rft_id=https%3A%2F%2Fwww.wired.com%2Fstory%2Fai-has-a-hallucination-problem-thats-proving-tough-to-fix%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-TD_1-141"><span class="mw-cite-backlink"><b><a href="#cite_ref-TD_1_141-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMadry,_A.Makelov,_A.Schmidt,_L.Tsipras,_D.2019" class="citation arxiv cs1">Madry, A.; Makelov, A.; Schmidt, L.; Tsipras, D.; Vladu, A. (4 September 2019). "Towards deep learning models resistant to adversarial attacks". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1706.06083">1706.06083</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/stat.ML">stat.ML</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Towards+deep+learning+models+resistant+to+adversarial+attacks&rft.date=2019-09-04&rft_id=info%3Aarxiv%2F1706.06083&rft.au=Madry%2C+A.&rft.au=Makelov%2C+A.&rft.au=Schmidt%2C+L.&rft.au=Tsipras%2C+D.&rft.au=Vladu%2C+A.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-142"><span class="mw-cite-backlink"><b><a href="#cite_ref-142">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://cltc.berkeley.edu/aml/">"Adversarial Machine Learning – CLTC UC Berkeley Center for Long-Term Cybersecurity"</a>. <i>CLTC</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220517045352/https://cltc.berkeley.edu/aml/">Archived</a> from the original on 2022-05-17<span class="reference-accessdate">. Retrieved <span class="nowrap">2022-05-25</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=CLTC&rft.atitle=Adversarial+Machine+Learning+%E2%80%93+CLTC+UC+Berkeley+Center+for+Long-Term+Cybersecurity&rft_id=https%3A%2F%2Fcltc.berkeley.edu%2Faml%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-143"><span class="mw-cite-backlink"><b><a href="#cite_ref-143">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.theregister.com/2022/04/21/machine_learning_models_backdoors/">"Machine-learning models vulnerable to undetectable backdoors"</a>. <i><a href="/wiki/The_Register" title="The Register">The Register</a></i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220513171215/https://www.theregister.com/2022/04/21/machine_learning_models_backdoors/">Archived</a> from the original on 13 May 2022<span class="reference-accessdate">. 
Retrieved <span class="nowrap">13 May</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+Register&rft.atitle=Machine-learning+models+vulnerable+to+undetectable+backdoors&rft_id=https%3A%2F%2Fwww.theregister.com%2F2022%2F04%2F21%2Fmachine_learning_models_backdoors%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-144"><span class="mw-cite-backlink"><b><a href="#cite_ref-144">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://spectrum.ieee.org/machine-learningbackdoor">"Undetectable Backdoors Plantable In Any Machine-Learning Algorithm"</a>. <i><a href="/wiki/IEEE_Spectrum" title="IEEE Spectrum">IEEE Spectrum</a></i>. 10 May 2022. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220511152052/https://spectrum.ieee.org/machine-learningbackdoor">Archived</a> from the original on 11 May 2022<span class="reference-accessdate">. Retrieved <span class="nowrap">13 May</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Spectrum&rft.atitle=Undetectable+Backdoors+Plantable+In+Any+Machine-Learning+Algorithm&rft.date=2022-05-10&rft_id=https%3A%2F%2Fspectrum.ieee.org%2Fmachine-learningbackdoor&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-145"><span class="mw-cite-backlink"><b><a href="#cite_ref-145">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGoldwasserKimVaikuntanathanZamir2022" class="citation arxiv cs1">Goldwasser, Shafi; Kim, Michael P.; Vaikuntanathan, Vinod; Zamir, Or (14 April 2022). "Planting Undetectable Backdoors in Machine Learning Models". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2204.06974">2204.06974</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.LG">cs.LG</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Planting+Undetectable+Backdoors+in+Machine+Learning+Models&rft.date=2022-04-14&rft_id=info%3Aarxiv%2F2204.06974&rft.aulast=Goldwasser&rft.aufirst=Shafi&rft.au=Kim%2C+Michael+P.&rft.au=Vaikuntanathan%2C+Vinod&rft.au=Zamir%2C+Or&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-146"><span class="mw-cite-backlink"><b><a href="#cite_ref-146">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKohavi1995" class="citation journal cs1">Kohavi, Ron (1995). <a rel="nofollow" class="external text" href="https://ai.stanford.edu/~ronnyk/accEst.pdf">"A Study of Cross-Validation and Bootstrap for Accuracy Estimation and Model Selection"</a> <span class="cs1-format">(PDF)</span>. <i>International Joint Conference on Artificial Intelligence</i>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20180712102706/http://web.cs.iastate.edu/~jtian/cs573/Papers/Kohavi-IJCAI-95.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2018-07-12<span class="reference-accessdate">. Retrieved <span class="nowrap">2023-03-26</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=International+Joint+Conference+on+Artificial+Intelligence&rft.atitle=A+Study+of+Cross-Validation+and+Bootstrap+for+Accuracy+Estimation+and+Model+Selection&rft.date=1995&rft.aulast=Kohavi&rft.aufirst=Ron&rft_id=https%3A%2F%2Fai.stanford.edu%2F~ronnyk%2FaccEst.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-147"><span class="mw-cite-backlink"><b><a href="#cite_ref-147">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCatal2012" class="citation journal cs1">Catal, Cagatay (2012). <a rel="nofollow" class="external text" href="http://www.uni-obuda.hu/journal/Catal_36.pdf">"Performance Evaluation Metrics for Software Fault Prediction Studies"</a> <span class="cs1-format">(PDF)</span>. <i>Acta Polytechnica Hungarica</i>. <b>9</b> (4)<span class="reference-accessdate">. Retrieved <span class="nowrap">2 October</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Acta+Polytechnica+Hungarica&rft.atitle=Performance+Evaluation+Metrics+for+Software+Fault+Prediction+Studies&rft.volume=9&rft.issue=4&rft.date=2012&rft.aulast=Catal&rft.aufirst=Cagatay&rft_id=http%3A%2F%2Fwww.uni-obuda.hu%2Fjournal%2FCatal_36.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-148"><span class="mw-cite-backlink"><b><a href="#cite_ref-148">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBostrom2011" class="citation web cs1">Bostrom, Nick (2011). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160304015020/http://www.nickbostrom.com/ethics/artificial-intelligence.pdf">"The Ethics of Artificial Intelligence"</a> <span class="cs1-format">(PDF)</span>. Archived from <a rel="nofollow" class="external text" href="http://www.nickbostrom.com/ethics/artificial-intelligence.pdf">the original</a> <span class="cs1-format">(PDF)</span> on 4 March 2016<span class="reference-accessdate">. Retrieved <span class="nowrap">11 April</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=The+Ethics+of+Artificial+Intelligence&rft.date=2011&rft.aulast=Bostrom&rft.aufirst=Nick&rft_id=http%3A%2F%2Fwww.nickbostrom.com%2Fethics%2Fartificial-intelligence.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-Edionwe_Outline-149"><span class="mw-cite-backlink"><b><a href="#cite_ref-Edionwe_Outline_149-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFEdionwe" class="citation web cs1">Edionwe, Tolulope. 
<a rel="nofollow" class="external text" href="https://theoutline.com/post/1571/the-fight-against-racist-algorithms">"The fight against racist algorithms"</a>. <i>The Outline</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171117174504/https://theoutline.com/post/1571/the-fight-against-racist-algorithms">Archived</a> from the original on 17 November 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">17 November</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=The+Outline&rft.atitle=The+fight+against+racist+algorithms&rft.aulast=Edionwe&rft.aufirst=Tolulope&rft_id=https%3A%2F%2Ftheoutline.com%2Fpost%2F1571%2Fthe-fight-against-racist-algorithms&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-Jeffries_Outline-150"><span class="mw-cite-backlink"><b><a href="#cite_ref-Jeffries_Outline_150-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFJeffries" class="citation web cs1">Jeffries, Adrianne. <a rel="nofollow" class="external text" href="https://theoutline.com/post/1439/machine-learning-is-racist-because-the-internet-is-racist">"Machine learning is racist because the internet is racist"</a>. <i>The Outline</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20171117174503/https://theoutline.com/post/1439/machine-learning-is-racist-because-the-internet-is-racist">Archived</a> from the original on 17 November 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">17 November</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=The+Outline&rft.atitle=Machine+learning+is+racist+because+the+internet+is+racist&rft.aulast=Jeffries&rft.aufirst=Adrianne&rft_id=https%3A%2F%2Ftheoutline.com%2Fpost%2F1439%2Fmachine-learning-is-racist-because-the-internet-is-racist&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-151"><span class="mw-cite-backlink"><b><a href="#cite_ref-151">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWong2023" class="citation journal cs1">Wong, Carissa (2023-03-30). <a rel="nofollow" class="external text" href="https://www.nature.com/articles/d41586-023-00935-z">"AI 'fairness' research held back by lack of diversity"</a>. <i>Nature</i>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1038%2Fd41586-023-00935-z">10.1038/d41586-023-00935-z</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/36997714">36997714</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:257857012">257857012</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230412120505/https://www.nature.com/articles/d41586-023-00935-z">Archived</a> from the original on 2023-04-12<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2023-12-09</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Nature&rft.atitle=AI+%27fairness%27+research+held+back+by+lack+of+diversity&rft.date=2023-03-30&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A257857012%23id-name%3DS2CID&rft_id=info%3Apmid%2F36997714&rft_id=info%3Adoi%2F10.1038%2Fd41586-023-00935-z&rft.aulast=Wong&rft.aufirst=Carissa&rft_id=https%3A%2F%2Fwww.nature.com%2Farticles%2Fd41586-023-00935-z&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-Zhang-152"><span class="mw-cite-backlink">^ <a href="#cite_ref-Zhang_152-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Zhang_152-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFZhang" class="citation journal cs1">Zhang, Jack Clark. <a rel="nofollow" class="external text" href="https://aiindex.stanford.edu/wp-content/uploads/2021/11/2021-AI-Index-Report_Master.pdf">"Artificial Intelligence Index Report 2021"</a> <span class="cs1-format">(PDF)</span>. <i>Stanford Institute for Human-Centered Artificial Intelligence</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20240519121545/https://aiindex.stanford.edu/wp-content/uploads/2021/11/2021-AI-Index-Report_Master.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2024-05-19<span class="reference-accessdate">. Retrieved <span class="nowrap">2023-12-09</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Stanford+Institute+for+Human-Centered+Artificial+Intelligence&rft.atitle=Artificial+Intelligence+Index+Report+2021&rft.aulast=Zhang&rft.aufirst=Jack+Clark&rft_id=https%3A%2F%2Faiindex.stanford.edu%2Fwp-content%2Fuploads%2F2021%2F11%2F2021-AI-Index-Report_Master.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-153"><span class="mw-cite-backlink"><b><a href="#cite_ref-153">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBostromYudkowsky2011" class="citation web cs1">Bostrom, Nick; Yudkowsky, Eliezer (2011). <a rel="nofollow" class="external text" href="https://www.nickbostrom.com/ethics/artificial-intelligence.pdf">"THE ETHICS OF ARTIFICIAL INTELLIGENCE"</a> <span class="cs1-format">(PDF)</span>. <i>Nick Bostrom</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20151220175220/http://www.nickbostrom.com/ethics/artificial-intelligence.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on 2015-12-20<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2020-11-18</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Nick+Bostrom&rft.atitle=THE+ETHICS+OF+ARTIFICIAL+INTELLIGENCE&rft.date=2011&rft.aulast=Bostrom&rft.aufirst=Nick&rft.au=Yudkowsky%2C+Eliezer&rft_id=https%3A%2F%2Fwww.nickbostrom.com%2Fethics%2Fartificial-intelligence.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-154"><span class="mw-cite-backlink"><b><a href="#cite_ref-154">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFM.O.R._PratesP.H.C._AvelarL.C._Lamb2019" class="citation arxiv cs1">M.O.R. Prates; P.H.C. Avelar; L.C. Lamb (11 Mar 2019). "Assessing Gender Bias in Machine Translation – A Case Study with Google Translate". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1809.02208">1809.02208</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CY">cs.CY</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Assessing+Gender+Bias+in+Machine+Translation+%E2%80%93+A+Case+Study+with+Google+Translate&rft.date=2019-03-11&rft_id=info%3Aarxiv%2F1809.02208&rft.au=M.O.R.+Prates&rft.au=P.H.C.+Avelar&rft.au=L.C.+Lamb&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-155"><span class="mw-cite-backlink"><b><a href="#cite_ref-155">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFNarayanan2016" class="citation web cs1">Narayanan, Arvind (August 24, 2016). <a rel="nofollow" class="external text" href="https://freedom-to-tinker.com/2016/08/24/language-necessarily-contains-human-biases-and-so-will-machines-trained-on-language-corpora/">"Language necessarily contains human biases, and so will machines trained on language corpora"</a>. <i>Freedom to Tinker</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180625021555/https://freedom-to-tinker.com/2016/08/24/language-necessarily-contains-human-biases-and-so-will-machines-trained-on-language-corpora/">Archived</a> from the original on June 25, 2018<span class="reference-accessdate">. 
Retrieved <span class="nowrap">November 19,</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Freedom+to+Tinker&rft.atitle=Language+necessarily+contains+human+biases%2C+and+so+will+machines+trained+on+language+corpora&rft.date=2016-08-24&rft.aulast=Narayanan&rft.aufirst=Arvind&rft_id=https%3A%2F%2Ffreedom-to-tinker.com%2F2016%2F08%2F24%2Flanguage-necessarily-contains-human-biases-and-so-will-machines-trained-on-language-corpora%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-156"><span class="mw-cite-backlink"><b><a href="#cite_ref-156">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCharShahMagnus2018" class="citation journal cs1">Char, Danton S.; Shah, Nigam H.; Magnus, David (2018-03-15). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5962261">"Implementing Machine Learning in Health Care — Addressing Ethical Challenges"</a>. <i>New England Journal of Medicine</i>. <b>378</b> (11): 981–983. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1056%2FNEJMp1714229">10.1056/NEJMp1714229</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0028-4793">0028-4793</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5962261">5962261</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/29539284">29539284</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=New+England+Journal+of+Medicine&rft.atitle=Implementing+Machine+Learning+in+Health+Care+%E2%80%94+Addressing+Ethical+Challenges&rft.volume=378&rft.issue=11&rft.pages=981-983&rft.date=2018-03-15&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC5962261%23id-name%3DPMC&rft.issn=0028-4793&rft_id=info%3Apmid%2F29539284&rft_id=info%3Adoi%2F10.1056%2FNEJMp1714229&rft.aulast=Char&rft.aufirst=Danton+S.&rft.au=Shah%2C+Nigam+H.&rft.au=Magnus%2C+David&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC5962261&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-157"><span class="mw-cite-backlink"><b><a href="#cite_ref-157">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCharShahMagnus2018" class="citation journal cs1">Char, D. S.; Shah, N. H.; Magnus, D. (2018). <a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5962261">"Implementing Machine Learning in Health Care—Addressing Ethical Challenges"</a>. <i><a href="/wiki/New_England_Journal_of_Medicine" class="mw-redirect" title="New England Journal of Medicine">New England Journal of Medicine</a></i>. <b>378</b> (11): 981–983. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1056%2Fnejmp1714229">10.1056/nejmp1714229</a>. <a href="/wiki/PMC_(identifier)" class="mw-redirect" title="PMC (identifier)">PMC</a> <span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5962261">5962261</a></span>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/29539284">29539284</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=New+England+Journal+of+Medicine&rft.atitle=Implementing+Machine+Learning+in+Health+Care%E2%80%94Addressing+Ethical+Challenges&rft.volume=378&rft.issue=11&rft.pages=981-983&rft.date=2018&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC5962261%23id-name%3DPMC&rft_id=info%3Apmid%2F29539284&rft_id=info%3Adoi%2F10.1056%2Fnejmp1714229&rft.aulast=Char&rft.aufirst=D.+S.&rft.au=Shah%2C+N.+H.&rft.au=Magnus%2C+D.&rft_id=https%3A%2F%2Fwww.ncbi.nlm.nih.gov%2Fpmc%2Farticles%2FPMC5962261&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-158"><span class="mw-cite-backlink"><b><a href="#cite_ref-158">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFResearch2015" class="citation web cs1">Research, AI (23 October 2015). <a rel="nofollow" class="external text" href="http://airesearch.com/ai-research-papers/deep-neural-networks-for-acoustic-modeling-in-speech-recognition/">"Deep Neural Networks for Acoustic Modeling in Speech Recognition"</a>. <i>airesearch.com</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160201033801/http://airesearch.com/ai-research-papers/deep-neural-networks-for-acoustic-modeling-in-speech-recognition/">Archived</a> from the original on 1 February 2016<span class="reference-accessdate">. Retrieved <span class="nowrap">23 October</span> 2015</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=airesearch.com&rft.atitle=Deep+Neural+Networks+for+Acoustic+Modeling+in+Speech+Recognition&rft.date=2015-10-23&rft.aulast=Research&rft.aufirst=AI&rft_id=http%3A%2F%2Fairesearch.com%2Fai-research-papers%2Fdeep-neural-networks-for-acoustic-modeling-in-speech-recognition%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-159"><span class="mw-cite-backlink"><b><a href="#cite_ref-159">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.informationweek.com/big-data/ai-machine-learning/gpus-continue-to-dominate-the-ai-accelerator-market-for-now/a/d-id/1336475">"GPUs Continue to Dominate the AI Accelerator Market for Now"</a>. <i>InformationWeek</i>. December 2019. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20200610094310/https://www.informationweek.com/big-data/ai-machine-learning/gpus-continue-to-dominate-the-ai-accelerator-market-for-now/a/d-id/1336475">Archived</a> from the original on 10 June 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">11 June</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=InformationWeek&rft.atitle=GPUs+Continue+to+Dominate+the+AI+Accelerator+Market+for+Now&rft.date=2019-12&rft_id=https%3A%2F%2Fwww.informationweek.com%2Fbig-data%2Fai-machine-learning%2Fgpus-continue-to-dominate-the-ai-accelerator-market-for-now%2Fa%2Fd-id%2F1336475&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-160"><span class="mw-cite-backlink"><b><a href="#cite_ref-160">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRay2019" class="citation news cs1">Ray, Tiernan (2019). <a rel="nofollow" class="external text" href="https://www.zdnet.com/article/ai-is-changing-the-entire-nature-of-compute/">"AI is changing the entire nature of compute"</a>. <i>ZDNet</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200525144635/https://www.zdnet.com/article/ai-is-changing-the-entire-nature-of-compute/">Archived</a> from the original on 25 May 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">11 June</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=ZDNet&rft.atitle=AI+is+changing+the+entire+nature+of+compute&rft.date=2019&rft.aulast=Ray&rft.aufirst=Tiernan&rft_id=https%3A%2F%2Fwww.zdnet.com%2Farticle%2Fai-is-changing-the-entire-nature-of-compute%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-161"><span class="mw-cite-backlink"><b><a href="#cite_ref-161">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://openai.com/blog/ai-and-compute/">"AI and Compute"</a>. <i>OpenAI</i>. 16 May 2018. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200617200602/https://openai.com/blog/ai-and-compute/">Archived</a> from the original on 17 June 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">11 June</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=OpenAI&rft.atitle=AI+and+Compute&rft.date=2018-05-16&rft_id=https%3A%2F%2Fopenai.com%2Fblog%2Fai-and-compute%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-162"><span class="mw-cite-backlink"><b><a href="#cite_ref-162">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.zdnet.com/article/what-is-neuromorphic-computing-everything-you-need-to-know-about-how-it-will-change-the-future-of-computing/">"What is neuromorphic computing? Everything you need to know about how it is changing the future of computing"</a>. 
<i>ZDNET</i>. 8 December 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">2024-11-21</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=ZDNET&rft.atitle=What+is+neuromorphic+computing%3F+Everything+you+need+to+know+about+how+it+is+changing+the+future+of+computing&rft.date=2020-12-08&rft_id=https%3A%2F%2Fwww.zdnet.com%2Farticle%2Fwhat-is-neuromorphic-computing-everything-you-need-to-know-about-how-it-will-change-the-future-of-computing%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-163"><span class="mw-cite-backlink"><b><a href="#cite_ref-163">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://syncedreview.com/2021/05/27/deepmind-podracer-tpu-based-rl-frameworks-deliver-exceptional-performance-at-low-cost-28/">"Cornell & NTT's Physical Neural Networks: A "Radical Alternative for Implementing Deep Neural Networks" That Enables Arbitrary Physical Systems Training"</a>. <i>Synced</i>. 27 May 2021. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20211027183428/https://syncedreview.com/2021/05/27/deepmind-podracer-tpu-based-rl-frameworks-deliver-exceptional-performance-at-low-cost-28/">Archived</a> from the original on 27 October 2021<span class="reference-accessdate">. Retrieved <span class="nowrap">12 October</span> 2021</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Synced&rft.atitle=Cornell+%26+NTT%27s+Physical+Neural+Networks%3A+A+%22Radical+Alternative+for+Implementing+Deep+Neural+Networks%22+That+Enables+Arbitrary+Physical+Systems+Training&rft.date=2021-05-27&rft_id=https%3A%2F%2Fsyncedreview.com%2F2021%2F05%2F27%2Fdeepmind-podracer-tpu-based-rl-frameworks-deliver-exceptional-performance-at-low-cost-28%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-164"><span class="mw-cite-backlink"><b><a href="#cite_ref-164">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.theregister.com/2021/10/05/analogue_neural_network_research/">"Nano-spaghetti to solve neural network power consumption"</a>. <i>The Register</i>. 5 October 2021. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20211006150057/https://www.theregister.com/2021/10/05/analogue_neural_network_research/">Archived</a> from the original on 2021-10-06<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2021-10-12</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+Register&rft.atitle=Nano-spaghetti+to+solve+neural+network+power+consumption&rft.date=2021-10-05&rft_id=https%3A%2F%2Fwww.theregister.com%2F2021%2F10%2F05%2Fanalogue_neural_network_research%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-165"><span class="mw-cite-backlink"><b><a href="#cite_ref-165">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFafoutisMarchegianiElstsPope2018" class="citation book cs1">Fafoutis, Xenofon; Marchegiani, Letizia; Elsts, Atis; Pope, James; Piechocki, Robert; Craddock, Ian (2018-05-07). <a rel="nofollow" class="external text" href="https://ieeexplore.ieee.org/document/8355116">"Extending the battery lifetime of wearable sensors with embedded machine learning"</a>. <a rel="nofollow" class="external text" href="https://research-information.bris.ac.uk/en/publications/b8fdb58b-7114-45c6-82e4-4ab239c1327f"><i>2018 IEEE 4th World Forum on Internet of Things (WF-IoT)</i></a>. pp. 269–274. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FWF-IoT.2018.8355116">10.1109/WF-IoT.2018.8355116</a>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<a rel="nofollow" class="external text" href="https://hdl.handle.net/1983%2Fb8fdb58b-7114-45c6-82e4-4ab239c1327f">1983/b8fdb58b-7114-45c6-82e4-4ab239c1327f</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4673-9944-9" title="Special:BookSources/978-1-4673-9944-9"><bdi>978-1-4673-9944-9</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:19192912">19192912</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220118182543/https://ieeexplore.ieee.org/abstract/document/8355116?casa_token=LCpUeGLS1e8AAAAA:2OjuJfNwZBnV2pgDxfnEAC-jbrETv_BpTcX35_aFqN6IULFxu1xbYbVSRpD-zMd4GCUMELyG">Archived</a> from the original on 2022-01-18<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2022-01-17</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Extending+the+battery+lifetime+of+wearable+sensors+with+embedded+machine+learning&rft.btitle=2018+IEEE+4th+World+Forum+on+Internet+of+Things+%28WF-IoT%29&rft.pages=269-274&rft.date=2018-05-07&rft_id=info%3Ahdl%2F1983%2Fb8fdb58b-7114-45c6-82e4-4ab239c1327f&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A19192912%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2FWF-IoT.2018.8355116&rft.isbn=978-1-4673-9944-9&rft.aulast=Fafoutis&rft.aufirst=Xenofon&rft.au=Marchegiani%2C+Letizia&rft.au=Elsts%2C+Atis&rft.au=Pope%2C+James&rft.au=Piechocki%2C+Robert&rft.au=Craddock%2C+Ian&rft_id=https%3A%2F%2Fieeexplore.ieee.org%2Fdocument%2F8355116&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-166"><span class="mw-cite-backlink"><b><a href="#cite_ref-166">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://analyticsindiamag.com/a-beginners-guide-to-machine-learning-for-embedded-systems/">"A Beginner's Guide To Machine learning For Embedded Systems"</a>. <i>Analytics India Magazine</i>. 2021-06-02. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220118182754/https://analyticsindiamag.com/a-beginners-guide-to-machine-learning-for-embedded-systems/">Archived</a> from the original on 2022-01-18<span class="reference-accessdate">. Retrieved <span class="nowrap">2022-01-17</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Analytics+India+Magazine&rft.atitle=A+Beginner%27s+Guide+To+Machine+learning+For+Embedded+Systems&rft.date=2021-06-02&rft_id=https%3A%2F%2Fanalyticsindiamag.com%2Fa-beginners-guide-to-machine-learning-for-embedded-systems%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-167"><span class="mw-cite-backlink"><b><a href="#cite_ref-167">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSynced2022" class="citation web cs1">Synced (2022-01-12). <a rel="nofollow" class="external text" href="https://syncedreview.com/2022/01/12/deepmind-podracer-tpu-based-rl-frameworks-deliver-exceptional-performance-at-low-cost-183/">"Google, Purdue & Harvard U's Open-Source Framework for TinyML Achieves up to 75x Speedups on FPGAs | Synced"</a>. <i>syncedreview.com</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220118182404/https://syncedreview.com/2022/01/12/deepmind-podracer-tpu-based-rl-frameworks-deliver-exceptional-performance-at-low-cost-183/">Archived</a> from the original on 2022-01-18<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2022-01-17</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=syncedreview.com&rft.atitle=Google%2C+Purdue+%26+Harvard+U%27s+Open-Source+Framework+for+TinyML+Achieves+up+to+75x+Speedups+on+FPGAs+%7C+Synced&rft.date=2022-01-12&rft.au=Synced&rft_id=https%3A%2F%2Fsyncedreview.com%2F2022%2F01%2F12%2Fdeepmind-podracer-tpu-based-rl-frameworks-deliver-exceptional-performance-at-low-cost-183%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-168"><span class="mw-cite-backlink"><b><a href="#cite_ref-168">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGiriChiuDi_GuglielmoMantovani2020" class="citation book cs1">Giri, Davide; Chiu, Kuan-Lin; Di Guglielmo, Giuseppe; Mantovani, Paolo; Carloni, Luca P. (2020-06-15). <a rel="nofollow" class="external text" href="https://ieeexplore.ieee.org/document/9116317">"ESP4ML: Platform-Based Design of Systems-on-Chip for Embedded Machine Learning"</a>. <i>2020 Design, Automation & Test in Europe Conference & Exhibition (DATE)</i>. pp. 1049–1054. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2004.03640">2004.03640</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.23919%2FDATE48585.2020.9116317">10.23919/DATE48585.2020.9116317</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3-9819263-4-7" title="Special:BookSources/978-3-9819263-4-7"><bdi>978-3-9819263-4-7</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:210928161">210928161</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220118182342/https://ieeexplore.ieee.org/abstract/document/9116317?casa_token=5I_Tmgrrbu4AAAAA:v7pDHPEWlRuo2Vk3pU06194PO0-W21UOdyZqADrZxrRdPBZDMLwQrjJSAHUhHtzJmLu_VdgW">Archived</a> from the original on 2022-01-18<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2022-01-17</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=ESP4ML%3A+Platform-Based+Design+of+Systems-on-Chip+for+Embedded+Machine+Learning&rft.btitle=2020+Design%2C+Automation+%26+Test+in+Europe+Conference+%26+Exhibition+%28DATE%29&rft.pages=1049-1054&rft.date=2020-06-15&rft_id=info%3Aarxiv%2F2004.03640&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A210928161%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.23919%2FDATE48585.2020.9116317&rft.isbn=978-3-9819263-4-7&rft.aulast=Giri&rft.aufirst=Davide&rft.au=Chiu%2C+Kuan-Lin&rft.au=Di+Guglielmo%2C+Giuseppe&rft.au=Mantovani%2C+Paolo&rft.au=Carloni%2C+Luca+P.&rft_id=https%3A%2F%2Fieeexplore.ieee.org%2Fdocument%2F9116317&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-169"><span class="mw-cite-backlink"><b><a href="#cite_ref-169">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLouisAzadDelshadtehraniGupta2019" class="citation web cs1">Louis, Marcia Sahaya; Azad, Zahra; Delshadtehrani, Leila; Gupta, Suyog; Warden, Pete; Reddi, Vijay Janapa; Joshi, Ajay (2019). <a rel="nofollow" class="external text" href="https://edge.seas.harvard.edu/publications/towards-deep-learning-using-tensorflow-lite-risc-v">"Towards Deep Learning using TensorFlow Lite on RISC-V"</a>. <i><a href="/wiki/Harvard_University" title="Harvard University">Harvard University</a></i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220117031909/https://edge.seas.harvard.edu/publications/towards-deep-learning-using-tensorflow-lite-risc-v">Archived</a> from the original on 2022-01-17<span class="reference-accessdate">. Retrieved <span class="nowrap">2022-01-17</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Harvard+University&rft.atitle=Towards+Deep+Learning+using+TensorFlow+Lite+on+RISC-V&rft.date=2019&rft.aulast=Louis&rft.aufirst=Marcia+Sahaya&rft.au=Azad%2C+Zahra&rft.au=Delshadtehrani%2C+Leila&rft.au=Gupta%2C+Suyog&rft.au=Warden%2C+Pete&rft.au=Reddi%2C+Vijay+Janapa&rft.au=Joshi%2C+Ajay&rft_id=https%3A%2F%2Fedge.seas.harvard.edu%2Fpublications%2Ftowards-deep-learning-using-tensorflow-lite-risc-v&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-170"><span class="mw-cite-backlink"><b><a href="#cite_ref-170">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFIbrahimOstaAlamehSaleh2019" class="citation book cs1">Ibrahim, Ali; Osta, Mario; Alameh, Mohamad; Saleh, Moustafa; Chible, Hussein; Valle, Maurizio (2019-01-21). <a rel="nofollow" class="external text" href="https://ieeexplore.ieee.org/document/8617877">"Approximate Computing Methods for Embedded Machine Learning"</a>. <i>2018 25th IEEE International Conference on Electronics, Circuits and Systems (ICECS)</i>. pp. 845–848. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FICECS.2018.8617877">10.1109/ICECS.2018.8617877</a>. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-5386-9562-3" title="Special:BookSources/978-1-5386-9562-3"><bdi>978-1-5386-9562-3</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:58670712">58670712</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220117031855/https://ieeexplore.ieee.org/abstract/document/8617877?casa_token=arUW5Oy-tzwAAAAA:I9x6edlfskM6kGNFUN9zAFrjEBv_8kYTz7ERTxtXu9jAqdrYCcDbbwjBdgwXvb6QAH_-0VJJ">Archived</a> from the original on 2022-01-17<span class="reference-accessdate">. Retrieved <span class="nowrap">2022-01-17</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Approximate+Computing+Methods+for+Embedded+Machine+Learning&rft.btitle=2018+25th+IEEE+International+Conference+on+Electronics%2C+Circuits+and+Systems+%28ICECS%29&rft.pages=845-848&rft.date=2019-01-21&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A58670712%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2FICECS.2018.8617877&rft.isbn=978-1-5386-9562-3&rft.aulast=Ibrahim&rft.aufirst=Ali&rft.au=Osta%2C+Mario&rft.au=Alameh%2C+Mohamad&rft.au=Saleh%2C+Moustafa&rft.au=Chible%2C+Hussein&rft.au=Valle%2C+Maurizio&rft_id=https%3A%2F%2Fieeexplore.ieee.org%2Fdocument%2F8617877&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-171"><span class="mw-cite-backlink"><b><a href="#cite_ref-171">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://dblp.org/rec/journals/corr/abs-1903-01855.html">"dblp: TensorFlow Eager: A Multi-Stage, Python-Embedded DSL for Machine Learning"</a>. <i>dblp.org</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220118182335/https://dblp.org/rec/journals/corr/abs-1903-01855.html">Archived</a> from the original on 2022-01-18<span class="reference-accessdate">. Retrieved <span class="nowrap">2022-01-17</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=dblp.org&rft.atitle=dblp%3A+TensorFlow+Eager%3A+A+Multi-Stage%2C+Python-Embedded+DSL+for+Machine+Learning.&rft_id=https%3A%2F%2Fdblp.org%2Frec%2Fjournals%2Fcorr%2Fabs-1903-01855.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> <li id="cite_note-172"><span class="mw-cite-backlink"><b><a href="#cite_ref-172">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBrancoFerreiraCabral2019" class="citation journal cs1">Branco, Sérgio; Ferreira, André G.; Cabral, Jorge (2019-11-05). <a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Felectronics8111289">"Machine Learning in Resource-Scarce Embedded Systems, FPGAs, and End-Devices: A Survey"</a>. <i>Electronics</i>. <b>8</b> (11): 1289. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Felectronics8111289">10.3390/electronics8111289</a></span>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://hdl.handle.net/1822%2F62521">1822/62521</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2079-9292">2079-9292</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Electronics&rft.atitle=Machine+Learning+in+Resource-Scarce+Embedded+Systems%2C+FPGAs%2C+and+End-Devices%3A+A+Survey&rft.volume=8&rft.issue=11&rft.pages=1289&rft.date=2019-11-05&rft_id=info%3Ahdl%2F1822%2F62521&rft.issn=2079-9292&rft_id=info%3Adoi%2F10.3390%2Felectronics8111289&rft.aulast=Branco&rft.aufirst=S%C3%A9rgio&rft.au=Ferreira%2C+Andr%C3%A9+G.&rft.au=Cabral%2C+Jorge&rft_id=https%3A%2F%2Fdoi.org%2F10.3390%252Felectronics8111289&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></span> </li> </ol></div> <div class="mw-heading mw-heading2"><h2 id="Sources">Sources</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=54" title="Edit section: Sources"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <ul><li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDomingos2015" class="citation book cs1"><a href="/wiki/Pedro_Domingos" title="Pedro Domingos">Domingos, Pedro</a> (September 22, 2015). <i>The Master Algorithm: How the Quest for the Ultimate Learning Machine Will Remake Our World</i>. Basic Books. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0465065707" title="Special:BookSources/978-0465065707"><bdi>978-0465065707</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=The+Master+Algorithm%3A+How+the+Quest+for+the+Ultimate+Learning+Machine+Will+Remake+Our+World&rft.pub=Basic+Books&rft.date=2015-09-22&rft.isbn=978-0465065707&rft.aulast=Domingos&rft.aufirst=Pedro&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFNilsson1998" class="citation book cs1"><a href="/wiki/Nils_Nilsson_(researcher)" class="mw-redirect" title="Nils Nilsson (researcher)">Nilsson, Nils</a> (1998). <span class="id-lock-registration" title="Free registration required"><a rel="nofollow" class="external text" href="https://archive.org/details/artificialintell0000nils"><i>Artificial Intelligence: A New Synthesis</i></a></span>. Morgan Kaufmann. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-55860-467-4" title="Special:BookSources/978-1-55860-467-4"><bdi>978-1-55860-467-4</bdi></a>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20200726131654/https://archive.org/details/artificialintell0000nils">Archived</a> from the original on 26 July 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">18 November</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Artificial+Intelligence%3A+A+New+Synthesis&rft.pub=Morgan+Kaufmann&rft.date=1998&rft.isbn=978-1-55860-467-4&rft.aulast=Nilsson&rft.aufirst=Nils&rft_id=https%3A%2F%2Farchive.org%2Fdetails%2Fartificialintell0000nils&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFPooleMackworthGoebel1998" class="citation book cs1">Poole, David; <a href="/wiki/Alan_Mackworth" title="Alan Mackworth">Mackworth, Alan</a>; Goebel, Randy (1998). <a rel="nofollow" class="external text" href="https://archive.org/details/computationalint00pool"><i>Computational Intelligence: A Logical Approach</i></a>. New York: Oxford University Press. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-19-510270-3" title="Special:BookSources/978-0-19-510270-3"><bdi>978-0-19-510270-3</bdi></a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20200726131436/https://archive.org/details/computationalint00pool">Archived</a> from the original on 26 July 2020<span class="reference-accessdate">. Retrieved <span class="nowrap">22 August</span> 2020</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Computational+Intelligence%3A+A+Logical+Approach&rft.place=New+York&rft.pub=Oxford+University+Press&rft.date=1998&rft.isbn=978-0-19-510270-3&rft.aulast=Poole&rft.aufirst=David&rft.au=Mackworth%2C+Alan&rft.au=Goebel%2C+Randy&rft_id=https%3A%2F%2Farchive.org%2Fdetails%2Fcomputationalint00pool&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRussellNorvig2003" class="citation cs2"><a href="/wiki/Stuart_J._Russell" title="Stuart J. 
Russell">Russell, Stuart J.</a>; <a href="/wiki/Peter_Norvig" title="Peter Norvig">Norvig, Peter</a> (2003), <a rel="nofollow" class="external text" href="http://aima.cs.berkeley.edu/"><i>Artificial Intelligence: A Modern Approach</i></a> (2nd ed.), Upper Saddle River, New Jersey: Prentice Hall, <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-13-790395-2" title="Special:BookSources/0-13-790395-2"><bdi>0-13-790395-2</bdi></a></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Artificial+Intelligence%3A+A+Modern+Approach&rft.place=Upper+Saddle+River%2C+New+Jersey&rft.edition=2nd&rft.pub=Prentice+Hall&rft.date=2003&rft.isbn=0-13-790395-2&rft.aulast=Russell&rft.aufirst=Stuart+J.&rft.au=Norvig%2C+Peter&rft_id=http%3A%2F%2Faima.cs.berkeley.edu%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AMachine+learning" class="Z3988"></span>.</li></ul> <div class="mw-heading mw-heading2"><h2 id="Further_reading">Further reading</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=55" title="Edit section: Further reading"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1239549316">.mw-parser-output .refbegin{margin-bottom:0.5em}.mw-parser-output .refbegin-hanging-indents>ul{margin-left:0}.mw-parser-output .refbegin-hanging-indents>ul>li{margin-left:0;padding-left:3.2em;text-indent:-3.2em}.mw-parser-output .refbegin-hanging-indents ul,.mw-parser-output .refbegin-hanging-indents ul li{list-style:none}@media(max-width:720px){.mw-parser-output .refbegin-hanging-indents>ul>li{padding-left:1.6em;text-indent:-1.6em}}.mw-parser-output .refbegin-columns{margin-top:0.3em}.mw-parser-output .refbegin-columns ul{margin-top:0}.mw-parser-output .refbegin-columns li{page-break-inside:avoid;break-inside:avoid-column}@media screen{.mw-parser-output .refbegin{font-size:90%}}</style><div class="refbegin refbegin-columns references-column-width" style="column-width: 30em"> <ul><li>Nils J. Nilsson, <i><a rel="nofollow" class="external text" href="https://ai.stanford.edu/people/nilsson/mlbook.html">Introduction to Machine Learning</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20190816182600/http://ai.stanford.edu/people/nilsson/mlbook.html">Archived</a> 2019-08-16 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a></i>.</li> <li><a href="/wiki/Trevor_Hastie" title="Trevor Hastie">Trevor Hastie</a>, <a href="/wiki/Robert_Tibshirani" title="Robert Tibshirani">Robert Tibshirani</a> and <a href="/wiki/Jerome_H._Friedman" title="Jerome H. Friedman">Jerome H. Friedman</a> (2001). <i><a rel="nofollow" class="external text" href="https://web.stanford.edu/~hastie/ElemStatLearn/">The Elements of Statistical Learning</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20131027220938/http://www-stat.stanford.edu/%7Etibs/ElemStatLearn//">Archived</a> 2013-10-27 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a></i>, Springer. 
<link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-387-95284-5" title="Special:BookSources/0-387-95284-5">0-387-95284-5</a>.</li> <li><a href="/wiki/Pedro_Domingos" title="Pedro Domingos">Pedro Domingos</a> (September 2015), <i><a href="/wiki/The_Master_Algorithm" title="The Master Algorithm">The Master Algorithm</a></i>, Basic Books, <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-465-06570-7" title="Special:BookSources/978-0-465-06570-7">978-0-465-06570-7</a></li> <li>Ian H. Witten and Eibe Frank (2011). <i>Data Mining: Practical machine learning tools and techniques</i> Morgan Kaufmann, 664pp., <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-12-374856-0" title="Special:BookSources/978-0-12-374856-0">978-0-12-374856-0</a>.</li> <li>Ethem Alpaydin (2004). <i>Introduction to Machine Learning</i>, MIT Press, <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-262-01243-0" title="Special:BookSources/978-0-262-01243-0">978-0-262-01243-0</a>.</li> <li><a href="/wiki/David_J._C._MacKay" title="David J. C. MacKay">David J. C. MacKay</a>. <i><a rel="nofollow" class="external text" href="http://www.inference.phy.cam.ac.uk/mackay/itila/book.html">Information Theory, Inference, and Learning Algorithms</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20160217105359/http://www.inference.phy.cam.ac.uk/mackay/itila/book.html">Archived</a> 2016-02-17 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a></i> Cambridge: Cambridge University Press, 2003. <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-521-64298-1" title="Special:BookSources/0-521-64298-1">0-521-64298-1</a></li> <li><a href="/wiki/Richard_O._Duda" title="Richard O. Duda">Richard O. Duda</a>, <a href="/wiki/Peter_E._Hart" title="Peter E. Hart">Peter E. Hart</a>, David G. Stork (2001) <i>Pattern classification</i> (2nd edition), Wiley, New York, <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-471-05669-3" title="Special:BookSources/0-471-05669-3">0-471-05669-3</a>.</li> <li><a href="/wiki/Christopher_Bishop" title="Christopher Bishop">Christopher Bishop</a> (1995). <i>Neural Networks for Pattern Recognition</i>, Oxford University Press. <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0-19-853864-2" title="Special:BookSources/0-19-853864-2">0-19-853864-2</a>.</li> <li>Stuart Russell & Peter Norvig, (2009). 
<i><a rel="nofollow" class="external text" href="http://aima.cs.berkeley.edu/">Artificial Intelligence – A Modern Approach</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20110228023805/http://aima.cs.berkeley.edu/">Archived</a> 2011-02-28 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a></i>. Pearson, <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9789332543515" title="Special:BookSources/9789332543515">9789332543515</a>.</li> <li><a href="/wiki/Ray_Solomonoff" title="Ray Solomonoff">Ray Solomonoff</a>, <i>An Inductive Inference Machine</i>, IRE Convention Record, Section on Information Theory, Part 2, pp., 56–62, 1957.</li> <li><a href="/wiki/Ray_Solomonoff" title="Ray Solomonoff">Ray Solomonoff</a>, <i><a rel="nofollow" class="external text" href="http://world.std.com/~rjs/indinf56.pdf">An Inductive Inference Machine</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20110426161749/http://world.std.com/~rjs/indinf56.pdf">Archived</a> 2011-04-26 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a></i> A privately circulated report from the 1956 <a href="/wiki/Dartmouth_workshop" title="Dartmouth workshop">Dartmouth Summer Research Conference on AI</a>.</li> <li>Kevin P. Murphy (2021). <i><a rel="nofollow" class="external text" href="https://probml.github.io/pml-book/book1.html">Probabilistic Machine Learning: An Introduction</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210411153246/https://probml.github.io/pml-book/book1.html">Archived</a> 2021-04-11 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a></i>, MIT Press.</li></ul> </div> <div class="mw-heading mw-heading2"><h2 id="External_links">External links</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Machine_learning&action=edit&section=56" title="Edit section: External links"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1235681985">.mw-parser-output .side-box{margin:4px 0;box-sizing:border-box;border:1px solid #aaa;font-size:88%;line-height:1.25em;background-color:var(--background-color-interactive-subtle,#f8f9fa);display:flow-root}.mw-parser-output .side-box-abovebelow,.mw-parser-output .side-box-text{padding:0.25em 0.9em}.mw-parser-output .side-box-image{padding:2px 0 2px 0.9em;text-align:center}.mw-parser-output .side-box-imageright{padding:2px 0.9em 2px 0;text-align:center}@media(min-width:500px){.mw-parser-output .side-box-flex{display:flex;align-items:center}.mw-parser-output .side-box-text{flex:1;min-width:0}}@media(min-width:720px){.mw-parser-output .side-box{width:238px}.mw-parser-output .side-box-right{clear:right;float:right;margin-left:1em}.mw-parser-output .side-box-left{margin-right:1em}}</style><style data-mw-deduplicate="TemplateStyles:r1237033735">@media print{body.ns-0 .mw-parser-output .sistersitebox{display:none!important}}@media screen{html.skin-theme-clientpref-night .mw-parser-output .sistersitebox img[src*="Wiktionary-logo-en-v2.svg"]{background-color:white}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .sistersitebox 
img[src*="Wiktionary-logo-en-v2.svg"]{background-color:white}}</style><div class="side-box side-box-right plainlinks sistersitebox"><style data-mw-deduplicate="TemplateStyles:r1126788409">.mw-parser-output .plainlist ol,.mw-parser-output .plainlist ul{line-height:inherit;list-style:none;margin:0;padding:0}.mw-parser-output .plainlist ol li,.mw-parser-output .plainlist ul li{margin-bottom:0}</style> <div class="side-box-flex"> <div class="side-box-image"><span class="noviewer" typeof="mw:File"><span><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/4/4a/Commons-logo.svg/30px-Commons-logo.svg.png" decoding="async" width="30" height="40" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/4/4a/Commons-logo.svg/45px-Commons-logo.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/4/4a/Commons-logo.svg/59px-Commons-logo.svg.png 2x" data-file-width="1024" data-file-height="1376" /></span></span></div> <div class="side-box-text plainlist">Wikimedia Commons has media related to <span style="font-weight: bold; font-style: italic;"><a href="https://commons.wikimedia.org/wiki/Category:Machine_learning" class="extiw" title="commons:Category:Machine learning">Machine learning</a></span>.</div></div> </div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1235681985"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1237033735"><div class="side-box side-box-right plainlinks sistersitebox"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1126788409"> <div class="side-box-flex"> <div class="side-box-image"><span class="noviewer" typeof="mw:File"><span><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/f/fa/Wikiquote-logo.svg/34px-Wikiquote-logo.svg.png" decoding="async" width="34" height="40" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/f/fa/Wikiquote-logo.svg/51px-Wikiquote-logo.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/f/fa/Wikiquote-logo.svg/68px-Wikiquote-logo.svg.png 2x" data-file-width="300" data-file-height="355" /></span></span></div> <div class="side-box-text plainlist">Wikiquote has quotations related to <i><b><a href="https://en.wikiquote.org/wiki/Special:Search/Machine_learning" class="extiw" title="q:Special:Search/Machine learning">Machine learning</a></b></i>.</div></div> </div> <ul><li><a rel="nofollow" class="external text" href="https://web.archive.org/web/20171230081341/http://machinelearning.org/">International Machine Learning Society</a></li> <li><a rel="nofollow" class="external text" href="https://mloss.org/">mloss</a> is an academic database of open-source machine learning software.</li></ul> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><style data-mw-deduplicate="TemplateStyles:r1236075235">.mw-parser-output .navbox{box-sizing:border-box;border:1px solid #a2a9b1;width:100%;clear:both;font-size:88%;text-align:center;padding:1px;margin:1em auto 0}.mw-parser-output .navbox .navbox{margin-top:0}.mw-parser-output .navbox+.navbox,.mw-parser-output .navbox+.navbox-styles+.navbox{margin-top:-1px}.mw-parser-output .navbox-inner,.mw-parser-output .navbox-subgroup{width:100%}.mw-parser-output .navbox-group,.mw-parser-output .navbox-title,.mw-parser-output .navbox-abovebelow{padding:0.25em 1em;line-height:1.5em;text-align:center}.mw-parser-output .navbox-group{white-space:nowrap;text-align:right}.mw-parser-output .navbox,.mw-parser-output 
href="/wiki/Photograph_manipulation" title="Photograph manipulation">Photograph manipulation</a></li> <li><a href="/wiki/Graphics_processing_unit" title="Graphics processing unit">Graphics processing unit</a></li> <li><a href="/wiki/Mixed_reality" title="Mixed reality">Mixed reality</a></li> <li><a href="/wiki/Virtual_reality" title="Virtual reality">Virtual reality</a></li> <li><a href="/wiki/Image_compression" title="Image compression">Image compression</a></li> <li><a href="/wiki/Solid_modeling" title="Solid modeling">Solid modeling</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Applied computing</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Quantum_Computing" class="mw-redirect" title="Quantum Computing">Quantum Computing</a></li> <li><a href="/wiki/E-commerce" title="E-commerce">E-commerce</a></li> <li><a href="/wiki/Enterprise_software" title="Enterprise software">Enterprise software</a></li> <li><a href="/wiki/Computational_mathematics" title="Computational mathematics">Computational mathematics</a></li> <li><a href="/wiki/Computational_physics" title="Computational physics">Computational physics</a></li> <li><a href="/wiki/Computational_chemistry" title="Computational chemistry">Computational chemistry</a></li> <li><a href="/wiki/Computational_biology" title="Computational biology">Computational biology</a></li> <li><a href="/wiki/Computational_social_science" title="Computational social science">Computational social science</a></li> <li><a href="/wiki/Computational_engineering" title="Computational engineering">Computational engineering</a></li> <li><a href="/wiki/Template:Differentiable_computing" title="Template:Differentiable computing">Differentiable computing</a></li> <li><a href="/wiki/Health_informatics" title="Health informatics">Computational healthcare</a></li> <li><a href="/wiki/Digital_art" title="Digital art">Digital art</a></li> <li><a href="/wiki/Electronic_publishing" title="Electronic publishing">Electronic publishing</a></li> <li><a href="/wiki/Cyberwarfare" title="Cyberwarfare">Cyberwarfare</a></li> <li><a href="/wiki/Electronic_voting" title="Electronic voting">Electronic voting</a></li> <li><a href="/wiki/Video_game" title="Video game">Video games</a></li> <li><a href="/wiki/Word_processor" title="Word processor">Word processing</a></li> <li><a href="/wiki/Operations_research" title="Operations research">Operations research</a></li> <li><a href="/wiki/Educational_technology" title="Educational technology">Educational technology</a></li> <li><a href="/wiki/Document_management_system" title="Document management system">Document management</a></li></ul> </div></td></tr><tr><td class="navbox-abovebelow" colspan="2"><div> <ul><li><span class="noviewer" typeof="mw:File"><span title="Category"><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/16px-Symbol_category_class.svg.png" decoding="async" width="16" height="16" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/23px-Symbol_category_class.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/31px-Symbol_category_class.svg.png 2x" data-file-width="180" data-file-height="185" /></span></span> <a href="/wiki/Category:Computer_science" title="Category:Computer science">Category</a></li> <li><span class="noviewer" typeof="mw:File"><span 
title="Outline"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/4/41/Global_thinking.svg/10px-Global_thinking.svg.png" decoding="async" width="10" height="16" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/41/Global_thinking.svg/15px-Global_thinking.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/4/41/Global_thinking.svg/21px-Global_thinking.svg.png 2x" data-file-width="130" data-file-height="200" /></span></span> <a href="/wiki/Outline_of_computer_science" title="Outline of computer science">Outline</a></li> <li><span class="noviewer" typeof="mw:File"><span><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/e/e0/Symbol_question.svg/16px-Symbol_question.svg.png" decoding="async" width="16" height="16" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/e/e0/Symbol_question.svg/23px-Symbol_question.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/e/e0/Symbol_question.svg/31px-Symbol_question.svg.png 2x" data-file-width="180" data-file-height="185" /></span></span> <a href="/wiki/Template:Glossaries_of_computers" title="Template:Glossaries of computers">Glossaries</a></li></ul> </div></td></tr></tbody></table></div> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236075235"><style data-mw-deduplicate="TemplateStyles:r1038841319">.mw-parser-output .tooltip-dotted{border-bottom:1px dotted;cursor:help}</style><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1038841319"></div><div role="navigation" class="navbox authority-control" aria-label="Navbox" style="padding:3px"><table class="nowraplinks hlist navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Help:Authority_control" title="Help:Authority control">Authority control databases</a>: National <span class="mw-valign-text-top noprint" typeof="mw:File/Frameless"><a href="https://www.wikidata.org/wiki/Q2539#identifiers" title="Edit this at Wikidata"><img alt="Edit this at Wikidata" src="//upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/10px-OOjs_UI_icon_edit-ltr-progressive.svg.png" decoding="async" width="10" height="10" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/15px-OOjs_UI_icon_edit-ltr-progressive.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/20px-OOjs_UI_icon_edit-ltr-progressive.svg.png 2x" data-file-width="20" data-file-height="20" /></a></span></th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"><ul><li><span class="uid"><a rel="nofollow" class="external text" href="https://d-nb.info/gnd/4193754-5">Germany</a></span></li><li><span class="uid"><span class="rt-commentedText tooltip tooltip-dotted" title="Machine learning"><a rel="nofollow" class="external text" href="https://id.loc.gov/authorities/sh85079324">United States</a></span></span></li><li><span class="uid"><a rel="nofollow" class="external text" href="https://id.ndl.go.jp/auth/ndlna/001210569">Japan</a></span></li><li><span class="uid"><span class="rt-commentedText tooltip tooltip-dotted" title="strojové učení"><a rel="nofollow" class="external text" 
href="https://aleph.nkp.cz/F/?func=find-c&local_base=aut&ccl_term=ica=ph126143&CON_LNG=ENG">Czech Republic</a></span></span></li><li><span class="uid"><a rel="nofollow" class="external text" href="http://olduli.nli.org.il/F/?func=find-b&local_base=NLX10&find_code=UID&request=987007541156405171">Israel</a></span></li></ul></div></td></tr></tbody></table></div> <!-- NewPP limit report Parsed by mw‐api‐int.codfw.main‐849f99967d‐t5jlv Cached time: 20241123193706 Cache expiry: 2592000 Reduced expiry: false Complications: [vary‐revision‐sha1, show‐toc] CPU time usage: 2.199 seconds Real time usage: 3.165 seconds Preprocessor visited node count: 15553/1000000 Post‐expand include size: 514678/2097152 bytes Template argument size: 8360/2097152 bytes Highest expansion depth: 21/100 Expensive parser function count: 36/500 Unstrip recursion depth: 1/20 Unstrip post‐expand size: 681572/5000000 bytes Lua time usage: 1.414/10.000 seconds Lua memory usage: 29417872/52428800 bytes Lua Profile: ? 240 ms 17.6% MediaWiki\Extension\Scribunto\Engines\LuaSandbox\LuaSandboxCallback::callParserFunction 200 ms 14.7% MediaWiki\Extension\Scribunto\Engines\LuaSandbox\LuaSandboxCallback::match 120 ms 8.8% MediaWiki\Extension\Scribunto\Engines\LuaSandbox\LuaSandboxCallback::sub 80 ms 5.9% MediaWiki\Extension\Scribunto\Engines\LuaSandbox\LuaSandboxCallback::find 80 ms 5.9% dataWrapper <mw.lua:672> 80 ms 5.9% MediaWiki\Extension\Scribunto\Engines\LuaSandbox\LuaSandboxCallback::gsub 80 ms 5.9% citation0 <Module:Citation/CS1:2614> 40 ms 2.9% match 40 ms 2.9% MediaWiki\Extension\Scribunto\Engines\LuaSandbox\LuaSandboxCallback::plain 40 ms 2.9% [others] 360 ms 26.5% Number of Wikibase entities loaded: 1/400 --> <!-- Transclusion expansion time report (%,ms,calls,template) 100.00% 2761.017 1 -total 33.01% 911.433 1 Template:Reflist 16.04% 442.930 1 Template:Excerpt 11.41% 315.055 54 Template:Cite_journal 5.14% 141.904 26 Template:Cite_book 4.69% 129.484 33 Template:Cite_web 4.67% 128.839 3 Template:Anchor 4.64% 128.069 2 Template:Sfn 4.63% 127.938 1 Template:Machine_learning_bar 4.60% 127.099 4 Template:Annotated_link --> <!-- Saved in parser cache with key enwiki:pcache:233488:|#|:idhash:canonical and timestamp 20241123193706 and revision id 1259171820. 
Retrieved from https://en.wikipedia.org/w/index.php?title=Machine_learning&oldid=1259171820
Categories: Machine learning, Cybernetics, Learning
This page was last edited on 23 November 2024, at 19:36 (UTC). Text is available under the Creative Commons Attribution-ShareAlike 4.0 License; additional terms may apply.