CINXE.COM
ImageNet - Wikipedia
<!DOCTYPE html> <html class="client-nojs vector-feature-language-in-header-enabled vector-feature-language-in-main-page-header-disabled vector-feature-sticky-header-disabled vector-feature-page-tools-pinned-disabled vector-feature-toc-pinned-clientpref-1 vector-feature-main-menu-pinned-disabled vector-feature-limited-width-clientpref-1 vector-feature-limited-width-content-enabled vector-feature-custom-font-size-clientpref-1 vector-feature-appearance-pinned-clientpref-1 vector-feature-night-mode-enabled skin-theme-clientpref-day vector-toc-available" lang="en" dir="ltr"> <head> <meta charset="UTF-8"> <title>ImageNet - Wikipedia</title> <script>(function(){var className="client-js vector-feature-language-in-header-enabled vector-feature-language-in-main-page-header-disabled vector-feature-sticky-header-disabled vector-feature-page-tools-pinned-disabled vector-feature-toc-pinned-clientpref-1 vector-feature-main-menu-pinned-disabled vector-feature-limited-width-clientpref-1 vector-feature-limited-width-content-enabled vector-feature-custom-font-size-clientpref-1 vector-feature-appearance-pinned-clientpref-1 vector-feature-night-mode-enabled skin-theme-clientpref-day vector-toc-available";var cookie=document.cookie.match(/(?:^|; )enwikimwclientpreferences=([^;]+)/);if(cookie){cookie[1].split('%2C').forEach(function(pref){className=className.replace(new RegExp('(^| )'+pref.replace(/-clientpref-\w+$|[^\w-]+/g,'')+'-clientpref-\\w+( |$)'),'$1'+pref+'$2');});}document.documentElement.className=className;}());RLCONF={"wgBreakFrames":false,"wgSeparatorTransformTable":["",""],"wgDigitTransformTable":["",""],"wgDefaultDateFormat":"dmy", 
"wgMonthNames":["","January","February","March","April","May","June","July","August","September","October","November","December"],"wgRequestId":"595ac426-4a45-4765-ab65-2aef3770a6c2","wgCanonicalNamespace":"","wgCanonicalSpecialPageName":false,"wgNamespaceNumber":0,"wgPageName":"ImageNet","wgTitle":"ImageNet","wgCurRevisionId":1259151460,"wgRevisionId":1259151460,"wgArticleId":50896194,"wgIsArticle":true,"wgIsRedirect":false,"wgAction":"view","wgUserName":null,"wgUserGroups":["*"],"wgCategories":["CS1 errors: missing periodical","Articles with short description","Short description matches Wikidata","Use dmy dates from September 2019","Wikipedia articles needing clarification from December 2023","Wikipedia articles needing clarification from August 2019","Official website different in Wikidata and Wikipedia","Computer science competitions","2009 in computing","Object recognition and categorization","Databases","Datasets in computer vision"],"wgPageViewLanguage":"en", "wgPageContentLanguage":"en","wgPageContentModel":"wikitext","wgRelevantPageName":"ImageNet","wgRelevantArticleId":50896194,"wgIsProbablyEditable":true,"wgRelevantPageIsProbablyEditable":true,"wgRestrictionEdit":[],"wgRestrictionMove":[],"wgNoticeProject":"wikipedia","wgCiteReferencePreviewsActive":false,"wgFlaggedRevsParams":{"tags":{"status":{"levels":1}}},"wgMediaViewerOnClick":true,"wgMediaViewerEnabledByDefault":true,"wgPopupsFlags":0,"wgVisualEditor":{"pageLanguageCode":"en","pageLanguageDir":"ltr","pageVariantFallbacks":"en"},"wgMFDisplayWikibaseDescriptions":{"search":true,"watchlist":true,"tagline":false,"nearby":true},"wgWMESchemaEditAttemptStepOversample":false,"wgWMEPageLength":30000,"wgRelatedArticlesCompat":[],"wgCentralAuthMobileDomain":false,"wgEditSubmitButtonLabelPublish":true,"wgULSPosition":"interlanguage","wgULSisCompactLinksEnabled":false,"wgVector2022LanguageInHeader":true,"wgULSisLanguageSelectorEmpty":false,"wgWikibaseItemId":"Q24901201", 
"wgCheckUserClientHintsHeadersJsApi":["brands","architecture","bitness","fullVersionList","mobile","model","platform","platformVersion"],"GEHomepageSuggestedEditsEnableTopics":true,"wgGETopicsMatchModeEnabled":false,"wgGEStructuredTaskRejectionReasonTextInputEnabled":false,"wgGELevelingUpEnabledForUser":false};RLSTATE={"ext.globalCssJs.user.styles":"ready","site.styles":"ready","user.styles":"ready","ext.globalCssJs.user":"ready","user":"ready","user.options":"loading","ext.cite.styles":"ready","skins.vector.search.codex.styles":"ready","skins.vector.styles":"ready","skins.vector.icons":"ready","jquery.makeCollapsible.styles":"ready","ext.wikimediamessages.styles":"ready","ext.visualEditor.desktopArticleTarget.noscript":"ready","ext.uls.interlanguage":"ready","wikibase.client.init":"ready","ext.wikimediaBadges":"ready"};RLPAGEMODULES=["ext.cite.ux-enhancements","mediawiki.page.media","site","mediawiki.page.ready","jquery.makeCollapsible","mediawiki.toc","skins.vector.js", "ext.centralNotice.geoIP","ext.centralNotice.startUp","ext.gadget.ReferenceTooltips","ext.gadget.switcher","ext.urlShortener.toolbar","ext.centralauth.centralautologin","mmv.bootstrap","ext.popups","ext.visualEditor.desktopArticleTarget.init","ext.visualEditor.targetLoader","ext.echo.centralauth","ext.eventLogging","ext.wikimediaEvents","ext.navigationTiming","ext.uls.interface","ext.cx.eventlogging.campaigns","ext.cx.uls.quick.actions","wikibase.client.vector-2022","ext.checkUser.clientHints","ext.quicksurveys.init","ext.growthExperiments.SuggestedEditSession","wikibase.sidebar.tracking"];</script> <script>(RLQ=window.RLQ||[]).push(function(){mw.loader.impl(function(){return["user.options@12s5i",function($,jQuery,require,module){mw.user.tokens.set({"patrolToken":"+\\","watchToken":"+\\","csrfToken":"+\\"}); }];});});</script> <link rel="stylesheet" 
href="/w/load.php?lang=en&modules=ext.cite.styles%7Cext.uls.interlanguage%7Cext.visualEditor.desktopArticleTarget.noscript%7Cext.wikimediaBadges%7Cext.wikimediamessages.styles%7Cjquery.makeCollapsible.styles%7Cskins.vector.icons%2Cstyles%7Cskins.vector.search.codex.styles%7Cwikibase.client.init&only=styles&skin=vector-2022"> <script async="" src="/w/load.php?lang=en&modules=startup&only=scripts&raw=1&skin=vector-2022"></script> <meta name="ResourceLoaderDynamicStyles" content=""> <link rel="stylesheet" href="/w/load.php?lang=en&modules=site.styles&only=styles&skin=vector-2022"> <meta name="generator" content="MediaWiki 1.44.0-wmf.4"> <meta name="referrer" content="origin"> <meta name="referrer" content="origin-when-cross-origin"> <meta name="robots" content="max-image-preview:standard"> <meta name="format-detection" content="telephone=no"> <meta name="viewport" content="width=1120"> <meta property="og:title" content="ImageNet - Wikipedia"> <meta property="og:type" content="website"> <link rel="preconnect" href="//upload.wikimedia.org"> <link rel="alternate" media="only screen and (max-width: 640px)" href="//en.m.wikipedia.org/wiki/ImageNet"> <link rel="alternate" type="application/x-wiki" title="Edit this page" href="/w/index.php?title=ImageNet&action=edit"> <link rel="apple-touch-icon" href="/static/apple-touch/wikipedia.png"> <link rel="icon" href="/static/favicon/wikipedia.ico"> <link rel="search" type="application/opensearchdescription+xml" href="/w/rest.php/v1/search" title="Wikipedia (en)"> <link rel="EditURI" type="application/rsd+xml" href="//en.wikipedia.org/w/api.php?action=rsd"> <link rel="canonical" href="https://en.wikipedia.org/wiki/ImageNet"> <link rel="license" href="https://creativecommons.org/licenses/by-sa/4.0/deed.en"> <link rel="alternate" type="application/atom+xml" title="Wikipedia Atom feed" href="/w/index.php?title=Special:RecentChanges&feed=atom"> <link rel="dns-prefetch" href="//meta.wikimedia.org" /> <link rel="dns-prefetch" 
href="//login.wikimedia.org"> </head> <body class="skin--responsive skin-vector skin-vector-search-vue mediawiki ltr sitedir-ltr mw-hide-empty-elt ns-0 ns-subject mw-editable page-ImageNet rootpage-ImageNet skin-vector-2022 action-view"><a class="mw-jump-link" href="#bodyContent">Jump to content</a> <div class="vector-header-container"> <header class="vector-header mw-header"> <div class="vector-header-start"> <nav class="vector-main-menu-landmark" aria-label="Site"> <div id="vector-main-menu-dropdown" class="vector-dropdown vector-main-menu-dropdown vector-button-flush-left vector-button-flush-right" > <input type="checkbox" id="vector-main-menu-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-main-menu-dropdown" class="vector-dropdown-checkbox " aria-label="Main menu" > <label id="vector-main-menu-dropdown-label" for="vector-main-menu-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-menu mw-ui-icon-wikimedia-menu"></span> <span class="vector-dropdown-label-text">Main menu</span> </label> <div class="vector-dropdown-content"> <div id="vector-main-menu-unpinned-container" class="vector-unpinned-container"> <div id="vector-main-menu" class="vector-main-menu vector-pinnable-element"> <div class="vector-pinnable-header vector-main-menu-pinnable-header vector-pinnable-header-unpinned" data-feature-name="main-menu-pinned" data-pinnable-element-id="vector-main-menu" data-pinned-container-id="vector-main-menu-pinned-container" data-unpinned-container-id="vector-main-menu-unpinned-container" > <div class="vector-pinnable-header-label">Main menu</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-main-menu.pin">move to sidebar</button> <button 
class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-main-menu.unpin">hide</button> </div> <div id="p-navigation" class="vector-menu mw-portlet mw-portlet-navigation" > <div class="vector-menu-heading"> Navigation </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="n-mainpage-description" class="mw-list-item"><a href="/wiki/Main_Page" title="Visit the main page [z]" accesskey="z"><span>Main page</span></a></li><li id="n-contents" class="mw-list-item"><a href="/wiki/Wikipedia:Contents" title="Guides to browsing Wikipedia"><span>Contents</span></a></li><li id="n-currentevents" class="mw-list-item"><a href="/wiki/Portal:Current_events" title="Articles related to current events"><span>Current events</span></a></li><li id="n-randompage" class="mw-list-item"><a href="/wiki/Special:Random" title="Visit a randomly selected article [x]" accesskey="x"><span>Random article</span></a></li><li id="n-aboutsite" class="mw-list-item"><a href="/wiki/Wikipedia:About" title="Learn about Wikipedia and how it works"><span>About Wikipedia</span></a></li><li id="n-contactpage" class="mw-list-item"><a href="//en.wikipedia.org/wiki/Wikipedia:Contact_us" title="How to contact Wikipedia"><span>Contact us</span></a></li> </ul> </div> </div> <div id="p-interaction" class="vector-menu mw-portlet mw-portlet-interaction" > <div class="vector-menu-heading"> Contribute </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="n-help" class="mw-list-item"><a href="/wiki/Help:Contents" title="Guidance on how to use and edit Wikipedia"><span>Help</span></a></li><li id="n-introduction" class="mw-list-item"><a href="/wiki/Help:Introduction" title="Learn how to edit Wikipedia"><span>Learn to edit</span></a></li><li id="n-portal" class="mw-list-item"><a href="/wiki/Wikipedia:Community_portal" title="The hub for editors"><span>Community portal</span></a></li><li 
id="n-recentchanges" class="mw-list-item"><a href="/wiki/Special:RecentChanges" title="A list of recent changes to Wikipedia [r]" accesskey="r"><span>Recent changes</span></a></li><li id="n-upload" class="mw-list-item"><a href="/wiki/Wikipedia:File_upload_wizard" title="Add images or other media for use on Wikipedia"><span>Upload file</span></a></li> </ul> </div> </div> </div> </div> </div> </div> </nav> <a href="/wiki/Main_Page" class="mw-logo"> <img class="mw-logo-icon" src="/static/images/icons/wikipedia.png" alt="" aria-hidden="true" height="50" width="50"> <span class="mw-logo-container skin-invert"> <img class="mw-logo-wordmark" alt="Wikipedia" src="/static/images/mobile/copyright/wikipedia-wordmark-en.svg" style="width: 7.5em; height: 1.125em;"> <img class="mw-logo-tagline" alt="The Free Encyclopedia" src="/static/images/mobile/copyright/wikipedia-tagline-en.svg" width="117" height="13" style="width: 7.3125em; height: 0.8125em;"> </span> </a> </div> <div class="vector-header-end"> <div id="p-search" role="search" class="vector-search-box-vue vector-search-box-collapses vector-search-box-show-thumbnail vector-search-box-auto-expand-width vector-search-box"> <a href="/wiki/Special:Search" class="cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only search-toggle" title="Search Wikipedia [f]" accesskey="f"><span class="vector-icon mw-ui-icon-search mw-ui-icon-wikimedia-search"></span> <span>Search</span> </a> <div class="vector-typeahead-search-container"> <div class="cdx-typeahead-search cdx-typeahead-search--show-thumbnail cdx-typeahead-search--auto-expand-width"> <form action="/w/index.php" id="searchform" class="cdx-search-input cdx-search-input--has-end-button"> <div id="simpleSearch" class="cdx-search-input__input-wrapper" data-search-loc="header-moved"> <div class="cdx-text-input cdx-text-input--has-start-icon"> <input class="cdx-text-input__input" type="search" name="search" 
placeholder="Search Wikipedia" aria-label="Search Wikipedia" autocapitalize="sentences" title="Search Wikipedia [f]" accesskey="f" id="searchInput" > <span class="cdx-text-input__icon cdx-text-input__start-icon"></span> </div> <input type="hidden" name="title" value="Special:Search"> </div> <button class="cdx-button cdx-search-input__end-button">Search</button> </form> </div> </div> </div> <nav class="vector-user-links vector-user-links-wide" aria-label="Personal tools"> <div class="vector-user-links-main"> <div id="p-vector-user-menu-preferences" class="vector-menu mw-portlet emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> <div id="p-vector-user-menu-userpage" class="vector-menu mw-portlet emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> <nav class="vector-appearance-landmark" aria-label="Appearance"> <div id="vector-appearance-dropdown" class="vector-dropdown " title="Change the appearance of the page's font size, width, and color" > <input type="checkbox" id="vector-appearance-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-appearance-dropdown" class="vector-dropdown-checkbox " aria-label="Appearance" > <label id="vector-appearance-dropdown-label" for="vector-appearance-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-appearance mw-ui-icon-wikimedia-appearance"></span> <span class="vector-dropdown-label-text">Appearance</span> </label> <div class="vector-dropdown-content"> <div id="vector-appearance-unpinned-container" class="vector-unpinned-container"> </div> </div> </div> </nav> <div id="p-vector-user-menu-notifications" class="vector-menu mw-portlet emptyPortlet" > <div class="vector-menu-content"> <ul 
class="vector-menu-content-list"> </ul> </div> </div> <div id="p-vector-user-menu-overflow" class="vector-menu mw-portlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-sitesupport-2" class="user-links-collapsible-item mw-list-item user-links-collapsible-item"><a data-mw="interface" href="https://donate.wikimedia.org/wiki/Special:FundraiserRedirector?utm_source=donate&utm_medium=sidebar&utm_campaign=C13_en.wikipedia.org&uselang=en" class=""><span>Donate</span></a> </li> <li id="pt-createaccount-2" class="user-links-collapsible-item mw-list-item user-links-collapsible-item"><a data-mw="interface" href="/w/index.php?title=Special:CreateAccount&returnto=ImageNet" title="You are encouraged to create an account and log in; however, it is not mandatory" class=""><span>Create account</span></a> </li> <li id="pt-login-2" class="user-links-collapsible-item mw-list-item user-links-collapsible-item"><a data-mw="interface" href="/w/index.php?title=Special:UserLogin&returnto=ImageNet" title="You're encouraged to log in; however, it's not mandatory. 
[o]" accesskey="o" class=""><span>Log in</span></a> </li> </ul> </div> </div> </div> <div id="vector-user-links-dropdown" class="vector-dropdown vector-user-menu vector-button-flush-right vector-user-menu-logged-out" title="Log in and more options" > <input type="checkbox" id="vector-user-links-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-user-links-dropdown" class="vector-dropdown-checkbox " aria-label="Personal tools" > <label id="vector-user-links-dropdown-label" for="vector-user-links-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-ellipsis mw-ui-icon-wikimedia-ellipsis"></span> <span class="vector-dropdown-label-text">Personal tools</span> </label> <div class="vector-dropdown-content"> <div id="p-personal" class="vector-menu mw-portlet mw-portlet-personal user-links-collapsible-item" title="User menu" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-sitesupport" class="user-links-collapsible-item mw-list-item"><a href="https://donate.wikimedia.org/wiki/Special:FundraiserRedirector?utm_source=donate&utm_medium=sidebar&utm_campaign=C13_en.wikipedia.org&uselang=en"><span>Donate</span></a></li><li id="pt-createaccount" class="user-links-collapsible-item mw-list-item"><a href="/w/index.php?title=Special:CreateAccount&returnto=ImageNet" title="You are encouraged to create an account and log in; however, it is not mandatory"><span class="vector-icon mw-ui-icon-userAdd mw-ui-icon-wikimedia-userAdd"></span> <span>Create account</span></a></li><li id="pt-login" class="user-links-collapsible-item mw-list-item"><a href="/w/index.php?title=Special:UserLogin&returnto=ImageNet" title="You're encouraged to log in; however, it's not mandatory. 
[o]" accesskey="o"><span class="vector-icon mw-ui-icon-logIn mw-ui-icon-wikimedia-logIn"></span> <span>Log in</span></a></li> </ul> </div> </div> <div id="p-user-menu-anon-editor" class="vector-menu mw-portlet mw-portlet-user-menu-anon-editor" > <div class="vector-menu-heading"> Pages for logged out editors <a href="/wiki/Help:Introduction" aria-label="Learn more about editing"><span>learn more</span></a> </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-anoncontribs" class="mw-list-item"><a href="/wiki/Special:MyContributions" title="A list of edits made from this IP address [y]" accesskey="y"><span>Contributions</span></a></li><li id="pt-anontalk" class="mw-list-item"><a href="/wiki/Special:MyTalk" title="Discussion about edits from this IP address [n]" accesskey="n"><span>Talk</span></a></li> </ul> </div> </div> </div> </div> </nav> </div> </header> </div> <div class="mw-page-container"> <div class="mw-page-container-inner"> <div class="vector-sitenotice-container"> <div id="siteNotice"><!-- CentralNotice --></div> </div> <div class="vector-column-start"> <div class="vector-main-menu-container"> <div id="mw-navigation"> <nav id="mw-panel" class="vector-main-menu-landmark" aria-label="Site"> <div id="vector-main-menu-pinned-container" class="vector-pinned-container"> </div> </nav> </div> </div> <div class="vector-sticky-pinned-container"> <nav id="mw-panel-toc" aria-label="Contents" data-event-name="ui.sidebar-toc" class="mw-table-of-contents-container vector-toc-landmark"> <div id="vector-toc-pinned-container" class="vector-pinned-container"> <div id="vector-toc" class="vector-toc vector-pinnable-element"> <div class="vector-pinnable-header vector-toc-pinnable-header vector-pinnable-header-pinned" data-feature-name="toc-pinned" data-pinnable-element-id="vector-toc" > <h2 class="vector-pinnable-header-label">Contents</h2> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" 
data-event-name="pinnable-header.vector-toc.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-toc.unpin">hide</button> </div> <ul class="vector-toc-contents" id="mw-panel-toc-list"> <li id="toc-mw-content-text" class="vector-toc-list-item vector-toc-level-1"> <a href="#" class="vector-toc-link"> <div class="vector-toc-text">(Top)</div> </a> </li> <li id="toc-History" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#History"> <div class="vector-toc-text"> <span class="vector-toc-numb">1</span> <span>History</span> </div> </a> <button aria-controls="toc-History-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle History subsection</span> </button> <ul id="toc-History-sublist" class="vector-toc-list"> <li id="toc-Significance_for_deep_learning" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Significance_for_deep_learning"> <div class="vector-toc-text"> <span class="vector-toc-numb">1.1</span> <span>Significance for deep learning</span> </div> </a> <ul id="toc-Significance_for_deep_learning-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Dataset" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#Dataset"> <div class="vector-toc-text"> <span class="vector-toc-numb">2</span> <span>Dataset</span> </div> </a> <button aria-controls="toc-Dataset-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Dataset subsection</span> </button> <ul id="toc-Dataset-sublist" class="vector-toc-list"> <li id="toc-Categories" class="vector-toc-list-item vector-toc-level-2"> 
<a class="vector-toc-link" href="#Categories"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.1</span> <span>Categories</span> </div> </a> <ul id="toc-Categories-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Image_format" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Image_format"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.2</span> <span>Image format</span> </div> </a> <ul id="toc-Image_format-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Labels_and_annotations" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Labels_and_annotations"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.3</span> <span>Labels and annotations</span> </div> </a> <ul id="toc-Labels_and_annotations-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-ImageNet-21K" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#ImageNet-21K"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.4</span> <span>ImageNet-21K</span> </div> </a> <ul id="toc-ImageNet-21K-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-ImageNet-1K" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#ImageNet-1K"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.5</span> <span>ImageNet-1K</span> </div> </a> <ul id="toc-ImageNet-1K-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Later_developments" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Later_developments"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.6</span> <span>Later developments</span> </div> </a> <ul id="toc-Later_developments-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-History_of_the_ImageNet_challenge" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" 
href="#History_of_the_ImageNet_challenge"> <div class="vector-toc-text"> <span class="vector-toc-numb">3</span> <span>History of the ImageNet challenge</span> </div> </a> <ul id="toc-History_of_the_ImageNet_challenge-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Bias_in_ImageNet" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#Bias_in_ImageNet"> <div class="vector-toc-text"> <span class="vector-toc-numb">4</span> <span>Bias in ImageNet</span> </div> </a> <ul id="toc-Bias_in_ImageNet-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-See_also" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#See_also"> <div class="vector-toc-text"> <span class="vector-toc-numb">5</span> <span>See also</span> </div> </a> <ul id="toc-See_also-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-References" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#References"> <div class="vector-toc-text"> <span class="vector-toc-numb">6</span> <span>References</span> </div> </a> <ul id="toc-References-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-External_links" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#External_links"> <div class="vector-toc-text"> <span class="vector-toc-numb">7</span> <span>External links</span> </div> </a> <ul id="toc-External_links-sublist" class="vector-toc-list"> </ul> </li> </ul> </div> </div> </nav> </div> </div> <div class="mw-content-container"> <main id="content" class="mw-body"> <header class="mw-body-header vector-page-titlebar"> <nav aria-label="Contents" class="vector-toc-landmark"> <div id="vector-page-titlebar-toc" class="vector-dropdown vector-page-titlebar-toc vector-button-flush-left" > <input type="checkbox" id="vector-page-titlebar-toc-checkbox" role="button" 
aria-haspopup="true" data-event-name="ui.dropdown-vector-page-titlebar-toc" class="vector-dropdown-checkbox " aria-label="Toggle the table of contents" > <label id="vector-page-titlebar-toc-label" for="vector-page-titlebar-toc-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-listBullet mw-ui-icon-wikimedia-listBullet"></span> <span class="vector-dropdown-label-text">Toggle the table of contents</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-titlebar-toc-unpinned-container" class="vector-unpinned-container"> </div> </div> </div> </nav> <h1 id="firstHeading" class="firstHeading mw-first-heading"><span class="mw-page-title-main">ImageNet</span></h1> <div id="p-lang-btn" class="vector-dropdown mw-portlet mw-portlet-lang" > <input type="checkbox" id="p-lang-btn-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-p-lang-btn" class="vector-dropdown-checkbox mw-interlanguage-selector" aria-label="Go to an article in another language. 
Available in 14 languages" > <label id="p-lang-btn-label" for="p-lang-btn-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--action-progressive mw-portlet-lang-heading-14" aria-hidden="true" ><span class="vector-icon mw-ui-icon-language-progressive mw-ui-icon-wikimedia-language-progressive"></span> <span class="vector-dropdown-label-text">14 languages</span> </label> <div class="vector-dropdown-content"> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li class="interlanguage-link interwiki-ca mw-list-item"><a href="https://ca.wikipedia.org/wiki/ImageNet" title="ImageNet – Catalan" lang="ca" hreflang="ca" data-title="ImageNet" data-language-autonym="Català" data-language-local-name="Catalan" class="interlanguage-link-target"><span>Català</span></a></li><li class="interlanguage-link interwiki-de mw-list-item"><a href="https://de.wikipedia.org/wiki/ImageNet" title="ImageNet – German" lang="de" hreflang="de" data-title="ImageNet" data-language-autonym="Deutsch" data-language-local-name="German" class="interlanguage-link-target"><span>Deutsch</span></a></li><li class="interlanguage-link interwiki-fa mw-list-item"><a href="https://fa.wikipedia.org/wiki/%D8%A7%DB%8C%D9%85%DB%8C%D8%AC%E2%80%8C%D9%86%D8%AA" title="ایمیجنت – Persian" lang="fa" hreflang="fa" data-title="ایمیجنت" data-language-autonym="فارسی" data-language-local-name="Persian" class="interlanguage-link-target"><span>فارسی</span></a></li><li class="interlanguage-link interwiki-fr mw-list-item"><a href="https://fr.wikipedia.org/wiki/ImageNet" title="ImageNet – French" lang="fr" hreflang="fr" data-title="ImageNet" data-language-autonym="Français" data-language-local-name="French" class="interlanguage-link-target"><span>Français</span></a></li><li class="interlanguage-link interwiki-ko mw-list-item"><a href="https://ko.wikipedia.org/wiki/%EC%9D%B4%EB%AF%B8%EC%A7%80%EB%84%B7" title="이미지넷 – Korean" 
lang="ko" hreflang="ko" data-title="이미지넷" data-language-autonym="한국어" data-language-local-name="Korean" class="interlanguage-link-target"><span>한국어</span></a></li><li class="interlanguage-link interwiki-id mw-list-item"><a href="https://id.wikipedia.org/wiki/ImageNet" title="ImageNet – Indonesian" lang="id" hreflang="id" data-title="ImageNet" data-language-autonym="Bahasa Indonesia" data-language-local-name="Indonesian" class="interlanguage-link-target"><span>Bahasa Indonesia</span></a></li><li class="interlanguage-link interwiki-it mw-list-item"><a href="https://it.wikipedia.org/wiki/ImageNet" title="ImageNet – Italian" lang="it" hreflang="it" data-title="ImageNet" data-language-autonym="Italiano" data-language-local-name="Italian" class="interlanguage-link-target"><span>Italiano</span></a></li><li class="interlanguage-link interwiki-he mw-list-item"><a href="https://he.wikipedia.org/wiki/ImageNet" title="ImageNet – Hebrew" lang="he" hreflang="he" data-title="ImageNet" data-language-autonym="עברית" data-language-local-name="Hebrew" class="interlanguage-link-target"><span>עברית</span></a></li><li class="interlanguage-link interwiki-ja badge-Q17437798 badge-goodarticle mw-list-item" title="good article badge"><a href="https://ja.wikipedia.org/wiki/ImageNet" title="ImageNet – Japanese" lang="ja" hreflang="ja" data-title="ImageNet" data-language-autonym="日本語" data-language-local-name="Japanese" class="interlanguage-link-target"><span>日本語</span></a></li><li class="interlanguage-link interwiki-ru mw-list-item"><a href="https://ru.wikipedia.org/wiki/ImageNet" title="ImageNet – Russian" lang="ru" hreflang="ru" data-title="ImageNet" data-language-autonym="Русский" data-language-local-name="Russian" class="interlanguage-link-target"><span>Русский</span></a></li><li class="interlanguage-link interwiki-sr mw-list-item"><a href="https://sr.wikipedia.org/wiki/ImageNet" title="ImageNet – Serbian" lang="sr" hreflang="sr" data-title="ImageNet" data-language-autonym="Српски / 
srpski" data-language-local-name="Serbian" class="interlanguage-link-target"><span>Српски / srpski</span></a></li><li class="interlanguage-link interwiki-th mw-list-item"><a href="https://th.wikipedia.org/wiki/%E0%B8%AD%E0%B8%B4%E0%B8%A1%E0%B9%80%E0%B8%A1%E0%B8%88%E0%B9%80%E0%B8%99%E0%B9%87%E0%B8%95" title="อิมเมจเน็ต – Thai" lang="th" hreflang="th" data-title="อิมเมจเน็ต" data-language-autonym="ไทย" data-language-local-name="Thai" class="interlanguage-link-target"><span>ไทย</span></a></li><li class="interlanguage-link interwiki-tr mw-list-item"><a href="https://tr.wikipedia.org/wiki/ImageNet" title="ImageNet – Turkish" lang="tr" hreflang="tr" data-title="ImageNet" data-language-autonym="Türkçe" data-language-local-name="Turkish" class="interlanguage-link-target"><span>Türkçe</span></a></li><li class="interlanguage-link interwiki-zh mw-list-item"><a href="https://zh.wikipedia.org/wiki/ImageNet" title="ImageNet – Chinese" lang="zh" hreflang="zh" data-title="ImageNet" data-language-autonym="中文" data-language-local-name="Chinese" class="interlanguage-link-target"><span>中文</span></a></li> </ul> <div class="after-portlet after-portlet-lang"><span class="wb-langlinks-edit wb-langlinks-link"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q24901201#sitelinks-wikipedia" title="Edit interlanguage links" class="wbc-editpage">Edit links</a></span></div> </div> </div> </div> </header> <div class="vector-page-toolbar"> <div class="vector-page-toolbar-container"> <div id="left-navigation"> <nav aria-label="Namespaces"> <div id="p-associated-pages" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-associated-pages" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-nstab-main" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/ImageNet" title="View the content page [c]" accesskey="c"><span>Article</span></a></li><li id="ca-talk" class="vector-tab-noicon mw-list-item"><a href="/wiki/Talk:ImageNet" rel="discussion" 
title="Discuss improvements to the content page [t]" accesskey="t"><span>Talk</span></a></li> </ul> </div> </div> <div id="vector-variants-dropdown" class="vector-dropdown emptyPortlet" > <input type="checkbox" id="vector-variants-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-variants-dropdown" class="vector-dropdown-checkbox " aria-label="Change language variant" > <label id="vector-variants-dropdown-label" for="vector-variants-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">English</span> </label> <div class="vector-dropdown-content"> <div id="p-variants" class="vector-menu mw-portlet mw-portlet-variants emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> </div> </div> </nav> </div> <div id="right-navigation" class="vector-collapsible"> <nav aria-label="Views"> <div id="p-views" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-views" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-view" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/ImageNet"><span>Read</span></a></li><li id="ca-edit" class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=ImageNet&action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-history" class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=ImageNet&action=history" title="Past revisions of this page [h]" accesskey="h"><span>View history</span></a></li> </ul> </div> </div> </nav> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-dropdown" class="vector-dropdown vector-page-tools-dropdown" > <input type="checkbox" id="vector-page-tools-dropdown-checkbox" role="button" aria-haspopup="true" 
data-event-name="ui.dropdown-vector-page-tools-dropdown" class="vector-dropdown-checkbox " aria-label="Tools" > <label id="vector-page-tools-dropdown-label" for="vector-page-tools-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">Tools</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-tools-unpinned-container" class="vector-unpinned-container"> <div id="vector-page-tools" class="vector-page-tools vector-pinnable-element"> <div class="vector-pinnable-header vector-page-tools-pinnable-header vector-pinnable-header-unpinned" data-feature-name="page-tools-pinned" data-pinnable-element-id="vector-page-tools" data-pinned-container-id="vector-page-tools-pinned-container" data-unpinned-container-id="vector-page-tools-unpinned-container" > <div class="vector-pinnable-header-label">Tools</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-page-tools.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-page-tools.unpin">hide</button> </div> <div id="p-cactions" class="vector-menu mw-portlet mw-portlet-cactions emptyPortlet vector-has-collapsible-items" title="More options" > <div class="vector-menu-heading"> Actions </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-more-view" class="selected vector-more-collapsible-item mw-list-item"><a href="/wiki/ImageNet"><span>Read</span></a></li><li id="ca-more-edit" class="vector-more-collapsible-item mw-list-item"><a href="/w/index.php?title=ImageNet&action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-more-history" class="vector-more-collapsible-item mw-list-item"><a 
href="/w/index.php?title=ImageNet&action=history"><span>View history</span></a></li> </ul> </div> </div> <div id="p-tb" class="vector-menu mw-portlet mw-portlet-tb" > <div class="vector-menu-heading"> General </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="t-whatlinkshere" class="mw-list-item"><a href="/wiki/Special:WhatLinksHere/ImageNet" title="List of all English Wikipedia pages containing links to this page [j]" accesskey="j"><span>What links here</span></a></li><li id="t-recentchangeslinked" class="mw-list-item"><a href="/wiki/Special:RecentChangesLinked/ImageNet" rel="nofollow" title="Recent changes in pages linked from this page [k]" accesskey="k"><span>Related changes</span></a></li><li id="t-upload" class="mw-list-item"><a href="/wiki/Wikipedia:File_Upload_Wizard" title="Upload files [u]" accesskey="u"><span>Upload file</span></a></li><li id="t-specialpages" class="mw-list-item"><a href="/wiki/Special:SpecialPages" title="A list of all special pages [q]" accesskey="q"><span>Special pages</span></a></li><li id="t-permalink" class="mw-list-item"><a href="/w/index.php?title=ImageNet&oldid=1259151460" title="Permanent link to this revision of this page"><span>Permanent link</span></a></li><li id="t-info" class="mw-list-item"><a href="/w/index.php?title=ImageNet&action=info" title="More information about this page"><span>Page information</span></a></li><li id="t-cite" class="mw-list-item"><a href="/w/index.php?title=Special:CiteThisPage&page=ImageNet&id=1259151460&wpFormIdentifier=titleform" title="Information on how to cite this page"><span>Cite this page</span></a></li><li id="t-urlshortener" class="mw-list-item"><a href="/w/index.php?title=Special:UrlShortener&url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FImageNet"><span>Get shortened URL</span></a></li><li id="t-urlshortener-qrcode" class="mw-list-item"><a href="/w/index.php?title=Special:QrCode&url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FImageNet"><span>Download QR 
code</span></a></li> </ul> </div> </div> <div id="p-coll-print_export" class="vector-menu mw-portlet mw-portlet-coll-print_export" > <div class="vector-menu-heading"> Print/export </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="coll-download-as-rl" class="mw-list-item"><a href="/w/index.php?title=Special:DownloadAsPdf&page=ImageNet&action=show-download-screen" title="Download this page as a PDF file"><span>Download as PDF</span></a></li><li id="t-print" class="mw-list-item"><a href="/w/index.php?title=ImageNet&printable=yes" title="Printable version of this page [p]" accesskey="p"><span>Printable version</span></a></li> </ul> </div> </div> <div id="p-wikibase-otherprojects" class="vector-menu mw-portlet mw-portlet-wikibase-otherprojects" > <div class="vector-menu-heading"> In other projects </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="t-wikibase" class="wb-otherproject-link wb-otherproject-wikibase-dataitem mw-list-item"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q24901201" title="Structured data on this page hosted by Wikidata [g]" accesskey="g"><span>Wikidata item</span></a></li> </ul> </div> </div> </div> </div> </div> </div> </nav> </div> </div> </div> <div class="vector-column-end"> <div class="vector-sticky-pinned-container"> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-pinned-container" class="vector-pinned-container"> </div> </nav> <nav class="vector-appearance-landmark" aria-label="Appearance"> <div id="vector-appearance-pinned-container" class="vector-pinned-container"> <div id="vector-appearance" class="vector-appearance vector-pinnable-element"> <div class="vector-pinnable-header vector-appearance-pinnable-header vector-pinnable-header-pinned" data-feature-name="appearance-pinned" data-pinnable-element-id="vector-appearance" data-pinned-container-id="vector-appearance-pinned-container" 
data-unpinned-container-id="vector-appearance-unpinned-container" > <div class="vector-pinnable-header-label">Appearance</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-appearance.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-appearance.unpin">hide</button> </div> </div> </div> </nav> </div> </div> <div id="bodyContent" class="vector-body" aria-labelledby="firstHeading" data-mw-ve-target-container> <div class="vector-body-before-content"> <div class="mw-indicators"> </div> <div id="siteSub" class="noprint">From Wikipedia, the free encyclopedia</div> </div> <div id="contentSub"><div id="mw-content-subtitle"></div></div> <div id="mw-content-text" class="mw-body-content"><div class="mw-content-ltr mw-parser-output" lang="en" dir="ltr"><div class="shortdescription nomobile noexcerpt noprint searchaux" style="display:none">Image dataset</div><p>The <b>ImageNet</b> project is a large visual <a href="/wiki/Database" title="Database">database</a> designed for use in <a href="/wiki/Outline_of_object_recognition" title="Outline of object recognition">visual object recognition software</a> research. 
More than 14 million<sup id="cite_ref-New_Scientist_1-0" class="reference"><a href="#cite_note-New_Scientist-1"><span class="cite-bracket">[</span>1<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-nytimes_2012_2-0" class="reference"><a href="#cite_note-nytimes_2012-2"><span class="cite-bracket">[</span>2<span class="cite-bracket">]</span></a></sup> images have been hand-annotated by the project to indicate what objects are pictured and in at least one million of the images, bounding boxes are also provided.<sup id="cite_ref-3" class="reference"><a href="#cite_note-3"><span class="cite-bracket">[</span>3<span class="cite-bracket">]</span></a></sup> ImageNet contains more than 20,000 categories,<sup id="cite_ref-nytimes_2012_2-1" class="reference"><a href="#cite_note-nytimes_2012-2"><span class="cite-bracket">[</span>2<span class="cite-bracket">]</span></a></sup> with a typical category, such as "balloon" or "strawberry", consisting of several hundred images.<sup id="cite_ref-economist_4-0" class="reference"><a href="#cite_note-economist-4"><span class="cite-bracket">[</span>4<span class="cite-bracket">]</span></a></sup> The database of annotations of third-party image <a href="/wiki/URL" title="URL">URLs</a> is freely available directly from ImageNet, though the actual images are not owned by ImageNet.<sup id="cite_ref-5" class="reference"><a href="#cite_note-5"><span class="cite-bracket">[</span>5<span class="cite-bracket">]</span></a></sup> Since 2010, the ImageNet project runs an annual software contest, the ImageNet Large Scale Visual Recognition Challenge (<a href="#History_of_the_ImageNet_challenge">ILSVRC</a>), where software programs compete to correctly classify and detect objects and scenes. 
The challenge uses a "trimmed" list of one thousand non-overlapping classes.<sup id="cite_ref-ILJVRC-2015_6-0" class="reference"><a href="#cite_note-ILJVRC-2015-6"><span class="cite-bracket">[</span>6<span class="cite-bracket">]</span></a></sup> </p><meta property="mw:PageProp/toc" /> <div class="mw-heading mw-heading2"><h2 id="History">History</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=ImageNet&action=edit&section=1" title="Edit section: History"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>AI researcher <a href="/wiki/Fei-Fei_Li" title="Fei-Fei Li">Fei-Fei Li</a> began working on the idea for ImageNet in 2006. At a time when most AI research focused on models and algorithms, Li wanted to expand and improve the data available to train AI algorithms.<sup id="cite_ref-WiredQuest_7-0" class="reference"><a href="#cite_note-WiredQuest-7"><span class="cite-bracket">[</span>7<span class="cite-bracket">]</span></a></sup> In 2007, Li met with Princeton professor <a href="/wiki/Christiane_Fellbaum" title="Christiane Fellbaum">Christiane Fellbaum</a>, one of the creators of <a href="/wiki/WordNet" title="WordNet">WordNet</a>, to discuss the project. 
As a result of this meeting, Li went on to build ImageNet starting from the roughly 22,000 nouns of WordNet and using many of its features.<sup id="cite_ref-Gershgorn_8-0" class="reference"><a href="#cite_note-Gershgorn-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup> She was also inspired by a 1987 estimate<sup id="cite_ref-9" class="reference"><a href="#cite_note-9"><span class="cite-bracket">[</span>9<span class="cite-bracket">]</span></a></sup> that the average person recognizes roughly 30,000 different kinds of objects.<sup id="cite_ref-:1_10-0" class="reference"><a href="#cite_note-:1-10"><span class="cite-bracket">[</span>10<span class="cite-bracket">]</span></a></sup> </p><p>As an assistant professor at <a href="/wiki/Princeton_University" title="Princeton University">Princeton</a>, Li assembled a team of researchers to work on the ImageNet project. They used <a href="/wiki/Amazon_Mechanical_Turk" title="Amazon Mechanical Turk">Amazon Mechanical Turk</a> to help with the classification of images. Labeling started in July 2008 and ended in April 2010. It took 2.5 years to complete the labeling.<sup id="cite_ref-Gershgorn_8-1" class="reference"><a href="#cite_note-Gershgorn-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup> They had enough budget to have each of the 14 million images labelled three times.<sup id="cite_ref-:1_10-1" class="reference"><a href="#cite_note-:1-10"><span class="cite-bracket">[</span>10<span class="cite-bracket">]</span></a></sup> </p><p>The original plan called for 10,000 images per category, for 40,000 categories at 400 million images, each verified 3 times. They found that humans can classify at most 2 images/sec. 
At this rate, it was estimated to take 19 human-years of labor (without rest).<sup id="cite_ref-:5_11-0" class="reference"><a href="#cite_note-:5-11"><span class="cite-bracket">[</span>11<span class="cite-bracket">]</span></a></sup> </p><p>They presented their database for the first time as a poster at the 2009 <a href="/wiki/Conference_on_Computer_Vision_and_Pattern_Recognition" title="Conference on Computer Vision and Pattern Recognition">Conference on Computer Vision and Pattern Recognition</a> (CVPR) in Florida, titled "ImageNet: A Preview of a Large-scale Hierarchical Dataset".<sup id="cite_ref-12" class="reference"><a href="#cite_note-12"><span class="cite-bracket">[</span>12<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Gershgorn_8-2" class="reference"><a href="#cite_note-Gershgorn-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-:2_13-0" class="reference"><a href="#cite_note-:2-13"><span class="cite-bracket">[</span>13<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-14" class="reference"><a href="#cite_note-14"><span class="cite-bracket">[</span>14<span class="cite-bracket">]</span></a></sup> The poster was reused at Vision Sciences Society 2009.<sup id="cite_ref-15" class="reference"><a href="#cite_note-15"><span class="cite-bracket">[</span>15<span class="cite-bracket">]</span></a></sup> </p><p>In 2009, Alex Berg suggested adding object localization as a task. Li approached <a rel="nofollow" class="external text" href="http://host.robots.ox.ac.uk/pascal/VOC/">PASCAL Visual Object Classes</a> contest in 2009 for a collaboration. 
It resulted in the subsequent <a href="/wiki/ImageNet_Large_Scale_Visual_Recognition_Challenge" class="mw-redirect" title="ImageNet Large Scale Visual Recognition Challenge">ImageNet Large Scale Visual Recognition Challenge</a> starting in 2010, which has 1000 classes and object localization, as compared to <a rel="nofollow" class="external text" href="http://host.robots.ox.ac.uk/pascal/VOC/">PASCAL VOC</a> which had just 20 classes and 19,737 images (in 2010).<sup id="cite_ref-ILJVRC-2015_6-1" class="reference"><a href="#cite_note-ILJVRC-2015-6"><span class="cite-bracket">[</span>6<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Gershgorn_8-3" class="reference"><a href="#cite_note-Gershgorn-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Significance_for_deep_learning">Significance for deep learning</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=ImageNet&action=edit&section=2" title="Edit section: Significance for deep learning"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>On 30 September 2012, a <a href="/wiki/Convolutional_neural_network" title="Convolutional neural network">convolutional neural network</a> (CNN) called <a href="/wiki/AlexNet" title="AlexNet">AlexNet</a><sup id="cite_ref-:0_16-0" class="reference"><a href="#cite_note-:0-16"><span class="cite-bracket">[</span>16<span class="cite-bracket">]</span></a></sup> achieved a top-5 error of 15.3% in the ImageNet 2012 Challenge, more than 10.8 percentage points lower than that of the runner up. 
Using convolutional neural networks was feasible due to the use of <a href="/wiki/Graphics_processing_unit" title="Graphics processing unit">graphics processing units</a> (GPUs) during training,<sup id="cite_ref-:0_16-1" class="reference"><a href="#cite_note-:0-16"><span class="cite-bracket">[</span>16<span class="cite-bracket">]</span></a></sup> an essential ingredient of the <a href="/wiki/Deep_learning" title="Deep learning">deep learning</a> revolution. According to <i><a href="/wiki/The_Economist" title="The Economist">The Economist</a></i>, "Suddenly people started to pay attention, not just within the AI community but across the technology industry as a whole."<sup id="cite_ref-economist_4-1" class="reference"><a href="#cite_note-economist-4"><span class="cite-bracket">[</span>4<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-17" class="reference"><a href="#cite_note-17"><span class="cite-bracket">[</span>17<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-18" class="reference"><a href="#cite_note-18"><span class="cite-bracket">[</span>18<span class="cite-bracket">]</span></a></sup> </p><p>In 2015, AlexNet was outperformed by <a href="/wiki/Microsoft" title="Microsoft">Microsoft</a>'s <a href="/wiki/ResNets" class="mw-redirect" title="ResNets">very deep CNN</a> with over 100 layers, which won the ImageNet 2015 contest.<sup id="cite_ref-microsoft2015_19-0" class="reference"><a href="#cite_note-microsoft2015-19"><span class="cite-bracket">[</span>19<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Dataset">Dataset</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=ImageNet&action=edit&section=3" title="Edit section: Dataset"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>ImageNet <a href="/wiki/Crowdsources" class="mw-redirect" title="Crowdsources">crowdsources</a> its annotation process. 
Image-level annotations indicate the presence or absence of an object class in an image, such as "there are tigers in this image" or "there are no tigers in this image". Object-level annotations provide a bounding box around the (visible part of the) indicated object. ImageNet uses a variant of the broad <a href="/wiki/WordNet" title="WordNet">WordNet</a> schema to categorize objects, augmented with 120 categories of <a href="/wiki/Dog_breeds" class="mw-redirect" title="Dog breeds">dog breeds</a> to showcase fine-grained classification.<sup id="cite_ref-ILJVRC-2015_6-2" class="reference"><a href="#cite_note-ILJVRC-2015-6"><span class="cite-bracket">[</span>6<span class="cite-bracket">]</span></a></sup> </p><p>In 2012, ImageNet was the world's largest academic user of <a href="/wiki/Amazon_Mechanical_Turk" title="Amazon Mechanical Turk">Mechanical Turk</a>. The average worker identified 50 images per minute.<sup id="cite_ref-nytimes_2012_2-2" class="reference"><a href="#cite_note-nytimes_2012-2"><span class="cite-bracket">[</span>2<span class="cite-bracket">]</span></a></sup> </p><p>The original plan of the full ImageNet would have roughly 50M clean, diverse and full resolution images spread over approximately 50K synsets.<sup id="cite_ref-:2_13-1" class="reference"><a href="#cite_note-:2-13"><span class="cite-bracket">[</span>13<span class="cite-bracket">]</span></a></sup> This was not achieved. 
</p><p>The summary statistics given on April 30, 2010:<sup id="cite_ref-20" class="reference"><a href="#cite_note-20"><span class="cite-bracket">[</span>20<span class="cite-bracket">]</span></a></sup> </p> <ul><li>Total number of non-empty synsets: 21841</li> <li>Total number of images: 14,197,122</li> <li>Number of images with bounding box annotations: 1,034,908</li> <li>Number of synsets with SIFT features: 1000</li> <li>Number of images with SIFT features: 1.2 million</li></ul> <div class="mw-heading mw-heading3"><h3 id="Categories">Categories</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=ImageNet&action=edit&section=4" title="Edit section: Categories"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The categories of ImageNet were filtered from the WordNet concepts. Since each concept can contain multiple synonyms (for example, "kitty" and "young cat"), each concept is called a "synonym set" or "<a href="/wiki/Synset" title="Synset">synset</a>". There were more than 100,000 synsets in WordNet 3.0, the majority of which are nouns (80,000+). The ImageNet dataset filtered these to 21,841 synsets that are <a href="/wiki/Count_noun" title="Count noun">countable nouns</a> that can be visually illustrated. </p><p>Each synset in WordNet 3.0 has a "WordNet ID" (wnid), which is a concatenation of <a href="/wiki/Part_of_speech" title="Part of speech">part of speech</a> and an "offset" (a <a href="/wiki/Unique_key" title="Unique key">unique identifying number</a>). Every wnid starts with "n" because ImageNet only includes <a href="/wiki/Noun" title="Noun">nouns</a>. 
For example, the wnid of synset "<a href="/wiki/Dog" title="Dog">dog, domestic dog, Canis familiaris</a>" is "n02084071".<sup id="cite_ref-21" class="reference"><a href="#cite_note-21"><span class="cite-bracket">[</span>21<span class="cite-bracket">]</span></a></sup> </p><p>The categories in ImageNet fall into 9 levels, from level 1 (such as "mammal") to level 9 (such as "German shepherd").<sup id="cite_ref-:5_11-1" class="reference"><a href="#cite_note-:5-11"><span class="cite-bracket">[</span>11<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Image_format">Image format</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=ImageNet&action=edit&section=5" title="Edit section: Image format"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The images were scraped from online image search (<a href="/wiki/Google_Search" title="Google Search">Google</a>, <a href="/wiki/Picsearch" title="Picsearch">Picsearch</a>, <a href="/wiki/MSN" title="MSN">MSN</a>, <a href="/wiki/Yahoo" title="Yahoo">Yahoo</a>, <a href="/wiki/Flickr" title="Flickr">Flickr</a>, etc) using synonyms in multiple languages. For example: <i>German shepherd, German police dog, German shepherd dog, Alsatian, ovejero alemán, pastore tedesco, 德国牧羊犬</i>.<sup id="cite_ref-:4_22-0" class="reference"><a href="#cite_note-:4-22"><span class="cite-bracket">[</span>22<span class="cite-bracket">]</span></a></sup> </p><p>ImageNet consists of images in <a href="/wiki/RGB_color_model" title="RGB color model">RGB</a> format with varying resolutions. For example, in ImageNet 2012, "fish" category, the resolution ranges from 4288 x 2848 to 75 x 56. In machine learning, these are typically preprocessed into a standard constant resolution, and whitened, before further processing by neural networks. 
</p><p>For example, in PyTorch, ImageNet images are by default normalized by dividing the pixel values so that they fall between 0 and 1, then subtracting [0.485, 0.456, 0.406], then dividing by [0.229, 0.224, 0.225]. These are the mean and standard deviations for ImageNet, so this <a href="/wiki/Whitening_transformation" title="Whitening transformation">whitens</a> the input data.<sup id="cite_ref-23" class="reference"><a href="#cite_note-23"><span class="cite-bracket">[</span>23<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Labels_and_annotations">Labels and annotations</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=ImageNet&action=edit&section=6" title="Edit section: Labels and annotations"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Each image is labelled with exactly one wnid. </p><p>Dense <a href="/wiki/Scale-invariant_feature_transform" title="Scale-invariant feature transform">SIFT features</a> (raw SIFT descriptors, quantized codewords, and coordinates of each descriptor/codeword) for ImageNet-1K were available for download, designed for <a href="/wiki/Bag_of_visual_words" class="mw-redirect" title="Bag of visual words">bag of visual words</a>.<sup id="cite_ref-24" class="reference"><a href="#cite_note-24"><span class="cite-bracket">[</span>24<span class="cite-bracket">]</span></a></sup> </p><p>The bounding boxes of objects were available for about 3000 popular synsets<sup id="cite_ref-25" class="reference"><a href="#cite_note-25"><span class="cite-bracket">[</span>25<span class="cite-bracket">]</span></a></sup> with on average 150 images in each synset.<sup id="cite_ref-26" class="reference"><a href="#cite_note-26"><span class="cite-bracket">[</span>26<span class="cite-bracket">]</span></a></sup> </p><p>Furthermore, some images have attributes. 
They released 25 attributes for ~400 popular synsets:<sup id="cite_ref-27" class="reference"><a href="#cite_note-27"><span class="cite-bracket">[</span>27<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-28" class="reference"><a href="#cite_note-28"><span class="cite-bracket">[</span>28<span class="cite-bracket">]</span></a></sup> </p> <ul><li><b>Color</b>: black, blue, brown, gray, green, orange, pink, red, violet, white, yellow</li> <li><b>Pattern</b>: spotted, striped</li> <li><b>Shape</b>: long, round, rectangular, square</li> <li><b>Texture</b>: furry, smooth, rough, shiny, metallic, vegetation, wooden, wet</li></ul> <div class="mw-heading mw-heading3"><h3 id="ImageNet-21K">ImageNet-21K</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=ImageNet&action=edit&section=7" title="Edit section: ImageNet-21K"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The full original dataset is referred to as ImageNet-21K. ImageNet-21k contains 14,197,122 images divided into 21,841 classes. Some papers round this up and name it ImageNet-22k.<sup id="cite_ref-:3_29-0" class="reference"><a href="#cite_note-:3-29"><span class="cite-bracket">[</span>29<span class="cite-bracket">]</span></a></sup> </p><p>The full ImageNet-21k was released in Fall of 2011, as <code>fall11_whole.tar</code>. There is no official train-validation-test split for ImageNet-21k. 
Some classes contain only 1-10 samples, while others contain thousands.<sup id="cite_ref-:3_29-1" class="reference"><a href="#cite_note-:3-29"><span class="cite-bracket">[</span>29<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="ImageNet-1K">ImageNet-1K</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=ImageNet&action=edit&section=8" title="Edit section: ImageNet-1K"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>There are various subsets of the ImageNet dataset used in various contexts, sometimes referred to as "versions".<sup id="cite_ref-:0_16-2" class="reference"><a href="#cite_note-:0-16"><span class="cite-bracket">[</span>16<span class="cite-bracket">]</span></a></sup> </p><p>One of the most highly used subsets of ImageNet is the "ImageNet Large Scale Visual Recognition Challenge (ILSVRC) 2012–2017 image classification and localization dataset". This is also referred to in the research literature as ImageNet-1K or ILSVRC2017, reflecting the original ILSVRC challenge that involved 1,000 classes. ImageNet-1K contains 1,281,167 training images, 50,000 validation images and 100,000 test images.<sup id="cite_ref-30" class="reference"><a href="#cite_note-30"><span class="cite-bracket">[</span>30<span class="cite-bracket">]</span></a></sup> </p><p>Each category in ImageNet-1K is a leaf category, meaning that there are no child nodes below it, unlike ImageNet-21K. 
For example, in ImageNet-21K, there are some images categorized as simply "mammal", whereas in ImageNet-1K, there are only images categorized as things like "German shepherd", since there are no child-words below "German shepherd".<sup id="cite_ref-:4_22-1" class="reference"><a href="#cite_note-:4-22"><span class="cite-bracket">[</span>22<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Later_developments">Later developments</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=ImageNet&action=edit&section=9" title="Edit section: Later developments"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>In 2021 winter, ImageNet-21k was updated. 2,702 categories in the "person" subtree were filtered to prevent "problematic behaviors" in a trained model. In 2021, ImageNet-1k was updated by annotating faces appearing in the 997 non-person categories. They found training models on the dataset with these faces blurred caused minimal loss in performance.<sup id="cite_ref-31" class="reference"><a href="#cite_note-31"><span class="cite-bracket">[</span>31<span class="cite-bracket">]</span></a></sup> </p><p>ImageNetV2 was a new dataset containing three test sets with 10,000 each, constructed by the same methodology as the original ImageNet.<sup id="cite_ref-32" class="reference"><a href="#cite_note-32"><span class="cite-bracket">[</span>32<span class="cite-bracket">]</span></a></sup> </p><p>ImageNet-21K-P was a filtered and cleaned subset of ImageNet-21K, with 12,358,688 images from 11,221 categories.<sup id="cite_ref-:3_29-2" class="reference"><a href="#cite_note-:3-29"><span class="cite-bracket">[</span>29<span class="cite-bracket">]</span></a></sup> </p> <table class="wikitable"> <caption>Table of datasets </caption> <tbody><tr> <th>Name </th> <th>Published </th> <th>Classes </th> <th>Training </th> <th>Validation </th> <th>Test </th> <th>Size 
</th></tr> <tr> <td>PASCAL VOC </td> <td>2005 </td> <td>20 </td> <td> </td> <td> </td> <td> </td> <td> </td></tr> <tr> <td>ImageNet-1K </td> <td>2009 </td> <td>1,000 </td> <td>1,281,167 </td> <td>50,000 </td> <td>100,000 </td> <td>130 GB </td></tr> <tr> <td>ImageNet-21K </td> <td>2011 </td> <td>21,841 </td> <td>14,197,122 </td> <td> </td> <td> </td> <td>1.31 TB </td></tr> <tr> <td>ImageNetV2 </td> <td>2019 </td> <td> </td> <td> </td> <td> </td> <td>30,000 </td> <td> </td></tr> <tr> <td>ImageNet-21K-P </td> <td>2021 </td> <td>11,221 </td> <td>11,797,632 </td> <td> </td> <td>561,052 </td> <td> </td></tr></tbody></table> <div class="mw-heading mw-heading2"><h2 id="History_of_the_ImageNet_challenge">History of the ImageNet challenge</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=ImageNet&action=edit&section=10" title="Edit section: History of the ImageNet challenge"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <figure class="mw-default-size" typeof="mw:File/Thumb"><a href="/wiki/File:ImageNet_error_rate_history_(just_systems).svg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/4/4f/ImageNet_error_rate_history_%28just_systems%29.svg/220px-ImageNet_error_rate_history_%28just_systems%29.svg.png" decoding="async" width="220" height="269" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/4f/ImageNet_error_rate_history_%28just_systems%29.svg/330px-ImageNet_error_rate_history_%28just_systems%29.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/4/4f/ImageNet_error_rate_history_%28just_systems%29.svg/440px-ImageNet_error_rate_history_%28just_systems%29.svg.png 2x" data-file-width="810" data-file-height="990" /></a><figcaption>Error rate history on ImageNet (showing best result per team and up to 10 entries per year)</figcaption></figure> <p>The ILSVRC aims to "follow in the footsteps" of the smaller-scale 
<a rel="nofollow" class="external text" href="http://host.robots.ox.ac.uk/pascal/VOC/">PASCAL VOC</a> challenge, established in 2005, which contained only about 20,000 images and twenty object classes.<sup id="cite_ref-ILJVRC-2015_6-3" class="reference"><a href="#cite_note-ILJVRC-2015-6"><span class="cite-bracket">[</span>6<span class="cite-bracket">]</span></a></sup> To "democratize" ImageNet, Fei-Fei Li proposed to the <a rel="nofollow" class="external text" href="http://host.robots.ox.ac.uk/pascal/VOC/">PASCAL VOC</a> team a collaboration, beginning in 2010, where research teams would evaluate their algorithms on the given data set, and compete to achieve higher accuracy on several visual recognition tasks.<sup id="cite_ref-Gershgorn_8-4" class="reference"><a href="#cite_note-Gershgorn-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup> </p><p>The resulting annual competition is now known as the ImageNet Large Scale Visual Recognition Challenge (ILSVRC). The ILSVRC uses a "trimmed" list of only 1000 image categories or "classes", including 90 of the 120 dog breeds classified by the full ImageNet schema.<sup id="cite_ref-ILJVRC-2015_6-4" class="reference"><a href="#cite_note-ILJVRC-2015-6"><span class="cite-bracket">[</span>6<span class="cite-bracket">]</span></a></sup> </p><p>The 2010s saw dramatic progress in image processing. </p><p>The first competition in 2010 had 11 participating teams. The winning team was a linear <a href="/wiki/Support_vector_machine" title="Support vector machine">support vector machine</a> (SVM). 
The features are a dense grid of <a href="/wiki/Histogram_of_oriented_gradients" title="Histogram of oriented gradients">HoG</a> and <a href="/wiki/Local_binary_patterns" title="Local binary patterns">LBP</a>, sparsified by local coordinate coding and pooling.<sup id="cite_ref-33" class="reference"><a href="#cite_note-33"><span class="cite-bracket">[</span>33<span class="cite-bracket">]</span></a></sup> It achieved 52.9% in classification accuracy and 71.8% in top-5 accuracy. It was trained for 4 days on three 8-core machines (dual quad-core 2 GHz <a href="/wiki/Xeon" title="Xeon">Intel Xeon</a> CPU).<sup id="cite_ref-34" class="reference"><a href="#cite_note-34"><span class="cite-bracket">[</span>34<span class="cite-bracket">]</span></a></sup> </p><p>The second competition in 2011 had fewer teams, with another SVM winning at a top-5 error rate of 25%.<sup id="cite_ref-:1_10-2" class="reference"><a href="#cite_note-:1-10"><span class="cite-bracket">[</span>10<span class="cite-bracket">]</span></a></sup> The winning team was XRCE, by Florent Perronnin and Jorge Sanchez. The system was another linear SVM, running on quantized<sup id="cite_ref-35" class="reference"><a href="#cite_note-35"><span class="cite-bracket">[</span>35<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Fisher_kernel" title="Fisher kernel">Fisher vectors</a>.<sup id="cite_ref-36" class="reference"><a href="#cite_note-36"><span class="cite-bracket">[</span>36<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-37" class="reference"><a href="#cite_note-37"><span class="cite-bracket">[</span>37<span class="cite-bracket">]</span></a></sup> It achieved 74.2% in top-5 accuracy. 
</p><p>In 2012, a deep <a href="/wiki/Convolutional_neural_network" title="Convolutional neural network">convolutional neural net</a> called <a href="/wiki/AlexNet" title="AlexNet">AlexNet</a> achieved 84.7% in top-5 accuracy, a great leap forward.<sup id="cite_ref-38" class="reference"><a href="#cite_note-38"><span class="cite-bracket">[</span>38<span class="cite-bracket">]</span></a></sup> In the next couple of years, top-5 accuracy grew to above 90%. While the 2012 breakthrough "combined pieces that were all there before", the dramatic quantitative improvement marked the start of an industry-wide artificial intelligence boom.<sup id="cite_ref-economist_4-2" class="reference"><a href="#cite_note-economist-4"><span class="cite-bracket">[</span>4<span class="cite-bracket">]</span></a></sup> </p><p>By 2014, more than fifty institutions participated in the ILSVRC.<sup id="cite_ref-ILJVRC-2015_6-5" class="reference"><a href="#cite_note-ILJVRC-2015-6"><span class="cite-bracket">[</span>6<span class="cite-bracket">]</span></a></sup> In 2017, 29 of 38 competing teams had greater than 95% accuracy.<sup id="cite_ref-39" class="reference"><a href="#cite_note-39"><span class="cite-bracket">[</span>39<span class="cite-bracket">]</span></a></sup> In 2017 ImageNet stated it would roll out a new, much more difficult challenge in 2018 that involves classifying 3D objects using natural language. Because creating 3D data is more costly than annotating a pre-existing 2D image, the dataset is expected to be smaller. 
The applications of progress in this area would range from robotic navigation to <a href="/wiki/Augmented_reality" title="Augmented reality">augmented reality</a>.<sup id="cite_ref-New_Scientist_1-1" class="reference"><a href="#cite_note-New_Scientist-1"><span class="cite-bracket">[</span>1<span class="cite-bracket">]</span></a></sup> </p><p>By 2015, researchers at Microsoft reported that their CNNs exceeded human ability at the narrow ILSVRC tasks.<sup id="cite_ref-microsoft2015_19-1" class="reference"><a href="#cite_note-microsoft2015-19"><span class="cite-bracket">[</span>19<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-40" class="reference"><a href="#cite_note-40"><span class="cite-bracket">[</span>40<span class="cite-bracket">]</span></a></sup> However, as one of the challenge's organizers, <a href="/wiki/Olga_Russakovsky" title="Olga Russakovsky">Olga Russakovsky</a>, pointed out in 2015, the contest is over only 1000 categories; humans can recognize a larger number of categories, and also (unlike the programs) can judge the context of an image.<sup id="cite_ref-41" class="reference"><a href="#cite_note-41"><span class="cite-bracket">[</span>41<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Bias_in_ImageNet">Bias in ImageNet</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=ImageNet&action=edit&section=11" title="Edit section: Bias in ImageNet"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>It is estimated that over 6% of labels in the ImageNet-1k validation set are wrong.<sup id="cite_ref-42" class="reference"><a href="#cite_note-42"><span class="cite-bracket">[</span>42<span class="cite-bracket">]</span></a></sup> It is also found that around 10% of ImageNet-1k contains ambiguous or erroneous labels, and that, when presented with a model's prediction and the original ImageNet label, human annotators 
prefer the prediction of a state of the art model in 2020 trained on the original ImageNet, suggesting that ImageNet-1k has been saturated.<sup id="cite_ref-43" class="reference"><a href="#cite_note-43"><span class="cite-bracket">[</span>43<span class="cite-bracket">]</span></a></sup> </p><p>A study of the history of the multiple layers (<a href="/wiki/Taxonomy_(general)" class="mw-redirect" title="Taxonomy (general)">taxonomy</a>, object classes and labeling) of ImageNet and WordNet in 2019 described how <a href="/wiki/Algorithmic_bias" title="Algorithmic bias">bias</a><sup class="noprint Inline-Template" style="margin-left:0.1em; white-space:nowrap;">[<i><a href="/wiki/Wikipedia:Please_clarify" title="Wikipedia:Please clarify"><span title="The text near this tag may need clarification or removal of jargon. (December 2023)">clarification needed</span></a></i>]</sup> is deeply embedded in most classification approaches for all sorts of images.<sup id="cite_ref-44" class="reference"><a href="#cite_note-44"><span class="cite-bracket">[</span>44<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-45" class="reference"><a href="#cite_note-45"><span class="cite-bracket">[</span>45<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-46" class="reference"><a href="#cite_note-46"><span class="cite-bracket">[</span>46<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-47" class="reference"><a href="#cite_note-47"><span class="cite-bracket">[</span>47<span class="cite-bracket">]</span></a></sup> ImageNet is working to address various sources of bias.<sup id="cite_ref-48" class="reference"><a href="#cite_note-48"><span class="cite-bracket">[</span>48<span class="cite-bracket">]</span></a></sup> </p><p>One downside of WordNet use is the categories may be more "elevated" than would be optimal for ImageNet: "Most people are more interested in Lady Gaga or the iPod Mini than in this rare kind of <a href="/wiki/Diplodocus" 
title="Diplodocus">diplodocus</a>."<sup class="noprint Inline-Template" style="margin-left:0.1em; white-space:nowrap;">[<i><a href="/wiki/Wikipedia:Please_clarify" title="Wikipedia:Please clarify"><span title="The text near this tag may need clarification or removal of jargon. (August 2019)">clarification needed</span></a></i>]</sup> </p> <div class="mw-heading mw-heading2"><h2 id="See_also">See also</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=ImageNet&action=edit&section=12" title="Edit section: See also"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <ul><li><a href="/wiki/Computer_vision" title="Computer vision">Computer vision</a></li> <li><a href="/wiki/List_of_datasets_for_machine_learning_research" class="mw-redirect" title="List of datasets for machine learning research">List of datasets for machine learning research</a></li> <li><a href="/wiki/WordNet" title="WordNet">WordNet</a></li></ul> <div class="mw-heading mw-heading2"><h2 id="References">References</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=ImageNet&action=edit&section=13" title="Edit section: References"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1239543626">.mw-parser-output .reflist{margin-bottom:0.5em;list-style-type:decimal}@media screen{.mw-parser-output .reflist{font-size:90%}}.mw-parser-output .reflist .references{font-size:100%;margin-bottom:0;list-style-type:inherit}.mw-parser-output .reflist-columns-2{column-width:30em}.mw-parser-output .reflist-columns-3{column-width:25em}.mw-parser-output .reflist-columns{margin-top:0.3em}.mw-parser-output .reflist-columns ol{margin-top:0}.mw-parser-output .reflist-columns li{page-break-inside:avoid;break-inside:avoid-column}.mw-parser-output .reflist-upper-alpha{list-style-type:upper-alpha}.mw-parser-output 
.reflist-upper-roman{list-style-type:upper-roman}.mw-parser-output .reflist-lower-alpha{list-style-type:lower-alpha}.mw-parser-output .reflist-lower-greek{list-style-type:lower-greek}.mw-parser-output .reflist-lower-roman{list-style-type:lower-roman}</style><div class="reflist reflist-columns references-column-width" style="column-width: 30em;"> <ol class="references"> <li id="cite_note-New_Scientist-1"><span class="mw-cite-backlink">^ <a href="#cite_ref-New_Scientist_1-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-New_Scientist_1-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><style data-mw-deduplicate="TemplateStyles:r1238218222">.mw-parser-output cite.citation{font-style:inherit;word-wrap:break-word}.mw-parser-output .citation q{quotes:"\"""\"""'""'"}.mw-parser-output .citation:target{background-color:rgba(0,127,255,0.133)}.mw-parser-output .id-lock-free.id-lock-free a{background:url("//upload.wikimedia.org/wikipedia/commons/6/65/Lock-green.svg")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-limited.id-lock-limited a,.mw-parser-output .id-lock-registration.id-lock-registration a{background:url("//upload.wikimedia.org/wikipedia/commons/d/d6/Lock-gray-alt-2.svg")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-subscription.id-lock-subscription a{background:url("//upload.wikimedia.org/wikipedia/commons/a/aa/Lock-red-alt-2.svg")right 0.1em center/9px no-repeat}.mw-parser-output .cs1-ws-icon a{background:url("//upload.wikimedia.org/wikipedia/commons/4/4c/Wikisource-logo.svg")right 0.1em center/12px no-repeat}body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-free a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-limited a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-registration a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-subscription a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .cs1-ws-icon 
a{background-size:contain;padding:0 1em 0 0}.mw-parser-output .cs1-code{color:inherit;background:inherit;border:none;padding:inherit}.mw-parser-output .cs1-hidden-error{display:none;color:var(--color-error,#d33)}.mw-parser-output .cs1-visible-error{color:var(--color-error,#d33)}.mw-parser-output .cs1-maint{display:none;color:#085;margin-left:0.3em}.mw-parser-output .cs1-kern-left{padding-left:0.2em}.mw-parser-output .cs1-kern-right{padding-right:0.2em}.mw-parser-output .citation .mw-selflink{font-weight:inherit}@media screen{.mw-parser-output .cs1-format{font-size:95%}html.skin-theme-clientpref-night .mw-parser-output .cs1-maint{color:#18911f}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .cs1-maint{color:#18911f}}</style><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.newscientist.com/article/2127131-new-computer-vision-challenge-wants-to-teach-robots-to-see-in-3d/">"New computer vision challenge wants to teach robots to see in 3D"</a>. <i>New Scientist</i>. 7 April 2017<span class="reference-accessdate">. 
Retrieved <span class="nowrap">3 February</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=New+Scientist&rft.atitle=New+computer+vision+challenge+wants+to+teach+robots+to+see+in+3D&rft.date=2017-04-07&rft_id=https%3A%2F%2Fwww.newscientist.com%2Farticle%2F2127131-new-computer-vision-challenge-wants-to-teach-robots-to-see-in-3d%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-nytimes_2012-2"><span class="mw-cite-backlink">^ <a href="#cite_ref-nytimes_2012_2-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-nytimes_2012_2-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-nytimes_2012_2-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMarkoff2012" class="citation news cs1">Markoff, John (19 November 2012). <a rel="nofollow" class="external text" href="https://www.nytimes.com/2012/11/20/science/for-web-images-creating-new-technology-to-seek-and-find.html">"For Web Images, Creating New Technology to Seek and Find"</a>. <i>The New York Times</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">3 February</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+New+York+Times&rft.atitle=For+Web+Images%2C+Creating+New+Technology+to+Seek+and+Find&rft.date=2012-11-19&rft.aulast=Markoff&rft.aufirst=John&rft_id=https%3A%2F%2Fwww.nytimes.com%2F2012%2F11%2F20%2Fscience%2Ffor-web-images-creating-new-technology-to-seek-and-find.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-3"><span class="mw-cite-backlink"><b><a href="#cite_ref-3">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20200907212153/http://image-net.org/about-stats.php">"ImageNet"</a>. 7 September 2020. Archived from <a rel="nofollow" class="external text" href="http://image-net.org/about-stats.php">the original</a> on 7 September 2020<span class="reference-accessdate">. 
Retrieved <span class="nowrap">11 October</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=ImageNet&rft.date=2020-09-07&rft_id=http%3A%2F%2Fimage-net.org%2Fabout-stats.php&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-economist-4"><span class="mw-cite-backlink">^ <a href="#cite_ref-economist_4-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-economist_4-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-economist_4-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.economist.com/news/special-report/21700756-artificial-intelligence-boom-based-old-idea-modern-twist-not">"From not working to neural networking"</a>. <i>The Economist</i>. 25 June 2016<span class="reference-accessdate">. Retrieved <span class="nowrap">3 February</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+Economist&rft.atitle=From+not+working+to+neural+networking&rft.date=2016-06-25&rft_id=https%3A%2F%2Fwww.economist.com%2Fnews%2Fspecial-report%2F21700756-artificial-intelligence-boom-based-old-idea-modern-twist-not&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-5"><span class="mw-cite-backlink"><b><a href="#cite_ref-5">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://image-net.org/about.php">"ImageNet Overview"</a>. ImageNet<span class="reference-accessdate">. 
Retrieved <span class="nowrap">15 October</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=ImageNet+Overview&rft.pub=ImageNet&rft_id=https%3A%2F%2Fimage-net.org%2Fabout.php&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-ILJVRC-2015-6"><span class="mw-cite-backlink">^ <a href="#cite_ref-ILJVRC-2015_6-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-ILJVRC-2015_6-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-ILJVRC-2015_6-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-ILJVRC-2015_6-3"><sup><i><b>d</b></i></sup></a> <a href="#cite_ref-ILJVRC-2015_6-4"><sup><i><b>e</b></i></sup></a> <a href="#cite_ref-ILJVRC-2015_6-5"><sup><i><b>f</b></i></sup></a></span> <span class="reference-text">Olga Russakovsky*, Jia Deng*, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, <a href="/wiki/Andrej_Karpathy" title="Andrej Karpathy">Andrej Karpathy</a>, Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei. (* = equal contribution) ImageNet Large Scale Visual Recognition Challenge. IJCV, 2015.</span> </li> <li id="cite_note-WiredQuest-7"><span class="mw-cite-backlink"><b><a href="#cite_ref-WiredQuest_7-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHempel2018" class="citation magazine cs1">Hempel, Jesse (13 November 2018). <a rel="nofollow" class="external text" href="https://www.wired.com/story/fei-fei-li-artificial-intelligence-humanity/">"Fei-Fei Li's Quest to Make AI Better for Humanity"</a>. <i>Wired</i><span class="reference-accessdate">. Retrieved <span class="nowrap">5 May</span> 2019</span>. <q>When Li, who had moved back to Princeton to take a job as an assistant professor in 2007, talked up her idea for ImageNet, she had a hard time getting faculty members to help out. 
Finally, a professor who specialized in computer architecture agreed to join her as a collaborator.</q></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Wired&rft.atitle=Fei-Fei+Li%27s+Quest+to+Make+AI+Better+for+Humanity&rft.date=2018-11-13&rft.aulast=Hempel&rft.aufirst=Jesse&rft_id=https%3A%2F%2Fwww.wired.com%2Fstory%2Ffei-fei-li-artificial-intelligence-humanity%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-Gershgorn-8"><span class="mw-cite-backlink">^ <a href="#cite_ref-Gershgorn_8-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Gershgorn_8-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-Gershgorn_8-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-Gershgorn_8-3"><sup><i><b>d</b></i></sup></a> <a href="#cite_ref-Gershgorn_8-4"><sup><i><b>e</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGershgorn2017" class="citation web cs1">Gershgorn, Dave (26 July 2017). <a rel="nofollow" class="external text" href="https://qz.com/1034972/the-data-that-changed-the-direction-of-ai-research-and-possibly-the-world/">"The data that transformed AI research—and possibly the world"</a>. <i>Quartz</i>. Atlantic Media Co<span class="reference-accessdate">. Retrieved <span class="nowrap">26 July</span> 2017</span>. 
<q>Having read about WordNet's approach, Li met with professor Christiane Fellbaum, a researcher influential in the continued work on WordNet, during a 2006 visit to Princeton.</q></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Quartz&rft.atitle=The+data+that+transformed+AI+research%E2%80%94and+possibly+the+world&rft.date=2017-07-26&rft.aulast=Gershgorn&rft.aufirst=Dave&rft_id=https%3A%2F%2Fqz.com%2F1034972%2Fthe-data-that-changed-the-direction-of-ai-research-and-possibly-the-world%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-9"><span class="mw-cite-backlink"><b><a href="#cite_ref-9">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBiederman1987" class="citation journal cs1"><a href="/wiki/Irving_Biederman" title="Irving Biederman">Biederman, Irving</a> (1987). <a rel="nofollow" class="external text" href="https://dx.doi.org/10.1037//0033-295x.94.2.115">"Recognition-by-components: A theory of human image understanding"</a>. <i>Psychological Review</i>. <b>94</b> (2): 115–117. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1037%2F0033-295x.94.2.115">10.1037/0033-295x.94.2.115</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0033-295X">0033-295X</a>. 
<a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/3575582">3575582</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Psychological+Review&rft.atitle=Recognition-by-components%3A+A+theory+of+human+image+understanding.&rft.volume=94&rft.issue=2&rft.pages=115-117&rft.date=1987&rft.issn=0033-295X&rft_id=info%3Apmid%2F3575582&rft_id=info%3Adoi%2F10.1037%2F0033-295x.94.2.115&rft.aulast=Biederman&rft.aufirst=Irving&rft_id=http%3A%2F%2Fdx.doi.org%2F10.1037%2F%2F0033-295x.94.2.115&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-:1-10"><span class="mw-cite-backlink">^ <a href="#cite_ref-:1_10-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:1_10-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-:1_10-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLee2024" class="citation web cs1">Lee, Timothy B. (11 November 2024). <a rel="nofollow" class="external text" href="https://arstechnica.com/ai/2024/11/how-a-stubborn-computer-scientist-accidentally-launched-the-deep-learning-boom/">"How a stubborn computer scientist accidentally launched the deep learning boom"</a>. <i>Ars Technica</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">12 November</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Ars+Technica&rft.atitle=How+a+stubborn+computer+scientist+accidentally+launched+the+deep+learning+boom&rft.date=2024-11-11&rft.aulast=Lee&rft.aufirst=Timothy+B.&rft_id=https%3A%2F%2Farstechnica.com%2Fai%2F2024%2F11%2Fhow-a-stubborn-computer-scientist-accidentally-launched-the-deep-learning-boom%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-:5-11"><span class="mw-cite-backlink">^ <a href="#cite_ref-:5_11-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:5_11-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text">Li, F-F. ImageNet. "<a rel="nofollow" class="external text" href="https://web.archive.org/web/20130115112543/http://www.image-net.org/papers/ImageNet_2010.pdf">Crowdsourcing, benchmarking & other cool things</a>." <i>CMU VASC Semin</i> 16 (2010): 18-25.</span> </li> <li id="cite_note-12"><span class="mw-cite-backlink"><b><a href="#cite_ref-12">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://tab.computer.org/pamitc/archive/cvpr2009/posters.html">"CVPR 2009: IEEE Computer Society Conference on Computer Vision and Pattern Recognition"</a>. <i>tab.computer.org</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">13 November</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=tab.computer.org&rft.atitle=CVPR+2009%3A+IEEE+Computer+Society+Conference+on+Computer+Vision+and+Pattern+Recognition&rft_id=http%3A%2F%2Ftab.computer.org%2Fpamitc%2Farchive%2Fcvpr2009%2Fposters.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-:2-13"><span class="mw-cite-backlink">^ <a href="#cite_ref-:2_13-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:2_13-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDengDongSocherLi2009" class="citation cs2">Deng, Jia; Dong, Wei; Socher, Richard; Li, Li-Jia; Li, Kai; Fei-Fei, Li (2009), <a rel="nofollow" class="external text" href="https://web.archive.org/web/20210115185228/http://www.image-net.org/papers/imagenet_cvpr09.pdf">"ImageNet: A Large-Scale Hierarchical Image Database"</a> <span class="cs1-format">(PDF)</span>, <i>2009 conference on Computer Vision and Pattern Recognition</i>, archived from <a rel="nofollow" class="external text" href="http://www.image-net.org/papers/imagenet_cvpr09.pdf">the original</a> <span class="cs1-format">(PDF)</span> on 15 January 2021<span class="reference-accessdate">, retrieved <span class="nowrap">26 July</span> 2017</span></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=ImageNet%3A+A+Large-Scale+Hierarchical+Image+Database&rft.btitle=2009+conference+on+Computer+Vision+and+Pattern+Recognition&rft.date=2009&rft.aulast=Deng&rft.aufirst=Jia&rft.au=Dong%2C+Wei&rft.au=Socher%2C+Richard&rft.au=Li%2C+Li-Jia&rft.au=Li%2C+Kai&rft.au=Fei-Fei%2C+Li&rft_id=http%3A%2F%2Fwww.image-net.org%2Fpapers%2Fimagenet_cvpr09.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" 
class="Z3988"></span></span> </li> <li id="cite_note-14"><span class="mw-cite-backlink"><b><a href="#cite_ref-14">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLi2015" class="citation cs2">Li, Fei-Fei (23 March 2015), <a rel="nofollow" class="external text" href="https://www.ted.com/talks/fei_fei_li_how_we_re_teaching_computers_to_understand_pictures?language=en"><i>How we're teaching computers to understand pictures</i></a><span class="reference-accessdate">, retrieved <span class="nowrap">16 December</span> 2018</span></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=How+we%27re+teaching+computers+to+understand+pictures&rft.date=2015-03-23&rft.aulast=Li&rft.aufirst=Fei-Fei&rft_id=https%3A%2F%2Fwww.ted.com%2Ftalks%2Ffei_fei_li_how_we_re_teaching_computers_to_understand_pictures%3Flanguage%3Den&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-15"><span class="mw-cite-backlink"><b><a href="#cite_ref-15">^</a></b></span> <span class="reference-text">Deng, Jia, et al. "<a rel="nofollow" class="external text" href="https://web.archive.org/web/20130115112451/http://www.image-net.org/papers/ImageNet_VSS2009.pdf">Construction and analysis of a large scale image ontology</a>." <i>Vision Sciences Society</i> 186.2 (2009).</span> </li> <li id="cite_note-:0-16"><span class="mw-cite-backlink">^ <a href="#cite_ref-:0_16-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:0_16-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-:0_16-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKrizhevskySutskeverHinton2017" class="citation journal cs1">Krizhevsky, Alex; Sutskever, Ilya; Hinton, Geoffrey E. (June 2017). 
<a rel="nofollow" class="external text" href="https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf">"ImageNet classification with deep convolutional neural networks"</a> <span class="cs1-format">(PDF)</span>. <i>Communications of the ACM</i>. <b>60</b> (6): 84–90. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F3065386">10.1145/3065386</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0001-0782">0001-0782</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:195908774">195908774</a><span class="reference-accessdate">. Retrieved <span class="nowrap">24 May</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Communications+of+the+ACM&rft.atitle=ImageNet+classification+with+deep+convolutional+neural+networks&rft.volume=60&rft.issue=6&rft.pages=84-90&rft.date=2017-06&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A195908774%23id-name%3DS2CID&rft.issn=0001-0782&rft_id=info%3Adoi%2F10.1145%2F3065386&rft.aulast=Krizhevsky&rft.aufirst=Alex&rft.au=Sutskever%2C+Ilya&rft.au=Hinton%2C+Geoffrey+E.&rft_id=https%3A%2F%2Fpapers.nips.cc%2Fpaper%2F4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-17"><span class="mw-cite-backlink"><b><a href="#cite_ref-17">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite 
class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.ft.com/content/4cc048f6-d5f4-11e7-a303-9060cb1e5f44">"Machines 'beat humans' for a growing number of tasks"</a>. <i>Financial Times</i>. 30 November 2017<span class="reference-accessdate">. Retrieved <span class="nowrap">3 February</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Financial+Times&rft.atitle=Machines+%27beat+humans%27+for+a+growing+number+of+tasks&rft.date=2017-11-30&rft_id=https%3A%2F%2Fwww.ft.com%2Fcontent%2F4cc048f6-d5f4-11e7-a303-9060cb1e5f44&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-18"><span class="mw-cite-backlink"><b><a href="#cite_ref-18">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGershgorn2018" class="citation web cs1">Gershgorn, Dave (18 June 2018). <a rel="nofollow" class="external text" href="https://qz.com/1307091/the-inside-story-of-how-ai-got-good-enough-to-dominate-silicon-valley/">"The inside story of how AI got good enough to dominate Silicon Valley"</a>. <i>Quartz</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">10 December</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Quartz&rft.atitle=The+inside+story+of+how+AI+got+good+enough+to+dominate+Silicon+Valley&rft.date=2018-06-18&rft.aulast=Gershgorn&rft.aufirst=Dave&rft_id=https%3A%2F%2Fqz.com%2F1307091%2Fthe-inside-story-of-how-ai-got-good-enough-to-dominate-silicon-valley%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-microsoft2015-19"><span class="mw-cite-backlink">^ <a href="#cite_ref-microsoft2015_19-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-microsoft2015_19-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHeZhangRenSun2016" class="citation book cs1">He, Kaiming; Zhang, Xiangyu; Ren, Shaoqing; Sun, Jian (2016). "Deep Residual Learning for Image Recognition". <i>2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)</i>. pp. 770–778. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1512.03385">1512.03385</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FCVPR.2016.90">10.1109/CVPR.2016.90</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4673-8851-1" title="Special:BookSources/978-1-4673-8851-1"><bdi>978-1-4673-8851-1</bdi></a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:206594692">206594692</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Deep+Residual+Learning+for+Image+Recognition&rft.btitle=2016+IEEE+Conference+on+Computer+Vision+and+Pattern+Recognition+%28CVPR%29&rft.pages=770-778&rft.date=2016&rft_id=info%3Aarxiv%2F1512.03385&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A206594692%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2FCVPR.2016.90&rft.isbn=978-1-4673-8851-1&rft.aulast=He&rft.aufirst=Kaiming&rft.au=Zhang%2C+Xiangyu&rft.au=Ren%2C+Shaoqing&rft.au=Sun%2C+Jian&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-20"><span class="mw-cite-backlink"><b><a href="#cite_ref-20">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20130115112755/http://www.image-net.org/about-stats">"ImageNet Summary and Statistics (updated on April 30, 2010)"</a>. 15 January 2013. Archived from <a rel="nofollow" class="external text" href="http://www.image-net.org/about-stats">the original</a> on 15 January 2013<span class="reference-accessdate">. 
Retrieved <span class="nowrap">13 November</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=ImageNet+Summary+and+Statistics+%28updated+on+April+30%2C+2010%29&rft.date=2013-01-15&rft_id=http%3A%2F%2Fwww.image-net.org%2Fabout-stats&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-21"><span class="mw-cite-backlink"><b><a href="#cite_ref-21">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20130122145752/http://www.image-net.org/download-API">"ImageNet API documentation"</a>. 22 January 2013. Archived from <a rel="nofollow" class="external text" href="http://www.image-net.org/download-API">the original</a> on 22 January 2013<span class="reference-accessdate">. Retrieved <span class="nowrap">13 November</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=ImageNet+API+documentation&rft.date=2013-01-22&rft_id=http%3A%2F%2Fwww.image-net.org%2Fdownload-API&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-:4-22"><span class="mw-cite-backlink">^ <a href="#cite_ref-:4_22-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:4_22-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text">Berg, Alex, Jia Deng, and L. Fei-Fei. "<a rel="nofollow" class="external text" href="https://www.image-net.org/static_files/files/pascal_ilsvrc.pdf">Large scale visual recognition challenge 2010</a>." 
November 2010.</span> </li> <li id="cite_note-23"><span class="mw-cite-backlink"><b><a href="#cite_ref-23">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://github.com/openai/CLIP/issues/20">"std and mean for image normalization different from ImageNet · Issue #20 · openai/CLIP"</a>. <i>GitHub</i><span class="reference-accessdate">. Retrieved <span class="nowrap">19 September</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=GitHub&rft.atitle=std+and+mean+for+image+normalization+different+from+ImageNet+%C2%B7+Issue+%2320+%C2%B7+openai%2FCLIP&rft_id=https%3A%2F%2Fgithub.com%2Fopenai%2FCLIP%2Fissues%2F20&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-24"><span class="mw-cite-backlink"><b><a href="#cite_ref-24">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20130405035300/http://www.image-net.org/download-features.php">"ImageNet"</a>. 5 April 2013. Archived from <a rel="nofollow" class="external text" href="http://www.image-net.org/download-features.php">the original</a> on 5 April 2013<span class="reference-accessdate">. 
Retrieved <span class="nowrap">13 November</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=ImageNet&rft.date=2013-04-05&rft_id=http%3A%2F%2Fwww.image-net.org%2Fdownload-features.php&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-25"><span class="mw-cite-backlink"><b><a href="#cite_ref-25">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external free" href="https://web.archive.org/web/20181030191122/http://www.image-net.org/api/text/imagenet.sbow.obtain_synset_list">https://web.archive.org/web/20181030191122/http://www.image-net.org/api/text/imagenet.sbow.obtain_synset_list</a></span> </li> <li id="cite_note-26"><span class="mw-cite-backlink"><b><a href="#cite_ref-26">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20130405005059/http://www.image-net.org/download-bboxes">"ImageNet"</a>. Archived from <a rel="nofollow" class="external text" href="http://www.image-net.org/download-bboxes">the original</a> on 5 April 2013.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=ImageNet&rft_id=http%3A%2F%2Fwww.image-net.org%2Fdownload-bboxes&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-27"><span class="mw-cite-backlink"><b><a href="#cite_ref-27">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20191222152337/http://www.image-net.org/download-attributes">"ImageNet"</a>. 
Archived from <a rel="nofollow" class="external text" href="http://www.image-net.org/download-attributes">the original</a> on 22 December 2019.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=ImageNet&rft_id=http%3A%2F%2Fwww.image-net.org%2Fdownload-attributes&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-28"><span class="mw-cite-backlink"><b><a href="#cite_ref-28">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRussakovskyFei-Fei2012" class="citation book cs1">Russakovsky, Olga; Fei-Fei, Li (2012). <a rel="nofollow" class="external text" href="https://link.springer.com/chapter/10.1007/978-3-642-35749-7_1">"Attribute Learning in Large-Scale Datasets"</a>. In Kutulakos, Kiriakos N. (ed.). <i>Trends and Topics in Computer Vision</i>. Lecture Notes in Computer Science. Vol. 6553. Berlin, Heidelberg: Springer. pp. 1–14. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-3-642-35749-7_1">10.1007/978-3-642-35749-7_1</a>. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3-642-35749-7" title="Special:BookSources/978-3-642-35749-7"><bdi>978-3-642-35749-7</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Attribute+Learning+in+Large-Scale+Datasets&rft.btitle=Trends+and+Topics+in+Computer+Vision&rft.place=Berlin%2C+Heidelberg&rft.series=Lecture+Notes+in+Computer+Science&rft.pages=1-14&rft.pub=Springer&rft.date=2012&rft_id=info%3Adoi%2F10.1007%2F978-3-642-35749-7_1&rft.isbn=978-3-642-35749-7&rft.aulast=Russakovsky&rft.aufirst=Olga&rft.au=Fei-Fei%2C+Li&rft_id=https%3A%2F%2Flink.springer.com%2Fchapter%2F10.1007%2F978-3-642-35749-7_1&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-:3-29"><span class="mw-cite-backlink">^ <a href="#cite_ref-:3_29-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:3_29-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-:3_29-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRidnikBen-BaruchNoyZelnik-Manor2021" class="citation arxiv cs1">Ridnik, Tal; Ben-Baruch, Emanuel; Noy, Asaf; Zelnik-Manor, Lihi (5 August 2021). "ImageNet-21K Pretraining for the Masses". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2104.10972">2104.10972</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=ImageNet-21K+Pretraining+for+the+Masses&rft.date=2021-08-05&rft_id=info%3Aarxiv%2F2104.10972&rft.aulast=Ridnik&rft.aufirst=Tal&rft.au=Ben-Baruch%2C+Emanuel&rft.au=Noy%2C+Asaf&rft.au=Zelnik-Manor%2C+Lihi&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-30"><span class="mw-cite-backlink"><b><a href="#cite_ref-30">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.image-net.org/download.php">"ImageNet"</a>. <i>www.image-net.org</i><span class="reference-accessdate">. Retrieved <span class="nowrap">19 October</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=www.image-net.org&rft.atitle=ImageNet&rft_id=https%3A%2F%2Fwww.image-net.org%2Fdownload.php&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-31"><span class="mw-cite-backlink"><b><a href="#cite_ref-31">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.image-net.org/update-mar-11-2021.php">"An Update to the ImageNet Website and Dataset"</a>. <i>www.image-net.org</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">13 November</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=www.image-net.org&rft.atitle=An+Update+to+the+ImageNet+Website+and+Dataset&rft_id=https%3A%2F%2Fwww.image-net.org%2Fupdate-mar-11-2021.php&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-32"><span class="mw-cite-backlink"><b><a href="#cite_ref-32">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRechtRoelofsSchmidtShankar2019" class="citation journal cs1">Recht, Benjamin; Roelofs, Rebecca; Schmidt, Ludwig; Shankar, Vaishaal (24 May 2019). <a rel="nofollow" class="external text" href="https://proceedings.mlr.press/v97/recht19a.html">"Do ImageNet Classifiers Generalize to ImageNet?"</a>. <i>Proceedings of the 36th International Conference on Machine Learning</i>. 
PMLR: 5389–5400.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Proceedings+of+the+36th+International+Conference+on+Machine+Learning&rft.atitle=Do+ImageNet+Classifiers+Generalize+to+ImageNet%3F&rft.pages=5389-5400&rft.date=2019-05-24&rft.aulast=Recht&rft.aufirst=Benjamin&rft.au=Roelofs%2C+Rebecca&rft.au=Schmidt%2C+Ludwig&rft.au=Shankar%2C+Vaishaal&rft_id=https%3A%2F%2Fproceedings.mlr.press%2Fv97%2Frecht19a.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-33"><span class="mw-cite-backlink"><b><a href="#cite_ref-33">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="https://www.image-net.org/static_files/files/ILSVRC2010_NEC-UIUC.pdf">ImageNet classification: fast descriptor coding and large-scale SVM training</a></span> </li> <li id="cite_note-34"><span class="mw-cite-backlink"><b><a href="#cite_ref-34">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLinLvZhuYang2011" class="citation book cs1">Lin, Yuanqing; Lv, Fengjun; Zhu, Shenghuo; Yang, Ming; Cour, Timothee; Yu, Kai; Cao, Liangliang; Huang, Thomas (June 2011). <a rel="nofollow" class="external text" href="https://dx.doi.org/10.1109/cvpr.2011.5995477">"Large-scale image classification: Fast feature extraction and SVM training"</a>. <i>CVPR 2011</i>. IEEE. pp. 1689–1696. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2Fcvpr.2011.5995477">10.1109/cvpr.2011.5995477</a>. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4577-0394-2" title="Special:BookSources/978-1-4577-0394-2"><bdi>978-1-4577-0394-2</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Large-scale+image+classification%3A+Fast+feature+extraction+and+SVM+training&rft.btitle=CVPR+2011&rft.pages=1689-1696&rft.pub=IEEE&rft.date=2011-06&rft_id=info%3Adoi%2F10.1109%2Fcvpr.2011.5995477&rft.isbn=978-1-4577-0394-2&rft.aulast=Lin&rft.aufirst=Yuanqing&rft.au=Lv%2C+Fengjun&rft.au=Zhu%2C+Shenghuo&rft.au=Yang%2C+Ming&rft.au=Cour%2C+Timothee&rft.au=Yu%2C+Kai&rft.au=Cao%2C+Liangliang&rft.au=Huang%2C+Thomas&rft_id=http%3A%2F%2Fdx.doi.org%2F10.1109%2Fcvpr.2011.5995477&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-35"><span class="mw-cite-backlink"><b><a href="#cite_ref-35">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSanchezPerronnin2011" class="citation book cs1">Sanchez, Jorge; Perronnin, Florent (June 2011). <a rel="nofollow" class="external text" href="https://dx.doi.org/10.1109/cvpr.2011.5995504">"High-dimensional signature compression for large-scale image classification"</a>. <i>CVPR 2011</i>. IEEE. pp. 1665–1672. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2Fcvpr.2011.5995504">10.1109/cvpr.2011.5995504</a>. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4577-0394-2" title="Special:BookSources/978-1-4577-0394-2"><bdi>978-1-4577-0394-2</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=High-dimensional+signature+compression+for+large-scale+image+classification&rft.btitle=CVPR+2011&rft.pages=1665-1672&rft.pub=IEEE&rft.date=2011-06&rft_id=info%3Adoi%2F10.1109%2Fcvpr.2011.5995504&rft.isbn=978-1-4577-0394-2&rft.aulast=Sanchez&rft.aufirst=Jorge&rft.au=Perronnin%2C+Florent&rft_id=http%3A%2F%2Fdx.doi.org%2F10.1109%2Fcvpr.2011.5995504&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-36"><span class="mw-cite-backlink"><b><a href="#cite_ref-36">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFPerronninSánchezMensink2010" class="citation book cs1">Perronnin, Florent; Sánchez, Jorge; Mensink, Thomas (2010). <a rel="nofollow" class="external text" href="https://link.springer.com/chapter/10.1007/978-3-642-15561-1_11">"Improving the Fisher Kernel for Large-Scale Image Classification"</a>. In Daniilidis, Kostas; Maragos, Petros; Paragios, Nikos (eds.). <i>Computer Vision – ECCV 2010</i>. Lecture Notes in Computer Science. Vol. 6314. Berlin, Heidelberg: Springer. pp. 143–156. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F978-3-642-15561-1_11">10.1007/978-3-642-15561-1_11</a>. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3-642-15561-1" title="Special:BookSources/978-3-642-15561-1"><bdi>978-3-642-15561-1</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Improving+the+Fisher+Kernel+for+Large-Scale+Image+Classification&rft.btitle=Computer+Vision+%E2%80%93+ECCV+2010&rft.place=Berlin%2C+Heidelberg&rft.series=Lecture+Notes+in+Computer+Science&rft.pages=143-156&rft.pub=Springer&rft.date=2010&rft_id=info%3Adoi%2F10.1007%2F978-3-642-15561-1_11&rft.isbn=978-3-642-15561-1&rft.aulast=Perronnin&rft.aufirst=Florent&rft.au=S%C3%A1nchez%2C+Jorge&rft.au=Mensink%2C+Thomas&rft_id=https%3A%2F%2Flink.springer.com%2Fchapter%2F10.1007%2F978-3-642-15561-1_11&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-37"><span class="mw-cite-backlink"><b><a href="#cite_ref-37">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="https://web.archive.org/web/20201027234359/http://image-net.org/challenges/LSVRC/2011/ilsvrc11.pdf">"XRCE@ILSVRC2011: Compressed Fisher vectors for LSVR"</a>, Florent Perronnin and Jorge Sánchez, Xerox Research Centre Europe (XRCE)</span> </li> <li id="cite_note-38"><span class="mw-cite-backlink"><b><a href="#cite_ref-38">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external free" href="https://www.image-net.org/challenges/LSVRC/2012/results">https://www.image-net.org/challenges/LSVRC/2012/results</a></span> </li> <li id="cite_note-39"><span class="mw-cite-backlink"><b><a href="#cite_ref-39">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGershgorn2017" class="citation news cs1">Gershgorn, Dave (10 September 2017). 
<a rel="nofollow" class="external text" href="https://qz.com/1046350/the-quartz-guide-to-artificial-intelligence-what-is-it-why-is-it-important-and-should-we-be-afraid/">"The Quartz guide to artificial intelligence: What is it, why is it important, and should we be afraid?"</a>. <i>Quartz</i><span class="reference-accessdate">. Retrieved <span class="nowrap">3 February</span> 2018</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Quartz&rft.atitle=The+Quartz+guide+to+artificial+intelligence%3A+What+is+it%2C+why+is+it+important%2C+and+should+we+be+afraid%3F&rft.date=2017-09-10&rft.aulast=Gershgorn&rft.aufirst=Dave&rft_id=https%3A%2F%2Fqz.com%2F1046350%2Fthe-quartz-guide-to-artificial-intelligence-what-is-it-why-is-it-important-and-should-we-be-afraid%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-40"><span class="mw-cite-backlink"><b><a href="#cite_ref-40">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMarkoff2015" class="citation news cs1">Markoff, John (10 December 2015). <a rel="nofollow" class="external text" href="https://www.nytimes.com/2015/12/11/science/an-advance-in-artificial-intelligence-rivals-human-vision-abilities.html">"A Learning Advance in Artificial Intelligence Rivals Human Abilities"</a>. <i>The New York Times</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">22 June</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+New+York+Times&rft.atitle=A+Learning+Advance+in+Artificial+Intelligence+Rivals+Human+Abilities&rft.date=2015-12-10&rft.aulast=Markoff&rft.aufirst=John&rft_id=https%3A%2F%2Fwww.nytimes.com%2F2015%2F12%2F11%2Fscience%2Fan-advance-in-artificial-intelligence-rivals-human-vision-abilities.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-41"><span class="mw-cite-backlink"><b><a href="#cite_ref-41">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAron2015" class="citation news cs1">Aron, Jacob (21 September 2015). <a rel="nofollow" class="external text" href="https://www.newscientist.com/article/dn28206-forget-the-turing-test-there-are-better-ways-of-judging-ai/">"Forget the Turing test – there are better ways of judging AI"</a>. <i>New Scientist</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">22 June</span> 2016</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=New+Scientist&rft.atitle=Forget+the+Turing+test+%E2%80%93+there+are+better+ways+of+judging+AI&rft.date=2015-09-21&rft.aulast=Aron&rft.aufirst=Jacob&rft_id=https%3A%2F%2Fwww.newscientist.com%2Farticle%2Fdn28206-forget-the-turing-test-there-are-better-ways-of-judging-ai%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-42"><span class="mw-cite-backlink"><b><a href="#cite_ref-42">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFNorthcuttAthalyeMueller2021" class="citation cs2">Northcutt, Curtis G.; Athalye, Anish; Mueller, Jonas (7 November 2021), <i>Pervasive Label Errors in Test Sets Destabilize Machine Learning Benchmarks</i>, <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2103.14749">2103.14749</a></span></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Pervasive+Label+Errors+in+Test+Sets+Destabilize+Machine+Learning+Benchmarks&rft.date=2021-11-07&rft_id=info%3Aarxiv%2F2103.14749&rft.aulast=Northcutt&rft.aufirst=Curtis+G.&rft.au=Athalye%2C+Anish&rft.au=Mueller%2C+Jonas&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-43"><span class="mw-cite-backlink"><b><a href="#cite_ref-43">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBeyerHénaffKolesnikovZhai2020" class="citation cs2">Beyer, Lucas; Hénaff, Olivier J.; Kolesnikov, Alexander; Zhai, Xiaohua; Oord, Aäron van den 
(12 June 2020), <i>Are we done with ImageNet?</i>, <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2006.07159">2006.07159</a></span></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Are+we+done+with+ImageNet%3F&rft.date=2020-06-12&rft_id=info%3Aarxiv%2F2006.07159&rft.aulast=Beyer&rft.aufirst=Lucas&rft.au=H%C3%A9naff%2C+Olivier+J.&rft.au=Kolesnikov%2C+Alexander&rft.au=Zhai%2C+Xiaohua&rft.au=Oord%2C+A%C3%A4ron+van+den&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-44"><span class="mw-cite-backlink"><b><a href="#cite_ref-44">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation magazine cs1"><a rel="nofollow" class="external text" href="https://www.wired.com/story/viral-app-labels-you-isnt-what-you-think/">"The Viral App That Labels You Isn't Quite What You Think"</a>. <i>Wired</i>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/1059-1028">1059-1028</a><span class="reference-accessdate">. 
Retrieved <span class="nowrap">22 September</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Wired&rft.atitle=The+Viral+App+That+Labels+You+Isn%27t+Quite+What+You+Think&rft.issn=1059-1028&rft_id=https%3A%2F%2Fwww.wired.com%2Fstory%2Fviral-app-labels-you-isnt-what-you-think%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-45"><span class="mw-cite-backlink"><b><a href="#cite_ref-45">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWong2019" class="citation news cs1"><a href="/wiki/Julia_Carrie_Wong" title="Julia Carrie Wong">Wong, Julia Carrie</a> (18 September 2019). <a rel="nofollow" class="external text" href="https://www.theguardian.com/technology/2019/sep/17/imagenet-roulette-asian-racist-slur-selfie">"The viral selfie app ImageNet Roulette seemed fun – until it called me a racist slur"</a>. <i>The Guardian</i>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0261-3077">0261-3077</a><span class="reference-accessdate">. 
Retrieved <span class="nowrap">22 September</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+Guardian&rft.atitle=The+viral+selfie+app+ImageNet+Roulette+seemed+fun+%E2%80%93+until+it+called+me+a+racist+slur&rft.date=2019-09-18&rft.issn=0261-3077&rft.aulast=Wong&rft.aufirst=Julia+Carrie&rft_id=https%3A%2F%2Fwww.theguardian.com%2Ftechnology%2F2019%2Fsep%2F17%2Fimagenet-roulette-asian-racist-slur-selfie&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-46"><span class="mw-cite-backlink"><b><a href="#cite_ref-46">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCrawfordPaglen2019" class="citation web cs1">Crawford, Kate; Paglen, Trevor (19 September 2019). <a rel="nofollow" class="external text" href="https://www.excavating.ai/">"Excavating AI: The Politics of Training Sets for Machine Learning"</a>. <i>-</i><span class="reference-accessdate">. Retrieved <span class="nowrap">22 September</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=-&rft.atitle=Excavating+AI%3A+The+Politics+of+Training+Sets+for+Machine+Learning&rft.date=2019-09-19&rft.aulast=Crawford&rft.aufirst=Kate&rft.au=Paglen%2C+Trevor&rft_id=https%3A%2F%2Fwww.excavating.ai%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> <li id="cite_note-47"><span class="mw-cite-backlink"><b><a href="#cite_ref-47">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLyons2020" class="citation journal cs1">Lyons, Michael (24 December 2020). "Excavating "Excavating AI": The Elephant in the Gallery". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2009.01215">2009.01215</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.5281%2Fzenodo.4037538">10.5281/zenodo.4037538</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.atitle=Excavating+%22Excavating+AI%22%3A+The+Elephant+in+the+Gallery&rft.date=2020-12-24&rft_id=info%3Aarxiv%2F2009.01215&rft_id=info%3Adoi%2F10.5281%2Fzenodo.4037538&rft.aulast=Lyons&rft.aufirst=Michael&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span> <span class="cs1-visible-error citation-comment"><code class="cs1-code">{{<a href="/wiki/Template:Cite_journal" title="Template:Cite journal">cite journal</a>}}</code>: </span><span class="cs1-visible-error citation-comment">Cite journal requires <code class="cs1-code">|journal=</code> (<a href="/wiki/Help:CS1_errors#missing_periodical" title="Help:CS1 errors">help</a>)</span></span> </li> <li id="cite_note-48"><span class="mw-cite-backlink"><b><a href="#cite_ref-48">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://image-net.org/update-sep-17-2019.php">"Towards Fairer Datasets: Filtering and Balancing the Distribution of the People Subtree in the ImageNet Hierarchy"</a>. <i>image-net.org</i>. 17 September 2019<span class="reference-accessdate">. 
Retrieved <span class="nowrap">22 September</span> 2019</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=image-net.org&rft.atitle=Towards+Fairer+Datasets%3A+Filtering+and+Balancing+the+Distribution+of+the+People+Subtree+in+the+ImageNet+Hierarchy&rft.date=2019-09-17&rft_id=http%3A%2F%2Fimage-net.org%2Fupdate-sep-17-2019.php&rfr_id=info%3Asid%2Fen.wikipedia.org%3AImageNet" class="Z3988"></span></span> </li> </ol></div> <div class="mw-heading mw-heading2"><h2 id="External_links">External links</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=ImageNet&action=edit&section=14" title="Edit section: External links"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <ul><li><span class="official-website"><span class="url"><a rel="nofollow" class="external text" href="http://image-net.org">Official website</a></span></span></li></ul> <div class="navbox-styles"><style data-mw-deduplicate="TemplateStyles:r1129693374">.mw-parser-output .hlist dl,.mw-parser-output .hlist ol,.mw-parser-output .hlist ul{margin:0;padding:0}.mw-parser-output .hlist dd,.mw-parser-output .hlist dt,.mw-parser-output .hlist li{margin:0;display:inline}.mw-parser-output .hlist.inline,.mw-parser-output .hlist.inline dl,.mw-parser-output .hlist.inline ol,.mw-parser-output .hlist.inline ul,.mw-parser-output .hlist dl dl,.mw-parser-output .hlist dl ol,.mw-parser-output .hlist dl ul,.mw-parser-output .hlist ol dl,.mw-parser-output .hlist ol ol,.mw-parser-output .hlist ol ul,.mw-parser-output .hlist ul dl,.mw-parser-output .hlist ul ol,.mw-parser-output .hlist ul ul{display:inline}.mw-parser-output .hlist .mw-empty-li{display:none}.mw-parser-output .hlist dt::after{content:": "}.mw-parser-output .hlist dd::after,.mw-parser-output .hlist li::after{content:" · ";font-weight:bold}.mw-parser-output .hlist dd:last-child::after,.mw-parser-output .hlist 
dt:last-child::after,.mw-parser-output .hlist li:last-child::after{content:none}.mw-parser-output .hlist dd dd:first-child::before,.mw-parser-output .hlist dd dt:first-child::before,.mw-parser-output .hlist dd li:first-child::before,.mw-parser-output .hlist dt dd:first-child::before,.mw-parser-output .hlist dt dt:first-child::before,.mw-parser-output .hlist dt li:first-child::before,.mw-parser-output .hlist li dd:first-child::before,.mw-parser-output .hlist li dt:first-child::before,.mw-parser-output .hlist li li:first-child::before{content:" (";font-weight:normal}.mw-parser-output .hlist dd dd:last-child::after,.mw-parser-output .hlist dd dt:last-child::after,.mw-parser-output .hlist dd li:last-child::after,.mw-parser-output .hlist dt dd:last-child::after,.mw-parser-output .hlist dt dt:last-child::after,.mw-parser-output .hlist dt li:last-child::after,.mw-parser-output .hlist li dd:last-child::after,.mw-parser-output .hlist li dt:last-child::after,.mw-parser-output .hlist li li:last-child::after{content:")";font-weight:normal}.mw-parser-output .hlist ol{counter-reset:listitem}.mw-parser-output .hlist ol>li{counter-increment:listitem}.mw-parser-output .hlist ol>li::before{content:" "counter(listitem)"\a0 "}.mw-parser-output .hlist dd ol>li:first-child::before,.mw-parser-output .hlist dt ol>li:first-child::before,.mw-parser-output .hlist li ol>li:first-child::before{content:" ("counter(listitem)"\a0 "}</style><style data-mw-deduplicate="TemplateStyles:r1236075235">.mw-parser-output .navbox{box-sizing:border-box;border:1px solid #a2a9b1;width:100%;clear:both;font-size:88%;text-align:center;padding:1px;margin:1em auto 0}.mw-parser-output .navbox .navbox{margin-top:0}.mw-parser-output .navbox+.navbox,.mw-parser-output .navbox+.navbox-styles+.navbox{margin-top:-1px}.mw-parser-output .navbox-inner,.mw-parser-output .navbox-subgroup{width:100%}.mw-parser-output .navbox-group,.mw-parser-output .navbox-title,.mw-parser-output .navbox-abovebelow{padding:0.25em 
1em;line-height:1.5em;text-align:center}.mw-parser-output .navbox-group{white-space:nowrap;text-align:right}.mw-parser-output .navbox,.mw-parser-output .navbox-subgroup{background-color:#fdfdfd}.mw-parser-output .navbox-list{line-height:1.5em;border-color:#fdfdfd}.mw-parser-output .navbox-list-with-group{text-align:left;border-left-width:2px;border-left-style:solid}.mw-parser-output tr+tr>.navbox-abovebelow,.mw-parser-output tr+tr>.navbox-group,.mw-parser-output tr+tr>.navbox-image,.mw-parser-output tr+tr>.navbox-list{border-top:2px solid #fdfdfd}.mw-parser-output .navbox-title{background-color:#ccf}.mw-parser-output .navbox-abovebelow,.mw-parser-output .navbox-group,.mw-parser-output .navbox-subgroup .navbox-title{background-color:#ddf}.mw-parser-output .navbox-subgroup .navbox-group,.mw-parser-output .navbox-subgroup .navbox-abovebelow{background-color:#e6e6ff}.mw-parser-output .navbox-even{background-color:#f7f7f7}.mw-parser-output .navbox-odd{background-color:transparent}.mw-parser-output .navbox .hlist td dl,.mw-parser-output .navbox .hlist td ol,.mw-parser-output .navbox .hlist td ul,.mw-parser-output .navbox td.hlist dl,.mw-parser-output .navbox td.hlist ol,.mw-parser-output .navbox td.hlist ul{padding:0.125em 0}.mw-parser-output .navbox .navbar{display:block;font-size:100%}.mw-parser-output .navbox-title .navbar{float:left;text-align:left;margin-right:0.5em}body.skin--responsive .mw-parser-output .navbox-image img{max-width:none!important}@media print{body.ns-0 .mw-parser-output .navbox{display:none!important}}</style></div><div role="navigation" class="navbox" aria-labelledby="Differentiable_computing" style="padding:3px"><table class="nowraplinks hlist mw-collapsible autocollapse navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="col" class="navbox-title" colspan="2"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><style 
data-mw-deduplicate="TemplateStyles:r1239400231">.mw-parser-output .navbar{display:inline;font-size:88%;font-weight:normal}.mw-parser-output .navbar-collapse{float:left;text-align:left}.mw-parser-output .navbar-boxtext{word-spacing:0}.mw-parser-output .navbar ul{display:inline-block;white-space:nowrap;line-height:inherit}.mw-parser-output .navbar-brackets::before{margin-right:-0.125em;content:"[ "}.mw-parser-output .navbar-brackets::after{margin-left:-0.125em;content:" ]"}.mw-parser-output .navbar li{word-spacing:-0.125em}.mw-parser-output .navbar a>span,.mw-parser-output .navbar a>abbr{text-decoration:inherit}.mw-parser-output .navbar-mini abbr{font-variant:small-caps;border-bottom:none;text-decoration:none;cursor:inherit}.mw-parser-output .navbar-ct-full{font-size:114%;margin:0 7em}.mw-parser-output .navbar-ct-mini{font-size:114%;margin:0 4em}html.skin-theme-clientpref-night .mw-parser-output .navbar li a abbr{color:var(--color-base)!important}@media(prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .navbar li a abbr{color:var(--color-base)!important}}@media print{.mw-parser-output .navbar{display:none!important}}</style><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Differentiable_computing" title="Template:Differentiable computing"><abbr title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/wiki/Template_talk:Differentiable_computing" title="Template talk:Differentiable computing"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Differentiable_computing" title="Special:EditPage/Template:Differentiable computing"><abbr title="Edit this template">e</abbr></a></li></ul></div><div id="Differentiable_computing" style="font-size:114%;margin:0 4em">Differentiable computing</div></th></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Differentiable_function" title="Differentiable 
function">General</a></th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><b><a href="/wiki/Differentiable_programming" title="Differentiable programming">Differentiable programming</a></b></li> <li><a href="/wiki/Information_geometry" title="Information geometry">Information geometry</a></li> <li><a href="/wiki/Statistical_manifold" title="Statistical manifold">Statistical manifold</a></li> <li><a href="/wiki/Automatic_differentiation" title="Automatic differentiation">Automatic differentiation</a></li> <li><a href="/wiki/Neuromorphic_computing" title="Neuromorphic computing">Neuromorphic computing</a></li> <li><a href="/wiki/Pattern_recognition" title="Pattern recognition">Pattern recognition</a></li> <li><a href="/wiki/Ricci_calculus" title="Ricci calculus">Ricci calculus</a></li> <li><a href="/wiki/Computational_learning_theory" title="Computational learning theory">Computational learning theory</a></li> <li><a href="/wiki/Inductive_bias" title="Inductive bias">Inductive bias</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Hardware</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Graphcore" title="Graphcore">IPU</a></li> <li><a href="/wiki/Tensor_Processing_Unit" title="Tensor Processing Unit">TPU</a></li> <li><a href="/wiki/Vision_processing_unit" title="Vision processing unit">VPU</a></li> <li><a href="/wiki/Memristor" title="Memristor">Memristor</a></li> <li><a href="/wiki/SpiNNaker" title="SpiNNaker">SpiNNaker</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Software libraries</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/TensorFlow" title="TensorFlow">TensorFlow</a></li> <li><a href="/wiki/PyTorch" 
title="PyTorch">PyTorch</a></li> <li><a href="/wiki/Keras" title="Keras">Keras</a></li> <li><a href="/wiki/Scikit-learn" title="Scikit-learn">scikit-learn</a></li> <li><a href="/wiki/Theano_(software)" title="Theano (software)">Theano</a></li> <li><a href="/wiki/Google_JAX" title="Google JAX">JAX</a></li> <li><a href="/wiki/Flux_(machine-learning_framework)" title="Flux (machine-learning framework)">Flux.jl</a></li> <li><a href="/wiki/MindSpore" title="MindSpore">MindSpore</a></li></ul> </div></td></tr><tr><td class="navbox-abovebelow" colspan="2"><div> <ul><li><span class="noviewer" typeof="mw:File"><a href="/wiki/File:Symbol_portal_class.svg" class="mw-file-description" title="Portal"><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/e/e2/Symbol_portal_class.svg/16px-Symbol_portal_class.svg.png" decoding="async" width="16" height="16" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/e/e2/Symbol_portal_class.svg/23px-Symbol_portal_class.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/e/e2/Symbol_portal_class.svg/31px-Symbol_portal_class.svg.png 2x" data-file-width="180" data-file-height="185" /></a></span> Portals <ul><li><a href="/wiki/Portal:Computer_programming" title="Portal:Computer programming">Computer programming</a></li> <li><a href="/wiki/Portal:Technology" title="Portal:Technology">Technology</a></li></ul></li></ul> </div></td></tr></tbody></table></div> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236075235"></div><div role="navigation" class="navbox" aria-labelledby="Standard_test_items" style="padding:3px"><table class="nowraplinks mw-collapsible autocollapse navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="col" class="navbox-title" colspan="2"><link rel="mw-deduplicated-inline-style" 
href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1239400231"><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Standard_test_item" title="Template:Standard test item"><abbr title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/wiki/Template_talk:Standard_test_item" title="Template talk:Standard test item"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Standard_test_item" title="Special:EditPage/Template:Standard test item"><abbr title="Edit this template">e</abbr></a></li></ul></div><div id="Standard_test_items" style="font-size:114%;margin:0 4em">Standard test items</div></th></tr><tr><td class="navbox-abovebelow hlist" colspan="2"><div> <ul><li><a href="/wiki/Pangram" title="Pangram">Pangram</a></li> <li><a href="/wiki/Reference_implementation" title="Reference implementation">Reference implementation</a></li> <li><a href="/wiki/Sanity_check" title="Sanity check">Sanity check</a></li> <li><a href="/wiki/Standard_test_image" title="Standard test image">Standard test image</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Artificial_intelligence" title="Artificial intelligence">Artificial intelligence</a></th><td class="navbox-list-with-group navbox-list navbox-odd hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Chinese_room" title="Chinese room">Chinese room</a></li> <li><a href="/wiki/Turing_test" title="Turing test">Turing test</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Television (<a href="/wiki/Test_card" title="Test card">test card</a>)</th><td class="navbox-list-with-group navbox-list navbox-even hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/SMPTE_color_bars" title="SMPTE color 
bars">SMPTE color bars</a></li> <li><a href="/wiki/EBU_colour_bars" title="EBU colour bars">EBU colour bars</a></li> <li><a href="/wiki/Indian-head_test_pattern" title="Indian-head test pattern">Indian-head test pattern</a></li> <li><a href="/wiki/EIA_1956_resolution_chart" title="EIA 1956 resolution chart">EIA 1956 resolution chart</a></li> <li><a href="/wiki/List_of_BBC_test_cards" title="List of BBC test cards">BBC Test Card</a> <a href="/wiki/List_of_BBC_test_cards#Test_Card_A" title="List of BBC test cards">A</a>, <a href="/wiki/List_of_BBC_test_cards#Test_Card_B" title="List of BBC test cards">B</a>, <a href="/wiki/List_of_BBC_test_cards#Test_Card_C" title="List of BBC test cards">C</a>, <a href="/wiki/List_of_BBC_test_cards#Test_Card_D" title="List of BBC test cards">D</a>, <a href="/wiki/List_of_BBC_test_cards#Test_Card_E_(later_Test_Card_C)" title="List of BBC test cards">E</a>, <a href="/wiki/Test_Card_F" title="Test Card F">F</a>, <a href="/wiki/List_of_BBC_test_cards#Test_Card_G" title="List of BBC test cards">G</a>, <a href="/wiki/List_of_BBC_test_cards#Test_Card_H" title="List of BBC test cards">H</a>, <a href="/wiki/Test_Card_J" class="mw-redirect" title="Test Card J">J</a>, <a href="/wiki/Test_Card_W" class="mw-redirect" title="Test Card W">W</a>, <a href="/wiki/Test_Card_X" class="mw-redirect" title="Test Card X">X</a></li> <li><a href="/wiki/ETP-1" title="ETP-1">ETP-1</a></li> <li><a href="/wiki/Philips_circle_pattern" title="Philips circle pattern">Philips circle pattern</a> (<a href="/wiki/Philips_circle_pattern#PM5534" title="Philips circle pattern">PM 5538</a>, <a href="/wiki/Philips_PM5540" title="Philips PM5540">PM 5540</a>, <a href="/wiki/Philips_circle_pattern#PM5544" title="Philips circle pattern">PM 5544</a>, <a href="/wiki/Philips_circle_pattern#PM5644" title="Philips circle pattern">PM 5644</a>)</li> <li><a href="/wiki/Snell_%26_Wilcox_Zone_Plate" title="Snell & Wilcox Zone Plate">Snell & Wilcox SW2/SW4</a></li> <li><a 
href="/wiki/Telefunken_FuBK" title="Telefunken FuBK">Telefunken FuBK</a></li> <li><a href="/wiki/TVE_test_card" title="TVE test card">TVE test card</a></li> <li><a href="/wiki/Universal_Electronic_Test_Chart" title="Universal Electronic Test Chart">UEIT</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Computer_language" title="Computer language">Computer languages</a></th><td class="navbox-list-with-group navbox-list navbox-odd hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/%22Hello,_World!%22_program" title="&quot;Hello, World!&quot; program">"Hello, World!" program</a></li> <li><a href="/wiki/Quine_(computing)" title="Quine (computing)">Quine</a></li> <li><a href="/wiki/TPK_algorithm" title="TPK algorithm">Trabb Pardo–Knuth algorithm</a></li> <li><a href="/wiki/Man_or_boy_test" title="Man or boy test">Man or boy test</a></li> <li><a href="/wiki/Just_another_Perl_hacker" class="mw-redirect" title="Just another Perl hacker">Just another Perl hacker</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Data_compression" title="Data compression">Data compression</a></th><td class="navbox-list-with-group navbox-list navbox-even hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Calgary_corpus" title="Calgary corpus">Calgary corpus</a></li> <li><a href="/wiki/Canterbury_corpus" title="Canterbury corpus">Canterbury corpus</a></li> <li><a href="/w/index.php?title=Silesia_corpus&action=edit&redlink=1" class="new" title="Silesia corpus (page does not exist)">Silesia corpus</a></li> <li><a href="/wiki/Hutter_Prize" title="Hutter Prize">enwik8, enwik9</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/3D_computer_graphics" title="3D computer graphics">3D computer graphics</a></th><td class="navbox-list-with-group navbox-list navbox-odd hlist" 
style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Cornell_box" title="Cornell box">Cornell box</a></li> <li><a href="/wiki/Stanford_bunny" title="Stanford bunny">Stanford bunny</a></li> <li><a href="/wiki/Stanford_dragon" title="Stanford dragon">Stanford dragon</a></li> <li><a href="/wiki/Utah_teapot" title="Utah teapot">Utah teapot</a></li> <li><a href="/wiki/List_of_common_3D_test_models" title="List of common 3D test models">List</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Machine_learning" title="Machine learning">Machine learning</a></th><td class="navbox-list-with-group navbox-list navbox-even hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a class="mw-selflink selflink">ImageNet</a></li> <li><a href="/wiki/MNIST_database" title="MNIST database">MNIST database</a></li> <li><a href="/wiki/List_of_datasets_for_machine-learning_research" title="List of datasets for machine-learning research">List</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Typography" title="Typography">Typography</a> (<a href="/wiki/Filler_text" title="Filler text">filler text</a>)</th><td class="navbox-list-with-group navbox-list navbox-odd hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Etaoin_shrdlu" title="Etaoin shrdlu">Etaoin shrdlu</a></li> <li><a href="/wiki/Hamburgevons" title="Hamburgevons">Hamburgevons</a></li> <li><a href="/wiki/Lorem_ipsum" title="Lorem ipsum">Lorem ipsum</a></li> <li><a href="/wiki/The_quick_brown_fox_jumps_over_the_lazy_dog" title="The quick brown fox jumps over the lazy dog">The quick brown fox jumps over the lazy dog</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Other</th><td class="navbox-list-with-group navbox-list navbox-even hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> 
<ul><li><a href="/wiki/3DBenchy" title="3DBenchy">3DBenchy</a></li> <li>Acid <ul><li><a href="/wiki/Acid1" title="Acid1">1</a></li> <li><a href="/wiki/Acid2" title="Acid2">2</a></li> <li><a href="/wiki/Acid3" title="Acid3">3</a></li></ul></li> <li><a href="/wiki/Bad_Apple!!#Use_as_a_graphical_and_audio_test" title="Bad Apple!!">"Bad Apple!!"</a></li> <li><a href="/wiki/EICAR_test_file" title="EICAR test file">EICAR test file</a></li> <li><a href="/wiki/Test_functions_for_optimization" title="Test functions for optimization">functions for optimization</a></li> <li><a href="/wiki/GTUBE" title="GTUBE">GTUBE</a></li> <li><a href="/wiki/Harvard_sentences" title="Harvard sentences">Harvard sentences</a></li> <li><a href="/wiki/Lenna" title="Lenna">Lenna</a></li> <li><a href="/wiki/The_North_Wind_and_the_Sun#Use_in_phonetic_demonstrations" title="The North Wind and the Sun">"The North Wind and the Sun"</a></li> <li><a href="/wiki/Tom%27s_Diner#The_%22Mother_of_the_MP3%22" title="Tom's Diner">"Tom's Diner"</a></li> <li><a href="/wiki/Film_leader" title="Film leader">SMPTE universal leader</a></li> <li><a href="/wiki/EURion_constellation" title="EURion constellation">EURion constellation</a></li> <li><a href="/wiki/Shakedown_(testing)" title="Shakedown (testing)">Shakedown</a></li> <li><a href="/wiki/Webdriver_Torso" title="Webdriver Torso">Webdriver Torso</a></li> <li><a href="/wiki/1951_USAF_resolution_test_chart" title="1951 USAF resolution test chart">1951 USAF resolution test chart</a></li></ul> </div></td></tr></tbody></table></div> <!-- NewPP limit report Parsed by mw‐api‐int.codfw.main‐76d76b94d7‐tntdg Cached time: 20241125210652 Cache expiry: 2592000 Reduced expiry: false Complications: [vary‐revision‐sha1, show‐toc] CPU time usage: 0.660 seconds Real time usage: 0.786 seconds Preprocessor visited node count: 2962/1000000 Post‐expand include size: 100832/2097152 bytes Template argument size: 1477/2097152 bytes Highest expansion depth: 12/100 Expensive parser function 
count: 4/500 Unstrip recursion depth: 1/20 Unstrip post‐expand size: 168078/5000000 bytes Lua time usage: 0.411/10.000 seconds Lua memory usage: 6103185/52428800 bytes Number of Wikibase entities loaded: 1/400 --> <!-- Transclusion expansion time report (%,ms,calls,template) 100.00% 677.375 1 -total 52.44% 355.196 1 Template:Reflist 16.49% 111.718 8 Template:Cite_news 16.17% 109.529 2 Template:Navbox 15.99% 108.311 1 Template:Differentiable_computing 13.62% 92.249 1 Template:Short_description 11.40% 77.241 16 Template:Cite_web 6.70% 45.403 2 Template:Pagetype 6.30% 42.701 2 Template:Fix-span 5.92% 40.088 1 Template:Clarification_needed --> <!-- Saved in parser cache with key enwiki:pcache:idhash:50896194-0!canonical and timestamp 20241125210652 and revision id 1259151460. Rendering was triggered because: api-parse --> </div><!--esi <esi:include src="/esitest-fa8a495983347898/content" /> --><noscript><img src="https://login.wikimedia.org/wiki/Special:CentralAutoLogin/start?type=1x1" alt="" width="1" height="1" style="border: none; position: absolute;"></noscript> <div class="printfooter" data-nosnippet="">Retrieved from "<a dir="ltr" href="https://en.wikipedia.org/w/index.php?title=ImageNet&oldid=1259151460">https://en.wikipedia.org/w/index.php?title=ImageNet&oldid=1259151460</a>"</div></div> <div id="catlinks" class="catlinks" data-mw="interface"><div id="mw-normal-catlinks" class="mw-normal-catlinks"><a href="/wiki/Help:Category" title="Help:Category">Categories</a>: <ul><li><a href="/wiki/Category:Computer_science_competitions" title="Category:Computer science competitions">Computer science competitions</a></li><li><a href="/wiki/Category:2009_in_computing" title="Category:2009 in computing">2009 in computing</a></li><li><a href="/wiki/Category:Object_recognition_and_categorization" title="Category:Object recognition and categorization">Object recognition and categorization</a></li><li><a href="/wiki/Category:Databases" 
title="Category:Databases">Databases</a></li><li><a href="/wiki/Category:Datasets_in_computer_vision" title="Category:Datasets in computer vision">Datasets in computer vision</a></li></ul></div><div id="mw-hidden-catlinks" class="mw-hidden-catlinks mw-hidden-cats-hidden">Hidden categories: <ul><li><a href="/wiki/Category:CS1_errors:_missing_periodical" title="Category:CS1 errors: missing periodical">CS1 errors: missing periodical</a></li><li><a href="/wiki/Category:Articles_with_short_description" title="Category:Articles with short description">Articles with short description</a></li><li><a href="/wiki/Category:Short_description_matches_Wikidata" title="Category:Short description matches Wikidata">Short description matches Wikidata</a></li><li><a href="/wiki/Category:Use_dmy_dates_from_September_2019" title="Category:Use dmy dates from September 2019">Use dmy dates from September 2019</a></li><li><a href="/wiki/Category:Wikipedia_articles_needing_clarification_from_December_2023" title="Category:Wikipedia articles needing clarification from December 2023">Wikipedia articles needing clarification from December 2023</a></li><li><a href="/wiki/Category:Wikipedia_articles_needing_clarification_from_August_2019" title="Category:Wikipedia articles needing clarification from August 2019">Wikipedia articles needing clarification from August 2019</a></li><li><a href="/wiki/Category:Official_website_different_in_Wikidata_and_Wikipedia" title="Category:Official website different in Wikidata and Wikipedia">Official website different in Wikidata and Wikipedia</a></li></ul></div></div> </div> </main> </div> <div class="mw-footer-container"> <footer id="footer" class="mw-footer" > <ul id="footer-info"> <li id="footer-info-lastmod"> This page was last edited on 23 November 2024, at 17:27<span class="anonymous-show"> (UTC)</span>.</li> <li id="footer-info-copyright">Text is available under the <a 
href="/wiki/Wikipedia:Text_of_the_Creative_Commons_Attribution-ShareAlike_4.0_International_License" title="Wikipedia:Text of the Creative Commons Attribution-ShareAlike 4.0 International License">Creative Commons Attribution-ShareAlike 4.0 License</a>; additional terms may apply. By using this site, you agree to the <a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Terms_of_Use" class="extiw" title="foundation:Special:MyLanguage/Policy:Terms of Use">Terms of Use</a> and <a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Privacy_policy" class="extiw" title="foundation:Special:MyLanguage/Policy:Privacy policy">Privacy Policy</a>. Wikipedia® is a registered trademark of the <a rel="nofollow" class="external text" href="https://wikimediafoundation.org/">Wikimedia Foundation, Inc.</a>, a non-profit organization.</li> </ul> <ul id="footer-places"> <li id="footer-places-privacy"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Privacy_policy">Privacy policy</a></li> <li id="footer-places-about"><a href="/wiki/Wikipedia:About">About Wikipedia</a></li> <li id="footer-places-disclaimers"><a href="/wiki/Wikipedia:General_disclaimer">Disclaimers</a></li> <li id="footer-places-contact"><a href="//en.wikipedia.org/wiki/Wikipedia:Contact_us">Contact Wikipedia</a></li> <li id="footer-places-wm-codeofconduct"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Universal_Code_of_Conduct">Code of Conduct</a></li> <li id="footer-places-developers"><a href="https://developer.wikimedia.org">Developers</a></li> <li id="footer-places-statslink"><a href="https://stats.wikimedia.org/#/en.wikipedia.org">Statistics</a></li> <li id="footer-places-cookiestatement"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Cookie_statement">Cookie statement</a></li> <li id="footer-places-mobileview"><a href="//en.m.wikipedia.org/w/index.php?title=ImageNet&mobileaction=toggle_view_mobile" 
class="noprint stopMobileRedirectToggle">Mobile view</a></li> </ul> <ul id="footer-icons" class="noprint"> <li id="footer-copyrightico"><a href="https://wikimediafoundation.org/" class="cdx-button cdx-button--fake-button cdx-button--size-large cdx-button--fake-button--enabled"><img src="/static/images/footer/wikimedia-button.svg" width="84" height="29" alt="Wikimedia Foundation" loading="lazy"></a></li> <li id="footer-poweredbyico"><a href="https://www.mediawiki.org/" class="cdx-button cdx-button--fake-button cdx-button--size-large cdx-button--fake-button--enabled"><img src="/w/resources/assets/poweredby_mediawiki.svg" alt="Powered by MediaWiki" width="88" height="31" loading="lazy"></a></li> </ul> </footer> </div> </div> </div> <div class="vector-settings" id="p-dock-bottom"> <ul></ul> </div><script>(RLQ=window.RLQ||[]).push(function(){mw.config.set({"wgHostname":"mw-web.codfw.main-694cf4987f-b94j6","wgBackendResponseTime":245,"wgPageParseReport":{"limitreport":{"cputime":"0.660","walltime":"0.786","ppvisitednodes":{"value":2962,"limit":1000000},"postexpandincludesize":{"value":100832,"limit":2097152},"templateargumentsize":{"value":1477,"limit":2097152},"expansiondepth":{"value":12,"limit":100},"expensivefunctioncount":{"value":4,"limit":500},"unstrip-depth":{"value":1,"limit":20},"unstrip-size":{"value":168078,"limit":5000000},"entityaccesscount":{"value":1,"limit":400},"timingprofile":["100.00% 677.375 1 -total"," 52.44% 355.196 1 Template:Reflist"," 16.49% 111.718 8 Template:Cite_news"," 16.17% 109.529 2 Template:Navbox"," 15.99% 108.311 1 Template:Differentiable_computing"," 13.62% 92.249 1 Template:Short_description"," 11.40% 77.241 16 Template:Cite_web"," 6.70% 45.403 2 Template:Pagetype"," 6.30% 42.701 2 Template:Fix-span"," 5.92% 40.088 1 
Template:Clarification_needed"]},"scribunto":{"limitreport-timeusage":{"value":"0.411","limit":"10.000"},"limitreport-memusage":{"value":6103185,"limit":52428800}},"cachereport":{"origin":"mw-api-int.codfw.main-76d76b94d7-tntdg","timestamp":"20241125210652","ttl":2592000,"transientcontent":false}}});});</script> <script type="application/ld+json">{"@context":"https:\/\/schema.org","@type":"Article","name":"ImageNet","url":"https:\/\/en.wikipedia.org\/wiki\/ImageNet","sameAs":"http:\/\/www.wikidata.org\/entity\/Q24901201","mainEntity":"http:\/\/www.wikidata.org\/entity\/Q24901201","author":{"@type":"Organization","name":"Contributors to Wikimedia projects"},"publisher":{"@type":"Organization","name":"Wikimedia Foundation, Inc.","logo":{"@type":"ImageObject","url":"https:\/\/www.wikimedia.org\/static\/images\/wmf-hor-googpub.png"}},"datePublished":"2016-06-22T04:35:37Z","dateModified":"2024-11-23T17:27:41Z","headline":"image dataset"}</script> </body> </html>