CINXE.COM
Stable Diffusion - Wikipedia
<!DOCTYPE html> <html class="client-nojs vector-feature-language-in-header-enabled vector-feature-language-in-main-page-header-disabled vector-feature-sticky-header-disabled vector-feature-page-tools-pinned-disabled vector-feature-toc-pinned-clientpref-1 vector-feature-main-menu-pinned-disabled vector-feature-limited-width-clientpref-1 vector-feature-limited-width-content-enabled vector-feature-custom-font-size-clientpref-1 vector-feature-appearance-pinned-clientpref-1 vector-feature-night-mode-enabled skin-theme-clientpref-day vector-toc-available" lang="en" dir="ltr"> <head> <meta charset="UTF-8"> <title>Stable Diffusion - Wikipedia</title> <script>(function(){var className="client-js vector-feature-language-in-header-enabled vector-feature-language-in-main-page-header-disabled vector-feature-sticky-header-disabled vector-feature-page-tools-pinned-disabled vector-feature-toc-pinned-clientpref-1 vector-feature-main-menu-pinned-disabled vector-feature-limited-width-clientpref-1 vector-feature-limited-width-content-enabled vector-feature-custom-font-size-clientpref-1 vector-feature-appearance-pinned-clientpref-1 vector-feature-night-mode-enabled skin-theme-clientpref-day vector-toc-available";var cookie=document.cookie.match(/(?:^|; )enwikimwclientpreferences=([^;]+)/);if(cookie){cookie[1].split('%2C').forEach(function(pref){className=className.replace(new RegExp('(^| )'+pref.replace(/-clientpref-\w+$|[^\w-]+/g,'')+'-clientpref-\\w+( |$)'),'$1'+pref+'$2');});}document.documentElement.className=className;}());RLCONF={"wgBreakFrames":false,"wgSeparatorTransformTable":["",""],"wgDigitTransformTable":["",""],"wgDefaultDateFormat":"dmy", "wgMonthNames":["","January","February","March","April","May","June","July","August","September","October","November","December"],"wgRequestId":"66b88308-0d1f-4d3f-af68-8c611be3f8cd","wgCanonicalNamespace":"","wgCanonicalSpecialPageName":false,"wgNamespaceNumber":0,"wgPageName":"Stable_Diffusion","wgTitle":"Stable 
Diffusion","wgCurRevisionId":1259657575,"wgRevisionId":1259657575,"wgArticleId":71642695,"wgIsArticle":true,"wgIsRedirect":false,"wgAction":"view","wgUserName":null,"wgUserGroups":["*"],"wgCategories":["CS1 maint: multiple names: authors list","CS1 maint: numeric names: authors list","CS1 Japanese-language sources (ja)","Articles with short description","Short description matches Wikidata","Use mdy dates from October 2023","All articles with unsourced statements","Articles with unsourced statements from October 2023","Pages using multiple image with auto scaled images","Commons category link from Wikidata","Artificial intelligence art","Deep learning software applications", "Text-to-image generation","Unsupervised learning","Art controversies","Works involved in plagiarism controversies","2022 software","Open-source artificial intelligence"],"wgPageViewLanguage":"en","wgPageContentLanguage":"en","wgPageContentModel":"wikitext","wgRelevantPageName":"Stable_Diffusion","wgRelevantArticleId":71642695,"wgIsProbablyEditable":true,"wgRelevantPageIsProbablyEditable":true,"wgRestrictionEdit":[],"wgRestrictionMove":[],"wgNoticeProject":"wikipedia","wgCiteReferencePreviewsActive":false,"wgFlaggedRevsParams":{"tags":{"status":{"levels":1}}},"wgMediaViewerOnClick":true,"wgMediaViewerEnabledByDefault":true,"wgPopupsFlags":0,"wgVisualEditor":{"pageLanguageCode":"en","pageLanguageDir":"ltr","pageVariantFallbacks":"en"},"wgMFDisplayWikibaseDescriptions":{"search":true,"watchlist":true,"tagline":false,"nearby":true},"wgWMESchemaEditAttemptStepOversample":false,"wgWMEPageLength":70000,"wgRelatedArticlesCompat":[],"wgCentralAuthMobileDomain":false, 
"wgEditSubmitButtonLabelPublish":true,"wgULSPosition":"interlanguage","wgULSisCompactLinksEnabled":false,"wgVector2022LanguageInHeader":true,"wgULSisLanguageSelectorEmpty":false,"wgWikibaseItemId":"Q113660857","wgCheckUserClientHintsHeadersJsApi":["brands","architecture","bitness","fullVersionList","mobile","model","platform","platformVersion"],"GEHomepageSuggestedEditsEnableTopics":true,"wgGETopicsMatchModeEnabled":false,"wgGEStructuredTaskRejectionReasonTextInputEnabled":false,"wgGELevelingUpEnabledForUser":false};RLSTATE={"ext.globalCssJs.user.styles":"ready","site.styles":"ready","user.styles":"ready","ext.globalCssJs.user":"ready","user":"ready","user.options":"loading","ext.cite.styles":"ready","skins.vector.search.codex.styles":"ready","skins.vector.styles":"ready","skins.vector.icons":"ready","jquery.makeCollapsible.styles":"ready","ext.wikimediamessages.styles":"ready","ext.visualEditor.desktopArticleTarget.noscript":"ready","ext.uls.interlanguage":"ready", "wikibase.client.init":"ready","ext.wikimediaBadges":"ready"};RLPAGEMODULES=["ext.cite.ux-enhancements","mediawiki.page.media","site","mediawiki.page.ready","jquery.makeCollapsible","mediawiki.toc","skins.vector.js","ext.centralNotice.geoIP","ext.centralNotice.startUp","ext.gadget.ReferenceTooltips","ext.gadget.switcher","ext.urlShortener.toolbar","ext.centralauth.centralautologin","mmv.bootstrap","ext.popups","ext.visualEditor.desktopArticleTarget.init","ext.visualEditor.targetLoader","ext.echo.centralauth","ext.eventLogging","ext.wikimediaEvents","ext.navigationTiming","ext.uls.interface","ext.cx.eventlogging.campaigns","ext.cx.uls.quick.actions","wikibase.client.vector-2022","ext.checkUser.clientHints","ext.quicksurveys.init","ext.growthExperiments.SuggestedEditSession","wikibase.sidebar.tracking"];</script> 
<script>(RLQ=window.RLQ||[]).push(function(){mw.loader.impl(function(){return["user.options@12s5i",function($,jQuery,require,module){mw.user.tokens.set({"patrolToken":"+\\","watchToken":"+\\","csrfToken":"+\\"}); }];});});</script> <link rel="stylesheet" href="/w/load.php?lang=en&modules=ext.cite.styles%7Cext.uls.interlanguage%7Cext.visualEditor.desktopArticleTarget.noscript%7Cext.wikimediaBadges%7Cext.wikimediamessages.styles%7Cjquery.makeCollapsible.styles%7Cskins.vector.icons%2Cstyles%7Cskins.vector.search.codex.styles%7Cwikibase.client.init&only=styles&skin=vector-2022"> <script async="" src="/w/load.php?lang=en&modules=startup&only=scripts&raw=1&skin=vector-2022"></script> <meta name="ResourceLoaderDynamicStyles" content=""> <link rel="stylesheet" href="/w/load.php?lang=en&modules=site.styles&only=styles&skin=vector-2022"> <meta name="generator" content="MediaWiki 1.44.0-wmf.5"> <meta name="referrer" content="origin"> <meta name="referrer" content="origin-when-cross-origin"> <meta name="robots" content="max-image-preview:standard"> <meta name="format-detection" content="telephone=no"> <meta property="og:image" content="https://upload.wikimedia.org/wikipedia/commons/thumb/8/82/Astronaut_Riding_a_Horse_%28SD3.5%29.webp/1024px-Astronaut_Riding_a_Horse_%28SD3.5%29.webp.png"> <meta property="og:image:width" content="1200"> <meta property="og:image:height" content="1200"> <meta property="og:image" content="https://upload.wikimedia.org/wikipedia/commons/thumb/8/82/Astronaut_Riding_a_Horse_%28SD3.5%29.webp/800px-Astronaut_Riding_a_Horse_%28SD3.5%29.webp.png"> <meta property="og:image:width" content="800"> <meta property="og:image:height" content="800"> <meta property="og:image" content="https://upload.wikimedia.org/wikipedia/commons/thumb/8/82/Astronaut_Riding_a_Horse_%28SD3.5%29.webp/640px-Astronaut_Riding_a_Horse_%28SD3.5%29.webp.png"> <meta property="og:image:width" content="640"> <meta property="og:image:height" content="640"> <meta name="viewport" 
content="width=1120"> <meta property="og:title" content="Stable Diffusion - Wikipedia"> <meta property="og:type" content="website"> <link rel="preconnect" href="//upload.wikimedia.org"> <link rel="alternate" media="only screen and (max-width: 640px)" href="//en.m.wikipedia.org/wiki/Stable_Diffusion"> <link rel="alternate" type="application/x-wiki" title="Edit this page" href="/w/index.php?title=Stable_Diffusion&action=edit"> <link rel="apple-touch-icon" href="/static/apple-touch/wikipedia.png"> <link rel="icon" href="/static/favicon/wikipedia.ico"> <link rel="search" type="application/opensearchdescription+xml" href="/w/rest.php/v1/search" title="Wikipedia (en)"> <link rel="EditURI" type="application/rsd+xml" href="//en.wikipedia.org/w/api.php?action=rsd"> <link rel="canonical" href="https://en.wikipedia.org/wiki/Stable_Diffusion"> <link rel="license" href="https://creativecommons.org/licenses/by-sa/4.0/deed.en"> <link rel="alternate" type="application/atom+xml" title="Wikipedia Atom feed" href="/w/index.php?title=Special:RecentChanges&feed=atom"> <link rel="dns-prefetch" href="//meta.wikimedia.org" /> <link rel="dns-prefetch" href="//login.wikimedia.org"> </head> <body class="skin--responsive skin-vector skin-vector-search-vue mediawiki ltr sitedir-ltr mw-hide-empty-elt ns-0 ns-subject mw-editable page-Stable_Diffusion rootpage-Stable_Diffusion skin-vector-2022 action-view"><a class="mw-jump-link" href="#bodyContent">Jump to content</a> <div class="vector-header-container"> <header class="vector-header mw-header"> <div class="vector-header-start"> <nav class="vector-main-menu-landmark" aria-label="Site"> <div id="vector-main-menu-dropdown" class="vector-dropdown vector-main-menu-dropdown vector-button-flush-left vector-button-flush-right" > <input type="checkbox" id="vector-main-menu-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-main-menu-dropdown" class="vector-dropdown-checkbox " aria-label="Main menu" > <label 
id="vector-main-menu-dropdown-label" for="vector-main-menu-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-menu mw-ui-icon-wikimedia-menu"></span> <span class="vector-dropdown-label-text">Main menu</span> </label> <div class="vector-dropdown-content"> <div id="vector-main-menu-unpinned-container" class="vector-unpinned-container"> <div id="vector-main-menu" class="vector-main-menu vector-pinnable-element"> <div class="vector-pinnable-header vector-main-menu-pinnable-header vector-pinnable-header-unpinned" data-feature-name="main-menu-pinned" data-pinnable-element-id="vector-main-menu" data-pinned-container-id="vector-main-menu-pinned-container" data-unpinned-container-id="vector-main-menu-unpinned-container" > <div class="vector-pinnable-header-label">Main menu</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-main-menu.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-main-menu.unpin">hide</button> </div> <div id="p-navigation" class="vector-menu mw-portlet mw-portlet-navigation" > <div class="vector-menu-heading"> Navigation </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="n-mainpage-description" class="mw-list-item"><a href="/wiki/Main_Page" title="Visit the main page [z]" accesskey="z"><span>Main page</span></a></li><li id="n-contents" class="mw-list-item"><a href="/wiki/Wikipedia:Contents" title="Guides to browsing Wikipedia"><span>Contents</span></a></li><li id="n-currentevents" class="mw-list-item"><a href="/wiki/Portal:Current_events" title="Articles related to current events"><span>Current events</span></a></li><li id="n-randompage" class="mw-list-item"><a 
href="/wiki/Special:Random" title="Visit a randomly selected article [x]" accesskey="x"><span>Random article</span></a></li><li id="n-aboutsite" class="mw-list-item"><a href="/wiki/Wikipedia:About" title="Learn about Wikipedia and how it works"><span>About Wikipedia</span></a></li><li id="n-contactpage" class="mw-list-item"><a href="//en.wikipedia.org/wiki/Wikipedia:Contact_us" title="How to contact Wikipedia"><span>Contact us</span></a></li> </ul> </div> </div> <div id="p-interaction" class="vector-menu mw-portlet mw-portlet-interaction" > <div class="vector-menu-heading"> Contribute </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="n-help" class="mw-list-item"><a href="/wiki/Help:Contents" title="Guidance on how to use and edit Wikipedia"><span>Help</span></a></li><li id="n-introduction" class="mw-list-item"><a href="/wiki/Help:Introduction" title="Learn how to edit Wikipedia"><span>Learn to edit</span></a></li><li id="n-portal" class="mw-list-item"><a href="/wiki/Wikipedia:Community_portal" title="The hub for editors"><span>Community portal</span></a></li><li id="n-recentchanges" class="mw-list-item"><a href="/wiki/Special:RecentChanges" title="A list of recent changes to Wikipedia [r]" accesskey="r"><span>Recent changes</span></a></li><li id="n-upload" class="mw-list-item"><a href="/wiki/Wikipedia:File_upload_wizard" title="Add images or other media for use on Wikipedia"><span>Upload file</span></a></li> </ul> </div> </div> </div> </div> </div> </div> </nav> <a href="/wiki/Main_Page" class="mw-logo"> <img class="mw-logo-icon" src="/static/images/icons/wikipedia.png" alt="" aria-hidden="true" height="50" width="50"> <span class="mw-logo-container skin-invert"> <img class="mw-logo-wordmark" alt="Wikipedia" src="/static/images/mobile/copyright/wikipedia-wordmark-en.svg" style="width: 7.5em; height: 1.125em;"> <img class="mw-logo-tagline" alt="The Free Encyclopedia" src="/static/images/mobile/copyright/wikipedia-tagline-en.svg" 
width="117" height="13" style="width: 7.3125em; height: 0.8125em;"> </span> </a> </div> <div class="vector-header-end"> <div id="p-search" role="search" class="vector-search-box-vue vector-search-box-collapses vector-search-box-show-thumbnail vector-search-box-auto-expand-width vector-search-box"> <a href="/wiki/Special:Search" class="cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only search-toggle" title="Search Wikipedia [f]" accesskey="f"><span class="vector-icon mw-ui-icon-search mw-ui-icon-wikimedia-search"></span> <span>Search</span> </a> <div class="vector-typeahead-search-container"> <div class="cdx-typeahead-search cdx-typeahead-search--show-thumbnail cdx-typeahead-search--auto-expand-width"> <form action="/w/index.php" id="searchform" class="cdx-search-input cdx-search-input--has-end-button"> <div id="simpleSearch" class="cdx-search-input__input-wrapper" data-search-loc="header-moved"> <div class="cdx-text-input cdx-text-input--has-start-icon"> <input class="cdx-text-input__input" type="search" name="search" placeholder="Search Wikipedia" aria-label="Search Wikipedia" autocapitalize="sentences" title="Search Wikipedia [f]" accesskey="f" id="searchInput" > <span class="cdx-text-input__icon cdx-text-input__start-icon"></span> </div> <input type="hidden" name="title" value="Special:Search"> </div> <button class="cdx-button cdx-search-input__end-button">Search</button> </form> </div> </div> </div> <nav class="vector-user-links vector-user-links-wide" aria-label="Personal tools"> <div class="vector-user-links-main"> <div id="p-vector-user-menu-preferences" class="vector-menu mw-portlet emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> <div id="p-vector-user-menu-userpage" class="vector-menu mw-portlet emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> <nav 
class="vector-appearance-landmark" aria-label="Appearance"> <div id="vector-appearance-dropdown" class="vector-dropdown " title="Change the appearance of the page's font size, width, and color" > <input type="checkbox" id="vector-appearance-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-appearance-dropdown" class="vector-dropdown-checkbox " aria-label="Appearance" > <label id="vector-appearance-dropdown-label" for="vector-appearance-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-appearance mw-ui-icon-wikimedia-appearance"></span> <span class="vector-dropdown-label-text">Appearance</span> </label> <div class="vector-dropdown-content"> <div id="vector-appearance-unpinned-container" class="vector-unpinned-container"> </div> </div> </div> </nav> <div id="p-vector-user-menu-notifications" class="vector-menu mw-portlet emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> <div id="p-vector-user-menu-overflow" class="vector-menu mw-portlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-sitesupport-2" class="user-links-collapsible-item mw-list-item user-links-collapsible-item"><a data-mw="interface" href="https://donate.wikimedia.org/wiki/Special:FundraiserRedirector?utm_source=donate&utm_medium=sidebar&utm_campaign=C13_en.wikipedia.org&uselang=en" class=""><span>Donate</span></a> </li> <li id="pt-createaccount-2" class="user-links-collapsible-item mw-list-item user-links-collapsible-item"><a data-mw="interface" href="/w/index.php?title=Special:CreateAccount&returnto=Stable+Diffusion" title="You are encouraged to create an account and log in; however, it is not mandatory" class=""><span>Create account</span></a> </li> <li id="pt-login-2" 
class="user-links-collapsible-item mw-list-item user-links-collapsible-item"><a data-mw="interface" href="/w/index.php?title=Special:UserLogin&returnto=Stable+Diffusion" title="You're encouraged to log in; however, it's not mandatory. [o]" accesskey="o" class=""><span>Log in</span></a> </li> </ul> </div> </div> </div> <div id="vector-user-links-dropdown" class="vector-dropdown vector-user-menu vector-button-flush-right vector-user-menu-logged-out" title="Log in and more options" > <input type="checkbox" id="vector-user-links-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-user-links-dropdown" class="vector-dropdown-checkbox " aria-label="Personal tools" > <label id="vector-user-links-dropdown-label" for="vector-user-links-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-ellipsis mw-ui-icon-wikimedia-ellipsis"></span> <span class="vector-dropdown-label-text">Personal tools</span> </label> <div class="vector-dropdown-content"> <div id="p-personal" class="vector-menu mw-portlet mw-portlet-personal user-links-collapsible-item" title="User menu" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-sitesupport" class="user-links-collapsible-item mw-list-item"><a href="https://donate.wikimedia.org/wiki/Special:FundraiserRedirector?utm_source=donate&utm_medium=sidebar&utm_campaign=C13_en.wikipedia.org&uselang=en"><span>Donate</span></a></li><li id="pt-createaccount" class="user-links-collapsible-item mw-list-item"><a href="/w/index.php?title=Special:CreateAccount&returnto=Stable+Diffusion" title="You are encouraged to create an account and log in; however, it is not mandatory"><span class="vector-icon mw-ui-icon-userAdd mw-ui-icon-wikimedia-userAdd"></span> <span>Create account</span></a></li><li id="pt-login" 
class="user-links-collapsible-item mw-list-item"><a href="/w/index.php?title=Special:UserLogin&returnto=Stable+Diffusion" title="You're encouraged to log in; however, it's not mandatory. [o]" accesskey="o"><span class="vector-icon mw-ui-icon-logIn mw-ui-icon-wikimedia-logIn"></span> <span>Log in</span></a></li> </ul> </div> </div> <div id="p-user-menu-anon-editor" class="vector-menu mw-portlet mw-portlet-user-menu-anon-editor" > <div class="vector-menu-heading"> Pages for logged out editors <a href="/wiki/Help:Introduction" aria-label="Learn more about editing"><span>learn more</span></a> </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-anoncontribs" class="mw-list-item"><a href="/wiki/Special:MyContributions" title="A list of edits made from this IP address [y]" accesskey="y"><span>Contributions</span></a></li><li id="pt-anontalk" class="mw-list-item"><a href="/wiki/Special:MyTalk" title="Discussion about edits from this IP address [n]" accesskey="n"><span>Talk</span></a></li> </ul> </div> </div> </div> </div> </nav> </div> </header> </div> <div class="mw-page-container"> <div class="mw-page-container-inner"> <div class="vector-sitenotice-container"> <div id="siteNotice"><!-- CentralNotice --></div> </div> <div class="vector-column-start"> <div class="vector-main-menu-container"> <div id="mw-navigation"> <nav id="mw-panel" class="vector-main-menu-landmark" aria-label="Site"> <div id="vector-main-menu-pinned-container" class="vector-pinned-container"> </div> </nav> </div> </div> <div class="vector-sticky-pinned-container"> <nav id="mw-panel-toc" aria-label="Contents" data-event-name="ui.sidebar-toc" class="mw-table-of-contents-container vector-toc-landmark"> <div id="vector-toc-pinned-container" class="vector-pinned-container"> <div id="vector-toc" class="vector-toc vector-pinnable-element"> <div class="vector-pinnable-header vector-toc-pinnable-header vector-pinnable-header-pinned" data-feature-name="toc-pinned" 
data-pinnable-element-id="vector-toc" > <h2 class="vector-pinnable-header-label">Contents</h2> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-toc.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-toc.unpin">hide</button> </div> <ul class="vector-toc-contents" id="mw-panel-toc-list"> <li id="toc-mw-content-text" class="vector-toc-list-item vector-toc-level-1"> <a href="#" class="vector-toc-link"> <div class="vector-toc-text">(Top)</div> </a> </li> <li id="toc-Development" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#Development"> <div class="vector-toc-text"> <span class="vector-toc-numb">1</span> <span>Development</span> </div> </a> <ul id="toc-Development-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Technology" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#Technology"> <div class="vector-toc-text"> <span class="vector-toc-numb">2</span> <span>Technology</span> </div> </a> <button aria-controls="toc-Technology-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Technology subsection</span> </button> <ul id="toc-Technology-sublist" class="vector-toc-list"> <li id="toc-Architecture" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Architecture"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.1</span> <span>Architecture</span> </div> </a> <ul id="toc-Architecture-sublist" class="vector-toc-list"> <li id="toc-SD_XL" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#SD_XL"> <div class="vector-toc-text"> <span 
class="vector-toc-numb">2.1.1</span> <span>SD XL</span> </div> </a> <ul id="toc-SD_XL-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-SD_3.0" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#SD_3.0"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.1.2</span> <span>SD 3.0</span> </div> </a> <ul id="toc-SD_3.0-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Training_data" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Training_data"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.2</span> <span>Training data</span> </div> </a> <ul id="toc-Training_data-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Training_procedures" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Training_procedures"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.3</span> <span>Training procedures</span> </div> </a> <ul id="toc-Training_procedures-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Limitations" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Limitations"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.4</span> <span>Limitations</span> </div> </a> <ul id="toc-Limitations-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-End-user_fine-tuning" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#End-user_fine-tuning"> <div class="vector-toc-text"> <span class="vector-toc-numb">2.5</span> <span>End-user fine-tuning</span> </div> </a> <ul id="toc-End-user_fine-tuning-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Capabilities" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#Capabilities"> <div class="vector-toc-text"> <span class="vector-toc-numb">3</span> <span>Capabilities</span> </div> </a> <button 
aria-controls="toc-Capabilities-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Capabilities subsection</span> </button> <ul id="toc-Capabilities-sublist" class="vector-toc-list"> <li id="toc-Text_to_image_generation" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Text_to_image_generation"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.1</span> <span>Text to image generation</span> </div> </a> <ul id="toc-Text_to_image_generation-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Image_modification" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Image_modification"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.2</span> <span>Image modification</span> </div> </a> <ul id="toc-Image_modification-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-ControlNet" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#ControlNet"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.3</span> <span>ControlNet</span> </div> </a> <ul id="toc-ControlNet-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-User_Interfaces" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#User_Interfaces"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.4</span> <span>User Interfaces</span> </div> </a> <ul id="toc-User_Interfaces-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Releases" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#Releases"> <div class="vector-toc-text"> <span class="vector-toc-numb">4</span> <span>Releases</span> </div> </a> <ul id="toc-Releases-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Usage_and_controversy" class="vector-toc-list-item 
vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#Usage_and_controversy"> <div class="vector-toc-text"> <span class="vector-toc-numb">5</span> <span>Usage and controversy</span> </div> </a> <ul id="toc-Usage_and_controversy-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Litigation" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#Litigation"> <div class="vector-toc-text"> <span class="vector-toc-numb">6</span> <span>Litigation</span> </div> </a> <button aria-controls="toc-Litigation-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Litigation subsection</span> </button> <ul id="toc-Litigation-sublist" class="vector-toc-list"> <li id="toc-Andersen,_McKernan,_and_Ortiz_v._Stability_AI,_Midjourney,_and_DeviantArt" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Andersen,_McKernan,_and_Ortiz_v._Stability_AI,_Midjourney,_and_DeviantArt"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.1</span> <span>Andersen, McKernan, and Ortiz v. Stability AI, Midjourney, and DeviantArt</span> </div> </a> <ul id="toc-Andersen,_McKernan,_and_Ortiz_v._Stability_AI,_Midjourney,_and_DeviantArt-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Getty_Images_v._Stability_AI" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Getty_Images_v._Stability_AI"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.2</span> <span>Getty Images v. 
Stability AI</span> </div> </a> <ul id="toc-Getty_Images_v._Stability_AI-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-License" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#License"> <div class="vector-toc-text"> <span class="vector-toc-numb">7</span> <span>License</span> </div> </a> <ul id="toc-License-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-See_also" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#See_also"> <div class="vector-toc-text"> <span class="vector-toc-numb">8</span> <span>See also</span> </div> </a> <ul id="toc-See_also-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-References" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#References"> <div class="vector-toc-text"> <span class="vector-toc-numb">9</span> <span>References</span> </div> </a> <ul id="toc-References-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-External_links" class="vector-toc-list-item vector-toc-level-1 vector-toc-list-item-expanded"> <a class="vector-toc-link" href="#External_links"> <div class="vector-toc-text"> <span class="vector-toc-numb">10</span> <span>External links</span> </div> </a> <ul id="toc-External_links-sublist" class="vector-toc-list"> </ul> </li> </ul> </div> </div> </nav> </div> </div> <div class="mw-content-container"> <main id="content" class="mw-body"> <header class="mw-body-header vector-page-titlebar"> <nav aria-label="Contents" class="vector-toc-landmark"> <div id="vector-page-titlebar-toc" class="vector-dropdown vector-page-titlebar-toc vector-button-flush-left" > <input type="checkbox" id="vector-page-titlebar-toc-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-titlebar-toc" class="vector-dropdown-checkbox " aria-label="Toggle the table of contents" > <label 
id="vector-page-titlebar-toc-label" for="vector-page-titlebar-toc-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-listBullet mw-ui-icon-wikimedia-listBullet"></span> <span class="vector-dropdown-label-text">Toggle the table of contents</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-titlebar-toc-unpinned-container" class="vector-unpinned-container"> </div> </div> </div> </nav> <h1 id="firstHeading" class="firstHeading mw-first-heading"><span class="mw-page-title-main">Stable Diffusion</span></h1> <div id="p-lang-btn" class="vector-dropdown mw-portlet mw-portlet-lang" > <input type="checkbox" id="p-lang-btn-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-p-lang-btn" class="vector-dropdown-checkbox mw-interlanguage-selector" aria-label="Go to an article in another language. Available in 29 languages" > <label id="p-lang-btn-label" for="p-lang-btn-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--action-progressive mw-portlet-lang-heading-29" aria-hidden="true" ><span class="vector-icon mw-ui-icon-language-progressive mw-ui-icon-wikimedia-language-progressive"></span> <span class="vector-dropdown-label-text">29 languages</span> </label> <div class="vector-dropdown-content"> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li class="interlanguage-link interwiki-ar mw-list-item"><a href="https://ar.wikipedia.org/wiki/%D8%B3%D8%AA%D9%8A%D8%A8%D9%84_%D8%AF%D9%8A%D9%81%D9%8A%D9%88%D8%AC%D9%86" title="ستيبل ديفيوجن – Arabic" lang="ar" hreflang="ar" data-title="ستيبل ديفيوجن" data-language-autonym="العربية" data-language-local-name="Arabic" class="interlanguage-link-target"><span>العربية</span></a></li><li class="interlanguage-link interwiki-az 
mw-list-item"><a href="https://az.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Azerbaijani" lang="az" hreflang="az" data-title="Stable Diffusion" data-language-autonym="Azərbaycanca" data-language-local-name="Azerbaijani" class="interlanguage-link-target"><span>Azərbaycanca</span></a></li><li class="interlanguage-link interwiki-bg mw-list-item"><a href="https://bg.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Bulgarian" lang="bg" hreflang="bg" data-title="Stable Diffusion" data-language-autonym="Български" data-language-local-name="Bulgarian" class="interlanguage-link-target"><span>Български</span></a></li><li class="interlanguage-link interwiki-ca mw-list-item"><a href="https://ca.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Catalan" lang="ca" hreflang="ca" data-title="Stable Diffusion" data-language-autonym="Català" data-language-local-name="Catalan" class="interlanguage-link-target"><span>Català</span></a></li><li class="interlanguage-link interwiki-cs mw-list-item"><a href="https://cs.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Czech" lang="cs" hreflang="cs" data-title="Stable Diffusion" data-language-autonym="Čeština" data-language-local-name="Czech" class="interlanguage-link-target"><span>Čeština</span></a></li><li class="interlanguage-link interwiki-de mw-list-item"><a href="https://de.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – German" lang="de" hreflang="de" data-title="Stable Diffusion" data-language-autonym="Deutsch" data-language-local-name="German" class="interlanguage-link-target"><span>Deutsch</span></a></li><li class="interlanguage-link interwiki-es mw-list-item"><a href="https://es.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Spanish" lang="es" hreflang="es" data-title="Stable Diffusion" data-language-autonym="Español" data-language-local-name="Spanish" class="interlanguage-link-target"><span>Español</span></a></li><li 
class="interlanguage-link interwiki-fa mw-list-item"><a href="https://fa.wikipedia.org/wiki/%D8%A7%D8%B3%D8%AA%DB%8C%D8%A8%D9%84_%D8%AF%DB%8C%D9%81%DB%8C%D9%88%DA%98%D9%86" title="استیبل دیفیوژن – Persian" lang="fa" hreflang="fa" data-title="استیبل دیفیوژن" data-language-autonym="فارسی" data-language-local-name="Persian" class="interlanguage-link-target"><span>فارسی</span></a></li><li class="interlanguage-link interwiki-fr mw-list-item"><a href="https://fr.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – French" lang="fr" hreflang="fr" data-title="Stable Diffusion" data-language-autonym="Français" data-language-local-name="French" class="interlanguage-link-target"><span>Français</span></a></li><li class="interlanguage-link interwiki-gl mw-list-item"><a href="https://gl.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Galician" lang="gl" hreflang="gl" data-title="Stable Diffusion" data-language-autonym="Galego" data-language-local-name="Galician" class="interlanguage-link-target"><span>Galego</span></a></li><li class="interlanguage-link interwiki-ko mw-list-item"><a href="https://ko.wikipedia.org/wiki/%EC%8A%A4%ED%85%8C%EC%9D%B4%EB%B8%94_%EB%94%94%ED%93%A8%EC%A0%84" title="스테이블 디퓨전 – Korean" lang="ko" hreflang="ko" data-title="스테이블 디퓨전" data-language-autonym="한국어" data-language-local-name="Korean" class="interlanguage-link-target"><span>한국어</span></a></li><li class="interlanguage-link interwiki-id mw-list-item"><a href="https://id.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Indonesian" lang="id" hreflang="id" data-title="Stable Diffusion" data-language-autonym="Bahasa Indonesia" data-language-local-name="Indonesian" class="interlanguage-link-target"><span>Bahasa Indonesia</span></a></li><li class="interlanguage-link interwiki-it mw-list-item"><a href="https://it.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Italian" lang="it" hreflang="it" data-title="Stable Diffusion" data-language-autonym="Italiano" 
data-language-local-name="Italian" class="interlanguage-link-target"><span>Italiano</span></a></li><li class="interlanguage-link interwiki-he mw-list-item"><a href="https://he.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Hebrew" lang="he" hreflang="he" data-title="Stable Diffusion" data-language-autonym="עברית" data-language-local-name="Hebrew" class="interlanguage-link-target"><span>עברית</span></a></li><li class="interlanguage-link interwiki-nl mw-list-item"><a href="https://nl.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Dutch" lang="nl" hreflang="nl" data-title="Stable Diffusion" data-language-autonym="Nederlands" data-language-local-name="Dutch" class="interlanguage-link-target"><span>Nederlands</span></a></li><li class="interlanguage-link interwiki-ja mw-list-item"><a href="https://ja.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Japanese" lang="ja" hreflang="ja" data-title="Stable Diffusion" data-language-autonym="日本語" data-language-local-name="Japanese" class="interlanguage-link-target"><span>日本語</span></a></li><li class="interlanguage-link interwiki-pt mw-list-item"><a href="https://pt.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Portuguese" lang="pt" hreflang="pt" data-title="Stable Diffusion" data-language-autonym="Português" data-language-local-name="Portuguese" class="interlanguage-link-target"><span>Português</span></a></li><li class="interlanguage-link interwiki-qu mw-list-item"><a href="https://qu.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Quechua" lang="qu" hreflang="qu" data-title="Stable Diffusion" data-language-autonym="Runa Simi" data-language-local-name="Quechua" class="interlanguage-link-target"><span>Runa Simi</span></a></li><li class="interlanguage-link interwiki-ru mw-list-item"><a href="https://ru.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Russian" lang="ru" hreflang="ru" data-title="Stable Diffusion" 
data-language-autonym="Русский" data-language-local-name="Russian" class="interlanguage-link-target"><span>Русский</span></a></li><li class="interlanguage-link interwiki-sq mw-list-item"><a href="https://sq.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Albanian" lang="sq" hreflang="sq" data-title="Stable Diffusion" data-language-autonym="Shqip" data-language-local-name="Albanian" class="interlanguage-link-target"><span>Shqip</span></a></li><li class="interlanguage-link interwiki-si mw-list-item"><a href="https://si.wikipedia.org/wiki/%E0%B7%83%E0%B7%8A%E0%B6%AE%E0%B7%8F%E0%B6%BA%E0%B7%93_%E0%B7%80%E0%B7%92%E0%B7%83%E0%B6%BB%E0%B6%AB%E0%B6%BA" title="ස්ථායී විසරණය – Sinhala" lang="si" hreflang="si" data-title="ස්ථායී විසරණය" data-language-autonym="සිංහල" data-language-local-name="Sinhala" class="interlanguage-link-target"><span>සිංහල</span></a></li><li class="interlanguage-link interwiki-sr mw-list-item"><a href="https://sr.wikipedia.org/wiki/Stabilna_difuzija" title="Stabilna difuzija – Serbian" lang="sr" hreflang="sr" data-title="Stabilna difuzija" data-language-autonym="Српски / srpski" data-language-local-name="Serbian" class="interlanguage-link-target"><span>Српски / srpski</span></a></li><li class="interlanguage-link interwiki-fi mw-list-item"><a href="https://fi.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Finnish" lang="fi" hreflang="fi" data-title="Stable Diffusion" data-language-autonym="Suomi" data-language-local-name="Finnish" class="interlanguage-link-target"><span>Suomi</span></a></li><li class="interlanguage-link interwiki-th mw-list-item"><a href="https://th.wikipedia.org/wiki/%E0%B8%AA%E0%B9%80%E0%B8%95%E0%B9%80%E0%B8%9A%E0%B8%B4%E0%B8%A5%E0%B8%94%E0%B8%B4%E0%B8%9F%E0%B8%9F%E0%B8%B4%E0%B8%A7%E0%B8%8A%E0%B8%B1%E0%B8%99" title="สเตเบิลดิฟฟิวชัน – Thai" lang="th" hreflang="th" data-title="สเตเบิลดิฟฟิวชัน" data-language-autonym="ไทย" data-language-local-name="Thai" 
class="interlanguage-link-target"><span>ไทย</span></a></li><li class="interlanguage-link interwiki-tr mw-list-item"><a href="https://tr.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Turkish" lang="tr" hreflang="tr" data-title="Stable Diffusion" data-language-autonym="Türkçe" data-language-local-name="Turkish" class="interlanguage-link-target"><span>Türkçe</span></a></li><li class="interlanguage-link interwiki-uk mw-list-item"><a href="https://uk.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Ukrainian" lang="uk" hreflang="uk" data-title="Stable Diffusion" data-language-autonym="Українська" data-language-local-name="Ukrainian" class="interlanguage-link-target"><span>Українська</span></a></li><li class="interlanguage-link interwiki-vi mw-list-item"><a href="https://vi.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Vietnamese" lang="vi" hreflang="vi" data-title="Stable Diffusion" data-language-autonym="Tiếng Việt" data-language-local-name="Vietnamese" class="interlanguage-link-target"><span>Tiếng Việt</span></a></li><li class="interlanguage-link interwiki-zh-yue mw-list-item"><a href="https://zh-yue.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Cantonese" lang="yue" hreflang="yue" data-title="Stable Diffusion" data-language-autonym="粵語" data-language-local-name="Cantonese" class="interlanguage-link-target"><span>粵語</span></a></li><li class="interlanguage-link interwiki-zh mw-list-item"><a href="https://zh.wikipedia.org/wiki/Stable_Diffusion" title="Stable Diffusion – Chinese" lang="zh" hreflang="zh" data-title="Stable Diffusion" data-language-autonym="中文" data-language-local-name="Chinese" class="interlanguage-link-target"><span>中文</span></a></li> </ul> <div class="after-portlet after-portlet-lang"><span class="wb-langlinks-edit wb-langlinks-link"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q113660857#sitelinks-wikipedia" title="Edit interlanguage links" class="wbc-editpage">Edit 
links</a></span></div> </div> </div> </div> </header> <div class="vector-page-toolbar"> <div class="vector-page-toolbar-container"> <div id="left-navigation"> <nav aria-label="Namespaces"> <div id="p-associated-pages" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-associated-pages" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-nstab-main" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/Stable_Diffusion" title="View the content page [c]" accesskey="c"><span>Article</span></a></li><li id="ca-talk" class="vector-tab-noicon mw-list-item"><a href="/wiki/Talk:Stable_Diffusion" rel="discussion" title="Discuss improvements to the content page [t]" accesskey="t"><span>Talk</span></a></li> </ul> </div> </div> <div id="vector-variants-dropdown" class="vector-dropdown emptyPortlet" > <input type="checkbox" id="vector-variants-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-variants-dropdown" class="vector-dropdown-checkbox " aria-label="Change language variant" > <label id="vector-variants-dropdown-label" for="vector-variants-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">English</span> </label> <div class="vector-dropdown-content"> <div id="p-variants" class="vector-menu mw-portlet mw-portlet-variants emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> </div> </div> </nav> </div> <div id="right-navigation" class="vector-collapsible"> <nav aria-label="Views"> <div id="p-views" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-views" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-view" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/Stable_Diffusion"><span>Read</span></a></li><li id="ca-edit" 
class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=Stable_Diffusion&action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-history" class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=Stable_Diffusion&action=history" title="Past revisions of this page [h]" accesskey="h"><span>View history</span></a></li> </ul> </div> </div> </nav> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-dropdown" class="vector-dropdown vector-page-tools-dropdown" > <input type="checkbox" id="vector-page-tools-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-tools-dropdown" class="vector-dropdown-checkbox " aria-label="Tools" > <label id="vector-page-tools-dropdown-label" for="vector-page-tools-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">Tools</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-tools-unpinned-container" class="vector-unpinned-container"> <div id="vector-page-tools" class="vector-page-tools vector-pinnable-element"> <div class="vector-pinnable-header vector-page-tools-pinnable-header vector-pinnable-header-unpinned" data-feature-name="page-tools-pinned" data-pinnable-element-id="vector-page-tools" data-pinned-container-id="vector-page-tools-pinned-container" data-unpinned-container-id="vector-page-tools-unpinned-container" > <div class="vector-pinnable-header-label">Tools</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-page-tools.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-page-tools.unpin">hide</button> </div> <div id="p-cactions" 
class="vector-menu mw-portlet mw-portlet-cactions emptyPortlet vector-has-collapsible-items" title="More options" > <div class="vector-menu-heading"> Actions </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-more-view" class="selected vector-more-collapsible-item mw-list-item"><a href="/wiki/Stable_Diffusion"><span>Read</span></a></li><li id="ca-more-edit" class="vector-more-collapsible-item mw-list-item"><a href="/w/index.php?title=Stable_Diffusion&action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-more-history" class="vector-more-collapsible-item mw-list-item"><a href="/w/index.php?title=Stable_Diffusion&action=history"><span>View history</span></a></li> </ul> </div> </div> <div id="p-tb" class="vector-menu mw-portlet mw-portlet-tb" > <div class="vector-menu-heading"> General </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="t-whatlinkshere" class="mw-list-item"><a href="/wiki/Special:WhatLinksHere/Stable_Diffusion" title="List of all English Wikipedia pages containing links to this page [j]" accesskey="j"><span>What links here</span></a></li><li id="t-recentchangeslinked" class="mw-list-item"><a href="/wiki/Special:RecentChangesLinked/Stable_Diffusion" rel="nofollow" title="Recent changes in pages linked from this page [k]" accesskey="k"><span>Related changes</span></a></li><li id="t-upload" class="mw-list-item"><a href="/wiki/Wikipedia:File_Upload_Wizard" title="Upload files [u]" accesskey="u"><span>Upload file</span></a></li><li id="t-specialpages" class="mw-list-item"><a href="/wiki/Special:SpecialPages" title="A list of all special pages [q]" accesskey="q"><span>Special pages</span></a></li><li id="t-permalink" class="mw-list-item"><a href="/w/index.php?title=Stable_Diffusion&oldid=1259657575" title="Permanent link to this revision of this page"><span>Permanent link</span></a></li><li id="t-info" class="mw-list-item"><a 
href="/w/index.php?title=Stable_Diffusion&action=info" title="More information about this page"><span>Page information</span></a></li><li id="t-cite" class="mw-list-item"><a href="/w/index.php?title=Special:CiteThisPage&page=Stable_Diffusion&id=1259657575&wpFormIdentifier=titleform" title="Information on how to cite this page"><span>Cite this page</span></a></li><li id="t-urlshortener" class="mw-list-item"><a href="/w/index.php?title=Special:UrlShortener&url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FStable_Diffusion"><span>Get shortened URL</span></a></li><li id="t-urlshortener-qrcode" class="mw-list-item"><a href="/w/index.php?title=Special:QrCode&url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FStable_Diffusion"><span>Download QR code</span></a></li> </ul> </div> </div> <div id="p-coll-print_export" class="vector-menu mw-portlet mw-portlet-coll-print_export" > <div class="vector-menu-heading"> Print/export </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="coll-download-as-rl" class="mw-list-item"><a href="/w/index.php?title=Special:DownloadAsPdf&page=Stable_Diffusion&action=show-download-screen" title="Download this page as a PDF file"><span>Download as PDF</span></a></li><li id="t-print" class="mw-list-item"><a href="/w/index.php?title=Stable_Diffusion&printable=yes" title="Printable version of this page [p]" accesskey="p"><span>Printable version</span></a></li> </ul> </div> </div> <div id="p-wikibase-otherprojects" class="vector-menu mw-portlet mw-portlet-wikibase-otherprojects" > <div class="vector-menu-heading"> In other projects </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li class="wb-otherproject-link wb-otherproject-commons mw-list-item"><a href="https://commons.wikimedia.org/wiki/Category:Stable_Diffusion" hreflang="en"><span>Wikimedia Commons</span></a></li><li id="t-wikibase" class="wb-otherproject-link wb-otherproject-wikibase-dataitem mw-list-item"><a 
href="https://www.wikidata.org/wiki/Special:EntityPage/Q113660857" title="Structured data on this page hosted by Wikidata [g]" accesskey="g"><span>Wikidata item</span></a></li> </ul> </div> </div> </div> </div> </div> </div> </nav> </div> </div> </div> <div class="vector-column-end"> <div class="vector-sticky-pinned-container"> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-pinned-container" class="vector-pinned-container"> </div> </nav> <nav class="vector-appearance-landmark" aria-label="Appearance"> <div id="vector-appearance-pinned-container" class="vector-pinned-container"> <div id="vector-appearance" class="vector-appearance vector-pinnable-element"> <div class="vector-pinnable-header vector-appearance-pinnable-header vector-pinnable-header-pinned" data-feature-name="appearance-pinned" data-pinnable-element-id="vector-appearance" data-pinned-container-id="vector-appearance-pinned-container" data-unpinned-container-id="vector-appearance-unpinned-container" > <div class="vector-pinnable-header-label">Appearance</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-appearance.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-appearance.unpin">hide</button> </div> </div> </div> </nav> </div> </div> <div id="bodyContent" class="vector-body" aria-labelledby="firstHeading" data-mw-ve-target-container> <div class="vector-body-before-content"> <div class="mw-indicators"> </div> <div id="siteSub" class="noprint">From Wikipedia, the free encyclopedia</div> </div> <div id="contentSub"><div id="mw-content-subtitle"></div></div> <div id="mw-content-text" class="mw-body-content"><div class="mw-content-ltr mw-parser-output" lang="en" dir="ltr"><div class="shortdescription nomobile noexcerpt noprint searchaux" style="display:none">Image-generating 
machine learning model</div> <p class="mw-empty-elt"> </p> <style data-mw-deduplicate="TemplateStyles:r1257001546">.mw-parser-output .infobox-subbox{padding:0;border:none;margin:-3px;width:auto;min-width:100%;font-size:100%;clear:none;float:none;background-color:transparent}.mw-parser-output .infobox-3cols-child{margin:auto}.mw-parser-output .infobox .navbar{font-size:100%}@media screen{html.skin-theme-clientpref-night .mw-parser-output .infobox-full-data:not(.notheme)>div:not(.notheme)[style]{background:#1f1f23!important;color:#f8f9fa}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .infobox-full-data:not(.notheme) div:not(.notheme){background:#1f1f23!important;color:#f8f9fa}}@media(min-width:640px){body.skin--responsive .mw-parser-output .infobox-table{display:table!important}body.skin--responsive .mw-parser-output .infobox-table>caption{display:table-caption!important}body.skin--responsive .mw-parser-output .infobox-table>tbody{display:table-row-group}body.skin--responsive .mw-parser-output .infobox-table tr{display:table-row!important}body.skin--responsive .mw-parser-output .infobox-table th,body.skin--responsive .mw-parser-output .infobox-table td{padding-left:inherit;padding-right:inherit}}</style><table class="infobox vevent"><caption class="infobox-title summary">Stable Diffusion</caption><tbody><tr><td colspan="2" class="infobox-image logo"><span typeof="mw:File"><a href="/wiki/File:Astronaut_Riding_a_Horse_(SD3.5).webp" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/8/82/Astronaut_Riding_a_Horse_%28SD3.5%29.webp/250px-Astronaut_Riding_a_Horse_%28SD3.5%29.webp.png" decoding="async" width="250" height="250" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/8/82/Astronaut_Riding_a_Horse_%28SD3.5%29.webp/375px-Astronaut_Riding_a_Horse_%28SD3.5%29.webp.png 1.5x, 
//upload.wikimedia.org/wikipedia/commons/thumb/8/82/Astronaut_Riding_a_Horse_%28SD3.5%29.webp/500px-Astronaut_Riding_a_Horse_%28SD3.5%29.webp.png 2x" data-file-width="1024" data-file-height="1024" /></a></span><div class="infobox-caption">An image generated with Stable Diffusion 3.5 based on the text prompt "a photograph of an astronaut riding a horse"</div></td></tr><tr><th scope="row" class="infobox-label" style="white-space: nowrap;"><a href="/wiki/Programmer" title="Programmer">Original author(s)</a></th><td class="infobox-data">Runway, CompVis, and Stability AI</td></tr><tr><th scope="row" class="infobox-label" style="white-space: nowrap;"><a href="/wiki/Programmer" title="Programmer">Developer(s)</a></th><td class="infobox-data"><a href="/wiki/Stability_AI" title="Stability AI">Stability AI</a></td></tr><tr><th scope="row" class="infobox-label" style="white-space: nowrap;">Initial release</th><td class="infobox-data">August 22, 2022</td></tr><tr style="display: none;"><td colspan="2" class="infobox-full-data"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1257001546"></td></tr><tr><th scope="row" class="infobox-label" style="white-space: nowrap;"><a href="/wiki/Software_release_life_cycle" title="Software release life cycle">Stable release</a></th><td class="infobox-data"><div style="margin:0px;">SD 3.5 (model)<sup id="cite_ref-release-version_1-0" class="reference"><a href="#cite_note-release-version-1"><span class="cite-bracket">[</span>1<span class="cite-bracket">]</span></a></sup> / October 22, 2024</div></td></tr><tr style="display:none"><td colspan="2"> </td></tr><tr><th scope="row" class="infobox-label" style="white-space: nowrap;"><a href="/wiki/Repository_(version_control)" title="Repository (version control)">Repository</a></th><td class="infobox-data"><style data-mw-deduplicate="TemplateStyles:r1126788409">.mw-parser-output .plainlist ol,.mw-parser-output .plainlist 
ul{line-height:inherit;list-style:none;margin:0;padding:0}.mw-parser-output .plainlist ol li,.mw-parser-output .plainlist ul li{margin-bottom:0}</style><div class="plainlist"><ul><li><span class="url"><a rel="nofollow" class="external text" href="https://github.com/Stability-AI/generative-models">github<wbr />.com<wbr />/Stability-AI<wbr />/generative-models</a></span> <span class="mw-valign-text-top noprint" typeof="mw:File/Frameless"><a href="https://www.wikidata.org/wiki/Q113660857#P1324" title="Edit this at Wikidata"><img alt="Edit this at Wikidata" src="//upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/10px-OOjs_UI_icon_edit-ltr-progressive.svg.png" decoding="async" width="10" height="10" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/15px-OOjs_UI_icon_edit-ltr-progressive.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/20px-OOjs_UI_icon_edit-ltr-progressive.svg.png 2x" data-file-width="20" data-file-height="20" /></a></span></li></ul> </div></td></tr><tr><th scope="row" class="infobox-label" style="white-space: nowrap;">Written in</th><td class="infobox-data"><a href="/wiki/Python_(programming_language)" title="Python (programming language)">Python</a><sup id="cite_ref-2" class="reference"><a href="#cite_note-2"><span class="cite-bracket">[</span>2<span class="cite-bracket">]</span></a></sup></td></tr><tr><th scope="row" class="infobox-label" style="white-space: nowrap;"><a href="/wiki/Software_categories#Categorization_approaches" title="Software categories">Type</a></th><td class="infobox-data"><a href="/wiki/Text-to-image_model" title="Text-to-image model">Text-to-image model</a></td></tr><tr><th scope="row" class="infobox-label" style="white-space: nowrap;"><a href="/wiki/Software_license" title="Software license">License</a></th><td class="infobox-data">Stability AI Community License</td></tr><tr><th 
scope="row" class="infobox-label" style="white-space: nowrap;">Website</th><td class="infobox-data"><span class="url"><a rel="nofollow" class="external text" href="https://stability.ai/stable-image">stability<wbr />.ai<wbr />/stable-image</a></span> <span class="penicon autoconfirmed-show"><span class="mw-valign-text-top" typeof="mw:File/Frameless"><a href="https://www.wikidata.org/wiki/Q113660857?uselang=en#P856" title="Edit this on Wikidata"><img alt="Edit this on Wikidata" src="//upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/10px-OOjs_UI_icon_edit-ltr-progressive.svg.png" decoding="async" width="10" height="10" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/15px-OOjs_UI_icon_edit-ltr-progressive.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/20px-OOjs_UI_icon_edit-ltr-progressive.svg.png 2x" data-file-width="20" data-file-height="20" /></a></span></span></td></tr></tbody></table> <p><b>Stable Diffusion</b> is a <a href="/wiki/Deep_learning" title="Deep learning">deep learning</a>, <a href="/wiki/Text-to-image_model" title="Text-to-image model">text-to-image model</a> released in 2022 based on <a href="/wiki/Diffusion_model" title="Diffusion model">diffusion</a> techniques. The <a href="/wiki/Generative_artificial_intelligence" title="Generative artificial intelligence">generative artificial intelligence</a> technology is the premier product of <a href="/wiki/Stability_AI" title="Stability AI">Stability AI</a> and is considered to be a part of the ongoing <a href="/wiki/AI_boom" title="AI boom">artificial intelligence boom</a>. 
</p><p>It is primarily used to generate detailed images conditioned on text descriptions, though it can also be applied to other tasks such as <a href="/wiki/Inpainting" title="Inpainting">inpainting</a>, outpainting, and generating image-to-image translations guided by a <a href="/wiki/Prompt_engineering" title="Prompt engineering">text prompt</a>.<sup id="cite_ref-:0_3-0" class="reference"><a href="#cite_note-:0-3"><span class="cite-bracket">[</span>3<span class="cite-bracket">]</span></a></sup> Its development involved researchers from the CompVis Group at <a href="/wiki/Ludwig_Maximilian_University_of_Munich" title="Ludwig Maximilian University of Munich">Ludwig Maximilian University of Munich</a> and <a href="/wiki/Runway_(company)" title="Runway (company)">Runway</a> with a computational donation from Stability and training data from non-profit organizations.<sup id="cite_ref-sifted_financialtimes_4-0" class="reference"><a href="#cite_note-sifted_financialtimes-4"><span class="cite-bracket">[</span>4<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-lmu_lauch_5-0" class="reference"><a href="#cite_note-lmu_lauch-5"><span class="cite-bracket">[</span>5<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-6" class="reference"><a href="#cite_note-6"><span class="cite-bracket">[</span>6<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-stable-diffusion-launch_7-0" class="reference"><a href="#cite_note-stable-diffusion-launch-7"><span class="cite-bracket">[</span>7<span class="cite-bracket">]</span></a></sup> </p><p>Stable Diffusion is a <a href="/wiki/Latent_diffusion_model" title="Latent diffusion model">latent diffusion model</a>, a kind of deep generative artificial <a href="/wiki/Neural_network" title="Neural network">neural network</a>. 
Its code and model weights have been released <a href="/wiki/Source-available_software" title="Source-available software">publicly</a>,<sup id="cite_ref-stable-diffusion-github_8-0" class="reference"><a href="#cite_note-stable-diffusion-github-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup> and it can run on most consumer hardware equipped with a modest <a href="/wiki/Graphics_processing_unit" title="Graphics processing unit">GPU</a> with at least 4 GB <a href="/wiki/Video_random_access_memory" class="mw-redirect" title="Video random access memory">VRAM</a>. This marked a departure from previous proprietary text-to-image models such as <a href="/wiki/DALL-E" title="DALL-E">DALL-E</a> and <a href="/wiki/Midjourney" title="Midjourney">Midjourney</a> which were accessible only via <a href="/wiki/Cloud_service" class="mw-redirect" title="Cloud service">cloud services</a>.<sup id="cite_ref-pcworld_9-0" class="reference"><a href="#cite_note-pcworld-9"><span class="cite-bracket">[</span>9<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-verge_10-0" class="reference"><a href="#cite_note-verge-10"><span class="cite-bracket">[</span>10<span class="cite-bracket">]</span></a></sup> </p> <meta property="mw:PageProp/toc" /> <div class="mw-heading mw-heading2"><h2 id="Development">Development</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=1" title="Edit section: Development"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Stable Diffusion originated from a project called <b>Latent Diffusion</b>,<sup id="cite_ref-:9_11-0" class="reference"><a href="#cite_note-:9-11"><span class="cite-bracket">[</span>11<span class="cite-bracket">]</span></a></sup> developed in Germany by researchers at <a href="/wiki/Ludwig_Maximilian_University" class="mw-redirect" title="Ludwig Maximilian University">Ludwig Maximilian 
University</a> in <a href="/wiki/Munich" title="Munich">Munich</a> and <a href="/wiki/Heidelberg_University" title="Heidelberg University">Heidelberg University</a>. Four of the original five authors (Robin Rombach, Andreas Blattmann, Patrick Esser and Dominik Lorenz) later joined Stability AI and released subsequent versions of Stable Diffusion.<sup id="cite_ref-12" class="reference"><a href="#cite_note-12"><span class="cite-bracket">[</span>12<span class="cite-bracket">]</span></a></sup> </p><p>The technical license for the model was released by the CompVis group at Ludwig Maximilian University of Munich.<sup id="cite_ref-verge_10-1" class="reference"><a href="#cite_note-verge-10"><span class="cite-bracket">[</span>10<span class="cite-bracket">]</span></a></sup> Development was led by Patrick Esser of <a href="/wiki/Runway_(company)" title="Runway (company)">Runway</a> and Robin Rombach of CompVis, who were among the researchers who had earlier invented the latent diffusion model architecture used by Stable Diffusion.<sup id="cite_ref-stable-diffusion-launch_7-1" class="reference"><a href="#cite_note-stable-diffusion-launch-7"><span class="cite-bracket">[</span>7<span class="cite-bracket">]</span></a></sup> Stability AI also credited <a href="/wiki/EleutherAI" title="EleutherAI">EleutherAI</a> and <a href="/wiki/LAION" title="LAION">LAION</a> (a German nonprofit which assembled the dataset on which Stable Diffusion was trained) as supporters of the project.<sup id="cite_ref-stable-diffusion-launch_7-2" class="reference"><a href="#cite_note-stable-diffusion-launch-7"><span class="cite-bracket">[</span>7<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Technology">Technology</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=2" title="Edit section: Technology"><span>edit</span></a><span
class="mw-editsection-bracket">]</span></span></div> <figure class="mw-default-size mw-halign-right" typeof="mw:File/Thumb"><a href="/wiki/File:Stable_Diffusion_architecture.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/f/f6/Stable_Diffusion_architecture.png/290px-Stable_Diffusion_architecture.png" decoding="async" width="290" height="142" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/f/f6/Stable_Diffusion_architecture.png 1.5x" data-file-width="435" data-file-height="213" /></a><figcaption>Diagram of the latent diffusion architecture used by Stable Diffusion</figcaption></figure> <figure class="mw-halign-right" typeof="mw:File/Thumb"><a href="/wiki/File:X-Y_plot_of_algorithmically-generated_AI_art_of_European-style_castle_in_Japan_demonstrating_DDIM_diffusion_steps.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/9/99/X-Y_plot_of_algorithmically-generated_AI_art_of_European-style_castle_in_Japan_demonstrating_DDIM_diffusion_steps.png/300px-X-Y_plot_of_algorithmically-generated_AI_art_of_European-style_castle_in_Japan_demonstrating_DDIM_diffusion_steps.png" decoding="async" width="300" height="203" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/99/X-Y_plot_of_algorithmically-generated_AI_art_of_European-style_castle_in_Japan_demonstrating_DDIM_diffusion_steps.png/450px-X-Y_plot_of_algorithmically-generated_AI_art_of_European-style_castle_in_Japan_demonstrating_DDIM_diffusion_steps.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/99/X-Y_plot_of_algorithmically-generated_AI_art_of_European-style_castle_in_Japan_demonstrating_DDIM_diffusion_steps.png/600px-X-Y_plot_of_algorithmically-generated_AI_art_of_European-style_castle_in_Japan_demonstrating_DDIM_diffusion_steps.png 2x" data-file-width="2560" data-file-height="1734" /></a><figcaption>The <a href="/wiki/Denoising" class="mw-redirect" 
title="Denoising">denoising</a> process used by Stable Diffusion. The model generates images by iteratively denoising <a href="/wiki/Random_noise" class="mw-redirect" title="Random noise">random noise</a> until a configured number of steps have been reached, guided by the CLIP text encoder pretrained on <a href="/wiki/Concept" title="Concept">concepts</a> along with the attention mechanism, resulting in the desired image depicting a representation of the trained concept.</figcaption></figure> <div class="mw-heading mw-heading3"><h3 id="Architecture">Architecture</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=3" title="Edit section: Architecture"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1236090951">.mw-parser-output .hatnote{font-style:italic}.mw-parser-output div.hatnote{padding-left:1.6em;margin-bottom:0.5em}.mw-parser-output .hatnote i{font-style:normal}.mw-parser-output .hatnote+link+.hatnote{margin-top:-0.5em}@media print{body.ns-0 .mw-parser-output .hatnote{display:none!important}}</style><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Latent_diffusion_model" title="Latent diffusion model">Latent diffusion model</a></div> <p>Models in Stable Diffusion series before SD 3 all used a kind of <a href="/wiki/Diffusion_model" title="Diffusion model">diffusion model</a> (DM), called a <a href="/wiki/Latent_diffusion_model" title="Latent diffusion model">latent diffusion model (LDM)</a>, developed by the CompVis (Computer Vision & Learning)<sup id="cite_ref-13" class="reference"><a href="#cite_note-13"><span class="cite-bracket">[</span>13<span class="cite-bracket">]</span></a></sup> group at <a href="/wiki/LMU_Munich" class="mw-redirect" title="LMU Munich">LMU Munich</a>.<sup id="cite_ref-paper_14-0" class="reference"><a 
href="#cite_note-paper-14"><span class="cite-bracket">[</span>14<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-stable-diffusion-github_8-1" class="reference"><a href="#cite_note-stable-diffusion-github-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup> Introduced in 2015, diffusion models are trained with the objective of removing successive applications of <a href="/wiki/Gaussian_noise" title="Gaussian noise">Gaussian noise</a> on training images, which can be thought of as a sequence of <a href="/wiki/Denoising_autoencoder" class="mw-redirect" title="Denoising autoencoder">denoising autoencoders</a>. Stable Diffusion consists of 3 parts: the <a href="/wiki/Variational_autoencoder" title="Variational autoencoder">variational autoencoder</a> (VAE), <a href="/wiki/U-Net" title="U-Net">U-Net</a>, and an optional text encoder.<sup id="cite_ref-:02_15-0" class="reference"><a href="#cite_note-:02-15"><span class="cite-bracket">[</span>15<span class="cite-bracket">]</span></a></sup> The VAE encoder compresses the image from pixel space to a smaller dimensional <a href="/wiki/Latent_space" title="Latent space">latent space</a>, capturing a more fundamental semantic meaning of the image.<sup id="cite_ref-paper_14-1" class="reference"><a href="#cite_note-paper-14"><span class="cite-bracket">[</span>14<span class="cite-bracket">]</span></a></sup> Gaussian noise is iteratively applied to the compressed latent representation during forward diffusion.<sup id="cite_ref-:02_15-1" class="reference"><a href="#cite_note-:02-15"><span class="cite-bracket">[</span>15<span class="cite-bracket">]</span></a></sup> The U-Net block, composed of a <a href="/wiki/Residual_neural_network" title="Residual neural network">ResNet</a> backbone, <a href="/wiki/Noise_reduction" title="Noise reduction">denoises</a> the output from forward diffusion backwards to obtain a latent representation. 
Finally, the VAE decoder generates the final image by converting the representation back into pixel space.<sup id="cite_ref-:02_15-2" class="reference"><a href="#cite_note-:02-15"><span class="cite-bracket">[</span>15<span class="cite-bracket">]</span></a></sup> </p><p>The denoising step can be flexibly conditioned on a string of text, an image, or another modality. The encoded conditioning data is exposed to denoising U-Nets via a <a href="/wiki/Attention_(machine_learning)" title="Attention (machine learning)">cross-attention mechanism</a>.<sup id="cite_ref-:02_15-3" class="reference"><a href="#cite_note-:02-15"><span class="cite-bracket">[</span>15<span class="cite-bracket">]</span></a></sup> For conditioning on text, the fixed, pretrained <a href="/wiki/Contrastive_Language-Image_Pre-training" title="Contrastive Language-Image Pre-training">CLIP</a> ViT-L/14 text encoder is used to transform text prompts to an embedding space.<sup id="cite_ref-stable-diffusion-github_8-2" class="reference"><a href="#cite_note-stable-diffusion-github-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup> Researchers point to increased computational efficiency for training and generation as an advantage of LDMs.<sup id="cite_ref-stable-diffusion-launch_7-3" class="reference"><a href="#cite_note-stable-diffusion-launch-7"><span class="cite-bracket">[</span>7<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-paper_14-2" class="reference"><a href="#cite_note-paper-14"><span class="cite-bracket">[</span>14<span class="cite-bracket">]</span></a></sup> </p><p>The name <i>diffusion</i> takes inspiration from the <a href="/wiki/Thermodynamic" class="mw-redirect" title="Thermodynamic">thermodynamic</a> <a href="/wiki/Diffusion" title="Diffusion">diffusion</a> and an important link was made between this purely physical field and deep learning in 2015.<sup id="cite_ref-16" class="reference"><a href="#cite_note-16"><span 
class="cite-bracket">[</span>16<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-17" class="reference"><a href="#cite_note-17"><span class="cite-bracket">[</span>17<span class="cite-bracket">]</span></a></sup> </p><p>With 860<span class="nowrap"> </span>million parameters in the U-Net and 123<span class="nowrap"> </span>million in the text encoder, Stable Diffusion is considered relatively lightweight by 2022 standards, and unlike other diffusion models, it can run on <a href="/wiki/Consumer_electronics" title="Consumer electronics">consumer</a> GPUs,<sup id="cite_ref-18" class="reference"><a href="#cite_note-18"><span class="cite-bracket">[</span>18<span class="cite-bracket">]</span></a></sup> and even <a href="/wiki/CPU" class="mw-redirect" title="CPU">CPU</a>-only if using the <a href="/wiki/OpenVINO" title="OpenVINO">OpenVINO</a> version of Stable Diffusion.<sup id="cite_ref-19" class="reference"><a href="#cite_note-19"><span class="cite-bracket">[</span>19<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading4"><h4 id="SD_XL">SD XL</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=4" title="Edit section: SD XL"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The XL version uses the same LDM architecture as previous versions,<sup id="cite_ref-:4_20-0" class="reference"><a href="#cite_note-:4-20"><span class="cite-bracket">[</span>20<span class="cite-bracket">]</span></a></sup> except larger: larger UNet backbone, larger cross-attention context, two text encoders instead of one, and trained on multiple aspect ratios (not just the square aspect ratio like previous versions). </p><p>The SD XL Refiner, released at the same time, has the same architecture as SD XL, but it was trained for adding fine details to preexisting images via text-conditional img2img. 
</p> <div class="mw-heading mw-heading4"><h4 id="SD_3.0">SD 3.0</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=5" title="Edit section: SD 3.0"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Diffusion_model#Rectified_flow" title="Diffusion model">Diffusion model § Rectified flow</a></div> <p>The 3.0 version<sup id="cite_ref-:6_21-0" class="reference"><a href="#cite_note-:6-21"><span class="cite-bracket">[</span>21<span class="cite-bracket">]</span></a></sup> completely changes the backbone. Not a UNet, but a <i>Rectified Flow Transformer</i>, which implements the rectified flow method<sup id="cite_ref-:7_22-0" class="reference"><a href="#cite_note-:7-22"><span class="cite-bracket">[</span>22<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-:8_23-0" class="reference"><a href="#cite_note-:8-23"><span class="cite-bracket">[</span>23<span class="cite-bracket">]</span></a></sup> with a <a href="/wiki/Transformer_(deep_learning_architecture)" title="Transformer (deep learning architecture)">Transformer</a>. </p><p>The Transformer architecture used for SD 3.0 has three "tracks", for original text encoding, transformed text encoding, and image encoding (in latent space). The transformed text encoding and image encoding are mixed during each transformer block. </p><p>The architecture is named "multimodal diffusion transformer" (MMDiT), where the "multimodal" means that it mixes text and image encodings inside its operations. This differs from previous versions of DiT, where the text encoding affects the image encoding, but not vice versa. 
</p> <div class="mw-heading mw-heading3"><h3 id="Training_data">Training data</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=6" title="Edit section: Training data"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Stable Diffusion was trained on pairs of images and captions taken from LAION-5B, a publicly available dataset derived from <a href="/wiki/Common_Crawl" title="Common Crawl">Common Crawl</a> data scraped from the web, where 5 billion image-text pairs were classified based on language and filtered into separate datasets by resolution, a predicted likelihood of containing a watermark, and predicted "aesthetic" score (e.g. subjective visual quality).<sup id="cite_ref-Waxy_24-0" class="reference"><a href="#cite_note-Waxy-24"><span class="cite-bracket">[</span>24<span class="cite-bracket">]</span></a></sup> The dataset was created by <a href="/wiki/LAION" title="LAION">LAION</a>, a German non-profit which receives funding from Stability AI.<sup id="cite_ref-Waxy_24-1" class="reference"><a href="#cite_note-Waxy-24"><span class="cite-bracket">[</span>24<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-25" class="reference"><a href="#cite_note-25"><span class="cite-bracket">[</span>25<span class="cite-bracket">]</span></a></sup> The Stable Diffusion model was trained on three subsets of LAION-5B: laion2B-en, laion-high-resolution, and laion-aesthetics v2 5+.<sup id="cite_ref-Waxy_24-2" class="reference"><a href="#cite_note-Waxy-24"><span class="cite-bracket">[</span>24<span class="cite-bracket">]</span></a></sup> A third-party analysis of the model's training data identified that out of a smaller subset of 12 million images taken from the original wider dataset used, approximately 47% of the sample size of images came from 100 different domains, with <a href="/wiki/Pinterest" title="Pinterest">Pinterest</a> taking up 8.5% 
of the subset, followed by websites such as <a href="/wiki/WordPress" title="WordPress">WordPress</a>, <a href="/wiki/Blogspot" class="mw-redirect" title="Blogspot">Blogspot</a>, <a href="/wiki/Flickr" title="Flickr">Flickr</a>, <a href="/wiki/DeviantArt" title="DeviantArt">DeviantArt</a> and <a href="/wiki/Wikimedia_Commons" title="Wikimedia Commons">Wikimedia Commons</a>.<sup class="noprint Inline-Template Template-Fact" style="white-space:nowrap;">[<i><a href="/wiki/Wikipedia:Citation_needed" title="Wikipedia:Citation needed"><span title="This claim needs references to reliable sources. (October 2023)">citation needed</span></a></i>]</sup> An investigation by Bayerischer Rundfunk showed that LAION's datasets, hosted on Hugging Face, contain large amounts of private and sensitive data.<sup id="cite_ref-:2_26-0" class="reference"><a href="#cite_note-:2-26"><span class="cite-bracket">[</span>26<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Training_procedures">Training procedures</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=7" title="Edit section: Training procedures"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The model was initially trained on the laion2B-en and laion-high-resolution subsets, with the last few rounds of training done on LAION-Aesthetics v2 5+, a subset of 600 million captioned images which the LAION-Aesthetics Predictor V2 predicted that humans would, on average, give a score of at least 5 out of 10 when asked to rate how much they liked them.<sup id="cite_ref-27" class="reference"><a href="#cite_note-27"><span class="cite-bracket">[</span>27<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-Waxy_24-3" class="reference"><a href="#cite_note-Waxy-24"><span class="cite-bracket">[</span>24<span class="cite-bracket">]</span></a></sup><sup 
id="cite_ref-LAION-Aesthetics_28-0" class="reference"><a href="#cite_note-LAION-Aesthetics-28"><span class="cite-bracket">[</span>28<span class="cite-bracket">]</span></a></sup> The LAION-Aesthetics v2 5+ subset also excluded low-resolution images and images which LAION-5B-WatermarkDetection identified as carrying a <a href="/wiki/Watermark" title="Watermark">watermark</a> with greater than 80% probability.<sup id="cite_ref-Waxy_24-4" class="reference"><a href="#cite_note-Waxy-24"><span class="cite-bracket">[</span>24<span class="cite-bracket">]</span></a></sup> Final rounds of training additionally dropped 10% of text conditioning to improve Classifier-Free Diffusion Guidance.<sup id="cite_ref-:5_29-0" class="reference"><a href="#cite_note-:5-29"><span class="cite-bracket">[</span>29<span class="cite-bracket">]</span></a></sup> </p><p>The model was trained using 256 <a href="/wiki/Ampere_(microarchitecture)" title="Ampere (microarchitecture)">Nvidia A100</a> GPUs on <a href="/wiki/Amazon_Web_Services" title="Amazon Web Services">Amazon Web Services</a> for a total of 150,000 GPU-hours, at a cost of $600,000.<sup id="cite_ref-30" class="reference"><a href="#cite_note-30"><span class="cite-bracket">[</span>30<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-stable-diffusion-model-card-1-4_31-0" class="reference"><a href="#cite_note-stable-diffusion-model-card-1-4-31"><span class="cite-bracket">[</span>31<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-32" class="reference"><a href="#cite_note-32"><span class="cite-bracket">[</span>32<span class="cite-bracket">]</span></a></sup> </p><p>SD3 was trained at a cost of around $10 million.<sup id="cite_ref-33" class="reference"><a href="#cite_note-33"><span class="cite-bracket">[</span>33<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Limitations">Limitations</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a 
href="/w/index.php?title=Stable_Diffusion&action=edit&section=8" title="Edit section: Limitations"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Stable Diffusion has issues with degradation and inaccuracies in certain scenarios. Initial releases of the model were trained on a dataset that consists of 512×512 resolution images, meaning that the quality of generated images noticeably degrades when user specifications deviate from its "expected" 512×512 resolution;<sup id="cite_ref-diffusers_34-0" class="reference"><a href="#cite_note-diffusers-34"><span class="cite-bracket">[</span>34<span class="cite-bracket">]</span></a></sup> the version 2.0 update of the Stable Diffusion model later introduced the ability to natively generate images at 768×768 resolution.<sup id="cite_ref-release2.0_35-0" class="reference"><a href="#cite_note-release2.0-35"><span class="cite-bracket">[</span>35<span class="cite-bracket">]</span></a></sup> Another challenge is in generating human limbs due to poor data quality of limbs in the LAION database.<sup id="cite_ref-36" class="reference"><a href="#cite_note-36"><span class="cite-bracket">[</span>36<span class="cite-bracket">]</span></a></sup> The model is insufficiently trained to understand human limbs and faces due to the lack of representative features in the database, and prompting the model to generate images of such type can confound the model.<sup id="cite_ref-37" class="reference"><a href="#cite_note-37"><span class="cite-bracket">[</span>37<span class="cite-bracket">]</span></a></sup> Stable Diffusion XL (SDXL) version 1.0, released in July 2023, introduced native 1024x1024 resolution and improved generation for limbs and text.<sup id="cite_ref-38" class="reference"><a href="#cite_note-38"><span class="cite-bracket">[</span>38<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-39" class="reference"><a href="#cite_note-39"><span class="cite-bracket">[</span>39<span 
class="cite-bracket">]</span></a></sup> </p><p>Accessibility for individual developers can also be a problem. In order to customize the model for new use cases that are not included in the dataset, such as generating <a href="/wiki/Anime" title="Anime">anime</a> characters ("waifu diffusion"),<sup id="cite_ref-40" class="reference"><a href="#cite_note-40"><span class="cite-bracket">[</span>40<span class="cite-bracket">]</span></a></sup> new data and further training are required. <a href="/wiki/Fine-tuning_(machine_learning)" class="mw-redirect" title="Fine-tuning (machine learning)">Fine-tuned</a> adaptations of Stable Diffusion created through additional retraining have been used for a variety of different use-cases, from medical imaging<sup id="cite_ref-41" class="reference"><a href="#cite_note-41"><span class="cite-bracket">[</span>41<span class="cite-bracket">]</span></a></sup> to <a href="/wiki/Riffusion" title="Riffusion">algorithmically generated music</a>.<sup id="cite_ref-42" class="reference"><a href="#cite_note-42"><span class="cite-bracket">[</span>42<span class="cite-bracket">]</span></a></sup> However, this fine-tuning process is sensitive to the quality of new data; low resolution images or different resolutions from the original data can not only fail to learn the new task but degrade the overall performance of the model. Even when the model is additionally trained on high quality images, it is difficult for individuals to run models in consumer electronics. 
For example, the training process for waifu-diffusion requires a minimum 30 GB of <a href="/wiki/VRAM" class="mw-redirect" title="VRAM">VRAM</a>,<sup id="cite_ref-43" class="reference"><a href="#cite_note-43"><span class="cite-bracket">[</span>43<span class="cite-bracket">]</span></a></sup> which exceeds the usual resource provided in such consumer GPUs as <a href="/wiki/Nvidia" title="Nvidia">Nvidia</a>'s <a href="/wiki/GeForce_30_series" title="GeForce 30 series">GeForce 30 series</a>, which has only about 12 GB.<sup id="cite_ref-44" class="reference"><a href="#cite_note-44"><span class="cite-bracket">[</span>44<span class="cite-bracket">]</span></a></sup> </p><p>The creators of Stable Diffusion acknowledge the potential for <a href="/wiki/Algorithmic_bias" title="Algorithmic bias">algorithmic bias</a>, as the model was primarily trained on images with English descriptions.<sup id="cite_ref-stable-diffusion-model-card-1-4_31-1" class="reference"><a href="#cite_note-stable-diffusion-model-card-1-4-31"><span class="cite-bracket">[</span>31<span class="cite-bracket">]</span></a></sup> As a result, generated images reinforce social biases and are from a western perspective, as the creators note that the model lacks data from other communities and cultures. 
The model gives more accurate results for prompts that are written in English in comparison to those written in other languages, with western or white cultures often being the default representation.<sup id="cite_ref-stable-diffusion-model-card-1-4_31-2" class="reference"><a href="#cite_note-stable-diffusion-model-card-1-4-31"><span class="cite-bracket">[</span>31<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="End-user_fine-tuning">End-user fine-tuning</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=9" title="Edit section: End-user fine-tuning"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>To address the limitations of the model's initial training, end-users may opt to implement additional training to <a href="/wiki/Fine-tuning_(machine_learning)" class="mw-redirect" title="Fine-tuning (machine learning)">fine-tune</a> generation outputs to match more specific use-cases, a process also referred to as <a href="/wiki/Text-to-image_personalization" title="Text-to-image personalization">personalization</a>. 
There are three methods in which user-accessible fine-tuning can be applied to a Stable Diffusion model checkpoint: </p> <ul><li>An "embedding" can be trained from a collection of user-provided images, and allows the model to generate visually similar images whenever the name of the embedding is used within a generation prompt.<sup id="cite_ref-45" class="reference"><a href="#cite_note-45"><span class="cite-bracket">[</span>45<span class="cite-bracket">]</span></a></sup> Embeddings are based on the "textual inversion" concept developed by researchers from <a href="/wiki/Tel_Aviv_University" title="Tel Aviv University">Tel Aviv University</a> in 2022 with support from <a href="/wiki/Nvidia" title="Nvidia">Nvidia</a>, where vector representations for specific tokens used by the model's text encoder are linked to new pseudo-words. Embeddings can be used to reduce biases within the original model, or mimic visual styles.<sup id="cite_ref-46" class="reference"><a href="#cite_note-46"><span class="cite-bracket">[</span>46<span class="cite-bracket">]</span></a></sup></li> <li>A "hypernetwork" is a small pretrained neural network that is applied to various points within a larger neural network, and refers to the technique created by <a href="/wiki/NovelAI" title="NovelAI">NovelAI</a> developer Kurumuz in 2021, originally intended for text-generation <a href="/wiki/Transformer_(machine_learning_model)" class="mw-redirect" title="Transformer (machine learning model)">transformer models</a>. 
Hypernetworks steer results towards a particular direction, allowing Stable Diffusion-based models to imitate the art style of specific artists, even if the artist is not recognised by the original model; they process the image by finding key areas of importance such as hair and eyes, and then patch these areas in secondary latent space.<sup id="cite_ref-47" class="reference"><a href="#cite_note-47"><span class="cite-bracket">[</span>47<span class="cite-bracket">]</span></a></sup></li> <li><a href="/wiki/DreamBooth" title="DreamBooth">DreamBooth</a> is a deep learning generation model developed by researchers from <a href="/wiki/Google" title="Google">Google Research</a> and <a href="/wiki/Boston_University" title="Boston University">Boston University</a> in 2022 which can fine-tune the model to generate precise, personalised outputs that depict a specific subject, following training via a set of images which depict the subject.<sup id="cite_ref-48" class="reference"><a href="#cite_note-48"><span class="cite-bracket">[</span>48<span class="cite-bracket">]</span></a></sup></li></ul> <div class="mw-heading mw-heading2"><h2 id="Capabilities">Capabilities</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=10" title="Edit section: Capabilities"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The Stable Diffusion model supports the ability to generate new images from scratch through the use of a text prompt describing elements to be included or omitted from the output.<sup id="cite_ref-stable-diffusion-github_8-3" class="reference"><a href="#cite_note-stable-diffusion-github-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup> Existing images can be re-drawn by the model to incorporate new elements described by a text prompt (a process known as "guided image synthesis"<sup id="cite_ref-49" class="reference"><a 
href="#cite_note-49"><span class="cite-bracket">[</span>49<span class="cite-bracket">]</span></a></sup>) through its diffusion-denoising mechanism.<sup id="cite_ref-stable-diffusion-github_8-4" class="reference"><a href="#cite_note-stable-diffusion-github-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup> In addition, the model also allows the use of prompts to partially alter existing images via <a href="/wiki/Inpainting" title="Inpainting">inpainting</a> and outpainting, when used with an appropriate user interface that supports such features, of which numerous different open source implementations exist.<sup id="cite_ref-webui_showcase_50-0" class="reference"><a href="#cite_note-webui_showcase-50"><span class="cite-bracket">[</span>50<span class="cite-bracket">]</span></a></sup> </p><p>Stable Diffusion is recommended to be run with 10 GB or more VRAM; however, users with less VRAM may opt to load the weights in <a href="/wiki/Float16" class="mw-redirect" title="Float16">float16</a> precision instead of the default <a href="/wiki/Float32" class="mw-redirect" title="Float32">float32</a> to trade off model performance for lower VRAM usage.<sup id="cite_ref-diffusers_34-1" class="reference"><a href="#cite_note-diffusers-34"><span class="cite-bracket">[</span>34<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Text_to_image_generation">Text to image generation</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=11" title="Edit section: Text to image generation"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1237032888/mw-parser-output/.tmulti">.mw-parser-output .tmulti .multiimageinner{display:flex;flex-direction:column}.mw-parser-output .tmulti 
.trow{display:flex;flex-direction:row;clear:left;flex-wrap:wrap;width:100%;box-sizing:border-box}.mw-parser-output .tmulti .tsingle{margin:1px;float:left}.mw-parser-output .tmulti .theader{clear:both;font-weight:bold;text-align:center;align-self:center;background-color:transparent;width:100%}.mw-parser-output .tmulti .thumbcaption{background-color:transparent}.mw-parser-output .tmulti .text-align-left{text-align:left}.mw-parser-output .tmulti .text-align-right{text-align:right}.mw-parser-output .tmulti .text-align-center{text-align:center}@media all and (max-width:720px){.mw-parser-output .tmulti .thumbinner{width:100%!important;box-sizing:border-box;max-width:none!important;align-items:center}.mw-parser-output .tmulti .trow{justify-content:center}.mw-parser-output .tmulti .tsingle{float:none!important;max-width:100%!important;box-sizing:border-box;text-align:center}.mw-parser-output .tmulti .tsingle .thumbcaption{text-align:left}.mw-parser-output .tmulti .trow>.thumbcaption{text-align:center}}@media screen{html.skin-theme-clientpref-night .mw-parser-output .tmulti .multiimageinner img{background-color:white}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .tmulti .multiimageinner img{background-color:white}}</style><div class="thumb tmulti tright"><div class="thumbinner multiimageinner" style="width:192px;max-width:192px"><div class="trow"><div class="tsingle" style="width:190px;max-width:190px"><div class="thumbimage" style="height:94px;overflow:hidden"><span typeof="mw:File"><a href="/wiki/File:Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine.png" class="mw-file-description"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/6/6d/Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine.png/188px-Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine.png" decoding="async" width="188" height="94" class="mw-file-element" 
srcset="//upload.wikimedia.org/wikipedia/commons/thumb/6/6d/Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine.png/282px-Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/6d/Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine.png/376px-Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine.png 2x" data-file-width="4096" data-file-height="2048" /></a></span></div></div></div><div class="trow"><div class="tsingle" style="width:190px;max-width:190px"><div class="thumbimage" style="height:94px;overflow:hidden"><span typeof="mw:File"><a href="/wiki/File:Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine_using_negative_prompt_for_green_trees.png" class="mw-file-description"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/6/6d/Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine_using_negative_prompt_for_green_trees.png/188px-Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine_using_negative_prompt_for_green_trees.png" decoding="async" width="188" height="94" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/6/6d/Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine_using_negative_prompt_for_green_trees.png/282px-Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine_using_negative_prompt_for_green_trees.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/6d/Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine_using_negative_prompt_for_green_trees.png/376px-Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine_using_negative_prompt_for_green_trees.png 2x" data-file-width="4096" data-file-height="2048" /></a></span></div></div></div><div class="trow"><div class="tsingle" style="width:190px;max-width:190px"><div class="thumbimage" 
style="height:94px;overflow:hidden"><span typeof="mw:File"><a href="/wiki/File:Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine_using_negative_prompt_for_round_stones.png" class="mw-file-description"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/9/94/Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine_using_negative_prompt_for_round_stones.png/188px-Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine_using_negative_prompt_for_round_stones.png" decoding="async" width="188" height="94" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/94/Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine_using_negative_prompt_for_round_stones.png/282px-Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine_using_negative_prompt_for_round_stones.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/94/Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine_using_negative_prompt_for_round_stones.png/376px-Algorithmically-generated_landscape_artwork_of_forest_with_Shinto_shrine_using_negative_prompt_for_round_stones.png 2x" data-file-width="4096" data-file-height="2048" /></a></span></div></div></div><div class="trow" style="display:flex"><div class="thumbcaption">Demonstration of the effect of negative prompts on image generation <ul><li><b>Top</b>: no negative prompt</li> <li><b>Centre</b>: "green trees"</li> <li><b>Bottom</b>: "round stones, round rocks"</li></ul></div></div></div></div> <p>The text to image sampling script within Stable Diffusion, known as "txt2img", consumes a text prompt in addition to assorted option parameters covering sampling types, output image dimensions, and seed values. 
The script outputs an image file based on the model's interpretation of the prompt.<sup id="cite_ref-stable-diffusion-github_8-5" class="reference"><a href="#cite_note-stable-diffusion-github-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup> Generated images are tagged with an invisible <a href="/wiki/Digital_watermark" class="mw-redirect" title="Digital watermark">digital watermark</a> to allow users to identify an image as generated by Stable Diffusion,<sup id="cite_ref-stable-diffusion-github_8-6" class="reference"><a href="#cite_note-stable-diffusion-github-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup> although this watermark loses its efficacy if the image is resized or rotated.<sup id="cite_ref-51" class="reference"><a href="#cite_note-51"><span class="cite-bracket">[</span>51<span class="cite-bracket">]</span></a></sup> </p><p>Each txt2img generation will involve a specific <a href="/wiki/Random_seed" title="Random seed">seed value</a> which affects the output image. 
Users may opt to randomize the seed in order to explore different generated outputs, or use the same seed to obtain the same image output as a previously generated image.<sup id="cite_ref-diffusers_34-2" class="reference"><a href="#cite_note-diffusers-34"><span class="cite-bracket">[</span>34<span class="cite-bracket">]</span></a></sup> Users are also able to adjust the number of inference steps for the sampler; a higher value takes a longer duration of time, however a smaller value may result in visual defects.<sup id="cite_ref-diffusers_34-3" class="reference"><a href="#cite_note-diffusers-34"><span class="cite-bracket">[</span>34<span class="cite-bracket">]</span></a></sup> Another configurable option, the classifier-free guidance scale value, allows the user to adjust how closely the output image adheres to the prompt.<sup id="cite_ref-:5_29-1" class="reference"><a href="#cite_note-:5-29"><span class="cite-bracket">[</span>29<span class="cite-bracket">]</span></a></sup> More experimentative use cases may opt for a lower scale value, while use cases aiming for more specific outputs may use a higher value.<sup id="cite_ref-diffusers_34-4" class="reference"><a href="#cite_note-diffusers-34"><span class="cite-bracket">[</span>34<span class="cite-bracket">]</span></a></sup> </p><p>Additional text2img features are provided by <a href="/wiki/Frontend_and_backend" title="Frontend and backend">front-end</a> implementations of Stable Diffusion, which allow users to modify the weight given to specific parts of the text prompt. Emphasis markers allow users to add or reduce emphasis to keywords by enclosing them with brackets.<sup id="cite_ref-52" class="reference"><a href="#cite_note-52"><span class="cite-bracket">[</span>52<span class="cite-bracket">]</span></a></sup> An alternative method of adjusting weight to parts of the prompt are "negative prompts". 
Negative prompts are a feature included in some front-end implementations, including Stability AI's own DreamStudio cloud service, and allow the user to specify prompts which the model should avoid during image generation. The specified prompts may be undesirable image features that would otherwise be present within image outputs due to the positive prompts provided by the user, or due to how the model was originally trained, with mangled human hands being a common example.<sup id="cite_ref-webui_showcase_50-1" class="reference"><a href="#cite_note-webui_showcase-50"><span class="cite-bracket">[</span>50<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-release2.1_53-0" class="reference"><a href="#cite_note-release2.1-53"><span class="cite-bracket">[</span>53<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Image_modification">Image modification</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=12" title="Edit section: Image modification"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1237032888/mw-parser-output/.tmulti"><div class="thumb tmulti tright"><div class="thumbinner multiimageinner" style="width:392px;max-width:392px"><div class="trow"><div class="tsingle" style="width:194px;max-width:194px"><div class="thumbimage" style="height:192px;overflow:hidden"><span typeof="mw:File"><a href="/wiki/File:NightCitySphere_(SD1.5).jpg" class="mw-file-description"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/8/82/NightCitySphere_%28SD1.5%29.jpg/192px-NightCitySphere_%28SD1.5%29.jpg" decoding="async" width="192" height="192" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/8/82/NightCitySphere_%28SD1.5%29.jpg/288px-NightCitySphere_%28SD1.5%29.jpg 1.5x, 
//upload.wikimedia.org/wikipedia/commons/thumb/8/82/NightCitySphere_%28SD1.5%29.jpg/384px-NightCitySphere_%28SD1.5%29.jpg 2x" data-file-width="4096" data-file-height="4096" /></a></span></div></div><div class="tsingle" style="width:194px;max-width:194px"><div class="thumbimage" style="height:192px;overflow:hidden"><span typeof="mw:File"><a href="/wiki/File:NightCitySphere_(SDXL).jpg" class="mw-file-description"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/5/56/NightCitySphere_%28SDXL%29.jpg/192px-NightCitySphere_%28SDXL%29.jpg" decoding="async" width="192" height="192" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/5/56/NightCitySphere_%28SDXL%29.jpg/288px-NightCitySphere_%28SDXL%29.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/5/56/NightCitySphere_%28SDXL%29.jpg/384px-NightCitySphere_%28SDXL%29.jpg 2x" data-file-width="8000" data-file-height="8000" /></a></span></div></div></div><div class="trow" style="display:flex"><div class="thumbcaption">Demonstration of img2img modification <ul><li><b>Left</b>: Original image created with Stable Diffusion 1.5</li> <li><b>Right</b>: Modified image created with Stable Diffusion XL 1.0</li></ul></div></div></div></div> <p>Stable Diffusion also includes another sampling script, "img2img", which consumes a text prompt, path to an existing image, and strength value between 0.0 and 1.0. The script outputs a new image based on the original image that also features elements provided within the text prompt. The strength value denotes the amount of noise added to the output image. 
A higher strength value produces more variation within the image but may produce an image that is not semantically consistent with the prompt provided.<sup id="cite_ref-stable-diffusion-github_8-7" class="reference"><a href="#cite_note-stable-diffusion-github-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup> </p><p>There are different methods for performing img2img. The main method is SDEdit,<sup id="cite_ref-:10_54-0" class="reference"><a href="#cite_note-:10-54"><span class="cite-bracket">[</span>54<span class="cite-bracket">]</span></a></sup> which first adds noise to an image, then denoises it as usual in text2img. </p><p>The ability of img2img to add noise to the original image makes it potentially useful for <a href="/wiki/Data_anonymization" title="Data anonymization">data anonymization</a> and <a href="/wiki/Data_augmentation" title="Data augmentation">data augmentation</a>, in which the visual features of image data are changed and anonymized.<sup id="cite_ref-:1_55-0" class="reference"><a href="#cite_note-:1-55"><span class="cite-bracket">[</span>55<span class="cite-bracket">]</span></a></sup> The same process may also be useful for image upscaling, in which the resolution of an image is increased, with more detail potentially being added to the image.<sup id="cite_ref-:1_55-1" class="reference"><a href="#cite_note-:1-55"><span class="cite-bracket">[</span>55<span class="cite-bracket">]</span></a></sup> Additionally, Stable Diffusion has been experimented with as a tool for image compression. 
Compared to <a href="/wiki/JPEG" title="JPEG">JPEG</a> and <a href="/wiki/WebP" title="WebP">WebP</a>, the recent methods used for image compression in Stable Diffusion face limitations in preserving small text and faces.<sup id="cite_ref-56" class="reference"><a href="#cite_note-56"><span class="cite-bracket">[</span>56<span class="cite-bracket">]</span></a></sup> </p><p>Additional use-cases for image modification via img2img are offered by numerous front-end implementations of the Stable Diffusion model. Inpainting involves selectively modifying a portion of an existing image delineated by a user-provided <a href="/wiki/Layers_(digital_image_editing)#Layer_mask" title="Layers (digital image editing)">layer mask</a>, which fills the masked space with newly generated content based on the provided prompt.<sup id="cite_ref-webui_showcase_50-2" class="reference"><a href="#cite_note-webui_showcase-50"><span class="cite-bracket">[</span>50<span class="cite-bracket">]</span></a></sup> A dedicated model specifically fine-tuned for inpainting use-cases was created by Stability AI alongside the release of Stable Diffusion 2.0.<sup id="cite_ref-release2.0_35-1" class="reference"><a href="#cite_note-release2.0-35"><span class="cite-bracket">[</span>35<span class="cite-bracket">]</span></a></sup> Conversely, outpainting extends an image beyond its original dimensions, filling the previously empty space with content generated based on the provided prompt.<sup id="cite_ref-webui_showcase_50-3" class="reference"><a href="#cite_note-webui_showcase-50"><span class="cite-bracket">[</span>50<span class="cite-bracket">]</span></a></sup> </p><p>A depth-guided model, named "depth2img", was introduced with the release of Stable Diffusion 2.0 on November 24, 2022; this model infers the <a href="/wiki/Depth_map" title="Depth map">depth</a> of the provided input image, and generates a new output image based on both the text prompt and the depth information, which allows the coherence and 
depth of the original input image to be maintained in the generated output.<sup id="cite_ref-release2.0_35-2" class="reference"><a href="#cite_note-release2.0-35"><span class="cite-bracket">[</span>35<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="ControlNet">ControlNet</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=13" title="Edit section: ControlNet"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>ControlNet<sup id="cite_ref-controlnet-paper_57-0" class="reference"><a href="#cite_note-controlnet-paper-57"><span class="cite-bracket">[</span>57<span class="cite-bracket">]</span></a></sup> is a neural network architecture designed to manage diffusion models by incorporating additional conditions. It duplicates the weights of neural network blocks into a "locked" copy and a "trainable" copy. The "trainable" copy learns the desired condition, while the "locked" copy preserves the original model. This approach ensures that training with small datasets of image pairs does not compromise the integrity of production-ready diffusion models. The "zero convolution" is a 1×1 convolution with both weight and bias initialized to zero. Before training, all zero convolutions produce zero output, preventing any distortion caused by ControlNet. No layer is trained from scratch; the process is still fine-tuning, keeping the original model secure. This method enables training on small-scale or even personal devices. 
</p> <div class="mw-heading mw-heading3"><h3 id="User_Interfaces">User Interfaces</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=14" title="Edit section: User Interfaces"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Stability provides an online image generation service called <i>DreamStudio</i>.<sup id="cite_ref-58" class="reference"><a href="#cite_note-58"><span class="cite-bracket">[</span>58<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-59" class="reference"><a href="#cite_note-59"><span class="cite-bracket">[</span>59<span class="cite-bracket">]</span></a></sup> The company also released an open source version of <i>DreamStudio</i> called <i>StableStudio</i>.<sup id="cite_ref-60" class="reference"><a href="#cite_note-60"><span class="cite-bracket">[</span>60<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-61" class="reference"><a href="#cite_note-61"><span class="cite-bracket">[</span>61<span class="cite-bracket">]</span></a></sup> In addition to Stability's interfaces, many third party open source interfaces exist, such as <a href="/wiki/AUTOMATIC1111_Stable_Diffusion_Web_UI" class="mw-redirect" title="AUTOMATIC1111 Stable Diffusion Web UI">AUTOMATIC1111 Stable Diffusion Web UI</a>, which is the most popular and offers extra features,<sup id="cite_ref-" class="reference"><a href="#cite_note-"><span class="cite-bracket">[</span>62<span class="cite-bracket">]</span></a></sup> <i><a href="/wiki/Fooocus" title="Fooocus">Fooocus</a></i>, which aims to decrease the amount of prompting needed by the user,<sup id="cite_ref-63" class="reference"><a href="#cite_note-63"><span class="cite-bracket">[</span>63<span class="cite-bracket">]</span></a></sup> and <i><a href="/wiki/ComfyUI" title="ComfyUI">ComfyUI</a></i>, which has a <a href="/wiki/Node_graph_architecture#Use_in_Computer_Graphics" title="Node graph 
architecture">node-based</a> user interface, essentially a <a href="/wiki/Visual_programming_language" title="Visual programming language">visual programming language</a> akin to many <a href="/wiki/3D_modeling" title="3D modeling">3D modeling</a> applications.<sup id="cite_ref-64" class="reference"><a href="#cite_note-64"><span class="cite-bracket">[</span>64<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-65" class="reference"><a href="#cite_note-65"><span class="cite-bracket">[</span>65<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-66" class="reference"><a href="#cite_note-66"><span class="cite-bracket">[</span>66<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Releases">Releases</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=15" title="Edit section: Releases"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <table class="wikitable"> <caption> </caption> <tbody><tr> <th>Version number </th> <th>Release date </th> <th>Parameters </th> <th>Notes </th></tr> <tr> <td>1.1, 1.2, 1.3, 1.4<sup id="cite_ref-67" class="reference"><a href="#cite_note-67"><span class="cite-bracket">[</span>67<span class="cite-bracket">]</span></a></sup> </td> <td>August 2022 </td> <td> </td> <td>All released by CompVis. There is no "version 1.0". 1.1 gave rise to 1.2, and 1.2 gave rise to both 1.3 and 1.4.<sup id="cite_ref-68" class="reference"><a href="#cite_note-68"><span class="cite-bracket">[</span>68<span class="cite-bracket">]</span></a></sup> </td></tr> <tr> <td>1.5<sup id="cite_ref-69" class="reference"><a href="#cite_note-69"><span class="cite-bracket">[</span>69<span class="cite-bracket">]</span></a></sup> </td> <td>October 2022 </td> <td>983M </td> <td>Initialized with the weights of 1.2, not 1.4. Released by RunwayML. 
</td></tr> <tr> <td>2.0<sup id="cite_ref-:3_70-0" class="reference"><a href="#cite_note-:3-70"><span class="cite-bracket">[</span>70<span class="cite-bracket">]</span></a></sup> </td> <td>November 2022 </td> <td> </td> <td>Retrained from scratch on a filtered dataset.<sup id="cite_ref-71" class="reference"><a href="#cite_note-71"><span class="cite-bracket">[</span>71<span class="cite-bracket">]</span></a></sup> </td></tr> <tr> <td>2.1<sup id="cite_ref-72" class="reference"><a href="#cite_note-72"><span class="cite-bracket">[</span>72<span class="cite-bracket">]</span></a></sup> </td> <td>December 2022 </td> <td> </td> <td>Initialized with the weights of 2.0. </td></tr> <tr> <td>XL 1.0<sup id="cite_ref-73" class="reference"><a href="#cite_note-73"><span class="cite-bracket">[</span>73<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-:4_20-1" class="reference"><a href="#cite_note-:4-20"><span class="cite-bracket">[</span>20<span class="cite-bracket">]</span></a></sup> </td> <td>July 2023 </td> <td>3.5B </td> <td>The XL 1.0 base model has 3.5 billion parameters, making it around 3.5x larger than previous versions.<sup id="cite_ref-74" class="reference"><a href="#cite_note-74"><span class="cite-bracket">[</span>74<span class="cite-bracket">]</span></a></sup> </td></tr> <tr> <td>XL Turbo<sup id="cite_ref-75" class="reference"><a href="#cite_note-75"><span class="cite-bracket">[</span>75<span class="cite-bracket">]</span></a></sup> </td> <td>November 2023 </td> <td> </td> <td>Distilled from XL 1.0 to run in fewer diffusion steps.<sup id="cite_ref-76" class="reference"><a href="#cite_note-76"><span class="cite-bracket">[</span>76<span class="cite-bracket">]</span></a></sup> </td></tr> <tr> <td>3.0<sup id="cite_ref-77" class="reference"><a href="#cite_note-77"><span class="cite-bracket">[</span>77<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-:6_21-1" class="reference"><a href="#cite_note-:6-21"><span class="cite-bracket">[</span>21<span 
class="cite-bracket">]</span></a></sup> </td> <td>February 2024 (early preview) </td> <td>800M to 8B </td> <td>A family of models. </td></tr> <tr> <td>3.5<sup id="cite_ref-release-sd3.5_78-0" class="reference"><a href="#cite_note-release-sd3.5-78"><span class="cite-bracket">[</span>78<span class="cite-bracket">]</span></a></sup> </td> <td>October 2024 </td> <td>2.5B to 8B </td> <td>A family of models with Large (8 billion parameters), Large Turbo (distilled from SD 3.5 Large), and Medium (2.5 billion parameters). </td></tr></tbody></table> <p>Key papers </p> <ul><li><i>Learning Transferable Visual Models From Natural Language Supervision</i> (2021).<sup id="cite_ref-79" class="reference"><a href="#cite_note-79"><span class="cite-bracket">[</span>79<span class="cite-bracket">]</span></a></sup> This paper describes the CLIP method for training text encoders, which convert text into floating point vectors. Such text encodings are used by the diffusion model to create images.</li> <li><i>SDEdit: Guided Image Synthesis and Editing with Stochastic Differential Equations</i> (2021).<sup id="cite_ref-:10_54-1" class="reference"><a href="#cite_note-:10-54"><span class="cite-bracket">[</span>54<span class="cite-bracket">]</span></a></sup> This paper describes SDEdit, aka "img2img".</li> <li><i>High-Resolution Image Synthesis with Latent Diffusion Models</i> (2021, updated in 2022).<sup id="cite_ref-80" class="reference"><a href="#cite_note-80"><span class="cite-bracket">[</span>80<span class="cite-bracket">]</span></a></sup> This paper describes the latent diffusion model (LDM). 
This is the backbone of the Stable Diffusion architecture.</li> <li><i>Classifier-Free Diffusion Guidance</i> (2022).<sup id="cite_ref-:5_29-2" class="reference"><a href="#cite_note-:5-29"><span class="cite-bracket">[</span>29<span class="cite-bracket">]</span></a></sup> This paper describes CFG, which allows the text encoding vector to steer the diffusion model towards creating the image described by the text.</li> <li><i>SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis</i> (2023).<sup id="cite_ref-:4_20-2" class="reference"><a href="#cite_note-:4-20"><span class="cite-bracket">[</span>20<span class="cite-bracket">]</span></a></sup> Describes SDXL.</li> <li><i>Flow Straight and Fast: Learning to Generate and Transfer Data with Rectified Flow</i> (2022).<sup id="cite_ref-:7_22-1" class="reference"><a href="#cite_note-:7-22"><span class="cite-bracket">[</span>22<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-:8_23-1" class="reference"><a href="#cite_note-:8-23"><span class="cite-bracket">[</span>23<span class="cite-bracket">]</span></a></sup> Describes rectified flow, which is used for the backbone architecture of SD 3.0.</li> <li><i>Scaling Rectified Flow Transformers for High-resolution Image Synthesis</i> (2024).<sup id="cite_ref-:6_21-2" class="reference"><a href="#cite_note-:6-21"><span class="cite-bracket">[</span>21<span class="cite-bracket">]</span></a></sup> Describes SD 3.0.</li></ul> <p>Training cost </p> <ul><li>SD 2.0: 0.2 million hours on A100 (40GB).<sup id="cite_ref-:3_70-1" class="reference"><a href="#cite_note-:3-70"><span class="cite-bracket">[</span>70<span class="cite-bracket">]</span></a></sup></li></ul> <div class="mw-heading mw-heading2"><h2 id="Usage_and_controversy">Usage and controversy</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=16" title="Edit section: Usage and 
controversy"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Stable Diffusion claims no rights on generated images and freely gives users the rights of usage to any generated images from the model provided that the image content is not illegal or harmful to individuals.<sup id="cite_ref-81" class="reference"><a href="#cite_note-81"><span class="cite-bracket">[</span>81<span class="cite-bracket">]</span></a></sup> </p><p>The images Stable Diffusion was trained on have been filtered without human input, leading to some harmful images and large amounts of private and sensitive information appearing in the training data.<sup id="cite_ref-:2_26-1" class="reference"><a href="#cite_note-:2-26"><span class="cite-bracket">[</span>26<span class="cite-bracket">]</span></a></sup> </p><p>More traditional visual artists have expressed concern that widespread usage of image synthesis software such as Stable Diffusion may eventually lead to human artists, along with photographers, models, cinematographers, and actors, gradually losing commercial viability against AI-based competitors.<sup id="cite_ref-MIT-LAION_82-0" class="reference"><a href="#cite_note-MIT-LAION-82"><span class="cite-bracket">[</span>82<span class="cite-bracket">]</span></a></sup> </p><p>Stable Diffusion is notably more permissive in the types of content users may generate, such as violent or sexually explicit imagery, in comparison to other commercial products based on generative AI.<sup id="cite_ref-bijapan_83-0" class="reference"><a href="#cite_note-bijapan-83"><span class="cite-bracket">[</span>83<span class="cite-bracket">]</span></a></sup> Addressing the concerns that the model may be used for abusive purposes, CEO of Stability AI, <a href="/wiki/Emad_Mostaque" title="Emad Mostaque">Emad Mostaque</a>, argues that "[it is] peoples' responsibility as to whether they are ethical, moral, and legal in how they operate this technology",<sup id="cite_ref-verge_10-2" 
class="reference"><a href="#cite_note-verge-10"><span class="cite-bracket">[</span>10<span class="cite-bracket">]</span></a></sup> and that putting the capabilities of Stable Diffusion into the hands of the public would result in the technology providing a net benefit, in spite of the potential negative consequences.<sup id="cite_ref-verge_10-3" class="reference"><a href="#cite_note-verge-10"><span class="cite-bracket">[</span>10<span class="cite-bracket">]</span></a></sup> In addition, Mostaque argues that the intention behind the open availability of Stable Diffusion is to end corporate control and dominance over such technologies, who have previously only developed closed AI systems for image synthesis.<sup id="cite_ref-verge_10-4" class="reference"><a href="#cite_note-verge-10"><span class="cite-bracket">[</span>10<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-bijapan_83-1" class="reference"><a href="#cite_note-bijapan-83"><span class="cite-bracket">[</span>83<span class="cite-bracket">]</span></a></sup> This is reflected by the fact that any restrictions Stability AI places on the content that users may generate can easily be bypassed due to the availability of the source code.<sup id="cite_ref-:13_84-0" class="reference"><a href="#cite_note-:13-84"><span class="cite-bracket">[</span>84<span class="cite-bracket">]</span></a></sup> </p><p>Controversy around photorealistic <a href="/wiki/Lolicon" title="Lolicon">sexualized depictions of underage characters</a> have been brought up, due to such images generated by Stable Diffusion being shared on websites such as <a href="/wiki/Pixiv" title="Pixiv">Pixiv</a>.<sup id="cite_ref-85" class="reference"><a href="#cite_note-85"><span class="cite-bracket">[</span>85<span class="cite-bracket">]</span></a></sup> </p><p>In June of 2024, a <a href="/wiki/ComfyUI#LLMVision_extension_compromise" title="ComfyUI">hack on an extension of ComfyUI</a>, a user interface for Stable Diffusion, took place, with the 
hackers claiming they targeted users who committed "one of our sins", which included AI-art generation, art theft, and promoting cryptocurrency.<sup id="cite_ref-86" class="reference"><a href="#cite_note-86"><span class="cite-bracket">[</span>86<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Litigation">Litigation</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=17" title="Edit section: Litigation"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <div class="mw-heading mw-heading3"><h3 id="Andersen,_McKernan,_and_Ortiz_v._Stability_AI,_Midjourney,_and_DeviantArt"><span id="Andersen.2C_McKernan.2C_and_Ortiz_v._Stability_AI.2C_Midjourney.2C_and_DeviantArt"></span>Andersen, McKernan, and Ortiz v. Stability AI, Midjourney, and DeviantArt</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=18" title="Edit section: Andersen, McKernan, and Ortiz v. 
Stability AI, Midjourney, and DeviantArt"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>In January 2023, three artists, <a href="/wiki/Sarah_Andersen" title="Sarah Andersen">Sarah Andersen</a>, <a href="/wiki/Kelly_McKernan" title="Kelly McKernan">Kelly McKernan</a>, and Karla Ortiz, filed a <a href="/wiki/Copyright_infringement" title="Copyright infringement">copyright infringement</a> lawsuit against Stability AI, <a href="/wiki/Midjourney" title="Midjourney">Midjourney</a>, and <a href="/wiki/DeviantArt" title="DeviantArt">DeviantArt</a>, claiming that these companies have infringed the rights of millions of artists by training AI tools on five billion images scraped from the web without the consent of the original artists.<sup id="cite_ref-87" class="reference"><a href="#cite_note-87"><span class="cite-bracket">[</span>87<span class="cite-bracket">]</span></a></sup> </p><p>In July 2023, U.S. District Judge <a href="/wiki/William_Orrick_III" title="William Orrick III">William Orrick</a> was inclined to dismiss most of the lawsuit filed by Andersen, McKernan, and Ortiz but allowed them to file a new complaint, providing them an opportunity to reframe their arguments.<sup id="cite_ref-Reuters-SDLawsuit_88-0" class="reference"><a href="#cite_note-Reuters-SDLawsuit-88"><span class="cite-bracket">[</span>88<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Getty_Images_v._Stability_AI">Getty Images v. Stability AI</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=19" title="Edit section: Getty Images v. 
Stability AI"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>In January 2023, <a href="/wiki/Getty_Images" title="Getty Images">Getty Images</a> initiated legal proceedings against Stability AI in the English High Court, alleging significant infringement of its intellectual property rights. Getty Images claims that Stability AI "scraped" millions of images from Getty’s websites without consent and used these images to train and develop its deep-learning Stable Diffusion model.<sup id="cite_ref-89" class="reference"><a href="#cite_note-89"><span class="cite-bracket">[</span>89<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-90" class="reference"><a href="#cite_note-90"><span class="cite-bracket">[</span>90<span class="cite-bracket">]</span></a></sup> </p><p>Key points of the lawsuit include: </p> <ul><li>Getty Images asserting that the training and development of Stable Diffusion involved the unauthorized use of its images, which were downloaded on servers and computers that were potentially in the UK. However, Stability AI argues that all training and development took place outside the UK, specifically in U.S. data centers operated by Amazon Web Services.<sup id="cite_ref-91" class="reference"><a href="#cite_note-91"><span class="cite-bracket">[</span>91<span class="cite-bracket">]</span></a></sup></li> <li>Stability AI applied for reverse summary judgment and/or strike out of two claims: the training and development claim, and the secondary infringement of copyright claim. The High Court, however, refused to strike out these claims, allowing them to proceed to trial. 
The court is to determine whether the training and development of Stable Diffusion occurred in the UK, which is crucial for establishing jurisdiction under the UK's Copyright, Designs and Patents Act 1988 (CDPA).<sup id="cite_ref-pinsentmasons2024GettyvsStabilityAI_92-0" class="reference"><a href="#cite_note-pinsentmasons2024GettyvsStabilityAI-92"><span class="cite-bracket">[</span>92<span class="cite-bracket">]</span></a></sup></li> <li>The secondary infringement claim revolves around whether the pre-trained Stable Diffusion software, made available in the UK through platforms like GitHub, HuggingFace, and DreamStudio, constitutes an "article" under sections 22 and 23 of the CDPA. The court will decide whether the term "article" can encompass intangible items such as software.<sup id="cite_ref-pinsentmasons2024GettyvsStabilityAI_92-1" class="reference"><a href="#cite_note-pinsentmasons2024GettyvsStabilityAI-92"><span class="cite-bracket">[</span>92<span class="cite-bracket">]</span></a></sup></li></ul> <p>The trial is expected to take place in summer 2025 and has significant implications for UK copyright law and the licensing of AI-generated content. 
</p> <div class="mw-heading mw-heading2"><h2 id="License">License</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=20" title="Edit section: License"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Unlike models like <a href="/wiki/DALL-E" title="DALL-E">DALL-E</a>, Stable Diffusion makes its <a href="/wiki/Source-available_software" title="Source-available software">source code available</a>,<sup id="cite_ref-stability_93-0" class="reference"><a href="#cite_note-stability-93"><span class="cite-bracket">[</span>93<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-stable-diffusion-github_8-8" class="reference"><a href="#cite_note-stable-diffusion-github-8"><span class="cite-bracket">[</span>8<span class="cite-bracket">]</span></a></sup> along with the model (pretrained weights). Prior to Stable Diffusion 3, it applied the Creative ML OpenRAIL-M license, a form of Responsible AI License (RAIL), to the model (M).<sup id="cite_ref-94" class="reference"><a href="#cite_note-94"><span class="cite-bracket">[</span>94<span class="cite-bracket">]</span></a></sup> The license prohibits certain use cases, including crime, <a href="/wiki/Libel" class="mw-redirect" title="Libel">libel</a>, <a href="/wiki/Harassment" title="Harassment">harassment</a>, <a href="/wiki/Doxing" title="Doxing">doxing</a>, "<a href="/wiki/Child_pornography" title="Child pornography">exploiting ... minors</a>", giving medical advice, automatically creating legal obligations, producing legal evidence, and "discriminating against or harming individuals or groups based on ... social behavior or ... personal or personality characteristics ... 
[or] <a href="/wiki/Anti-discrimination_law" title="Anti-discrimination law">legally protected characteristics or categories</a>".<sup id="cite_ref-washingtonpost_95-0" class="reference"><a href="#cite_note-washingtonpost-95"><span class="cite-bracket">[</span>95<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-96" class="reference"><a href="#cite_note-96"><span class="cite-bracket">[</span>96<span class="cite-bracket">]</span></a></sup> The user owns the rights to their generated output images, and is free to use them commercially.<sup id="cite_ref-97" class="reference"><a href="#cite_note-97"><span class="cite-bracket">[</span>97<span class="cite-bracket">]</span></a></sup> </p><p>Stable Diffusion 3.5 applies the permissive Stability AI Community License while commercial enterprises with revenue exceeding $1 million need the Stability AI Enterprise License.<sup id="cite_ref-98" class="reference"><a href="#cite_note-98"><span class="cite-bracket">[</span>98<span class="cite-bracket">]</span></a></sup> As with the OpenRAIL-M license, the user retains the rights to their generated output images and is free to use them commercially.<sup id="cite_ref-release-sd3.5_78-1" class="reference"><a href="#cite_note-release-sd3.5-78"><span class="cite-bracket">[</span>78<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="See_also">See also</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=21" title="Edit section: See also"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <ul><li><a href="/wiki/Artificial_intelligence_art" title="Artificial intelligence art">Artificial intelligence art</a></li> <li><a href="/wiki/Midjourney" title="Midjourney">Midjourney</a></li> <li><a href="/wiki/Craiyon" class="mw-redirect" title="Craiyon">Craiyon</a></li> <li><a href="/wiki/Hugging_Face" title="Hugging Face">Hugging 
Face</a></li> <li><a href="/wiki/Imagen_(Google_Brain)" class="mw-redirect" title="Imagen (Google Brain)">Imagen (Google Brain)</a></li></ul> <div class="mw-heading mw-heading2"><h2 id="References">References</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=22" title="Edit section: References"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1239543626">.mw-parser-output .reflist{margin-bottom:0.5em;list-style-type:decimal}@media screen{.mw-parser-output .reflist{font-size:90%}}.mw-parser-output .reflist .references{font-size:100%;margin-bottom:0;list-style-type:inherit}.mw-parser-output .reflist-columns-2{column-width:30em}.mw-parser-output .reflist-columns-3{column-width:25em}.mw-parser-output .reflist-columns{margin-top:0.3em}.mw-parser-output .reflist-columns ol{margin-top:0}.mw-parser-output .reflist-columns li{page-break-inside:avoid;break-inside:avoid-column}.mw-parser-output .reflist-upper-alpha{list-style-type:upper-alpha}.mw-parser-output .reflist-upper-roman{list-style-type:upper-roman}.mw-parser-output .reflist-lower-alpha{list-style-type:lower-alpha}.mw-parser-output .reflist-lower-greek{list-style-type:lower-greek}.mw-parser-output .reflist-lower-roman{list-style-type:lower-roman}</style><div class="reflist"> <div class="mw-references-wrap mw-references-columns"><ol class="references"> <li id="cite_note-release-version-1"><span class="mw-cite-backlink"><b><a href="#cite_ref-release-version_1-0">^</a></b></span> <span class="reference-text"><style data-mw-deduplicate="TemplateStyles:r1238218222">.mw-parser-output cite.citation{font-style:inherit;word-wrap:break-word}.mw-parser-output .citation q{quotes:"\"""\"""'""'"}.mw-parser-output .citation:target{background-color:rgba(0,127,255,0.133)}.mw-parser-output .id-lock-free.id-lock-free 
a{background:url("//upload.wikimedia.org/wikipedia/commons/6/65/Lock-green.svg")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-limited.id-lock-limited a,.mw-parser-output .id-lock-registration.id-lock-registration a{background:url("//upload.wikimedia.org/wikipedia/commons/d/d6/Lock-gray-alt-2.svg")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-subscription.id-lock-subscription a{background:url("//upload.wikimedia.org/wikipedia/commons/a/aa/Lock-red-alt-2.svg")right 0.1em center/9px no-repeat}.mw-parser-output .cs1-ws-icon a{background:url("//upload.wikimedia.org/wikipedia/commons/4/4c/Wikisource-logo.svg")right 0.1em center/12px no-repeat}body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-free a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-limited a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-registration a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-subscription a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .cs1-ws-icon a{background-size:contain;padding:0 1em 0 0}.mw-parser-output .cs1-code{color:inherit;background:inherit;border:none;padding:inherit}.mw-parser-output .cs1-hidden-error{display:none;color:var(--color-error,#d33)}.mw-parser-output .cs1-visible-error{color:var(--color-error,#d33)}.mw-parser-output .cs1-maint{display:none;color:#085;margin-left:0.3em}.mw-parser-output .cs1-kern-left{padding-left:0.2em}.mw-parser-output .cs1-kern-right{padding-right:0.2em}.mw-parser-output .citation .mw-selflink{font-weight:inherit}@media screen{.mw-parser-output .cs1-format{font-size:95%}html.skin-theme-clientpref-night .mw-parser-output .cs1-maint{color:#18911f}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .cs1-maint{color:#18911f}}</style><cite class="citation web cs1"><a rel="nofollow" class="external text" 
href="https://stability.ai/news/introducing-stable-diffusion-3-5">"Stable Diffusion 3.5"</a>. <i><a href="/wiki/Stability_AI" title="Stability AI">Stability AI</a></i>. <a rel="nofollow" class="external text" href="https://archive.today/20241023040750/https://stability.ai/news/introducing-stable-diffusion-3-5">Archived</a> from the original on October 23, 2024<span class="reference-accessdate">. Retrieved <span class="nowrap">October 23,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Stability+AI&rft.atitle=Stable+Diffusion+3.5&rft_id=https%3A%2F%2Fstability.ai%2Fnews%2Fintroducing-stable-diffusion-3-5&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-2"><span class="mw-cite-backlink"><b><a href="#cite_ref-2">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRyan_O'Connor2022" class="citation web cs1">Ryan O'Connor (August 23, 2022). <a rel="nofollow" class="external text" href="https://www.assemblyai.com/blog/how-to-run-stable-diffusion-locally-to-generate-images/">"How to Run Stable Diffusion Locally to Generate Images"</a>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20231013123717/https://www.assemblyai.com/blog/how-to-run-stable-diffusion-locally-to-generate-images/">Archived</a> from the original on October 13, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">May 4,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=How+to+Run+Stable+Diffusion+Locally+to+Generate+Images&rft.date=2022-08-23&rft.au=Ryan+O%27Connor&rft_id=https%3A%2F%2Fwww.assemblyai.com%2Fblog%2Fhow-to-run-stable-diffusion-locally-to-generate-images%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-:0-3"><span class="mw-cite-backlink"><b><a href="#cite_ref-:0_3-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/spaces/huggingface/diffuse-the-rest">"Diffuse The Rest - a Hugging Face Space by huggingface"</a>. <i>huggingface.co</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220905141431/https://huggingface.co/spaces/huggingface/diffuse-the-rest">Archived</a> from the original on September 5, 2022<span class="reference-accessdate">. 
Retrieved <span class="nowrap">September 5,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=Diffuse+The+Rest+-+a+Hugging+Face+Space+by+huggingface&rft_id=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fhuggingface%2Fdiffuse-the-rest&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-sifted_financialtimes-4"><span class="mw-cite-backlink"><b><a href="#cite_ref-sifted_financialtimes_4-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://sifted.eu/articles/stability-ai-fundraise-leak">"Leaked deck raises questions over Stability AI's Series A pitch to investors"</a>. <i>sifted.eu</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230629201917/https://sifted.eu/articles/stability-ai-fundraise-leak">Archived</a> from the original on June 29, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">June 20,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=sifted.eu&rft.atitle=Leaked+deck+raises+questions+over+Stability+AI%27s+Series+A+pitch+to+investors&rft_id=https%3A%2F%2Fsifted.eu%2Farticles%2Fstability-ai-fundraise-leak&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-lmu_lauch-5"><span class="mw-cite-backlink"><b><a href="#cite_ref-lmu_lauch_5-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.lmu.de/en/newsroom/news-overview/news/revolutionizing-image-generation-by-ai-turning-text-into-images.html">"Revolutionizing image generation by AI: Turning text into images"</a>. <i>www.lmu.de</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220917200820/https://www.lmu.de/en/newsroom/news-overview/news/revolutionizing-image-generation-by-ai-turning-text-into-images.html">Archived</a> from the original on September 17, 2022<span class="reference-accessdate">. 
Retrieved <span class="nowrap">June 21,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=www.lmu.de&rft.atitle=Revolutionizing+image+generation+by+AI%3A+Turning+text+into+images&rft_id=https%3A%2F%2Fwww.lmu.de%2Fen%2Fnewsroom%2Fnews-overview%2Fnews%2Frevolutionizing-image-generation-by-ai-turning-text-into-images.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-6"><span class="mw-cite-backlink"><b><a href="#cite_ref-6">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMostaque2022" class="citation web cs1">Mostaque, Emad (November 2, 2022). <a rel="nofollow" class="external text" href="https://twitter.com/EMostaque/status/1587844074064822274?lang=en">"Stable Diffusion came from the Machine Vision & Learning research group (CompVis) @LMU_Muenchen"</a>. <i>Twitter</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230720002303/https://twitter.com/EMostaque/status/1587844074064822274?lang=en">Archived</a> from the original on July 20, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">June 22,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Twitter&rft.atitle=Stable+Diffusion+came+from+the+Machine+Vision+%26+Learning+research+group+%28CompVis%29+%40LMU_Muenchen&rft.date=2022-11-02&rft.aulast=Mostaque&rft.aufirst=Emad&rft_id=https%3A%2F%2Ftwitter.com%2FEMostaque%2Fstatus%2F1587844074064822274%3Flang%3Den&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-stable-diffusion-launch-7"><span class="mw-cite-backlink">^ <a href="#cite_ref-stable-diffusion-launch_7-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-stable-diffusion-launch_7-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-stable-diffusion-launch_7-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-stable-diffusion-launch_7-3"><sup><i><b>d</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://stability.ai/blog/stable-diffusion-announcement">"Stable Diffusion Launch Announcement"</a>. <i>Stability.Ai</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220905105009/https://stability.ai/blog/stable-diffusion-announcement">Archived</a> from the original on September 5, 2022<span class="reference-accessdate">. 
Retrieved <span class="nowrap">September 6,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Stability.Ai&rft.atitle=Stable+Diffusion+Launch+Announcement&rft_id=https%3A%2F%2Fstability.ai%2Fblog%2Fstable-diffusion-announcement&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-stable-diffusion-github-8"><span class="mw-cite-backlink">^ <a href="#cite_ref-stable-diffusion-github_8-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-stable-diffusion-github_8-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-stable-diffusion-github_8-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-stable-diffusion-github_8-3"><sup><i><b>d</b></i></sup></a> <a href="#cite_ref-stable-diffusion-github_8-4"><sup><i><b>e</b></i></sup></a> <a href="#cite_ref-stable-diffusion-github_8-5"><sup><i><b>f</b></i></sup></a> <a href="#cite_ref-stable-diffusion-github_8-6"><sup><i><b>g</b></i></sup></a> <a href="#cite_ref-stable-diffusion-github_8-7"><sup><i><b>h</b></i></sup></a> <a href="#cite_ref-stable-diffusion-github_8-8"><sup><i><b>i</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://github.com/CompVis/stable-diffusion">"Stable Diffusion Repository on GitHub"</a>. CompVis - Machine Vision and Learning Research Group, LMU Munich. September 17, 2022. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230118183342/https://github.com/CompVis/stable-diffusion">Archived</a> from the original on January 18, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">September 17,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Stable+Diffusion+Repository+on+GitHub&rft.pub=CompVis+-+Machine+Vision+and+Learning+Research+Group%2C+LMU+Munich&rft.date=2022-09-17&rft_id=https%3A%2F%2Fgithub.com%2FCompVis%2Fstable-diffusion&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-pcworld-9"><span class="mw-cite-backlink"><b><a href="#cite_ref-pcworld_9-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.pcworld.com/article/916785/creating-ai-art-local-pc-stable-diffusion.html">"The new killer app: Creating AI art will absolutely crush your PC"</a>. <i>PCWorld</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220831065139/https://www.pcworld.com/article/916785/creating-ai-art-local-pc-stable-diffusion.html">Archived</a> from the original on August 31, 2022<span class="reference-accessdate">. 
Retrieved <span class="nowrap">August 31,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=PCWorld&rft.atitle=The+new+killer+app%3A+Creating+AI+art+will+absolutely+crush+your+PC&rft_id=https%3A%2F%2Fwww.pcworld.com%2Farticle%2F916785%2Fcreating-ai-art-local-pc-stable-diffusion.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-verge-10"><span class="mw-cite-backlink">^ <a href="#cite_ref-verge_10-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-verge_10-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-verge_10-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-verge_10-3"><sup><i><b>d</b></i></sup></a> <a href="#cite_ref-verge_10-4"><sup><i><b>e</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFVincent2022" class="citation web cs1">Vincent, James (September 15, 2022). <a rel="nofollow" class="external text" href="https://www.theverge.com/2022/9/15/23340673/ai-image-generation-stable-diffusion-explained-ethics-copyright-data">"Anyone can use this AI art generator — that's the risk"</a>. <i>The Verge</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230121153021/https://www.theverge.com/2022/9/15/23340673/ai-image-generation-stable-diffusion-explained-ethics-copyright-data">Archived</a> from the original on January 21, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">September 30,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=The+Verge&rft.atitle=Anyone+can+use+this+AI+art+generator+%E2%80%94+that%27s+the+risk&rft.date=2022-09-15&rft.aulast=Vincent&rft.aufirst=James&rft_id=https%3A%2F%2Fwww.theverge.com%2F2022%2F9%2F15%2F23340673%2Fai-image-generation-stable-diffusion-explained-ethics-copyright-data&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-:9-11"><span class="mw-cite-backlink"><b><a href="#cite_ref-:9_11-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://github.com/CompVis/latent-diffusion">"CompVis/Latent-diffusion"</a>. <i><a href="/wiki/GitHub" title="GitHub">GitHub</a></i>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=GitHub&rft.atitle=CompVis%2FLatent-diffusion&rft_id=https%3A%2F%2Fgithub.com%2FCompVis%2Flatent-diffusion&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-12"><span class="mw-cite-backlink"><b><a href="#cite_ref-12">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://stability.ai/news/stable-diffusion-3-research-paper">"Stable Diffusion 3: Research Paper"</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Stable+Diffusion+3%3A+Research+Paper&rft_id=https%3A%2F%2Fstability.ai%2Fnews%2Fstable-diffusion-3-research-paper&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" 
class="Z3988"></span></span> </li> <li id="cite_note-13"><span class="mw-cite-backlink"><b><a href="#cite_ref-13">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://ommer-lab.com/">"Home"</a>. <i>Computer Vision & Learning Group</i><span class="reference-accessdate">. Retrieved <span class="nowrap">September 5,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Computer+Vision+%26+Learning+Group&rft.atitle=Home&rft_id=https%3A%2F%2Fommer-lab.com%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-paper-14"><span class="mw-cite-backlink">^ <a href="#cite_ref-paper_14-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-paper_14-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-paper_14-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRombachBlattmannLorenzEsser2022" class="citation conference cs1">Rombach; Blattmann; Lorenz; Esser; Ommer (June 2022). <a rel="nofollow" class="external text" href="https://openaccess.thecvf.com/content/CVPR2022/papers/Rombach_High-Resolution_Image_Synthesis_With_Latent_Diffusion_Models_CVPR_2022_paper.pdf"><i>High-Resolution Image Synthesis with Latent Diffusion Models</i></a> <span class="cs1-format">(PDF)</span>. International Conference on Computer Vision and Pattern Recognition (CVPR). New Orleans, LA. pp. 10684–10695. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2112.10752">2112.10752</a></span>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20230120163151/https://openaccess.thecvf.com/content/CVPR2022/papers/Rombach_High-Resolution_Image_Synthesis_With_Latent_Diffusion_Models_CVPR_2022_paper.pdf">Archived</a> <span class="cs1-format">(PDF)</span> from the original on January 20, 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">September 17,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=conference&rft.btitle=High-Resolution+Image+Synthesis+with+Latent+Diffusion+Models&rft.place=New+Orleans%2C+LA&rft.pages=10684-10695&rft.date=2022-06&rft_id=info%3Aarxiv%2F2112.10752&rft.au=Rombach&rft.au=Blattmann&rft.au=Lorenz&rft.au=Esser&rft.au=Ommer&rft_id=https%3A%2F%2Fopenaccess.thecvf.com%2Fcontent%2FCVPR2022%2Fpapers%2FRombach_High-Resolution_Image_Synthesis_With_Latent_Diffusion_Models_CVPR_2022_paper.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-:02-15"><span class="mw-cite-backlink">^ <a href="#cite_ref-:02_15-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:02_15-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-:02_15-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-:02_15-3"><sup><i><b>d</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAlammar" class="citation web cs1">Alammar, Jay. <a rel="nofollow" class="external text" href="https://jalammar.github.io/illustrated-stable-diffusion/">"The Illustrated Stable Diffusion"</a>. <i>jalammar.github.io</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20221101104342/https://jalammar.github.io/illustrated-stable-diffusion/">Archived</a> from the original on November 1, 2022<span class="reference-accessdate">. 
Retrieved <span class="nowrap">October 31,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=jalammar.github.io&rft.atitle=The+Illustrated+Stable+Diffusion&rft.aulast=Alammar&rft.aufirst=Jay&rft_id=https%3A%2F%2Fjalammar.github.io%2Fillustrated-stable-diffusion%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-16"><span class="mw-cite-backlink"><b><a href="#cite_ref-16">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDavid" class="citation book cs1">David, Foster. "8. Diffusion Models". <i>Generative Deep Learning</i> (2 ed.). O'Reilly.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=8.+Diffusion+Models&rft.btitle=Generative+Deep+Learning&rft.edition=2&rft.pub=O%27Reilly&rft.aulast=David&rft.aufirst=Foster&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-17"><span class="mw-cite-backlink"><b><a href="#cite_ref-17">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFJascha_Sohl-Dickstein,_Eric_A._Weiss,_Niru_Maheswaranathan,_Surya_Ganguli2015" class="citation arxiv cs1">Jascha Sohl-Dickstein, Eric A. Weiss, Niru Maheswaranathan, Surya Ganguli (March 12, 2015). "Deep Unsupervised Learning using Nonequilibrium Thermodynamics". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/1503.03585">1503.03585</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.LG">cs.LG</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Deep+Unsupervised+Learning+using+Nonequilibrium+Thermodynamics&rft.date=2015-03-12&rft_id=info%3Aarxiv%2F1503.03585&rft.au=Jascha+Sohl-Dickstein%2C+Eric+A.+Weiss%2C+Niru+Maheswaranathan%2C+Surya+Ganguli&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span><span class="cs1-maint citation-comment"><code class="cs1-code">{{<a href="/wiki/Template:Cite_arXiv" title="Template:Cite arXiv">cite arXiv</a>}}</code>: CS1 maint: multiple names: authors list (<a href="/wiki/Category:CS1_maint:_multiple_names:_authors_list" title="Category:CS1 maint: multiple names: authors list">link</a>)</span></span> </li> <li id="cite_note-18"><span class="mw-cite-backlink"><b><a href="#cite_ref-18">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/docs/diffusers/v0.5.1/en/api/pipelines/stable_diffusion">"Stable diffusion pipelines"</a>. <i>huggingface.co</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230625030241/https://huggingface.co/docs/diffusers/v0.5.1/en/api/pipelines/stable_diffusion">Archived</a> from the original on June 25, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">June 22,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=Stable+diffusion+pipelines&rft_id=https%3A%2F%2Fhuggingface.co%2Fdocs%2Fdiffusers%2Fv0.5.1%2Fen%2Fapi%2Fpipelines%2Fstable_diffusion&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-19"><span class="mw-cite-backlink"><b><a href="#cite_ref-19">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://docs.openvino.ai/2023.3/notebooks/225-stable-diffusion-text-to-image-with-output.html">"Text-to-Image Generation with Stable Diffusion and OpenVINO™"</a>. <i>openvino.ai</i>. <a href="/wiki/Intel" title="Intel">Intel</a><span class="reference-accessdate">. Retrieved <span class="nowrap">February 10,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=openvino.ai&rft.atitle=Text-to-Image+Generation+with+Stable+Diffusion+and+OpenVINO%E2%84%A2&rft_id=https%3A%2F%2Fdocs.openvino.ai%2F2023.3%2Fnotebooks%2F225-stable-diffusion-text-to-image-with-output.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-:4-20"><span class="mw-cite-backlink">^ <a href="#cite_ref-:4_20-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:4_20-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-:4_20-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFPodellEnglishLaceyBlattmann2023" class="citation arxiv cs1">Podell, Dustin; English, Zion; Lacey, Kyle; Blattmann, Andreas; Dockhorn, Tim; Müller, Jonas; Penna, Joe; 
Rombach, Robin (July 4, 2023). "SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2307.01952">2307.01952</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=SDXL%3A+Improving+Latent+Diffusion+Models+for+High-Resolution+Image+Synthesis&rft.date=2023-07-04&rft_id=info%3Aarxiv%2F2307.01952&rft.aulast=Podell&rft.aufirst=Dustin&rft.au=English%2C+Zion&rft.au=Lacey%2C+Kyle&rft.au=Blattmann%2C+Andreas&rft.au=Dockhorn%2C+Tim&rft.au=M%C3%BCller%2C+Jonas&rft.au=Penna%2C+Joe&rft.au=Rombach%2C+Robin&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-:6-21"><span class="mw-cite-backlink">^ <a href="#cite_ref-:6_21-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:6_21-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-:6_21-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFEsserKulalBlattmannEntezari2024" class="citation cs2">Esser, Patrick; Kulal, Sumith; Blattmann, Andreas; Entezari, Rahim; Müller, Jonas; Saini, Harry; Levi, Yam; Lorenz, Dominik; Sauer, Axel (March 5, 2024), <i>Scaling Rectified Flow Transformers for High-Resolution Image Synthesis</i>, <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2403.03206">2403.03206</a></span></cite><span 
title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Scaling+Rectified+Flow+Transformers+for+High-Resolution+Image+Synthesis&rft.date=2024-03-05&rft_id=info%3Aarxiv%2F2403.03206&rft.aulast=Esser&rft.aufirst=Patrick&rft.au=Kulal%2C+Sumith&rft.au=Blattmann%2C+Andreas&rft.au=Entezari%2C+Rahim&rft.au=M%C3%BCller%2C+Jonas&rft.au=Saini%2C+Harry&rft.au=Levi%2C+Yam&rft.au=Lorenz%2C+Dominik&rft.au=Sauer%2C+Axel&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-:7-22"><span class="mw-cite-backlink">^ <a href="#cite_ref-:7_22-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:7_22-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLiuGongLiu2022" class="citation cs2">Liu, Xingchao; Gong, Chengyue; Liu, Qiang (September 7, 2022), <i>Flow Straight and Fast: Learning to Generate and Transfer Data with Rectified Flow</i>, <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2209.03003">2209.03003</a></span></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Flow+Straight+and+Fast%3A+Learning+to+Generate+and+Transfer+Data+with+Rectified+Flow&rft.date=2022-09-07&rft_id=info%3Aarxiv%2F2209.03003&rft.aulast=Liu&rft.aufirst=Xingchao&rft.au=Gong%2C+Chengyue&rft.au=Liu%2C+Qiang&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-:8-23"><span class="mw-cite-backlink">^ <a href="#cite_ref-:8_23-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:8_23-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" 
href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.cs.utexas.edu/~lqiang/rectflow/html/intro.html">"Rectified Flow — Rectified Flow"</a>. <i>www.cs.utexas.edu</i><span class="reference-accessdate">. Retrieved <span class="nowrap">March 6,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=www.cs.utexas.edu&rft.atitle=Rectified+Flow+%E2%80%94+Rectified+Flow&rft_id=https%3A%2F%2Fwww.cs.utexas.edu%2F~lqiang%2Frectflow%2Fhtml%2Fintro.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-Waxy-24"><span class="mw-cite-backlink">^ <a href="#cite_ref-Waxy_24-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-Waxy_24-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-Waxy_24-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-Waxy_24-3"><sup><i><b>d</b></i></sup></a> <a href="#cite_ref-Waxy_24-4"><sup><i><b>e</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBaio2022" class="citation web cs1">Baio, Andy (August 30, 2022). <a rel="nofollow" class="external text" href="https://waxy.org/2022/08/exploring-12-million-of-the-images-used-to-train-stable-diffusions-image-generator/">"Exploring 12 Million of the 2.3 Billion Images Used to Train Stable Diffusion's Image Generator"</a>. <i>Waxy.org</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230120124332/https://waxy.org/2022/08/exploring-12-million-of-the-images-used-to-train-stable-diffusions-image-generator/">Archived</a> from the original on January 20, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">November 2,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Waxy.org&rft.atitle=Exploring+12+Million+of+the+2.3+Billion+Images+Used+to+Train+Stable+Diffusion%27s+Image+Generator&rft.date=2022-08-30&rft.aulast=Baio&rft.aufirst=Andy&rft_id=https%3A%2F%2Fwaxy.org%2F2022%2F08%2Fexploring-12-million-of-the-images-used-to-train-stable-diffusions-image-generator%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-25"><span class="mw-cite-backlink"><b><a href="#cite_ref-25">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.technologyreview.com/2022/09/16/1059598/this-artist-is-dominating-ai-generated-art-and-hes-not-happy-about-it/">"This artist is dominating AI-generated art. And he's not happy about it"</a>. <i>MIT Technology Review</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230114125952/https://www.technologyreview.com/2022/09/16/1059598/this-artist-is-dominating-ai-generated-art-and-hes-not-happy-about-it/">Archived</a> from the original on January 14, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">November 2,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=MIT+Technology+Review&rft.atitle=This+artist+is+dominating+AI-generated+art.+And+he%27s+not+happy+about+it.&rft_id=https%3A%2F%2Fwww.technologyreview.com%2F2022%2F09%2F16%2F1059598%2Fthis-artist-is-dominating-ai-generated-art-and-hes-not-happy-about-it%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-:2-26"><span class="mw-cite-backlink">^ <a href="#cite_ref-:2_26-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:2_26-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBrunnerHarlan2023" class="citation web cs1">Brunner, Katharina; Harlan, Elisa (July 7, 2023). <a rel="nofollow" class="external text" href="https://interaktiv.br.de/ki-trainingsdaten/en/index.html">"We Are All Raw Material for AI"</a>. Bayerischer Rundfunk (BR). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230912092308/https://interaktiv.br.de/ki-trainingsdaten/en/index.html">Archived</a> from the original on September 12, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">September 12,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=We+Are+All+Raw+Material+for+AI&rft.pub=Bayerischer+Rundfunk+%28BR%29&rft.date=2023-07-07&rft.aulast=Brunner&rft.aufirst=Katharina&rft.au=Harlan%2C+Elisa&rft_id=https%3A%2F%2Finteraktiv.br.de%2Fki-trainingsdaten%2Fen%2Findex.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-27"><span class="mw-cite-backlink"><b><a href="#cite_ref-27">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSchuhmann2022" class="citation cs2">Schuhmann, Christoph (November 2, 2022), <a rel="nofollow" class="external text" href="https://github.com/christophschuhmann/improved-aesthetic-predictor"><i>CLIP+MLP Aesthetic Score Predictor</i></a>, <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230608005334/http://github.com/christophschuhmann/improved-aesthetic-predictor/">archived</a> from the original on June 8, 2023<span class="reference-accessdate">, retrieved <span class="nowrap">November 2,</span> 2022</span></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=CLIP%2BMLP+Aesthetic+Score+Predictor&rft.date=2022-11-02&rft.aulast=Schuhmann&rft.aufirst=Christoph&rft_id=https%3A%2F%2Fgithub.com%2Fchristophschuhmann%2Fimproved-aesthetic-predictor&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-LAION-Aesthetics-28"><span class="mw-cite-backlink"><b><a href="#cite_ref-LAION-Aesthetics_28-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" 
href="https://laion.ai/blog/laion-aesthetics">"LAION-Aesthetics | LAION"</a>. <i>laion.ai</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220826121216/https://laion.ai/blog/laion-aesthetics/">Archived</a> from the original on August 26, 2022<span class="reference-accessdate">. Retrieved <span class="nowrap">September 2,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=laion.ai&rft.atitle=LAION-Aesthetics+%7C+LAION&rft_id=https%3A%2F%2Flaion.ai%2Fblog%2Flaion-aesthetics&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-:5-29"><span class="mw-cite-backlink">^ <a href="#cite_ref-:5_29-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:5_29-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-:5_29-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHoSalimans2022" class="citation arxiv cs1">Ho, Jonathan; Salimans, Tim (July 25, 2022). "Classifier-Free Diffusion Guidance". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2207.12598">2207.12598</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.LG">cs.LG</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Classifier-Free+Diffusion+Guidance&rft.date=2022-07-25&rft_id=info%3Aarxiv%2F2207.12598&rft.aulast=Ho&rft.aufirst=Jonathan&rft.au=Salimans%2C+Tim&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-30"><span class="mw-cite-backlink"><b><a href="#cite_ref-30">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMostaque2022" class="citation web cs1">Mostaque, Emad (August 28, 2022). <a rel="nofollow" class="external text" href="https://twitter.com/emostaque/status/1563870674111832066">"Cost of construction"</a>. <i>Twitter</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220906155426/https://twitter.com/EMostaque/status/1563870674111832066">Archived</a> from the original on September 6, 2022<span class="reference-accessdate">. 
Retrieved <span class="nowrap">September 6,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Twitter&rft.atitle=Cost+of+construction&rft.date=2022-08-28&rft.aulast=Mostaque&rft.aufirst=Emad&rft_id=https%3A%2F%2Ftwitter.com%2Femostaque%2Fstatus%2F1563870674111832066&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-stable-diffusion-model-card-1-4-31"><span class="mw-cite-backlink">^ <a href="#cite_ref-stable-diffusion-model-card-1-4_31-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-stable-diffusion-model-card-1-4_31-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-stable-diffusion-model-card-1-4_31-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/CompVis/stable-diffusion-v1-4">"CompVis/stable-diffusion-v1-4 · Hugging Face"</a>. <i>huggingface.co</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230111161920/https://huggingface.co/CompVis/stable-diffusion-v1-4">Archived</a> from the original on January 11, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">November 2,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=CompVis%2Fstable-diffusion-v1-4+%C2%B7+Hugging+Face&rft_id=https%3A%2F%2Fhuggingface.co%2FCompVis%2Fstable-diffusion-v1-4&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-32"><span class="mw-cite-backlink"><b><a href="#cite_ref-32">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWiggers2022" class="citation web cs1">Wiggers, Kyle (August 12, 2022). <a rel="nofollow" class="external text" href="https://techcrunch.com/2022/08/12/a-startup-wants-to-democratize-the-tech-behind-dall-e-2-consequences-be-damned/">"A startup wants to democratize the tech behind DALL-E 2, consequences be damned"</a>. <i>TechCrunch</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230119005503/https://techcrunch.com/2022/08/12/a-startup-wants-to-democratize-the-tech-behind-dall-e-2-consequences-be-damned/">Archived</a> from the original on January 19, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">November 2,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=TechCrunch&rft.atitle=A+startup+wants+to+democratize+the+tech+behind+DALL-E+2%2C+consequences+be+damned&rft.date=2022-08-12&rft.aulast=Wiggers&rft.aufirst=Kyle&rft_id=https%3A%2F%2Ftechcrunch.com%2F2022%2F08%2F12%2Fa-startup-wants-to-democratize-the-tech-behind-dall-e-2-consequences-be-damned%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-33"><span class="mw-cite-backlink"><b><a href="#cite_ref-33">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFemad_96082024" class="citation web cs1">emad_9608 (April 19, 2024). <a rel="nofollow" class="external text" href="https://www.reddit.com/r/StableDiffusion/comments/1c870a5/any_estimate_on_how_much_money_they_spent_to/l0dc2ni/">"10m is about right"</a>. <i>r/StableDiffusion</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">April 25,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=r%2FStableDiffusion&rft.atitle=10m+is+about+right&rft.date=2024-04-19&rft.au=emad_9608&rft_id=http%3A%2F%2Fwww.reddit.com%2Fr%2FStableDiffusion%2Fcomments%2F1c870a5%2Fany_estimate_on_how_much_money_they_spent_to%2Fl0dc2ni%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span><span class="cs1-maint citation-comment"><code class="cs1-code">{{<a href="/wiki/Template:Cite_web" title="Template:Cite web">cite web</a>}}</code>: CS1 maint: numeric names: authors list (<a href="/wiki/Category:CS1_maint:_numeric_names:_authors_list" title="Category:CS1 maint: numeric names: authors list">link</a>)</span></span> </li> <li id="cite_note-diffusers-34"><span class="mw-cite-backlink">^ <a href="#cite_ref-diffusers_34-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-diffusers_34-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-diffusers_34-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-diffusers_34-3"><sup><i><b>d</b></i></sup></a> <a href="#cite_ref-diffusers_34-4"><sup><i><b>e</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/blog/stable_diffusion">"Stable Diffusion with 🧨 Diffusers"</a>. <i>huggingface.co</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230117222142/https://huggingface.co/blog/stable_diffusion">Archived</a> from the original on January 17, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">October 31,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=Stable+Diffusion+with+%F0%9F%A7%A8+Diffusers&rft_id=https%3A%2F%2Fhuggingface.co%2Fblog%2Fstable_diffusion&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-release2.0-35"><span class="mw-cite-backlink">^ <a href="#cite_ref-release2.0_35-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-release2.0_35-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-release2.0_35-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://stability.ai/blog/stable-diffusion-v2-release">"Stable Diffusion 2.0 Release"</a>. <i>stability.ai</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20221210062729/https://stability.ai/blog/stable-diffusion-v2-release">Archived</a> from the original on December 10, 2022.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=stability.ai&rft.atitle=Stable+Diffusion+2.0+Release&rft_id=https%3A%2F%2Fstability.ai%2Fblog%2Fstable-diffusion-v2-release&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-36"><span class="mw-cite-backlink"><b><a href="#cite_ref-36">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://laion.ai/">"LAION"</a>. <i>laion.ai</i>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20231016082902/https://laion.ai/">Archived</a> from the original on October 16, 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">October 31,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=laion.ai&rft.atitle=LAION&rft_id=https%3A%2F%2Flaion.ai%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-37"><span class="mw-cite-backlink"><b><a href="#cite_ref-37">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://blog.paperspace.com/generating-images-with-stable-diffusion/">"Generating images with Stable Diffusion"</a>. <i>Paperspace Blog</i>. August 24, 2022. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20221031231727/https://blog.paperspace.com/generating-images-with-stable-diffusion/">Archived</a> from the original on October 31, 2022<span class="reference-accessdate">. 
Retrieved <span class="nowrap">October 31,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Paperspace+Blog&rft.atitle=Generating+images+with+Stable+Diffusion&rft.date=2022-08-24&rft_id=https%3A%2F%2Fblog.paperspace.com%2Fgenerating-images-with-stable-diffusion%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-38"><span class="mw-cite-backlink"><b><a href="#cite_ref-38">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://stability.ai/blog/stable-diffusion-sdxl-1-announcement">"Announcing SDXL 1.0"</a>. <i>Stability AI</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230726215239/https://stability.ai/blog/stable-diffusion-sdxl-1-announcement">Archived</a> from the original on July 26, 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">August 21,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Stability+AI&rft.atitle=Announcing+SDXL+1.0&rft_id=https%3A%2F%2Fstability.ai%2Fblog%2Fstable-diffusion-sdxl-1-announcement&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-39"><span class="mw-cite-backlink"><b><a href="#cite_ref-39">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFEdwards2023" class="citation web cs1">Edwards, Benj (July 27, 2023). 
<a rel="nofollow" class="external text" href="https://arstechnica.com/information-technology/2023/07/stable-diffusion-xl-puts-ai-generated-visual-worlds-at-your-gpus-command/">"Stability AI releases Stable Diffusion XL, its next-gen image synthesis model"</a>. <i>Ars Technica</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230821011216/https://arstechnica.com/information-technology/2023/07/stable-diffusion-xl-puts-ai-generated-visual-worlds-at-your-gpus-command/">Archived</a> from the original on August 21, 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">August 21,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Ars+Technica&rft.atitle=Stability+AI+releases+Stable+Diffusion+XL%2C+its+next-gen+image+synthesis+model&rft.date=2023-07-27&rft.aulast=Edwards&rft.aufirst=Benj&rft_id=https%3A%2F%2Farstechnica.com%2Finformation-technology%2F2023%2F07%2Fstable-diffusion-xl-puts-ai-generated-visual-worlds-at-your-gpus-command%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-40"><span class="mw-cite-backlink"><b><a href="#cite_ref-40">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/hakurei/waifu-diffusion">"hakurei/waifu-diffusion · Hugging Face"</a>. <i>huggingface.co</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20231008120655/https://huggingface.co/hakurei/waifu-diffusion">Archived</a> from the original on October 8, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">October 31,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=hakurei%2Fwaifu-diffusion+%C2%B7+Hugging+Face&rft_id=https%3A%2F%2Fhuggingface.co%2Fhakurei%2Fwaifu-diffusion&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-41"><span class="mw-cite-backlink"><b><a href="#cite_ref-41">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFChambonBluethgenLanglotzChaudhari2022" class="citation arxiv cs1">Chambon, Pierre; Bluethgen, Christian; Langlotz, Curtis P.; Chaudhari, Akshay (October 9, 2022). "Adapting Pretrained Vision-Language Foundational Models to Medical Imaging Domains". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2210.04133">2210.04133</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Adapting+Pretrained+Vision-Language+Foundational+Models+to+Medical+Imaging+Domains&rft.date=2022-10-09&rft_id=info%3Aarxiv%2F2210.04133&rft.aulast=Chambon&rft.aufirst=Pierre&rft.au=Bluethgen%2C+Christian&rft.au=Langlotz%2C+Curtis+P.&rft.au=Chaudhari%2C+Akshay&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-42"><span class="mw-cite-backlink"><b><a href="#cite_ref-42">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSeth_ForsgrenHayk_Martiros" class="citation web cs1">Seth 
Forsgren; Hayk Martiros. <a rel="nofollow" class="external text" href="https://www.riffusion.com/about">"Riffusion - Stable diffusion for real-time music generation"</a>. <i>Riffusion</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20221216092717/https://www.riffusion.com/about">Archived</a> from the original on December 16, 2022.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Riffusion&rft.atitle=Riffusion+-+Stable+diffusion+for+real-time+music+generation&rft.au=Seth+Forsgren&rft.au=Hayk+Martiros&rft_id=https%3A%2F%2Fwww.riffusion.com%2Fabout&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-43"><span class="mw-cite-backlink"><b><a href="#cite_ref-43">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMercurio2022" class="citation cs2">Mercurio, Anthony (October 31, 2022), <a rel="nofollow" class="external text" href="https://github.com/harubaru/waifu-diffusion/blob/6bf942eb6368ebf6bcbbb24b6ba8197bda6582a0/docs/en/training/README.md"><i>Waifu Diffusion</i></a>, <a rel="nofollow" class="external text" href="https://web.archive.org/web/20221031234225/https://github.com/harubaru/waifu-diffusion/blob/6bf942eb6368ebf6bcbbb24b6ba8197bda6582a0/docs/en/training/README.md">archived</a> from the original on October 31, 2022<span class="reference-accessdate">, retrieved <span class="nowrap">October 31,</span> 2022</span></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Waifu+Diffusion&rft.date=2022-10-31&rft.aulast=Mercurio&rft.aufirst=Anthony&rft_id=https%3A%2F%2Fgithub.com%2Fharubaru%2Fwaifu-diffusion%2Fblob%2F6bf942eb6368ebf6bcbbb24b6ba8197bda6582a0%2Fdocs%2Fen%2Ftraining%2FREADME.md&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" 
class="Z3988"></span></span> </li> <li id="cite_note-44"><span class="mw-cite-backlink"><b><a href="#cite_ref-44">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSmith" class="citation web cs1">Smith, Ryan. <a rel="nofollow" class="external text" href="https://www.anandtech.com/show/17204/nvidia-quietly-launches-geforce-rtx-3080-12gb-more-vram-more-power-more-money">"NVIDIA Quietly Launches GeForce RTX 3080 12GB: More VRAM, More Power, More Money"</a>. <i>www.anandtech.com</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230827092451/https://www.anandtech.com/show/17204/nvidia-quietly-launches-geforce-rtx-3080-12gb-more-vram-more-power-more-money">Archived</a> from the original on August 27, 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">October 31,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=www.anandtech.com&rft.atitle=NVIDIA+Quietly+Launches+GeForce+RTX+3080+12GB%3A+More+VRAM%2C+More+Power%2C+More+Money&rft.aulast=Smith&rft.aufirst=Ryan&rft_id=https%3A%2F%2Fwww.anandtech.com%2Fshow%2F17204%2Fnvidia-quietly-launches-geforce-rtx-3080-12gb-more-vram-more-power-more-money&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-45"><span class="mw-cite-backlink"><b><a href="#cite_ref-45">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDave_James2022" class="citation web cs1">Dave James (October 28, 2022). <a rel="nofollow" class="external text" href="https://www.pcgamer.com/nvidia-rtx-4090-stable-diffusion-training-aharon-kahana/">"I thrashed the RTX 4090 for 8 hours straight training Stable Diffusion to paint like my uncle Hermann"</a>. 
<i><a href="/wiki/PC_Gamer" title="PC Gamer">PC Gamer</a></i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20221109154310/https://www.pcgamer.com/nvidia-rtx-4090-stable-diffusion-training-aharon-kahana/">Archived</a> from the original on November 9, 2022.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=PC+Gamer&rft.atitle=I+thrashed+the+RTX+4090+for+8+hours+straight+training+Stable+Diffusion+to+paint+like+my+uncle+Hermann&rft.date=2022-10-28&rft.au=Dave+James&rft_id=https%3A%2F%2Fwww.pcgamer.com%2Fnvidia-rtx-4090-stable-diffusion-training-aharon-kahana%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-46"><span class="mw-cite-backlink"><b><a href="#cite_ref-46">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGalAlalufAtzmonPatashnik2022" class="citation arxiv cs1">Gal, Rinon; Alaluf, Yuval; Atzmon, Yuval; Patashnik, Or; Bermano, Amit H.; Chechik, Gal; Cohen-Or, Daniel (August 2, 2022). "An Image is Worth One Word: Personalizing Text-to-Image Generation using Textual Inversion". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2208.01618">2208.01618</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=An+Image+is+Worth+One+Word%3A+Personalizing+Text-to-Image+Generation+using+Textual+Inversion&rft.date=2022-08-02&rft_id=info%3Aarxiv%2F2208.01618&rft.aulast=Gal&rft.aufirst=Rinon&rft.au=Alaluf%2C+Yuval&rft.au=Atzmon%2C+Yuval&rft.au=Patashnik%2C+Or&rft.au=Bermano%2C+Amit+H.&rft.au=Chechik%2C+Gal&rft.au=Cohen-Or%2C+Daniel&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-47"><span class="mw-cite-backlink"><b><a href="#cite_ref-47">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://blog.novelai.net/novelai-improvements-on-stable-diffusion-e10d38db82ac">"NovelAI Improvements on Stable Diffusion"</a>. <i>NovelAI</i>. October 11, 2022. 
<a rel="nofollow" class="external text" href="https://archive.today/20221027041603/https://blog.novelai.net/novelai-improvements-on-stable-diffusion-e10d38db82ac">Archived</a> from the original on October 27, 2022.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=NovelAI&rft.atitle=NovelAI+Improvements+on+Stable+Diffusion&rft.date=2022-10-11&rft_id=https%3A%2F%2Fblog.novelai.net%2Fnovelai-improvements-on-stable-diffusion-e10d38db82ac&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-48"><span class="mw-cite-backlink"><b><a href="#cite_ref-48">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFYuki_Yamashita2022" class="citation web cs1 cs1-prop-foreign-lang-source">Yuki Yamashita (September 1, 2022). <a rel="nofollow" class="external text" href="https://www.itmedia.co.jp/news/articles/2209/01/news041.html">"愛犬の合成画像を生成できるAI 文章で指示するだけでコスプレ 米Googleが開発"</a>. <i>ITmedia Inc.</i> (in Japanese). 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20220831232021/https://www.itmedia.co.jp/news/articles/2209/01/news041.html">Archived</a> from the original on August 31, 2022.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=ITmedia+Inc.&rft.atitle=%E6%84%9B%E7%8A%AC%E3%81%AE%E5%90%88%E6%88%90%E7%94%BB%E5%83%8F%E3%82%92%E7%94%9F%E6%88%90%E3%81%A7%E3%81%8D%E3%82%8BAI+%E6%96%87%E7%AB%A0%E3%81%A7%E6%8C%87%E7%A4%BA%E3%81%99%E3%82%8B%E3%81%A0%E3%81%91%E3%81%A7%E3%82%B3%E3%82%B9%E3%83%97%E3%83%AC+%E7%B1%B3Google%E3%81%8C%E9%96%8B%E7%99%BA&rft.date=2022-09-01&rft.au=Yuki+Yamashita&rft_id=https%3A%2F%2Fwww.itmedia.co.jp%2Fnews%2Farticles%2F2209%2F01%2Fnews041.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-49"><span class="mw-cite-backlink"><b><a href="#cite_ref-49">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMengHeSongSong2021" class="citation arxiv cs1">Meng, Chenlin; He, Yutong; Song, Yang; Song, Jiaming; Wu, Jiajun; Zhu, Jun-Yan; Ermon, Stefano (August 2, 2021). "SDEdit: Guided Image Synthesis and Editing with Stochastic Differential Equations". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2108.01073">2108.01073</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=SDEdit%3A+Guided+Image+Synthesis+and+Editing+with+Stochastic+Differential+Equations&rft.date=2021-08-02&rft_id=info%3Aarxiv%2F2108.01073&rft.aulast=Meng&rft.aufirst=Chenlin&rft.au=He%2C+Yutong&rft.au=Song%2C+Yang&rft.au=Song%2C+Jiaming&rft.au=Wu%2C+Jiajun&rft.au=Zhu%2C+Jun-Yan&rft.au=Ermon%2C+Stefano&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-webui_showcase-50"><span class="mw-cite-backlink">^ <a href="#cite_ref-webui_showcase_50-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-webui_showcase_50-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-webui_showcase_50-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-webui_showcase_50-3"><sup><i><b>d</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://github.com/AUTOMATIC1111/stable-diffusion-webui-feature-showcase">"Stable Diffusion web UI"</a>. <i>GitHub</i>. November 10, 2022. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230120032734/https://github.com/AUTOMATIC1111/stable-diffusion-webui-feature-showcase">Archived</a> from the original on January 20, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">September 27,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=GitHub&rft.atitle=Stable+Diffusion+web+UI&rft.date=2022-11-10&rft_id=https%3A%2F%2Fgithub.com%2FAUTOMATIC1111%2Fstable-diffusion-webui-feature-showcase&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-51"><span class="mw-cite-backlink"><b><a href="#cite_ref-51">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation cs2"><a rel="nofollow" class="external text" href="https://github.com/ShieldMnt/invisible-watermark/blob/9802ce3e0c3a5ec43b41d503f156717f0c739584/README.md"><i>invisible-watermark</i></a>, Shield Mountain, November 2, 2022, <a rel="nofollow" class="external text" href="https://web.archive.org/web/20221018062806/https://github.com/ShieldMnt/invisible-watermark/blob/9802ce3e0c3a5ec43b41d503f156717f0c739584/README.md">archived</a> from the original on October 18, 2022<span class="reference-accessdate">, retrieved <span class="nowrap">November 2,</span> 2022</span></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=invisible-watermark&rft.pub=Shield+Mountain&rft.date=2022-11-02&rft_id=https%3A%2F%2Fgithub.com%2FShieldMnt%2Finvisible-watermark%2Fblob%2F9802ce3e0c3a5ec43b41d503f156717f0c739584%2FREADME.md&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-52"><span class="mw-cite-backlink"><b><a href="#cite_ref-52">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" 
href="https://github.com/JohannesGaessler/stable-diffusion-tools">"stable-diffusion-tools/emphasis at master · JohannesGaessler/stable-diffusion-tools"</a>. <i>GitHub</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20221002081041/https://github.com/JohannesGaessler/stable-diffusion-tools">Archived</a> from the original on October 2, 2022<span class="reference-accessdate">. Retrieved <span class="nowrap">November 2,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=GitHub&rft.atitle=stable-diffusion-tools%2Femphasis+at+master+%C2%B7+JohannesGaessler%2Fstable-diffusion-tools&rft_id=https%3A%2F%2Fgithub.com%2FJohannesGaessler%2Fstable-diffusion-tools&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-release2.1-53"><span class="mw-cite-backlink"><b><a href="#cite_ref-release2.1_53-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://stability.ai/blog/stablediffusion2-1-release7-dec-2022">"Stable Diffusion v2.1 and DreamStudio Updates 7-Dec 22"</a>. <i>stability.ai</i>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20221210062732/https://stability.ai/blog/stablediffusion2-1-release7-dec-2022">Archived</a> from the original on December 10, 2022.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=stability.ai&rft.atitle=Stable+Diffusion+v2.1+and+DreamStudio+Updates+7-Dec+22&rft_id=https%3A%2F%2Fstability.ai%2Fblog%2Fstablediffusion2-1-release7-dec-2022&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-:10-54"><span class="mw-cite-backlink">^ <a href="#cite_ref-:10_54-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:10_54-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMengHeSongSong2022" class="citation arxiv cs1">Meng, Chenlin; He, Yutong; Song, Yang; Song, Jiaming; Wu, Jiajun; Zhu, Jun-Yan; Ermon, Stefano (January 4, 2022). "SDEdit: Guided Image Synthesis and Editing with Stochastic Differential Equations". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2108.01073">2108.01073</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=SDEdit%3A+Guided+Image+Synthesis+and+Editing+with+Stochastic+Differential+Equations&rft.date=2022-01-04&rft_id=info%3Aarxiv%2F2108.01073&rft.aulast=Meng&rft.aufirst=Chenlin&rft.au=He%2C+Yutong&rft.au=Song%2C+Yang&rft.au=Song%2C+Jiaming&rft.au=Wu%2C+Jiajun&rft.au=Zhu%2C+Jun-Yan&rft.au=Ermon%2C+Stefano&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-:1-55"><span class="mw-cite-backlink">^ <a href="#cite_ref-:1_55-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:1_55-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFLuziSiahkoohiMayerCasco-Rodriguez2022" class="citation arxiv cs1">Luzi, Lorenzo; Siahkoohi, Ali; Mayer, Paul M.; Casco-Rodriguez, Josue; Baraniuk, Richard (October 21, 2022). "Boomerang: Local sampling on image manifolds using diffusion models". 
<a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2210.12100">2210.12100</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Boomerang%3A+Local+sampling+on+image+manifolds+using+diffusion+models&rft.date=2022-10-21&rft_id=info%3Aarxiv%2F2210.12100&rft.aulast=Luzi&rft.aufirst=Lorenzo&rft.au=Siahkoohi%2C+Ali&rft.au=Mayer%2C+Paul+M.&rft.au=Casco-Rodriguez%2C+Josue&rft.au=Baraniuk%2C+Richard&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-56"><span class="mw-cite-backlink"><b><a href="#cite_ref-56">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBühlmann2022" class="citation web cs1">Bühlmann, Matthias (September 28, 2022). <a rel="nofollow" class="external text" href="https://pub.towardsai.net/stable-diffusion-based-image-compresssion-6f1f0a399202">"Stable Diffusion Based Image Compression"</a>. <i>Medium</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20221102231642/https://pub.towardsai.net/stable-diffusion-based-image-compresssion-6f1f0a399202">Archived</a> from the original on November 2, 2022<span class="reference-accessdate">. 
Retrieved <span class="nowrap">November 2,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Medium&rft.atitle=Stable+Diffusion+Based+Image+Compression&rft.date=2022-09-28&rft.aulast=B%C3%BChlmann&rft.aufirst=Matthias&rft_id=https%3A%2F%2Fpub.towardsai.net%2Fstable-diffusion-based-image-compresssion-6f1f0a399202&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-controlnet-paper-57"><span class="mw-cite-backlink"><b><a href="#cite_ref-controlnet-paper_57-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFZhang2023" class="citation arxiv cs1">Zhang, Lvmin (February 10, 2023). "Adding Conditional Control to Text-to-Image Diffusion Models". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2302.05543">2302.05543</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Adding+Conditional+Control+to+Text-to-Image+Diffusion+Models&rft.date=2023-02-10&rft_id=info%3Aarxiv%2F2302.05543&rft.aulast=Zhang&rft.aufirst=Lvmin&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-58"><span class="mw-cite-backlink"><b><a href="#cite_ref-58">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFEdwards2022" class="citation web cs1">Edwards, Benj (November 10, 2022). 
<a rel="nofollow" class="external text" href="https://arstechnica.com/information-technology/2022/11/stable-diffusion-in-your-pocket-draw-things-brings-ai-images-to-iphone/">"Stable Diffusion in your pocket? "Draw Things" brings AI images to iPhone"</a>. <i>Ars Technica</i><span class="reference-accessdate">. Retrieved <span class="nowrap">July 10,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Ars+Technica&rft.atitle=Stable+Diffusion+in+your+pocket%3F+%22Draw+Things%22+brings+AI+images+to+iPhone&rft.date=2022-11-10&rft.aulast=Edwards&rft.aufirst=Benj&rft_id=https%3A%2F%2Farstechnica.com%2Finformation-technology%2F2022%2F11%2Fstable-diffusion-in-your-pocket-draw-things-brings-ai-images-to-iphone%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-59"><span class="mw-cite-backlink"><b><a href="#cite_ref-59">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWendling2024" class="citation web cs1">Wendling, Mike (March 6, 2024). <a rel="nofollow" class="external text" href="https://www.bbc.com/news/world-us-canada-68471253">"AI can be easily used to make fake election photos - report"</a>. <i>bbc.com</i><span class="reference-accessdate">. Retrieved <span class="nowrap">July 10,</span> 2024</span>. 
<q>The CCDH, a campaign group, tested four of the largest public-facing AI platforms: Midjourney, OpenAI's ChatGPT Plus, Stability.ai's DreamStudio and Microsoft's Image Creator.</q></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=bbc.com&rft.atitle=AI+can+be+easily+used+to+make+fake+election+photos+-+report&rft.date=2024-03-06&rft.aulast=Wendling&rft.aufirst=Mike&rft_id=https%3A%2F%2Fwww.bbc.com%2Fnews%2Fworld-us-canada-68471253&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-60"><span class="mw-cite-backlink"><b><a href="#cite_ref-60">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWiggers2023" class="citation web cs1">Wiggers, Kyle (May 18, 2023). <a rel="nofollow" class="external text" href="https://techcrunch.com/2023/05/18/stability-ai-open-sources-its-ai-powered-design-studio/">"Stability AI open sources its AI-powered design studio"</a>. <i>TechCrunch</i><span class="reference-accessdate">. Retrieved <span class="nowrap">July 10,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=TechCrunch&rft.atitle=Stability+AI+open+sources+its+AI-powered+design+studio&rft.date=2023-05-18&rft.aulast=Wiggers&rft.aufirst=Kyle&rft_id=https%3A%2F%2Ftechcrunch.com%2F2023%2F05%2F18%2Fstability-ai-open-sources-its-ai-powered-design-studio%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-61"><span class="mw-cite-backlink"><b><a href="#cite_ref-61">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWeatherbed2023" class="citation web cs1">Weatherbed, Jess (May 17, 2023). 
<a rel="nofollow" class="external text" href="https://www.theverge.com/2023/5/17/23726751/stability-ai-stablestudio-dreamstudio-stable-diffusion-artificial-intelligence">"Stability AI is open-sourcing its DreamStudio web app"</a>. <i>The Verge</i>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=The+Verge&rft.atitle=Stability+AI+is+open-sourcing+its+DreamStudio+web+app&rft.date=2023-05-17&rft.aulast=Weatherbed&rft.aufirst=Jess&rft_id=https%3A%2F%2Fwww.theverge.com%2F2023%2F5%2F17%2F23726751%2Fstability-ai-stablestudio-dreamstudio-stable-diffusion-artificial-intelligence&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-34-62"><span class="mw-cite-backlink"><b><a href="#cite_ref-34_62-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMann2024" class="citation web cs1">Mann, Tobias (June 29, 2024). <a rel="nofollow" class="external text" href="https://www.theregister.com/2024/06/29/image_gen_guide/">"A friendly guide to local AI image gen with Stable Diffusion and Automatic1111"</a>. 
<i><a href="/wiki/The_Register" title="The Register">The Register</a></i>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=The+Register&rft.atitle=A+friendly+guide+to+local+AI+image+gen+with+Stable+Diffusion+and+Automatic1111&rft.date=2024-06-29&rft.aulast=Mann&rft.aufirst=Tobias&rft_id=https%3A%2F%2Fwww.theregister.com%2F2024%2F06%2F29%2Fimage_gen_guide%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-63"><span class="mw-cite-backlink"><b><a href="#cite_ref-63">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHachman" class="citation web cs1">Hachman, Mark. <a rel="nofollow" class="external text" href="https://www.pcworld.com/article/2253285/fooocus-is-the-easiest-way-to-run-ai-art-on-your-pc.html">"Fooocus is the easiest way to create AI art on your PC"</a>. <i>PCWorld</i>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=PCWorld&rft.atitle=Fooocus+is+the+easiest+way+to+create+AI+art+on+your+PC&rft.aulast=Hachman&rft.aufirst=Mark&rft_id=https%3A%2F%2Fwww.pcworld.com%2Farticle%2F2253285%2Ffooocus-is-the-easiest-way-to-run-ai-art-on-your-pc.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-64"><span class="mw-cite-backlink"><b><a href="#cite_ref-64">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://learn.thinkdiffusion.com/comfyui-workflows-and-what-you-need-to-know/">"ComfyUI Workflows and what you need to know"</a>. <i>thinkdiffusion.com</i>. December 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">July 10,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=thinkdiffusion.com&rft.atitle=ComfyUI+Workflows+and+what+you+need+to+know&rft.date=2023-12&rft_id=https%3A%2F%2Flearn.thinkdiffusion.com%2Fcomfyui-workflows-and-what-you-need-to-know%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-65"><span class="mw-cite-backlink"><b><a href="#cite_ref-65">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://github.com/comfyanonymous/ComfyUI">"ComfyUI"</a>. <i>github.com</i><span class="reference-accessdate">. Retrieved <span class="nowrap">July 10,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=github.com&rft.atitle=ComfyUI&rft_id=https%3A%2F%2Fgithub.com%2Fcomfyanonymous%2FComfyUI&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-66"><span class="mw-cite-backlink"><b><a href="#cite_ref-66">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHuang2024" class="citation thesis cs1">Huang, Yenkai (May 10, 2024). <a rel="nofollow" class="external text" href="https://digitalcommons.dartmouth.edu/cgi/viewcontent.cgi?article=1188&context=masters_theses"><i>Latent Auto-recursive Composition Engine</i></a> (M.S. Computer Science thesis). <a href="/wiki/Dartmouth_College" title="Dartmouth College">Dartmouth College</a><span class="reference-accessdate">. 
Retrieved <span class="nowrap">July 10,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Adissertation&rft.title=Latent+Auto-recursive+Composition+Engine&rft.degree=M.S.+Computer+Science&rft.inst=Dartmouth+College&rft.date=2024-05-10&rft.aulast=Huang&rft.aufirst=Yenkai&rft_id=https%3A%2F%2Fdigitalcommons.dartmouth.edu%2Fcgi%2Fviewcontent.cgi%3Farticle%3D1188%26context%3Dmasters_theses&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-67"><span class="mw-cite-backlink"><b><a href="#cite_ref-67">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/CompVis/stable-diffusion-v1-4">"CompVis/stable-diffusion-v1-4 · Hugging Face"</a>. <i>huggingface.co</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230111161920/https://huggingface.co/CompVis/stable-diffusion-v1-4">Archived</a> from the original on January 11, 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">August 17,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=CompVis%2Fstable-diffusion-v1-4+%C2%B7+Hugging+Face&rft_id=https%3A%2F%2Fhuggingface.co%2FCompVis%2Fstable-diffusion-v1-4&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-68"><span class="mw-cite-backlink"><b><a href="#cite_ref-68">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/CompVis">"CompVis (CompVis)"</a>. <i>huggingface.co</i>. 
August 23, 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">March 6,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=CompVis+%28CompVis%29&rft.date=2023-08-23&rft_id=https%3A%2F%2Fhuggingface.co%2FCompVis&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-69"><span class="mw-cite-backlink"><b><a href="#cite_ref-69">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/runwayml/stable-diffusion-v1-5">"runwayml/stable-diffusion-v1-5 · Hugging Face"</a>. <i>huggingface.co</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230921025150/https://huggingface.co/runwayml/stable-diffusion-v1-5">Archived</a> from the original on September 21, 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">August 17,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=runwayml%2Fstable-diffusion-v1-5+%C2%B7+Hugging+Face&rft_id=https%3A%2F%2Fhuggingface.co%2Frunwayml%2Fstable-diffusion-v1-5&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-:3-70"><span class="mw-cite-backlink">^ <a href="#cite_ref-:3_70-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-:3_70-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/stabilityai/stable-diffusion-2">"stabilityai/stable-diffusion-2 · Hugging Face"</a>. 
<i>huggingface.co</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230921135247/https://huggingface.co/stabilityai/stable-diffusion-2">Archived</a> from the original on September 21, 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">August 17,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=stabilityai%2Fstable-diffusion-2+%C2%B7+Hugging+Face&rft_id=https%3A%2F%2Fhuggingface.co%2Fstabilityai%2Fstable-diffusion-2&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-71"><span class="mw-cite-backlink"><b><a href="#cite_ref-71">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/stabilityai/stable-diffusion-2-base">"stabilityai/stable-diffusion-2-base · Hugging Face"</a>. <i>huggingface.co</i><span class="reference-accessdate">. Retrieved <span class="nowrap">January 1,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=stabilityai%2Fstable-diffusion-2-base+%C2%B7+Hugging+Face&rft_id=https%3A%2F%2Fhuggingface.co%2Fstabilityai%2Fstable-diffusion-2-base&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-72"><span class="mw-cite-backlink"><b><a href="#cite_ref-72">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/stabilityai/stable-diffusion-2-1">"stabilityai/stable-diffusion-2-1 · Hugging Face"</a>. <i>huggingface.co</i>. 
<a rel="nofollow" class="external text" href="https://web.archive.org/web/20230921025146/https://huggingface.co/stabilityai/stable-diffusion-2-1">Archived</a> from the original on September 21, 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">August 17,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=stabilityai%2Fstable-diffusion-2-1+%C2%B7+Hugging+Face&rft_id=https%3A%2F%2Fhuggingface.co%2Fstabilityai%2Fstable-diffusion-2-1&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-73"><span class="mw-cite-backlink"><b><a href="#cite_ref-73">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0">"stabilityai/stable-diffusion-xl-base-1.0 · Hugging Face"</a>. <i>huggingface.co</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20231008071719/https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0">Archived</a> from the original on October 8, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">August 17,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=stabilityai%2Fstable-diffusion-xl-base-1.0+%C2%B7+Hugging+Face&rft_id=https%3A%2F%2Fhuggingface.co%2Fstabilityai%2Fstable-diffusion-xl-base-1.0&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-74"><span class="mw-cite-backlink"><b><a href="#cite_ref-74">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://stability.ai/news/stable-diffusion-sdxl-1-announcement">"Announcing SDXL 1.0"</a>. <i>Stability AI</i><span class="reference-accessdate">. Retrieved <span class="nowrap">January 1,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Stability+AI&rft.atitle=Announcing+SDXL+1.0&rft_id=https%3A%2F%2Fstability.ai%2Fnews%2Fstable-diffusion-sdxl-1-announcement&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-75"><span class="mw-cite-backlink"><b><a href="#cite_ref-75">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/stabilityai/sdxl-turbo">"stabilityai/sdxl-turbo · Hugging Face"</a>. <i>huggingface.co</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">January 1,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=stabilityai%2Fsdxl-turbo+%C2%B7+Hugging+Face&rft_id=https%3A%2F%2Fhuggingface.co%2Fstabilityai%2Fsdxl-turbo&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-76"><span class="mw-cite-backlink"><b><a href="#cite_ref-76">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://stability.ai/research/adversarial-diffusion-distillation">"Adversarial Diffusion Distillation"</a>. <i>Stability AI</i><span class="reference-accessdate">. Retrieved <span class="nowrap">January 1,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Stability+AI&rft.atitle=Adversarial+Diffusion+Distillation&rft_id=https%3A%2F%2Fstability.ai%2Fresearch%2Fadversarial-diffusion-distillation&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-77"><span class="mw-cite-backlink"><b><a href="#cite_ref-77">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://stability.ai/news/stable-diffusion-3">"Stable Diffusion 3"</a>. <i>Stability AI</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">March 5,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Stability+AI&rft.atitle=Stable+Diffusion+3&rft_id=https%3A%2F%2Fstability.ai%2Fnews%2Fstable-diffusion-3&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-release-sd3.5-78"><span class="mw-cite-backlink">^ <a href="#cite_ref-release-sd3.5_78-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-release-sd3.5_78-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://stability.ai/news/introducing-stable-diffusion-3-5">"Stable Diffusion 3.5"</a>. <i><a href="/wiki/Stability_AI" title="Stability AI">Stability AI</a></i>. <a rel="nofollow" class="external text" href="https://archive.today/20241023040750/https://stability.ai/news/introducing-stable-diffusion-3-5">Archived</a> from the original on October 23, 2024<span class="reference-accessdate">. 
Retrieved <span class="nowrap">October 23,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Stability+AI&rft.atitle=Stable+Diffusion+3.5&rft_id=https%3A%2F%2Fstability.ai%2Fnews%2Fintroducing-stable-diffusion-3-5&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-79"><span class="mw-cite-backlink"><b><a href="#cite_ref-79">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRadfordKimHallacyRamesh2021" class="citation arxiv cs1">Radford, Alec; Kim, Jong Wook; Hallacy, Chris; Ramesh, Aditya; Goh, Gabriel; Agarwal, Sandhini; Sastry, Girish; Askell, Amanda; Mishkin, Pamela (February 26, 2021). "Learning Transferable Visual Models From Natural Language Supervision". <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2103.00020">2103.00020</a></span> [<a rel="nofollow" class="external text" href="https://arxiv.org/archive/cs.CV">cs.CV</a>].</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=preprint&rft.jtitle=arXiv&rft.atitle=Learning+Transferable+Visual+Models+From+Natural+Language+Supervision&rft.date=2021-02-26&rft_id=info%3Aarxiv%2F2103.00020&rft.aulast=Radford&rft.aufirst=Alec&rft.au=Kim%2C+Jong+Wook&rft.au=Hallacy%2C+Chris&rft.au=Ramesh%2C+Aditya&rft.au=Goh%2C+Gabriel&rft.au=Agarwal%2C+Sandhini&rft.au=Sastry%2C+Girish&rft.au=Askell%2C+Amanda&rft.au=Mishkin%2C+Pamela&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-80"><span class="mw-cite-backlink"><b><a href="#cite_ref-80">^</a></b></span> <span class="reference-text"><link 
rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRombachBlattmannLorenzEsser2022" class="citation book cs1">Rombach, Robin; Blattmann, Andreas; Lorenz, Dominik; Esser, Patrick; Ommer, Björn (2022). <a rel="nofollow" class="external text" href="https://openaccess.thecvf.com/content/CVPR2022/html/Rombach_High-Resolution_Image_Synthesis_With_Latent_Diffusion_Models_CVPR_2022_paper.html">"High-Resolution Image Synthesis With Latent Diffusion Models"</a>. <i>Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)</i>. pp. 10684–10695. <a href="/wiki/ArXiv_(identifier)" class="mw-redirect" title="ArXiv (identifier)">arXiv</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://arxiv.org/abs/2112.10752">2112.10752</a></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=High-Resolution+Image+Synthesis+With+Latent+Diffusion+Models&rft.btitle=Proceedings+of+the+IEEE%2FCVF+Conference+on+Computer+Vision+and+Pattern+Recognition+%28CVPR%29&rft.pages=10684-10695&rft.date=2022&rft_id=info%3Aarxiv%2F2112.10752&rft.aulast=Rombach&rft.aufirst=Robin&rft.au=Blattmann%2C+Andreas&rft.au=Lorenz%2C+Dominik&rft.au=Esser%2C+Patrick&rft.au=Ommer%2C+Bj%C3%B6rn&rft_id=https%3A%2F%2Fopenaccess.thecvf.com%2Fcontent%2FCVPR2022%2Fhtml%2FRombach_High-Resolution_Image_Synthesis_With_Latent_Diffusion_Models_CVPR_2022_paper.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-81"><span class="mw-cite-backlink"><b><a href="#cite_ref-81">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md">"LICENSE.md · 
stabilityai/stable-diffusion-xl-base-1.0 at main"</a>. <i>huggingface.co</i>. July 26, 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">January 1,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=LICENSE.md+%C2%B7+stabilityai%2Fstable-diffusion-xl-base-1.0+at+main&rft.date=2023-07-26&rft_id=https%3A%2F%2Fhuggingface.co%2Fstabilityai%2Fstable-diffusion-xl-base-1.0%2Fblob%2Fmain%2FLICENSE.md&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-MIT-LAION-82"><span class="mw-cite-backlink"><b><a href="#cite_ref-MIT-LAION_82-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHeikkilä2022" class="citation web cs1">Heikkilä, Melissa (September 16, 2022). <a rel="nofollow" class="external text" href="https://www.technologyreview.com/2022/09/16/1059598/this-artist-is-dominating-ai-generated-art-and-hes-not-happy-about-it/">"This artist is dominating AI-generated art. And he's not happy about it"</a>. <i>MIT Technology Review</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230114125952/https://www.technologyreview.com/2022/09/16/1059598/this-artist-is-dominating-ai-generated-art-and-hes-not-happy-about-it/">Archived</a> from the original on January 14, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">September 26,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=MIT+Technology+Review&rft.atitle=This+artist+is+dominating+AI-generated+art.+And+he%27s+not+happy+about+it.&rft.date=2022-09-16&rft.aulast=Heikkil%C3%A4&rft.aufirst=Melissa&rft_id=https%3A%2F%2Fwww.technologyreview.com%2F2022%2F09%2F16%2F1059598%2Fthis-artist-is-dominating-ai-generated-art-and-hes-not-happy-about-it%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-bijapan-83"><span class="mw-cite-backlink">^ <a href="#cite_ref-bijapan_83-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-bijapan_83-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRyo_Shimizu2022" class="citation web cs1 cs1-prop-foreign-lang-source">Ryo Shimizu (August 26, 2022). <a rel="nofollow" class="external text" href="https://www.businessinsider.jp/post-258369">"Midjourneyを超えた? 無料の作画AI「 #StableDiffusion 」が「AIを民主化した」と断言できる理由"</a>. <i>Business Insider Japan</i> (in Japanese). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20221210192453/https://www.businessinsider.jp/post-258369">Archived</a> from the original on December 10, 2022<span class="reference-accessdate">. 
Retrieved <span class="nowrap">October 4,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Business+Insider+Japan&rft.atitle=Midjourney%E3%82%92%E8%B6%85%E3%81%88%E3%81%9F%EF%BC%9F+%E7%84%A1%E6%96%99%E3%81%AE%E4%BD%9C%E7%94%BBAI%EF%BD%A2+%23StableDiffusion+%EF%BD%A3%E3%81%8C%EF%BD%A2AI%E3%82%92%E6%B0%91%E4%B8%BB%E5%8C%96%E3%81%97%E3%81%9F%EF%BD%A3%E3%81%A8%E6%96%AD%E8%A8%80%E3%81%A7%E3%81%8D%E3%82%8B%E7%90%86%E7%94%B1&rft.date=2022-08-26&rft.au=Ryo+Shimizu&rft_id=https%3A%2F%2Fwww.businessinsider.jp%2Fpost-258369&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-:13-84"><span class="mw-cite-backlink"><b><a href="#cite_ref-:13_84-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCai" class="citation web cs1">Cai, Kenrick. <a rel="nofollow" class="external text" href="https://www.forbes.com/sites/kenrickcai/2022/09/07/stability-ai-funding-round-1-billion-valuation-stable-diffusion-text-to-image/">"Startup Behind AI Image Generator Stable Diffusion Is In Talks To Raise At A Valuation Up To $1 Billion"</a>. <i>Forbes</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230930125226/https://www.forbes.com/sites/kenrickcai/2022/09/07/stability-ai-funding-round-1-billion-valuation-stable-diffusion-text-to-image/">Archived</a> from the original on September 30, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">October 31,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Forbes&rft.atitle=Startup+Behind+AI+Image+Generator+Stable+Diffusion+Is+In+Talks+To+Raise+At+A+Valuation+Up+To+%241+Billion&rft.aulast=Cai&rft.aufirst=Kenrick&rft_id=https%3A%2F%2Fwww.forbes.com%2Fsites%2Fkenrickcai%2F2022%2F09%2F07%2Fstability-ai-funding-round-1-billion-valuation-stable-diffusion-text-to-image%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-85"><span class="mw-cite-backlink"><b><a href="#cite_ref-85">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.bbc.com/news/uk-65932372">"Illegal trade in AI child sex abuse images exposed"</a>. <i>BBC News</i>. June 27, 2023. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230921100213/https://www.bbc.com/news/uk-65932372">Archived</a> from the original on September 21, 2023<span class="reference-accessdate">. Retrieved <span class="nowrap">September 26,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=BBC+News&rft.atitle=Illegal+trade+in+AI+child+sex+abuse+images+exposed&rft.date=2023-06-27&rft_id=https%3A%2F%2Fwww.bbc.com%2Fnews%2Fuk-65932372&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-86"><span class="mw-cite-backlink"><b><a href="#cite_ref-86">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMaiberg2024" class="citation web cs1">Maiberg, Emanuel (June 11, 2024). 
<span class="id-lock-subscription" title="Paid subscription required"><a rel="nofollow" class="external text" href="https://www.404media.co/hackers-target-ai-users-with-malicious-stable-diffusion-tool-on-github/">"Hackers Target AI Users With Malicious Stable Diffusion Tool on GitHub to Protest 'Art Theft'<span class="cs1-kern-right"></span>"</a></span>. <i>404 Media</i><span class="reference-accessdate">. Retrieved <span class="nowrap">June 14,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=404+Media&rft.atitle=Hackers+Target+AI+Users+With+Malicious+Stable+Diffusion+Tool+on+GitHub+to+Protest+%27Art+Theft%27&rft.date=2024-06-11&rft.aulast=Maiberg&rft.aufirst=Emanuel&rft_id=https%3A%2F%2Fwww.404media.co%2Fhackers-target-ai-users-with-malicious-stable-diffusion-tool-on-github%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-87"><span class="mw-cite-backlink"><b><a href="#cite_ref-87">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFVincent2023" class="citation web cs1">Vincent, James (January 16, 2023). <a rel="nofollow" class="external text" href="https://www.theverge.com/2023/1/16/23557098/generative-ai-art-copyright-legal-lawsuit-stable-diffusion-midjourney-deviantart">"AI art tools Stable Diffusion and Midjourney targeted with copyright lawsuit"</a>. <i>The Verge</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230309010528/https://www.theverge.com/2023/1/16/23557098/generative-ai-art-copyright-legal-lawsuit-stable-diffusion-midjourney-deviantart">Archived</a> from the original on March 9, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">January 16,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=The+Verge&rft.atitle=AI+art+tools+Stable+Diffusion+and+Midjourney+targeted+with+copyright+lawsuit&rft.date=2023-01-16&rft.aulast=Vincent&rft.aufirst=James&rft_id=https%3A%2F%2Fwww.theverge.com%2F2023%2F1%2F16%2F23557098%2Fgenerative-ai-art-copyright-legal-lawsuit-stable-diffusion-midjourney-deviantart&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-Reuters-SDLawsuit-88"><span class="mw-cite-backlink"><b><a href="#cite_ref-Reuters-SDLawsuit_88-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBrittain2023" class="citation news cs1">Brittain, Blake (July 19, 2023). <a rel="nofollow" class="external text" href="https://www.reuters.com/legal/litigation/us-judge-finds-flaws-artists-lawsuit-against-ai-companies-2023-07-19/">"US judge finds flaws in artists' lawsuit against AI companies"</a>. <i>Reuters</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230906193839/https://www.reuters.com/legal/litigation/us-judge-finds-flaws-artists-lawsuit-against-ai-companies-2023-07-19/">Archived</a> from the original on September 6, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">August 6,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Reuters&rft.atitle=US+judge+finds+flaws+in+artists%27+lawsuit+against+AI+companies&rft.date=2023-07-19&rft.aulast=Brittain&rft.aufirst=Blake&rft_id=https%3A%2F%2Fwww.reuters.com%2Flegal%2Flitigation%2Fus-judge-finds-flaws-artists-lawsuit-against-ai-companies-2023-07-19%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-89"><span class="mw-cite-backlink"><b><a href="#cite_ref-89">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGoosens2024" class="citation news cs1">Goosens, Sophia (February 28, 2024). <a rel="nofollow" class="external text" href="https://www.pinsentmasons.com/out-law/analysis/getty-images-v-stability-ai-implications-copyright-law-licensing">"Getty Images v Stability AI: the implications for UK copyright law and licensing"</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.atitle=Getty+Images+v+Stability+AI%3A+the+implications+for+UK+copyright+law+and+licensing&rft.date=2024-02-28&rft.aulast=Goosens&rft.aufirst=Sophia&rft_id=https%3A%2F%2Fwww.pinsentmasons.com%2Fout-law%2Fanalysis%2Fgetty-images-v-stability-ai-implications-copyright-law-licensing&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-90"><span class="mw-cite-backlink"><b><a href="#cite_ref-90">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGill2023" class="citation news cs1">Gill, Dennis (December 11, 2023). 
<a rel="nofollow" class="external text" href="https://www.pinsentmasons.com/out-law/news/getty-images-v-stability-ai">"Getty Images v Stability AI: copyright claims can proceed to trial"</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.atitle=Getty+Images+v+Stability+AI%3A+copyright+claims+can+proceed+to+trial&rft.date=2023-12-11&rft.aulast=Gill&rft.aufirst=Dennis&rft_id=https%3A%2F%2Fwww.pinsentmasons.com%2Fout-law%2Fnews%2Fgetty-images-v-stability-ai&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-91"><span class="mw-cite-backlink"><b><a href="#cite_ref-91">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGoosens2024" class="citation news cs1">Goosens, Sophia (February 28, 2024). <a rel="nofollow" class="external text" href="https://www.reedsmith.com/en/perspectives/2024/02/getty-v-stability-ai-case-goes-to-trial-in-the-uk-what-we-learned">"Getty v. 
Stability AI case goes to trial in the UK – what we learned"</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.atitle=Getty+v.+Stability+AI+case+goes+to+trial+in+the+UK+%E2%80%93+what+we+learned&rft.date=2024-02-28&rft.aulast=Goosens&rft.aufirst=Sophia&rft_id=https%3A%2F%2Fwww.reedsmith.com%2Fen%2Fperspectives%2F2024%2F02%2Fgetty-v-stability-ai-case-goes-to-trial-in-the-uk-what-we-learned&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-pinsentmasons2024GettyvsStabilityAI-92"><span class="mw-cite-backlink">^ <a href="#cite_ref-pinsentmasons2024GettyvsStabilityAI_92-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-pinsentmasons2024GettyvsStabilityAI_92-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHill2024" class="citation news cs1">Hill, Charlotte (February 16, 2024). 
<a rel="nofollow" class="external text" href="https://www.penningtonslaw.com/news-publications/latest-news/2024/generative-ai-in-the-courts-getty-images-v-stability-ai">"Generative AI in the courts: Getty Images v Stability AI"</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.atitle=Generative+AI+in+the+courts%3A+Getty+Images+v+Stability+AI&rft.date=2024-02-16&rft.aulast=Hill&rft.aufirst=Charlotte&rft_id=https%3A%2F%2Fwww.penningtonslaw.com%2Fnews-publications%2Flatest-news%2F2024%2Fgenerative-ai-in-the-courts-getty-images-v-stability-ai&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-stability-93"><span class="mw-cite-backlink"><b><a href="#cite_ref-stability_93-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://stability.ai/blog/stable-diffusion-public-release">"Stable Diffusion Public Release"</a>. <i>Stability.Ai</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220830210535/https://stability.ai/blog/stable-diffusion-public-release">Archived</a> from the original on August 30, 2022<span class="reference-accessdate">. 
Retrieved <span class="nowrap">August 31,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Stability.Ai&rft.atitle=Stable+Diffusion+Public+Release&rft_id=https%3A%2F%2Fstability.ai%2Fblog%2Fstable-diffusion-public-release&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-94"><span class="mw-cite-backlink"><b><a href="#cite_ref-94">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses">"From RAIL to Open RAIL: Topologies of RAIL Licenses"</a>. <i>Responsible AI Licenses (RAIL)</i>. August 18, 2022. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20230727145215/https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses">Archived</a> from the original on July 27, 2023<span class="reference-accessdate">. 
Retrieved <span class="nowrap">February 20,</span> 2023</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Responsible+AI+Licenses+%28RAIL%29&rft.atitle=From+RAIL+to+Open+RAIL%3A+Topologies+of+RAIL+Licenses&rft.date=2022-08-18&rft_id=https%3A%2F%2Fwww.licenses.ai%2Fblog%2F2022%2F8%2F18%2Fnaming-convention-of-responsible-ai-licenses&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-washingtonpost-95"><span class="mw-cite-backlink"><b><a href="#cite_ref-washingtonpost_95-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation news cs1"><a rel="nofollow" class="external text" href="https://www.washingtonpost.com/technology/2022/08/30/deep-fake-video-on-agt/">"Ready or not, mass video deepfakes are coming"</a>. <i>The Washington Post</i>. August 30, 2022. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220831115010/https://www.washingtonpost.com/technology/2022/08/30/deep-fake-video-on-agt/">Archived</a> from the original on August 31, 2022<span class="reference-accessdate">. 
Retrieved <span class="nowrap">August 31,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+Washington+Post&rft.atitle=Ready+or+not%2C+mass+video+deepfakes+are+coming&rft.date=2022-08-30&rft_id=https%3A%2F%2Fwww.washingtonpost.com%2Ftechnology%2F2022%2F08%2F30%2Fdeep-fake-video-on-agt%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-96"><span class="mw-cite-backlink"><b><a href="#cite_ref-96">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://huggingface.co/spaces/CompVis/stable-diffusion-license">"License - a Hugging Face Space by CompVis"</a>. <i>huggingface.co</i>. <a rel="nofollow" class="external text" href="https://web.archive.org/web/20220904215616/https://huggingface.co/spaces/CompVis/stable-diffusion-license">Archived</a> from the original on September 4, 2022<span class="reference-accessdate">. Retrieved <span class="nowrap">September 5,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=huggingface.co&rft.atitle=License+-+a+Hugging+Face+Space+by+CompVis&rft_id=https%3A%2F%2Fhuggingface.co%2Fspaces%2FCompVis%2Fstable-diffusion-license&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-97"><span class="mw-cite-backlink"><b><a href="#cite_ref-97">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKatsuo_Ishida2022" class="citation web cs1 cs1-prop-foreign-lang-source">Katsuo Ishida (August 26, 2022). 
<a rel="nofollow" class="external text" href="https://forest.watch.impress.co.jp/docs/review/1434893.html">"言葉で指示した画像を凄いAIが描き出す「Stable Diffusion」 ~画像は商用利用も可能"</a>. <i>Impress Corporation</i> (in Japanese). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20221114020520/https://forest.watch.impress.co.jp/docs/review/1434893.html">Archived</a> from the original on November 14, 2022<span class="reference-accessdate">. Retrieved <span class="nowrap">October 4,</span> 2022</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Impress+Corporation&rft.atitle=%E8%A8%80%E8%91%89%E3%81%A7%E6%8C%87%E7%A4%BA%E3%81%97%E3%81%9F%E7%94%BB%E5%83%8F%E3%82%92%E5%87%84%E3%81%84AI%E3%81%8C%E6%8F%8F%E3%81%8D%E5%87%BA%E3%81%99%E3%80%8CStable+Diffusion%E3%80%8D+%EF%BD%9E%E7%94%BB%E5%83%8F%E3%81%AF%E5%95%86%E7%94%A8%E5%88%A9%E7%94%A8%E3%82%82%E5%8F%AF%E8%83%BD&rft.date=2022-08-26&rft.au=Katsuo+Ishida&rft_id=https%3A%2F%2Fforest.watch.impress.co.jp%2Fdocs%2Freview%2F1434893.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> <li id="cite_note-98"><span class="mw-cite-backlink"><b><a href="#cite_ref-98">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://stability.ai/news/license-update">"Community License"</a>. <i><a href="/wiki/Stability_AI" title="Stability AI">Stability AI</a></i>. July 5, 2024<span class="reference-accessdate">. 
Retrieved <span class="nowrap">October 23,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Stability+AI&rft.atitle=Community+License&rft.date=2024-07-05&rft_id=https%3A%2F%2Fstability.ai%2Fnews%2Flicense-update&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></span> </li> </ol></div></div> <div class="mw-heading mw-heading2"><h2 id="External_links">External links</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Stable_Diffusion&action=edit&section=23" title="Edit section: External links"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1235681985">.mw-parser-output .side-box{margin:4px 0;box-sizing:border-box;border:1px solid #aaa;font-size:88%;line-height:1.25em;background-color:var(--background-color-interactive-subtle,#f8f9fa);display:flow-root}.mw-parser-output .side-box-abovebelow,.mw-parser-output .side-box-text{padding:0.25em 0.9em}.mw-parser-output .side-box-image{padding:2px 0 2px 0.9em;text-align:center}.mw-parser-output .side-box-imageright{padding:2px 0.9em 2px 0;text-align:center}@media(min-width:500px){.mw-parser-output .side-box-flex{display:flex;align-items:center}.mw-parser-output .side-box-text{flex:1;min-width:0}}@media(min-width:720px){.mw-parser-output .side-box{width:238px}.mw-parser-output .side-box-right{clear:right;float:right;margin-left:1em}.mw-parser-output .side-box-left{margin-right:1em}}</style><style data-mw-deduplicate="TemplateStyles:r1237033735">@media print{body.ns-0 .mw-parser-output .sistersitebox{display:none!important}}@media screen{html.skin-theme-clientpref-night .mw-parser-output .sistersitebox img[src*="Wiktionary-logo-en-v2.svg"]{background-color:white}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .sistersitebox 
img[src*="Wiktionary-logo-en-v2.svg"]{background-color:white}}</style><div class="side-box side-box-right plainlinks sistersitebox"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1126788409"> <div class="side-box-flex"> <div class="side-box-image"><span class="noviewer" typeof="mw:File"><span><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/4/4a/Commons-logo.svg/30px-Commons-logo.svg.png" decoding="async" width="30" height="40" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/4/4a/Commons-logo.svg/45px-Commons-logo.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/4/4a/Commons-logo.svg/59px-Commons-logo.svg.png 2x" data-file-width="1024" data-file-height="1376" /></span></span></div> <div class="side-box-text plainlist">Wikimedia Commons has media related to <span style="font-weight: bold; font-style: italic;"><a href="https://commons.wikimedia.org/wiki/Category:Stable_Diffusion" class="extiw" title="commons:Category:Stable Diffusion">Stable Diffusion</a></span>.</div></div> </div> <ul><li><a rel="nofollow" class="external text" href="https://huggingface.co/spaces/stabilityai/stable-diffusion">Stable Diffusion Demo</a></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://erdem.pl/2023/11/step-by-step-visual-introduction-to-diffusion-models/">"Step by Step visual introduction to Diffusion Models. - Blog by Kemal Erdem"</a><span class="reference-accessdate">. 
Retrieved <span class="nowrap">August 31,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Step+by+Step+visual+introduction+to+Diffusion+Models.+-+Blog+by+Kemal+Erdem&rft_id=https%3A%2F%2Ferdem.pl%2F2023%2F11%2Fstep-by-step-visual-introduction-to-diffusion-models%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://nn.labml.ai/diffusion/stable_diffusion/model/unet.html">"U-Net for Stable Diffusion"</a>. <i>U-Net for Stable Diffusion</i><span class="reference-accessdate">. Retrieved <span class="nowrap">August 31,</span> 2024</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=U-Net+for+Stable+Diffusion&rft.atitle=U-Net+for+Stable+Diffusion&rft_id=https%3A%2F%2Fnn.labml.ai%2Fdiffusion%2Fstable_diffusion%2Fmodel%2Funet.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AStable+Diffusion" class="Z3988"></span></li> <li><a rel="nofollow" class="external text" href="https://poloclub.github.io/diffusion-explainer/">Interactive Explanation of Stable Diffusion</a></li> <li><a rel="nofollow" class="external text" href="https://interaktiv.br.de/ki-trainingsdaten/en/index.html">"We Are All Raw Material for AI"</a>: Investigation on sensitive and private data in Stable Diffusion's training data</li> <li>"<a rel="nofollow" class="external text" href="https://talkdigital.com.au/ai/stable-diffusion-negative-prompt-list/">Negative Prompts in Stable Diffusion</a>"</li> <li>"<a rel="nofollow" class="external text" href="https://infoofai.com/negative-prompts-in-stable-diffusion/">Negative Prompts in Stable Diffusion</a>"</li></ul> <div class="navbox-styles"><style data-mw-deduplicate="TemplateStyles:r1129693374">.mw-parser-output
.hlist dl,.mw-parser-output .hlist ol,.mw-parser-output .hlist ul{margin:0;padding:0}.mw-parser-output .hlist dd,.mw-parser-output .hlist dt,.mw-parser-output .hlist li{margin:0;display:inline}.mw-parser-output .hlist.inline,.mw-parser-output .hlist.inline dl,.mw-parser-output .hlist.inline ol,.mw-parser-output .hlist.inline ul,.mw-parser-output .hlist dl dl,.mw-parser-output .hlist dl ol,.mw-parser-output .hlist dl ul,.mw-parser-output .hlist ol dl,.mw-parser-output .hlist ol ol,.mw-parser-output .hlist ol ul,.mw-parser-output .hlist ul dl,.mw-parser-output .hlist ul ol,.mw-parser-output .hlist ul ul{display:inline}.mw-parser-output .hlist .mw-empty-li{display:none}.mw-parser-output .hlist dt::after{content:": "}.mw-parser-output .hlist dd::after,.mw-parser-output .hlist li::after{content:" · ";font-weight:bold}.mw-parser-output .hlist dd:last-child::after,.mw-parser-output .hlist dt:last-child::after,.mw-parser-output .hlist li:last-child::after{content:none}.mw-parser-output .hlist dd dd:first-child::before,.mw-parser-output .hlist dd dt:first-child::before,.mw-parser-output .hlist dd li:first-child::before,.mw-parser-output .hlist dt dd:first-child::before,.mw-parser-output .hlist dt dt:first-child::before,.mw-parser-output .hlist dt li:first-child::before,.mw-parser-output .hlist li dd:first-child::before,.mw-parser-output .hlist li dt:first-child::before,.mw-parser-output .hlist li li:first-child::before{content:" (";font-weight:normal}.mw-parser-output .hlist dd dd:last-child::after,.mw-parser-output .hlist dd dt:last-child::after,.mw-parser-output .hlist dd li:last-child::after,.mw-parser-output .hlist dt dd:last-child::after,.mw-parser-output .hlist dt dt:last-child::after,.mw-parser-output .hlist dt li:last-child::after,.mw-parser-output .hlist li dd:last-child::after,.mw-parser-output .hlist li dt:last-child::after,.mw-parser-output .hlist li li:last-child::after{content:")";font-weight:normal}.mw-parser-output .hlist 
ol{counter-reset:listitem}.mw-parser-output .hlist ol>li{counter-increment:listitem}.mw-parser-output .hlist ol>li::before{content:" "counter(listitem)"\a0 "}.mw-parser-output .hlist dd ol>li:first-child::before,.mw-parser-output .hlist dt ol>li:first-child::before,.mw-parser-output .hlist li ol>li:first-child::before{content:" ("counter(listitem)"\a0 "}</style><style data-mw-deduplicate="TemplateStyles:r1236075235">.mw-parser-output .navbox{box-sizing:border-box;border:1px solid #a2a9b1;width:100%;clear:both;font-size:88%;text-align:center;padding:1px;margin:1em auto 0}.mw-parser-output .navbox .navbox{margin-top:0}.mw-parser-output .navbox+.navbox,.mw-parser-output .navbox+.navbox-styles+.navbox{margin-top:-1px}.mw-parser-output .navbox-inner,.mw-parser-output .navbox-subgroup{width:100%}.mw-parser-output .navbox-group,.mw-parser-output .navbox-title,.mw-parser-output .navbox-abovebelow{padding:0.25em 1em;line-height:1.5em;text-align:center}.mw-parser-output .navbox-group{white-space:nowrap;text-align:right}.mw-parser-output .navbox,.mw-parser-output .navbox-subgroup{background-color:#fdfdfd}.mw-parser-output .navbox-list{line-height:1.5em;border-color:#fdfdfd}.mw-parser-output .navbox-list-with-group{text-align:left;border-left-width:2px;border-left-style:solid}.mw-parser-output tr+tr>.navbox-abovebelow,.mw-parser-output tr+tr>.navbox-group,.mw-parser-output tr+tr>.navbox-image,.mw-parser-output tr+tr>.navbox-list{border-top:2px solid #fdfdfd}.mw-parser-output .navbox-title{background-color:#ccf}.mw-parser-output .navbox-abovebelow,.mw-parser-output .navbox-group,.mw-parser-output .navbox-subgroup .navbox-title{background-color:#ddf}.mw-parser-output .navbox-subgroup .navbox-group,.mw-parser-output .navbox-subgroup .navbox-abovebelow{background-color:#e6e6ff}.mw-parser-output .navbox-even{background-color:#f7f7f7}.mw-parser-output .navbox-odd{background-color:transparent}.mw-parser-output .navbox .hlist td dl,.mw-parser-output .navbox .hlist td 
ol,.mw-parser-output .navbox .hlist td ul,.mw-parser-output .navbox td.hlist dl,.mw-parser-output .navbox td.hlist ol,.mw-parser-output .navbox td.hlist ul{padding:0.125em 0}.mw-parser-output .navbox .navbar{display:block;font-size:100%}.mw-parser-output .navbox-title .navbar{float:left;text-align:left;margin-right:0.5em}body.skin--responsive .mw-parser-output .navbox-image img{max-width:none!important}@media print{body.ns-0 .mw-parser-output .navbox{display:none!important}}</style></div><div role="navigation" class="navbox" aria-labelledby="Generative_AI" style="padding:3px"><table class="nowraplinks mw-collapsible autocollapse navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="col" class="navbox-title" colspan="2"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><style data-mw-deduplicate="TemplateStyles:r1239400231">.mw-parser-output .navbar{display:inline;font-size:88%;font-weight:normal}.mw-parser-output .navbar-collapse{float:left;text-align:left}.mw-parser-output .navbar-boxtext{word-spacing:0}.mw-parser-output .navbar ul{display:inline-block;white-space:nowrap;line-height:inherit}.mw-parser-output .navbar-brackets::before{margin-right:-0.125em;content:"[ "}.mw-parser-output .navbar-brackets::after{margin-left:-0.125em;content:" ]"}.mw-parser-output .navbar li{word-spacing:-0.125em}.mw-parser-output .navbar a>span,.mw-parser-output .navbar a>abbr{text-decoration:inherit}.mw-parser-output .navbar-mini abbr{font-variant:small-caps;border-bottom:none;text-decoration:none;cursor:inherit}.mw-parser-output .navbar-ct-full{font-size:114%;margin:0 7em}.mw-parser-output .navbar-ct-mini{font-size:114%;margin:0 4em}html.skin-theme-clientpref-night .mw-parser-output .navbar li a abbr{color:var(--color-base)!important}@media(prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .navbar li a abbr{color:var(--color-base)!important}}@media print{.mw-parser-output 
.navbar{display:none!important}}</style><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Generative_AI" title="Template:Generative AI"><abbr title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/w/index.php?title=Template_talk:Generative_AI&action=edit&redlink=1" class="new" title="Template talk:Generative AI (page does not exist)"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Generative_AI" title="Special:EditPage/Template:Generative AI"><abbr title="Edit this template">e</abbr></a></li></ul></div><div id="Generative_AI" style="font-size:114%;margin:0 4em"><a href="/wiki/Generative_artificial_intelligence" title="Generative artificial intelligence">Generative AI</a></div></th></tr><tr><th scope="row" class="navbox-group" style="width:1%">Concepts</th><td class="navbox-list-with-group navbox-list navbox-odd hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Autoencoder" title="Autoencoder">Autoencoder</a></li> <li><a href="/wiki/Deep_learning" title="Deep learning">Deep learning</a></li> <li><a href="/wiki/Generative_adversarial_network" title="Generative adversarial network">Generative adversarial network</a></li> <li><a href="/wiki/Generative_pre-trained_transformer" title="Generative pre-trained transformer">Generative pre-trained transformer</a></li> <li><a href="/wiki/Large_language_model" title="Large language model">Large language model</a></li> <li><a href="/wiki/Neural_network_(machine_learning)" title="Neural network (machine learning)">Neural network</a></li> <li><a href="/wiki/Prompt_engineering" title="Prompt engineering">Prompt engineering</a></li> <li><a href="/wiki/Retrieval-augmented_generation" title="Retrieval-augmented generation">RAG</a></li> <li><a href="/wiki/Reinforcement_learning_from_human_feedback" title="Reinforcement learning from human feedback">RLHF</a></li> 
<li><a href="/wiki/Self-supervised_learning" title="Self-supervised learning">Self-supervised learning</a></li> <li><a href="/wiki/Transformer_(deep_learning_architecture)" title="Transformer (deep learning architecture)">Transformer</a></li> <li><a href="/wiki/Variational_autoencoder" title="Variational autoencoder">Variational autoencoder</a></li> <li><a href="/wiki/Vision_transformer" title="Vision transformer">Vision transformer</a></li> <li><a href="/wiki/Word_embedding" title="Word embedding">Word embedding</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Models</th><td class="navbox-list-with-group navbox-list navbox-odd hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"></div><table class="nowraplinks navbox-subgroup" style="border-spacing:0"><tbody><tr><th scope="row" class="navbox-group" style="width:1%">Text</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Claude_(language_model)" title="Claude (language model)">Claude</a></li> <li><a href="/wiki/Gemini_(language_model)" title="Gemini (language model)">Gemini</a></li> <li><a href="/wiki/Generative_pre-trained_transformer" title="Generative pre-trained transformer">GPT</a> <ul><li><a href="/wiki/GPT-1" title="GPT-1">1</a></li> <li><a href="/wiki/GPT-2" title="GPT-2">2</a></li> <li><a href="/wiki/GPT-3" title="GPT-3">3</a></li> <li><a href="/wiki/GPT-J" title="GPT-J">J</a></li> <li><a href="/wiki/ChatGPT" title="ChatGPT">ChatGPT</a></li> <li><a href="/wiki/GPT-4" title="GPT-4">4</a></li> <li><a href="/wiki/GPT-4o" title="GPT-4o">4o</a></li> <li><a href="/wiki/OpenAI_o1" title="OpenAI o1">o1</a></li></ul></li> <li><a href="/wiki/Grok_(chatbot)" title="Grok (chatbot)">Grok</a></li> <li><a href="/wiki/Llama_(language_model)" title="Llama (language model)">Llama</a></li> <li>Pixtral</li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" 
style="width:1%"><a href="/wiki/Text-to-image_model" title="Text-to-image model">Images</a></th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/DALL-E" title="DALL-E">DALL-E</a></li> <li><a href="/wiki/Flux_(text-to-image_model)" title="Flux (text-to-image model)">Flux</a></li> <li><a href="/wiki/Ideogram_(text-to-image_model)" title="Ideogram (text-to-image model)">Ideogram</a></li> <li><a href="/wiki/Midjourney" title="Midjourney">Midjourney</a></li> <li>Recraft</li> <li><a class="mw-selflink selflink">Stable Diffusion</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Text-to-video_model" title="Text-to-video model">Videos</a></th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li>Hailuo AI</li> <li>Kling</li> <li>Mochi</li> <li><a href="/wiki/Sora_(text-to-video_model)" title="Sora (text-to-video model)">Sora</a></li> <li><a href="/wiki/Dream_Machine_(text-to-video_model)" title="Dream Machine (text-to-video model)">Dream Machine</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Music</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Suno_AI" title="Suno AI">Suno AI</a></li> <li><a href="/wiki/Udio" title="Udio">Udio</a></li></ul> </div></td></tr></tbody></table><div></div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Companies</th><td class="navbox-list-with-group navbox-list navbox-even hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Anthropic" title="Anthropic">Anthropic</a></li> <li>Black Forest Labs</li> <li><a href="/wiki/ElevenLabs" title="ElevenLabs">ElevenLabs</a></li> <li>Genmo</li> <li><a href="/wiki/Google_DeepMind" title="Google 
DeepMind">Google DeepMind</a></li> <li><a href="/wiki/Hugging_Face" title="Hugging Face">Hugging Face</a></li> <li><a href="/wiki/Kuaishou" title="Kuaishou">Kuaishou</a></li> <li><a href="/wiki/Meta_AI" title="Meta AI">Meta AI</a></li> <li><a href="/wiki/MiniMax_(company)" title="MiniMax (company)">MiniMax</a></li> <li><a href="/wiki/Mistral_AI" title="Mistral AI">Mistral AI</a></li> <li><a href="/wiki/OpenAI" title="OpenAI">OpenAI</a></li> <li>Recraft AI</li> <li><a href="/wiki/Runway_(company)" title="Runway (company)">Runway</a></li> <li><a href="/wiki/Stability_AI" title="Stability AI">Stability AI</a></li> <li><a href="/wiki/XAI_(company)" title="XAI (company)">xAI</a></li></ul> </div></td></tr><tr><td class="navbox-abovebelow" colspan="2"><div><span class="noviewer" typeof="mw:File"><span title="Category"><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/16px-Symbol_category_class.svg.png" decoding="async" width="16" height="16" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/23px-Symbol_category_class.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/31px-Symbol_category_class.svg.png 2x" data-file-width="180" data-file-height="185" /></span></span> <a href="/wiki/Category:Generative_artificial_intelligence" title="Category:Generative artificial intelligence">Category</a></div></td></tr></tbody></table></div> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236075235"></div><div role="navigation" class="navbox" aria-labelledby="Artificial_intelligence" style="padding:3px"><table class="nowraplinks hlist mw-collapsible autocollapse navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="col" class="navbox-title" colspan="2"><link 
rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1239400231"><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Artificial_intelligence_(AI)" title="Template:Artificial intelligence (AI)"><abbr title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/wiki/Template_talk:Artificial_intelligence_(AI)" class="mw-redirect" title="Template talk:Artificial intelligence (AI)"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Artificial_intelligence_(AI)" title="Special:EditPage/Template:Artificial intelligence (AI)"><abbr title="Edit this template">e</abbr></a></li></ul></div><div id="Artificial_intelligence" style="font-size:114%;margin:0 4em"><a href="/wiki/Artificial_intelligence" title="Artificial intelligence">Artificial intelligence</a></div></th></tr><tr><th scope="row" class="navbox-group" style="width:1%">Concepts</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Parameter" title="Parameter">Parameter</a> <ul><li><a href="/wiki/Hyperparameter_(machine_learning)" title="Hyperparameter (machine learning)">Hyperparameter</a></li></ul></li> <li><a href="/wiki/Loss_functions_for_classification" title="Loss functions for classification">Loss functions</a></li> <li><a href="/wiki/Regression_analysis" title="Regression analysis">Regression</a> <ul><li><a href="/wiki/Bias%E2%80%93variance_tradeoff" title="Bias–variance tradeoff">Bias–variance tradeoff</a></li> <li><a href="/wiki/Double_descent" title="Double descent">Double descent</a></li> <li><a href="/wiki/Overfitting" title="Overfitting">Overfitting</a></li></ul></li> <li><a href="/wiki/Cluster_analysis" title="Cluster analysis">Clustering</a></li> <li><a href="/wiki/Gradient_descent" title="Gradient 
descent">Gradient descent</a> <ul><li><a href="/wiki/Stochastic_gradient_descent" title="Stochastic gradient descent">SGD</a></li> <li><a href="/wiki/Quasi-Newton_method" title="Quasi-Newton method">Quasi-Newton method</a></li> <li><a href="/wiki/Conjugate_gradient_method" title="Conjugate gradient method">Conjugate gradient method</a></li></ul></li> <li><a href="/wiki/Backpropagation" title="Backpropagation">Backpropagation</a></li> <li><a href="/wiki/Attention_(machine_learning)" title="Attention (machine learning)">Attention</a></li> <li><a href="/wiki/Convolution" title="Convolution">Convolution</a></li> <li><a href="/wiki/Normalization_(machine_learning)" title="Normalization (machine learning)">Normalization</a> <ul><li><a href="/wiki/Batch_normalization" title="Batch normalization">Batchnorm</a></li></ul></li> <li><a href="/wiki/Activation_function" title="Activation function">Activation</a> <ul><li><a href="/wiki/Softmax_function" title="Softmax function">Softmax</a></li> <li><a href="/wiki/Sigmoid_function" title="Sigmoid function">Sigmoid</a></li> <li><a href="/wiki/Rectifier_(neural_networks)" title="Rectifier (neural networks)">Rectifier</a></li></ul></li> <li><a href="/wiki/Gating_mechanism" title="Gating mechanism">Gating</a></li> <li><a href="/wiki/Weight_initialization" title="Weight initialization">Weight initialization</a></li> <li><a href="/wiki/Regularization_(mathematics)" title="Regularization (mathematics)">Regularization</a></li> <li><a href="/wiki/Training,_validation,_and_test_data_sets" title="Training, validation, and test data sets">Datasets</a> <ul><li><a href="/wiki/Data_augmentation" title="Data augmentation">Augmentation</a></li></ul></li> <li><a href="/wiki/Reinforcement_learning" title="Reinforcement learning">Reinforcement learning</a> <ul><li><a href="/wiki/Q-learning" title="Q-learning">Q-learning</a></li> <li><a href="/wiki/State%E2%80%93action%E2%80%93reward%E2%80%93state%E2%80%93action" 
title="State–action–reward–state–action">SARSA</a></li> <li><a href="/wiki/Imitation_learning" title="Imitation learning">Imitation</a></li></ul></li> <li><a href="/wiki/Diffusion_process" title="Diffusion process">Diffusion</a></li> <li><a href="/wiki/Latent_diffusion_model" title="Latent diffusion model">Latent diffusion model</a></li> <li><a href="/wiki/Autoregressive_model" title="Autoregressive model">Autoregression</a></li> <li><a href="/wiki/Adversarial_machine_learning" title="Adversarial machine learning">Adversary</a></li> <li><a href="/wiki/Retrieval-augmented_generation" title="Retrieval-augmented generation">RAG</a></li> <li><a href="/wiki/Reinforcement_learning_from_human_feedback" title="Reinforcement learning from human feedback">RLHF</a></li> <li><a href="/wiki/Self-supervised_learning" title="Self-supervised learning">Self-supervised learning</a></li> <li><a href="/wiki/Prompt_engineering" title="Prompt engineering">Prompt engineering</a></li> <li><a href="/wiki/Word_embedding" title="Word embedding">Word embedding</a></li> <li><a href="/wiki/Hallucination_(artificial_intelligence)" title="Hallucination (artificial intelligence)">Hallucination</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Applications</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Machine_learning" title="Machine learning">Machine learning</a> <ul><li><a href="/wiki/Prompt_engineering#In-context_learning" title="Prompt engineering">In-context learning</a></li></ul></li> <li><a href="/wiki/Neural_network_(machine_learning)" title="Neural network (machine learning)">Artificial neural network</a> <ul><li><a href="/wiki/Deep_learning" title="Deep learning">Deep learning</a></li></ul></li> <li><a href="/wiki/Language_model" title="Language model">Language model</a> <ul><li><a href="/wiki/Large_language_model" title="Large language model">Large 
language model</a></li> <li><a href="/wiki/Neural_machine_translation" title="Neural machine translation">NMT</a></li></ul></li> <li><a href="/wiki/Artificial_general_intelligence" title="Artificial general intelligence">Artificial general intelligence</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Implementations</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"></div><table class="nowraplinks navbox-subgroup" style="border-spacing:0"><tbody><tr><th scope="row" class="navbox-group" style="width:1%">Audio–visual</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/AlexNet" title="AlexNet">AlexNet</a></li> <li><a href="/wiki/WaveNet" title="WaveNet">WaveNet</a></li> <li><a href="/wiki/Human_image_synthesis" title="Human image synthesis">Human image synthesis</a></li> <li><a href="/wiki/Handwriting_recognition" title="Handwriting recognition">HWR</a></li> <li><a href="/wiki/Optical_character_recognition" title="Optical character recognition">OCR</a></li> <li><a href="/wiki/Deep_learning_speech_synthesis" title="Deep learning speech synthesis">Speech synthesis</a> <ul><li><a href="/wiki/ElevenLabs" title="ElevenLabs">ElevenLabs</a></li></ul></li> <li><a href="/wiki/Speech_recognition" title="Speech recognition">Speech recognition</a> <ul><li><a href="/wiki/Whisper_(speech_recognition_system)" title="Whisper (speech recognition system)">Whisper</a></li></ul></li> <li><a href="/wiki/Facial_recognition_system" title="Facial recognition system">Facial recognition</a></li> <li><a href="/wiki/AlphaFold" title="AlphaFold">AlphaFold</a></li> <li><a href="/wiki/Text-to-image_model" title="Text-to-image model">Text-to-image models</a> <ul><li><a href="/wiki/DALL-E" title="DALL-E">DALL-E</a></li> <li><a href="/wiki/Flux_(text-to-image_model)" title="Flux (text-to-image 
model)">Flux</a></li> <li><a href="/wiki/Ideogram_(text-to-image_model)" title="Ideogram (text-to-image model)">Ideogram</a></li> <li><a href="/wiki/Midjourney" title="Midjourney">Midjourney</a></li> <li><a class="mw-selflink selflink">Stable Diffusion</a></li></ul></li> <li><a href="/wiki/Text-to-video_model" title="Text-to-video model">Text-to-video models</a> <ul><li><a href="/wiki/Sora_(text-to-video_model)" title="Sora (text-to-video model)">Sora</a></li> <li><a href="/wiki/Dream_Machine_(text-to-video_model)" title="Dream Machine (text-to-video model)">Dream Machine</a></li> <li><a href="/wiki/VideoPoet" title="VideoPoet">VideoPoet</a></li></ul></li> <li><a href="/wiki/Music_and_artificial_intelligence" title="Music and artificial intelligence">Music generation</a> <ul><li><a href="/wiki/Suno_AI" title="Suno AI">Suno AI</a></li> <li><a href="/wiki/Udio" title="Udio">Udio</a></li></ul></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Text</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Word2vec" title="Word2vec">Word2vec</a></li> <li><a href="/wiki/Seq2seq" title="Seq2seq">Seq2seq</a></li> <li><a href="/wiki/GloVe" title="GloVe">GloVe</a></li> <li><a href="/wiki/BERT_(language_model)" title="BERT (language model)">BERT</a></li> <li><a href="/wiki/T5_(language_model)" title="T5 (language model)">T5</a></li> <li><a href="/wiki/Llama_(language_model)" title="Llama (language model)">Llama</a></li> <li><a href="/wiki/Chinchilla_(language_model)" title="Chinchilla (language model)">Chinchilla AI</a></li> <li><a href="/wiki/PaLM" title="PaLM">PaLM</a></li> <li><a href="/wiki/Generative_pre-trained_transformer" title="Generative pre-trained transformer">GPT</a> <ul><li><a href="/wiki/GPT-1" title="GPT-1">1</a></li> <li><a href="/wiki/GPT-2" title="GPT-2">2</a></li> <li><a href="/wiki/GPT-3" title="GPT-3">3</a></li> <li><a 
href="/wiki/GPT-J" title="GPT-J">J</a></li> <li><a href="/wiki/ChatGPT" title="ChatGPT">ChatGPT</a></li> <li><a href="/wiki/GPT-4" title="GPT-4">4</a></li> <li><a href="/wiki/GPT-4o" title="GPT-4o">4o</a></li> <li><a href="/wiki/OpenAI_o1" title="OpenAI o1">o1</a></li></ul></li> <li><a href="/wiki/Claude_(language_model)" title="Claude (language model)">Claude</a></li> <li><a href="/wiki/Gemini_(language_model)" title="Gemini (language model)">Gemini</a></li> <li><a href="/wiki/Grok_(chatbot)" title="Grok (chatbot)">Grok</a></li> <li><a href="/wiki/LaMDA" title="LaMDA">LaMDA</a></li> <li><a href="/wiki/BLOOM_(language_model)" title="BLOOM (language model)">BLOOM</a></li> <li><a href="/wiki/Project_Debater" title="Project Debater">Project Debater</a></li> <li><a href="/wiki/IBM_Watson" title="IBM Watson">IBM Watson</a></li> <li><a href="/wiki/IBM_Watsonx" title="IBM Watsonx">IBM Watsonx</a></li> <li><a href="/wiki/IBM_Granite" title="IBM Granite">Granite</a></li> <li><a href="/wiki/Huawei_PanGu" title="Huawei PanGu">PanGu-Σ</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Decisional</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/AlphaGo" title="AlphaGo">AlphaGo</a></li> <li><a href="/wiki/AlphaZero" title="AlphaZero">AlphaZero</a></li> <li><a href="/wiki/OpenAI_Five" title="OpenAI Five">OpenAI Five</a></li> <li><a href="/wiki/Self-driving_car" title="Self-driving car">Self-driving car</a></li> <li><a href="/wiki/MuZero" title="MuZero">MuZero</a></li> <li><a href="/wiki/Action_selection" title="Action selection">Action selection</a> <ul><li><a href="/wiki/AutoGPT" title="AutoGPT">AutoGPT</a></li></ul></li> <li><a href="/wiki/Robot_control" title="Robot control">Robot control</a></li></ul> </div></td></tr></tbody></table><div></div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">People</th><td 
class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Alan_Turing" title="Alan Turing">Alan Turing</a></li> <li><a href="/wiki/Warren_Sturgis_McCulloch" title="Warren Sturgis McCulloch">Warren Sturgis McCulloch</a></li> <li><a href="/wiki/Walter_Pitts" title="Walter Pitts">Walter Pitts</a></li> <li><a href="/wiki/John_von_Neumann" title="John von Neumann">John von Neumann</a></li> <li><a href="/wiki/Claude_Shannon" title="Claude Shannon">Claude Shannon</a></li> <li><a href="/wiki/Marvin_Minsky" title="Marvin Minsky">Marvin Minsky</a></li> <li><a href="/wiki/John_McCarthy_(computer_scientist)" title="John McCarthy (computer scientist)">John McCarthy</a></li> <li><a href="/wiki/Nathaniel_Rochester_(computer_scientist)" title="Nathaniel Rochester (computer scientist)">Nathaniel Rochester</a></li> <li><a href="/wiki/Allen_Newell" title="Allen Newell">Allen Newell</a></li> <li><a href="/wiki/Cliff_Shaw" title="Cliff Shaw">Cliff Shaw</a></li> <li><a href="/wiki/Herbert_A._Simon" title="Herbert A. Simon">Herbert A. 
Simon</a></li> <li><a href="/wiki/Oliver_Selfridge" title="Oliver Selfridge">Oliver Selfridge</a></li> <li><a href="/wiki/Frank_Rosenblatt" title="Frank Rosenblatt">Frank Rosenblatt</a></li> <li><a href="/wiki/Bernard_Widrow" title="Bernard Widrow">Bernard Widrow</a></li> <li><a href="/wiki/Joseph_Weizenbaum" title="Joseph Weizenbaum">Joseph Weizenbaum</a></li> <li><a href="/wiki/Seymour_Papert" title="Seymour Papert">Seymour Papert</a></li> <li><a href="/wiki/Seppo_Linnainmaa" title="Seppo Linnainmaa">Seppo Linnainmaa</a></li> <li><a href="/wiki/Paul_Werbos" title="Paul Werbos">Paul Werbos</a></li> <li><a href="/wiki/J%C3%BCrgen_Schmidhuber" title="Jürgen Schmidhuber">Jürgen Schmidhuber</a></li> <li><a href="/wiki/Yann_LeCun" title="Yann LeCun">Yann LeCun</a></li> <li><a href="/wiki/Geoffrey_Hinton" title="Geoffrey Hinton">Geoffrey Hinton</a></li> <li><a href="/wiki/John_Hopfield" title="John Hopfield">John Hopfield</a></li> <li><a href="/wiki/Yoshua_Bengio" title="Yoshua Bengio">Yoshua Bengio</a></li> <li><a href="/wiki/Lotfi_A._Zadeh" title="Lotfi A. Zadeh">Lotfi A. 
Zadeh</a></li> <li><a href="/wiki/Stephen_Grossberg" title="Stephen Grossberg">Stephen Grossberg</a></li> <li><a href="/wiki/Alex_Graves_(computer_scientist)" title="Alex Graves (computer scientist)">Alex Graves</a></li> <li><a href="/wiki/Andrew_Ng" title="Andrew Ng">Andrew Ng</a></li> <li><a href="/wiki/Fei-Fei_Li" title="Fei-Fei Li">Fei-Fei Li</a></li> <li><a href="/wiki/Alex_Krizhevsky" title="Alex Krizhevsky">Alex Krizhevsky</a></li> <li><a href="/wiki/Ilya_Sutskever" title="Ilya Sutskever">Ilya Sutskever</a></li> <li><a href="/wiki/Demis_Hassabis" title="Demis Hassabis">Demis Hassabis</a></li> <li><a href="/wiki/David_Silver_(computer_scientist)" title="David Silver (computer scientist)">David Silver</a></li> <li><a href="/wiki/Ian_Goodfellow" title="Ian Goodfellow">Ian Goodfellow</a></li> <li><a href="/wiki/Andrej_Karpathy" title="Andrej Karpathy">Andrej Karpathy</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Organizations</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Anthropic" title="Anthropic">Anthropic</a></li> <li><a href="/wiki/EleutherAI" title="EleutherAI">EleutherAI</a></li> <li><a href="/wiki/Google_DeepMind" title="Google DeepMind">Google DeepMind</a></li> <li><a href="/wiki/Hugging_Face" title="Hugging Face">Hugging Face</a></li> <li><a href="/wiki/Kuaishou" title="Kuaishou">Kuaishou</a></li> <li><a href="/wiki/Meta_AI" title="Meta AI">Meta AI</a></li> <li><a href="/wiki/Mila_(research_institute)" title="Mila (research institute)">Mila</a></li> <li><a href="/wiki/MiniMax_(company)" title="MiniMax (company)">MiniMax</a></li> <li><a href="/wiki/Mistral_AI" title="Mistral AI">Mistral AI</a></li> <li><a href="/wiki/MIT_Computer_Science_and_Artificial_Intelligence_Laboratory" title="MIT Computer Science and Artificial Intelligence Laboratory">MIT CSAIL</a></li> <li><a href="/wiki/OpenAI" 
title="OpenAI">OpenAI</a></li> <li><a href="/wiki/Runway_(company)" title="Runway (company)">Runway</a></li> <li><a href="/wiki/Stability_AI" title="Stability AI">Stability AI</a></li> <li><a href="/wiki/XAI_(company)" title="XAI (company)">xAI</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Architectures</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Neural_Turing_machine" title="Neural Turing machine">Neural Turing machine</a></li> <li><a href="/wiki/Differentiable_neural_computer" title="Differentiable neural computer">Differentiable neural computer</a></li> <li><a href="/wiki/Transformer_(deep_learning_architecture)" title="Transformer (deep learning architecture)">Transformer</a> <ul><li><a href="/wiki/Vision_transformer" title="Vision transformer">Vision transformer (ViT)</a></li></ul></li> <li><a href="/wiki/Recurrent_neural_network" title="Recurrent neural network">Recurrent neural network (RNN)</a></li> <li><a href="/wiki/Long_short-term_memory" title="Long short-term memory">Long short-term memory (LSTM)</a></li> <li><a href="/wiki/Gated_recurrent_unit" title="Gated recurrent unit">Gated recurrent unit (GRU)</a></li> <li><a href="/wiki/Echo_state_network" title="Echo state network">Echo state network</a></li> <li><a href="/wiki/Multilayer_perceptron" title="Multilayer perceptron">Multilayer perceptron (MLP)</a></li> <li><a href="/wiki/Convolutional_neural_network" title="Convolutional neural network">Convolutional neural network (CNN)</a></li> <li><a href="/wiki/Residual_neural_network" title="Residual neural network">Residual neural network (ResNet)</a></li> <li><a href="/wiki/Highway_network" title="Highway network">Highway network</a></li> <li><a href="/wiki/Mamba_(deep_learning_architecture)" title="Mamba (deep learning architecture)">Mamba</a></li> <li><a href="/wiki/Autoencoder" 
title="Autoencoder">Autoencoder</a></li> <li><a href="/wiki/Variational_autoencoder" title="Variational autoencoder">Variational autoencoder (VAE)</a></li> <li><a href="/wiki/Generative_adversarial_network" title="Generative adversarial network">Generative adversarial network (GAN)</a></li> <li><a href="/wiki/Graph_neural_network" title="Graph neural network">Graph neural network (GNN)</a></li></ul> </div></td></tr><tr><td class="navbox-abovebelow" colspan="2"><div> <ul><li><span class="noviewer" typeof="mw:File"><a href="/wiki/File:Symbol_portal_class.svg" class="mw-file-description" title="Portal"><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/e/e2/Symbol_portal_class.svg/16px-Symbol_portal_class.svg.png" decoding="async" width="16" height="16" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/e/e2/Symbol_portal_class.svg/23px-Symbol_portal_class.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/e/e2/Symbol_portal_class.svg/31px-Symbol_portal_class.svg.png 2x" data-file-width="180" data-file-height="185" /></a></span> Portals <ul><li><a href="/wiki/Portal:Technology" title="Portal:Technology">Technology</a></li></ul></li> <li><span class="noviewer" typeof="mw:File"><span title="Category"><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/16px-Symbol_category_class.svg.png" decoding="async" width="16" height="16" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/23px-Symbol_category_class.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/31px-Symbol_category_class.svg.png 2x" data-file-width="180" data-file-height="185" /></span></span> Categories <ul><li><a href="/wiki/Category:Artificial_neural_networks" title="Category:Artificial neural networks">Artificial neural networks</a></li> <li><a href="/wiki/Category:Machine_learning" title="Category:Machine learning">Machine 
learning</a></li></ul></li></ul> </div></td></tr></tbody></table></div> <!-- NewPP limit report Parsed by mw‐api‐int.codfw.main‐6b4858cb8b‐x2gph Cached time: 20241128121844 Cache expiry: 2592000 Reduced expiry: false Complications: [vary‐revision‐sha1, show‐toc] CPU time usage: 1.112 seconds Real time usage: 1.294 seconds Preprocessor visited node count: 6675/1000000 Post‐expand include size: 257539/2097152 bytes Template argument size: 4306/2097152 bytes Highest expansion depth: 18/100 Expensive parser function count: 12/500 Unstrip recursion depth: 1/20 Unstrip post‐expand size: 385360/5000000 bytes Lua time usage: 0.719/10.000 seconds Lua memory usage: 8088570/52428800 bytes Number of Wikibase entities loaded: 2/400 --> <!-- Transclusion expansion time report (%,ms,calls,template) 100.00% 1084.646 1 -total 57.98% 628.908 1 Template:Reflist 37.38% 405.475 75 Template:Cite_web 14.48% 157.089 2 Template:Infobox 12.37% 134.168 1 Template:Infobox_software 8.15% 88.426 4 Template:Navbox 7.73% 83.807 10 Template:Cite_arXiv 7.53% 81.684 1 Template:Generative_AI 5.74% 62.244 1 Template:Short_description 3.77% 40.905 2 Template:Pagetype --> <!-- Saved in parser cache with key enwiki:pcache:idhash:71642695-0!canonical and timestamp 20241128121844 and revision id 1259657575. 
Rendering was triggered because: api-parse --> </div><!--esi <esi:include src="/esitest-fa8a495983347898/content" /> --><noscript><img src="https://login.wikimedia.org/wiki/Special:CentralAutoLogin/start?type=1x1" alt="" width="1" height="1" style="border: none; position: absolute;"></noscript> <div class="printfooter" data-nosnippet="">Retrieved from "<a dir="ltr" href="https://en.wikipedia.org/w/index.php?title=Stable_Diffusion&oldid=1259657575">https://en.wikipedia.org/w/index.php?title=Stable_Diffusion&oldid=1259657575</a>"</div></div> <div id="catlinks" class="catlinks" data-mw="interface"><div id="mw-normal-catlinks" class="mw-normal-catlinks"><a href="/wiki/Help:Category" title="Help:Category">Categories</a>: <ul><li><a href="/wiki/Category:Artificial_intelligence_art" title="Category:Artificial intelligence art">Artificial intelligence art</a></li><li><a href="/wiki/Category:Deep_learning_software_applications" title="Category:Deep learning software applications">Deep learning software applications</a></li><li><a href="/wiki/Category:Text-to-image_generation" title="Category:Text-to-image generation">Text-to-image generation</a></li><li><a href="/wiki/Category:Unsupervised_learning" title="Category:Unsupervised learning">Unsupervised learning</a></li><li><a href="/wiki/Category:Art_controversies" title="Category:Art controversies">Art controversies</a></li><li><a href="/wiki/Category:Works_involved_in_plagiarism_controversies" title="Category:Works involved in plagiarism controversies">Works involved in plagiarism controversies</a></li><li><a href="/wiki/Category:2022_software" title="Category:2022 software">2022 software</a></li><li><a href="/wiki/Category:Open-source_artificial_intelligence" title="Category:Open-source artificial intelligence">Open-source artificial intelligence</a></li></ul></div><div id="mw-hidden-catlinks" class="mw-hidden-catlinks mw-hidden-cats-hidden">Hidden categories: <ul><li><a 
href="/wiki/Category:CS1_maint:_multiple_names:_authors_list" title="Category:CS1 maint: multiple names: authors list">CS1 maint: multiple names: authors list</a></li><li><a href="/wiki/Category:CS1_maint:_numeric_names:_authors_list" title="Category:CS1 maint: numeric names: authors list">CS1 maint: numeric names: authors list</a></li><li><a href="/wiki/Category:CS1_Japanese-language_sources_(ja)" title="Category:CS1 Japanese-language sources (ja)">CS1 Japanese-language sources (ja)</a></li><li><a href="/wiki/Category:Articles_with_short_description" title="Category:Articles with short description">Articles with short description</a></li><li><a href="/wiki/Category:Short_description_matches_Wikidata" title="Category:Short description matches Wikidata">Short description matches Wikidata</a></li><li><a href="/wiki/Category:Use_mdy_dates_from_October_2023" title="Category:Use mdy dates from October 2023">Use mdy dates from October 2023</a></li><li><a href="/wiki/Category:All_articles_with_unsourced_statements" title="Category:All articles with unsourced statements">All articles with unsourced statements</a></li><li><a href="/wiki/Category:Articles_with_unsourced_statements_from_October_2023" title="Category:Articles with unsourced statements from October 2023">Articles with unsourced statements from October 2023</a></li><li><a href="/wiki/Category:Pages_using_multiple_image_with_auto_scaled_images" title="Category:Pages using multiple image with auto scaled images">Pages using multiple image with auto scaled images</a></li><li><a href="/wiki/Category:Commons_category_link_from_Wikidata" title="Category:Commons category link from Wikidata">Commons category link from Wikidata</a></li></ul></div></div> </div> </main> </div> <div class="mw-footer-container"> <footer id="footer" class="mw-footer" > <ul id="footer-info"> <li id="footer-info-lastmod"> This page was last edited on 26 November 2024, at 10:28<span class="anonymous-show"> (UTC)</span>.</li> <li 
id="footer-info-copyright">Text is available under the <a href="/wiki/Wikipedia:Text_of_the_Creative_Commons_Attribution-ShareAlike_4.0_International_License" title="Wikipedia:Text of the Creative Commons Attribution-ShareAlike 4.0 International License">Creative Commons Attribution-ShareAlike 4.0 License</a>; additional terms may apply. By using this site, you agree to the <a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Terms_of_Use" class="extiw" title="foundation:Special:MyLanguage/Policy:Terms of Use">Terms of Use</a> and <a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Privacy_policy" class="extiw" title="foundation:Special:MyLanguage/Policy:Privacy policy">Privacy Policy</a>. Wikipedia® is a registered trademark of the <a rel="nofollow" class="external text" href="https://wikimediafoundation.org/">Wikimedia Foundation, Inc.</a>, a non-profit organization.</li> </ul> <ul id="footer-places"> <li id="footer-places-privacy"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Privacy_policy">Privacy policy</a></li> <li id="footer-places-about"><a href="/wiki/Wikipedia:About">About Wikipedia</a></li> <li id="footer-places-disclaimers"><a href="/wiki/Wikipedia:General_disclaimer">Disclaimers</a></li> <li id="footer-places-contact"><a href="//en.wikipedia.org/wiki/Wikipedia:Contact_us">Contact Wikipedia</a></li> <li id="footer-places-wm-codeofconduct"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Universal_Code_of_Conduct">Code of Conduct</a></li> <li id="footer-places-developers"><a href="https://developer.wikimedia.org">Developers</a></li> <li id="footer-places-statslink"><a href="https://stats.wikimedia.org/#/en.wikipedia.org">Statistics</a></li> <li id="footer-places-cookiestatement"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Cookie_statement">Cookie statement</a></li> <li id="footer-places-mobileview"><a 
href="//en.m.wikipedia.org/w/index.php?title=Stable_Diffusion&mobileaction=toggle_view_mobile" class="noprint stopMobileRedirectToggle">Mobile view</a></li> </ul> <ul id="footer-icons" class="noprint"> <li id="footer-copyrightico"><a href="https://wikimediafoundation.org/" class="cdx-button cdx-button--fake-button cdx-button--size-large cdx-button--fake-button--enabled"><img src="/static/images/footer/wikimedia-button.svg" width="84" height="29" alt="Wikimedia Foundation" loading="lazy"></a></li> <li id="footer-poweredbyico"><a href="https://www.mediawiki.org/" class="cdx-button cdx-button--fake-button cdx-button--size-large cdx-button--fake-button--enabled"><img src="/w/resources/assets/poweredby_mediawiki.svg" alt="Powered by MediaWiki" width="88" height="31" loading="lazy"></a></li> </ul> </footer> </div> </div> </div> <div class="vector-settings" id="p-dock-bottom"> <ul></ul> </div><script>(RLQ=window.RLQ||[]).push(function(){mw.config.set({"wgHostname":"mw-web.codfw.main-847495b4dd-rbf9z","wgBackendResponseTime":159,"wgPageParseReport":{"limitreport":{"cputime":"1.112","walltime":"1.294","ppvisitednodes":{"value":6675,"limit":1000000},"postexpandincludesize":{"value":257539,"limit":2097152},"templateargumentsize":{"value":4306,"limit":2097152},"expansiondepth":{"value":18,"limit":100},"expensivefunctioncount":{"value":12,"limit":500},"unstrip-depth":{"value":1,"limit":20},"unstrip-size":{"value":385360,"limit":5000000},"entityaccesscount":{"value":2,"limit":400},"timingprofile":["100.00% 1084.646 1 -total"," 57.98% 628.908 1 Template:Reflist"," 37.38% 405.475 75 Template:Cite_web"," 14.48% 157.089 2 Template:Infobox"," 12.37% 134.168 1 Template:Infobox_software"," 8.15% 88.426 4 Template:Navbox"," 7.73% 83.807 10 Template:Cite_arXiv"," 7.53% 81.684 1 Template:Generative_AI"," 5.74% 62.244 1 Template:Short_description"," 3.77% 40.905 2 
Template:Pagetype"]},"scribunto":{"limitreport-timeusage":{"value":"0.719","limit":"10.000"},"limitreport-memusage":{"value":8088570,"limit":52428800}},"cachereport":{"origin":"mw-api-int.codfw.main-6b4858cb8b-x2gph","timestamp":"20241128121844","ttl":2592000,"transientcontent":false}}});});</script> <script type="application/ld+json">{"@context":"https:\/\/schema.org","@type":"Article","name":"Stable Diffusion","url":"https:\/\/en.wikipedia.org\/wiki\/Stable_Diffusion","sameAs":"http:\/\/www.wikidata.org\/entity\/Q113660857","mainEntity":"http:\/\/www.wikidata.org\/entity\/Q113660857","author":{"@type":"Organization","name":"Contributors to Wikimedia projects"},"publisher":{"@type":"Organization","name":"Wikimedia Foundation, Inc.","logo":{"@type":"ImageObject","url":"https:\/\/www.wikimedia.org\/static\/images\/wmf-hor-googpub.png"}},"datePublished":"2022-08-31T09:19:32Z","dateModified":"2024-11-26T10:28:16Z","image":"https:\/\/upload.wikimedia.org\/wikipedia\/commons\/8\/82\/Astronaut_Riding_a_Horse_%28SD3.5%29.webp","headline":"image-generating machine learning model"}</script> </body> </html>