Parallel computing
[o]" accesskey="o"><span class="vector-icon mw-ui-icon-logIn mw-ui-icon-wikimedia-logIn"></span> <span>Log in</span></a></li> </ul> </div> </div> <div id="p-user-menu-anon-editor" class="vector-menu mw-portlet mw-portlet-user-menu-anon-editor" > <div class="vector-menu-heading"> Pages for logged out editors <a href="/wiki/Help:Introduction" aria-label="Learn more about editing"><span>learn more</span></a> </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-anoncontribs" class="mw-list-item"><a href="/wiki/Special:MyContributions" title="A list of edits made from this IP address [y]" accesskey="y"><span>Contributions</span></a></li><li id="pt-anontalk" class="mw-list-item"><a href="/wiki/Special:MyTalk" title="Discussion about edits from this IP address [n]" accesskey="n"><span>Talk</span></a></li> </ul> </div> </div> </div> </div> </nav> </div> </header> </div> <div class="mw-page-container"> <div class="mw-page-container-inner"> <div class="vector-sitenotice-container"> <div id="siteNotice"><!-- CentralNotice --></div> </div> <div class="vector-column-start"> <div class="vector-main-menu-container"> <div id="mw-navigation"> <nav id="mw-panel" class="vector-main-menu-landmark" aria-label="Site"> <div id="vector-main-menu-pinned-container" class="vector-pinned-container"> </div> </nav> </div> </div> <div class="vector-sticky-pinned-container"> <nav id="mw-panel-toc" aria-label="Contents" data-event-name="ui.sidebar-toc" class="mw-table-of-contents-container vector-toc-landmark"> <div id="vector-toc-pinned-container" class="vector-pinned-container"> <div id="vector-toc" class="vector-toc vector-pinnable-element"> <div class="vector-pinnable-header vector-toc-pinnable-header vector-pinnable-header-pinned" data-feature-name="toc-pinned" data-pinnable-element-id="vector-toc" > <h2 class="vector-pinnable-header-label">Contents</h2> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-toc.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-toc.unpin">hide</button> </div> <ul class="vector-toc-contents" id="mw-panel-toc-list"> <li id="toc-mw-content-text" class="vector-toc-list-item vector-toc-level-1"> <a href="#" class="vector-toc-link"> <div class="vector-toc-text">(Top)</div> </a> </li> <li id="toc-Background" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Background"> <div class="vector-toc-text"> <span class="vector-toc-numb">1</span> <span>Background</span> </div> </a> <button aria-controls="toc-Background-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Background subsection</span> </button> <ul id="toc-Background-sublist" class="vector-toc-list"> <li id="toc-Relevant_laws" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Relevant_laws"> <div class="vector-toc-text"> <span class="vector-toc-numb">1.1</span> <span>Relevant laws</span> </div> </a> <ul id="toc-Relevant_laws-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Dependencies" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Dependencies"> <div class="vector-toc-text"> <span class="vector-toc-numb">1.2</span> <span>Dependencies</span> </div> </a> <ul id="toc-Dependencies-sublist" 
class="vector-toc-list"> </ul> </li> <li id="toc-Race_conditions,_mutual_exclusion,_synchronization,_and_parallel_slowdown" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Race_conditions,_mutual_exclusion,_synchronization,_and_parallel_slowdown"> <div class="vector-toc-text"> <span class="vector-toc-numb">1.3</span> <span>Race conditions, mutual exclusion, synchronization, and parallel slowdown</span> </div> </a> <ul id="toc-Race_conditions,_mutual_exclusion,_synchronization,_and_parallel_slowdown-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Fine-grained,_coarse-grained,_and_embarrassing_parallelism" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Fine-grained,_coarse-grained,_and_embarrassing_parallelism"> <div class="vector-toc-text"> <span class="vector-toc-numb">1.4</span> <span>Fine-grained, coarse-grained, and embarrassing parallelism</span> </div> </a> <ul id="toc-Fine-grained,_coarse-grained,_and_embarrassing_parallelism-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Flynn's_taxonomy" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Flynn's_taxonomy"> <div class="vector-toc-text"> <span class="vector-toc-numb">1.5</span> <span>Flynn's taxonomy</span> </div> </a> <ul id="toc-Flynn's_taxonomy-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Disadvantages" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Disadvantages"> <div class="vector-toc-text"> <span class="vector-toc-numb">2</span> <span>Disadvantages</span> </div> </a> <ul id="toc-Disadvantages-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Granularity" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Granularity"> <div class="vector-toc-text"> <span class="vector-toc-numb">3</span> <span>Granularity</span> </div> </a> <button aria-controls="toc-Granularity-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Granularity subsection</span> </button> <ul id="toc-Granularity-sublist" class="vector-toc-list"> <li id="toc-Bit-level_parallelism" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Bit-level_parallelism"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.1</span> <span>Bit-level parallelism</span> </div> </a> <ul id="toc-Bit-level_parallelism-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Instruction-level_parallelism" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Instruction-level_parallelism"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.2</span> <span>Instruction-level parallelism</span> </div> </a> <ul id="toc-Instruction-level_parallelism-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Task_parallelism" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Task_parallelism"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.3</span> <span>Task parallelism</span> </div> </a> <ul id="toc-Task_parallelism-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Superword_level_parallelism" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Superword_level_parallelism"> <div class="vector-toc-text"> <span class="vector-toc-numb">3.4</span> <span>Superword level parallelism</span> </div> </a> 
<ul id="toc-Superword_level_parallelism-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Hardware" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Hardware"> <div class="vector-toc-text"> <span class="vector-toc-numb">4</span> <span>Hardware</span> </div> </a> <button aria-controls="toc-Hardware-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Hardware subsection</span> </button> <ul id="toc-Hardware-sublist" class="vector-toc-list"> <li id="toc-Memory_and_communication" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Memory_and_communication"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.1</span> <span>Memory and communication</span> </div> </a> <ul id="toc-Memory_and_communication-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Classes_of_parallel_computers" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Classes_of_parallel_computers"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2</span> <span>Classes of parallel computers</span> </div> </a> <ul id="toc-Classes_of_parallel_computers-sublist" class="vector-toc-list"> <li id="toc-Multi-core_computing" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Multi-core_computing"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2.1</span> <span>Multi-core computing</span> </div> </a> <ul id="toc-Multi-core_computing-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Symmetric_multiprocessing" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Symmetric_multiprocessing"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2.2</span> <span>Symmetric multiprocessing</span> </div> </a> <ul id="toc-Symmetric_multiprocessing-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Distributed_computing" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Distributed_computing"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2.3</span> <span>Distributed computing</span> </div> </a> <ul id="toc-Distributed_computing-sublist" class="vector-toc-list"> <li id="toc-Cluster_computing" class="vector-toc-list-item vector-toc-level-4"> <a class="vector-toc-link" href="#Cluster_computing"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2.3.1</span> <span>Cluster computing</span> </div> </a> <ul id="toc-Cluster_computing-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Massively_parallel_computing" class="vector-toc-list-item vector-toc-level-4"> <a class="vector-toc-link" href="#Massively_parallel_computing"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2.3.2</span> <span>Massively parallel computing</span> </div> </a> <ul id="toc-Massively_parallel_computing-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Grid_computing" class="vector-toc-list-item vector-toc-level-4"> <a class="vector-toc-link" href="#Grid_computing"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2.3.3</span> <span>Grid computing</span> </div> </a> <ul id="toc-Grid_computing-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Cloud_computing" class="vector-toc-list-item vector-toc-level-4"> <a class="vector-toc-link" href="#Cloud_computing"> <div class="vector-toc-text"> <span 
class="vector-toc-numb">4.2.3.4</span> <span>Cloud computing</span> </div> </a> <ul id="toc-Cloud_computing-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Specialized_parallel_computers" class="vector-toc-list-item vector-toc-level-3"> <a class="vector-toc-link" href="#Specialized_parallel_computers"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2.4</span> <span>Specialized parallel computers</span> </div> </a> <ul id="toc-Specialized_parallel_computers-sublist" class="vector-toc-list"> <li id="toc-Reconfigurable_computing_with_field-programmable_gate_arrays" class="vector-toc-list-item vector-toc-level-4"> <a class="vector-toc-link" href="#Reconfigurable_computing_with_field-programmable_gate_arrays"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2.4.1</span> <span>Reconfigurable computing with field-programmable gate arrays</span> </div> </a> <ul id="toc-Reconfigurable_computing_with_field-programmable_gate_arrays-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-General-purpose_computing_on_graphics_processing_units_(GPGPU)" class="vector-toc-list-item vector-toc-level-4"> <a class="vector-toc-link" href="#General-purpose_computing_on_graphics_processing_units_(GPGPU)"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2.4.2</span> <span>General-purpose computing on graphics processing units (GPGPU)</span> </div> </a> <ul id="toc-General-purpose_computing_on_graphics_processing_units_(GPGPU)-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Application-specific_integrated_circuits" class="vector-toc-list-item vector-toc-level-4"> <a class="vector-toc-link" href="#Application-specific_integrated_circuits"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2.4.3</span> <span>Application-specific integrated circuits</span> </div> </a> <ul id="toc-Application-specific_integrated_circuits-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Vector_processors" class="vector-toc-list-item vector-toc-level-4"> <a class="vector-toc-link" href="#Vector_processors"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2.4.4</span> <span>Vector processors</span> </div> </a> <ul id="toc-Vector_processors-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> </ul> </li> </ul> </li> <li id="toc-Software" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Software"> <div class="vector-toc-text"> <span class="vector-toc-numb">5</span> <span>Software</span> </div> </a> <button aria-controls="toc-Software-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Software subsection</span> </button> <ul id="toc-Software-sublist" class="vector-toc-list"> <li id="toc-Parallel_programming_languages" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Parallel_programming_languages"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.1</span> <span>Parallel programming languages</span> </div> </a> <ul id="toc-Parallel_programming_languages-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Automatic_parallelization" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Automatic_parallelization"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.2</span> <span>Automatic parallelization</span> </div> </a> <ul id="toc-Automatic_parallelization-sublist" 
class="vector-toc-list"> </ul> </li> <li id="toc-Application_checkpointing" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Application_checkpointing"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.3</span> <span>Application checkpointing</span> </div> </a> <ul id="toc-Application_checkpointing-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Algorithmic_methods" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Algorithmic_methods"> <div class="vector-toc-text"> <span class="vector-toc-numb">6</span> <span>Algorithmic methods</span> </div> </a> <ul id="toc-Algorithmic_methods-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Fault_tolerance" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Fault_tolerance"> <div class="vector-toc-text"> <span class="vector-toc-numb">7</span> <span>Fault tolerance</span> </div> </a> <ul id="toc-Fault_tolerance-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-History" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#History"> <div class="vector-toc-text"> <span class="vector-toc-numb">8</span> <span>History</span> </div> </a> <ul id="toc-History-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Biological_brain_as_massively_parallel_computer" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Biological_brain_as_massively_parallel_computer"> <div class="vector-toc-text"> <span class="vector-toc-numb">9</span> <span>Biological brain as massively parallel computer</span> </div> </a> <ul id="toc-Biological_brain_as_massively_parallel_computer-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-See_also" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#See_also"> <div class="vector-toc-text"> <span class="vector-toc-numb">10</span> <span>See also</span> </div> </a> <ul id="toc-See_also-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-References" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#References"> <div class="vector-toc-text"> <span class="vector-toc-numb">11</span> <span>References</span> </div> </a> <ul id="toc-References-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Further_reading" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Further_reading"> <div class="vector-toc-text"> <span class="vector-toc-numb">12</span> <span>Further reading</span> </div> </a> <ul id="toc-Further_reading-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-External_links" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#External_links"> <div class="vector-toc-text"> <span class="vector-toc-numb">13</span> <span>External links</span> </div> </a> <ul id="toc-External_links-sublist" class="vector-toc-list"> </ul> </li> </ul> </div> </div> </nav> </div> </div> <div class="mw-content-container"> <main id="content" class="mw-body"> <header class="mw-body-header vector-page-titlebar"> <nav aria-label="Contents" class="vector-toc-landmark"> <div id="vector-page-titlebar-toc" class="vector-dropdown vector-page-titlebar-toc vector-button-flush-left" > <input type="checkbox" id="vector-page-titlebar-toc-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-titlebar-toc" class="vector-dropdown-checkbox " aria-label="Toggle the table of contents" > <label 
id="vector-page-titlebar-toc-label" for="vector-page-titlebar-toc-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-listBullet mw-ui-icon-wikimedia-listBullet"></span> <span class="vector-dropdown-label-text">Toggle the table of contents</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-titlebar-toc-unpinned-container" class="vector-unpinned-container"> </div> </div> </div> </nav> <h1 id="firstHeading" class="firstHeading mw-first-heading"><span class="mw-page-title-main">Parallel computing</span></h1> <div id="p-lang-btn" class="vector-dropdown mw-portlet mw-portlet-lang" > <input type="checkbox" id="p-lang-btn-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-p-lang-btn" class="vector-dropdown-checkbox mw-interlanguage-selector" aria-label="Go to an article in another language. Available in 53 languages" > <label id="p-lang-btn-label" for="p-lang-btn-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--action-progressive mw-portlet-lang-heading-53" aria-hidden="true" ><span class="vector-icon mw-ui-icon-language-progressive mw-ui-icon-wikimedia-language-progressive"></span> <span class="vector-dropdown-label-text">53 languages</span> </label> <div class="vector-dropdown-content"> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li class="interlanguage-link interwiki-ar badge-Q17437796 badge-featuredarticle mw-list-item" title="featured article badge"><a href="https://ar.wikipedia.org/wiki/%D8%AD%D9%88%D8%B3%D8%A8%D8%A9_%D9%85%D8%AA%D9%88%D8%A7%D8%B2%D9%8A%D8%A9" title="حوسبة متوازية – Arabic" lang="ar" hreflang="ar" data-title="حوسبة متوازية" data-language-autonym="العربية" data-language-local-name="Arabic" class="interlanguage-link-target"><span>العربية</span></a></li><li class="interlanguage-link interwiki-az mw-list-item"><a href="https://az.wikipedia.org/wiki/Paralel_hesablama" title="Paralel hesablama – Azerbaijani" lang="az" hreflang="az" data-title="Paralel hesablama" data-language-autonym="Azərbaycanca" data-language-local-name="Azerbaijani" class="interlanguage-link-target"><span>Azərbaycanca</span></a></li><li class="interlanguage-link interwiki-bn mw-list-item"><a href="https://bn.wikipedia.org/wiki/%E0%A6%B8%E0%A6%AE%E0%A6%BE%E0%A6%A8%E0%A7%8D%E0%A6%A4%E0%A6%B0%E0%A6%BE%E0%A6%B2_%E0%A6%95%E0%A6%AE%E0%A7%8D%E0%A6%AA%E0%A6%BF%E0%A6%89%E0%A6%9F%E0%A6%BF%E0%A6%82" title="সমান্তরাল কম্পিউটিং – Bangla" lang="bn" hreflang="bn" data-title="সমান্তরাল কম্পিউটিং" data-language-autonym="বাংলা" data-language-local-name="Bangla" class="interlanguage-link-target"><span>বাংলা</span></a></li><li class="interlanguage-link interwiki-bg mw-list-item"><a href="https://bg.wikipedia.org/wiki/%D0%9F%D0%B0%D1%80%D0%B0%D0%BB%D0%B5%D0%BB%D0%BD%D0%B8_%D0%B8%D0%B7%D1%87%D0%B8%D1%81%D0%BB%D0%B5%D0%BD%D0%B8%D1%8F" title="Паралелни изчисления – Bulgarian" lang="bg" hreflang="bg" data-title="Паралелни изчисления" data-language-autonym="Български" data-language-local-name="Bulgarian" class="interlanguage-link-target"><span>Български</span></a></li><li class="interlanguage-link interwiki-bs mw-list-item"><a href="https://bs.wikipedia.org/wiki/Paralelni_sistemi" title="Paralelni sistemi – Bosnian" lang="bs" hreflang="bs" data-title="Paralelni sistemi" data-language-autonym="Bosanski" 
data-language-local-name="Bosnian" class="interlanguage-link-target"><span>Bosanski</span></a></li><li class="interlanguage-link interwiki-ca mw-list-item"><a href="https://ca.wikipedia.org/wiki/Computaci%C3%B3_paral%C2%B7lela" title="Computació paral·lela – Catalan" lang="ca" hreflang="ca" data-title="Computació paral·lela" data-language-autonym="Català" data-language-local-name="Catalan" class="interlanguage-link-target"><span>Català</span></a></li><li class="interlanguage-link interwiki-cs mw-list-item"><a href="https://cs.wikipedia.org/wiki/Paraleln%C3%AD_v%C3%BDpo%C4%8Dty" title="Paralelní výpočty – Czech" lang="cs" hreflang="cs" data-title="Paralelní výpočty" data-language-autonym="Čeština" data-language-local-name="Czech" class="interlanguage-link-target"><span>Čeština</span></a></li><li class="interlanguage-link interwiki-de mw-list-item"><a href="https://de.wikipedia.org/wiki/Parallelrechner" title="Parallelrechner – German" lang="de" hreflang="de" data-title="Parallelrechner" data-language-autonym="Deutsch" data-language-local-name="German" class="interlanguage-link-target"><span>Deutsch</span></a></li><li class="interlanguage-link interwiki-et mw-list-item"><a href="https://et.wikipedia.org/wiki/Paralleelarvutus" title="Paralleelarvutus – Estonian" lang="et" hreflang="et" data-title="Paralleelarvutus" data-language-autonym="Eesti" data-language-local-name="Estonian" class="interlanguage-link-target"><span>Eesti</span></a></li><li class="interlanguage-link interwiki-el mw-list-item"><a href="https://el.wikipedia.org/wiki/%CE%A0%CE%B1%CF%81%CE%AC%CE%BB%CE%BB%CE%B7%CE%BB%CE%BF%CF%82_%CF%80%CF%81%CE%BF%CE%B3%CF%81%CE%B1%CE%BC%CE%BC%CE%B1%CF%84%CE%B9%CF%83%CE%BC%CF%8C%CF%82" title="Παράλληλος προγραμματισμός – Greek" lang="el" hreflang="el" data-title="Παράλληλος προγραμματισμός" data-language-autonym="Ελληνικά" data-language-local-name="Greek" class="interlanguage-link-target"><span>Ελληνικά</span></a></li><li class="interlanguage-link interwiki-es mw-list-item"><a href="https://es.wikipedia.org/wiki/Computaci%C3%B3n_paralela" title="Computación paralela – Spanish" lang="es" hreflang="es" data-title="Computación paralela" data-language-autonym="Español" data-language-local-name="Spanish" class="interlanguage-link-target"><span>Español</span></a></li><li class="interlanguage-link interwiki-eo mw-list-item"><a href="https://eo.wikipedia.org/wiki/Paralela_komputado" title="Paralela komputado – Esperanto" lang="eo" hreflang="eo" data-title="Paralela komputado" data-language-autonym="Esperanto" data-language-local-name="Esperanto" class="interlanguage-link-target"><span>Esperanto</span></a></li><li class="interlanguage-link interwiki-eu mw-list-item"><a href="https://eu.wikipedia.org/wiki/Konputazio_paralelo" title="Konputazio paralelo – Basque" lang="eu" hreflang="eu" data-title="Konputazio paralelo" data-language-autonym="Euskara" data-language-local-name="Basque" class="interlanguage-link-target"><span>Euskara</span></a></li><li class="interlanguage-link interwiki-fa mw-list-item"><a href="https://fa.wikipedia.org/wiki/%D8%B1%D8%A7%DB%8C%D8%A7%D9%86%D8%B4_%D9%85%D9%88%D8%A7%D8%B2%DB%8C" title="رایانش موازی – Persian" lang="fa" hreflang="fa" data-title="رایانش موازی" data-language-autonym="فارسی" data-language-local-name="Persian" class="interlanguage-link-target"><span>فارسی</span></a></li><li class="interlanguage-link interwiki-fr mw-list-item"><a href="https://fr.wikipedia.org/wiki/Parall%C3%A9lisme_(informatique)" title="Parallélisme (informatique) – French" lang="fr" hreflang="fr" 
data-title="Parallélisme (informatique)" data-language-autonym="Français" data-language-local-name="French" class="interlanguage-link-target"><span>Français</span></a></li><li class="interlanguage-link interwiki-gl mw-list-item"><a href="https://gl.wikipedia.org/wiki/Computaci%C3%B3n_paralela" title="Computación paralela – Galician" lang="gl" hreflang="gl" data-title="Computación paralela" data-language-autonym="Galego" data-language-local-name="Galician" class="interlanguage-link-target"><span>Galego</span></a></li><li class="interlanguage-link interwiki-gu mw-list-item"><a href="https://gu.wikipedia.org/wiki/%E0%AA%B8%E0%AA%AE%E0%AA%BE%E0%AA%82%E0%AA%A4%E0%AA%B0_%E0%AA%95%E0%AA%AE%E0%AB%8D%E0%AA%AA%E0%AB%8D%E0%AA%AF%E0%AB%81%E0%AA%9F%E0%AA%BF%E0%AA%82%E0%AA%97" title="સમાંતર કમ્પ્યુટિંગ – Gujarati" lang="gu" hreflang="gu" data-title="સમાંતર કમ્પ્યુટિંગ" data-language-autonym="ગુજરાતી" data-language-local-name="Gujarati" class="interlanguage-link-target"><span>ગુજરાતી</span></a></li><li class="interlanguage-link interwiki-ko mw-list-item"><a href="https://ko.wikipedia.org/wiki/%EB%B3%91%EB%A0%AC_%EC%BB%B4%ED%93%A8%ED%8C%85" title="병렬 컴퓨팅 – Korean" lang="ko" hreflang="ko" data-title="병렬 컴퓨팅" data-language-autonym="한국어" data-language-local-name="Korean" class="interlanguage-link-target"><span>한국어</span></a></li><li class="interlanguage-link interwiki-hi mw-list-item"><a href="https://hi.wikipedia.org/wiki/%E0%A4%AF%E0%A5%81%E0%A4%97%E0%A4%AA%E0%A4%A4_%E0%A4%85%E0%A4%AD%E0%A4%BF%E0%A4%95%E0%A4%B2%E0%A4%A8" title="युगपत अभिकलन – Hindi" lang="hi" hreflang="hi" data-title="युगपत अभिकलन" data-language-autonym="हिन्दी" data-language-local-name="Hindi" class="interlanguage-link-target"><span>हिन्दी</span></a></li><li class="interlanguage-link interwiki-hr mw-list-item"><a href="https://hr.wikipedia.org/wiki/Paralelna_obrada" title="Paralelna obrada – Croatian" lang="hr" hreflang="hr" data-title="Paralelna obrada" data-language-autonym="Hrvatski" data-language-local-name="Croatian" class="interlanguage-link-target"><span>Hrvatski</span></a></li><li class="interlanguage-link interwiki-id mw-list-item"><a href="https://id.wikipedia.org/wiki/Komputasi_paralel" title="Komputasi paralel – Indonesian" lang="id" hreflang="id" data-title="Komputasi paralel" data-language-autonym="Bahasa Indonesia" data-language-local-name="Indonesian" class="interlanguage-link-target"><span>Bahasa Indonesia</span></a></li><li class="interlanguage-link interwiki-it mw-list-item"><a href="https://it.wikipedia.org/wiki/Calcolo_parallelo" title="Calcolo parallelo – Italian" lang="it" hreflang="it" data-title="Calcolo parallelo" data-language-autonym="Italiano" data-language-local-name="Italian" class="interlanguage-link-target"><span>Italiano</span></a></li><li class="interlanguage-link interwiki-he mw-list-item"><a href="https://he.wikipedia.org/wiki/%D7%A2%D7%99%D7%91%D7%95%D7%93_%D7%9E%D7%A7%D7%91%D7%99%D7%9C%D7%99" title="עיבוד מקבילי – Hebrew" lang="he" hreflang="he" data-title="עיבוד מקבילי" data-language-autonym="עברית" data-language-local-name="Hebrew" class="interlanguage-link-target"><span>עברית</span></a></li><li class="interlanguage-link interwiki-jv mw-list-item"><a href="https://jv.wikipedia.org/wiki/Komputasi_paralel" title="Komputasi paralel – Javanese" lang="jv" hreflang="jv" data-title="Komputasi paralel" data-language-autonym="Jawa" data-language-local-name="Javanese" class="interlanguage-link-target"><span>Jawa</span></a></li><li class="interlanguage-link interwiki-kn mw-list-item"><a 
href="https://kn.wikipedia.org/wiki/%E0%B2%AA%E0%B3%8D%E0%B2%AF%E0%B2%BE%E0%B2%B0%E0%B3%86%E0%B2%B2%E0%B3%86%E0%B2%B2%E0%B3%8D_%E0%B2%95%E0%B2%82%E0%B2%AA%E0%B3%8D%E0%B2%AF%E0%B3%82%E0%B2%9F%E0%B2%BF%E0%B2%82%E0%B2%97%E0%B3%8D(%E0%B2%8F%E0%B2%95%E0%B2%95%E0%B2%BE%E0%B2%B2%E0%B2%A6_%E0%B2%97%E0%B2%A3%E0%B2%95%E0%B2%95%E0%B2%BE%E0%B2%B0%E0%B3%8D%E0%B2%AF)" title="ಪ್ಯಾರೆಲೆಲ್ ಕಂಪ್ಯೂಟಿಂಗ್(ಏಕಕಾಲದ ಗಣಕಕಾರ್ಯ) – Kannada" lang="kn" hreflang="kn" data-title="ಪ್ಯಾರೆಲೆಲ್ ಕಂಪ್ಯೂಟಿಂಗ್(ಏಕಕಾಲದ ಗಣಕಕಾರ್ಯ)" data-language-autonym="ಕನ್ನಡ" data-language-local-name="Kannada" class="interlanguage-link-target"><span>ಕನ್ನಡ</span></a></li><li class="interlanguage-link interwiki-la mw-list-item"><a href="https://la.wikipedia.org/wiki/Computatio_parallela" title="Computatio parallela – Latin" lang="la" hreflang="la" data-title="Computatio parallela" data-language-autonym="Latina" data-language-local-name="Latin" class="interlanguage-link-target"><span>Latina</span></a></li><li class="interlanguage-link interwiki-lv mw-list-item"><a href="https://lv.wikipedia.org/wiki/Paral%C4%93l%C4%81_skait%C4%BCo%C5%A1ana" title="Paralēlā skaitļošana – Latvian" lang="lv" hreflang="lv" data-title="Paralēlā skaitļošana" data-language-autonym="Latviešu" data-language-local-name="Latvian" class="interlanguage-link-target"><span>Latviešu</span></a></li><li class="interlanguage-link interwiki-hu mw-list-item"><a href="https://hu.wikipedia.org/wiki/P%C3%A1rhuzamos_sz%C3%A1m%C3%ADt%C3%A1stechnika" title="Párhuzamos számítástechnika – Hungarian" lang="hu" hreflang="hu" data-title="Párhuzamos számítástechnika" data-language-autonym="Magyar" data-language-local-name="Hungarian" class="interlanguage-link-target"><span>Magyar</span></a></li><li class="interlanguage-link interwiki-ml mw-list-item"><a href="https://ml.wikipedia.org/wiki/%E0%B4%B8%E0%B4%AE%E0%B4%BE%E0%B4%A8%E0%B5%8D%E0%B4%A4%E0%B4%B0_%E0%B4%95%E0%B4%82%E0%B4%AA%E0%B5%8D%E0%B4%AF%E0%B5%82%E0%B4%9F%E0%B5%8D%E0%B4%9F%E0%B4%BF%E0%B4%99%E0%B5%8D%E0%B4%99%E0%B5%8D" title="സമാന്തര കംപ്യൂട്ടിങ്ങ് – Malayalam" lang="ml" hreflang="ml" data-title="സമാന്തര കംപ്യൂട്ടിങ്ങ്" data-language-autonym="മലയാളം" data-language-local-name="Malayalam" class="interlanguage-link-target"><span>മലയാളം</span></a></li><li class="interlanguage-link interwiki-mr mw-list-item"><a href="https://mr.wikipedia.org/wiki/%E0%A4%B8%E0%A4%AE%E0%A4%BE%E0%A4%82%E0%A4%A4%E0%A4%B0_%E0%A4%B8%E0%A4%82%E0%A4%97%E0%A4%A3%E0%A4%A8" title="समांतर संगणन – Marathi" lang="mr" hreflang="mr" data-title="समांतर संगणन" data-language-autonym="मराठी" data-language-local-name="Marathi" class="interlanguage-link-target"><span>मराठी</span></a></li><li class="interlanguage-link interwiki-mn mw-list-item"><a href="https://mn.wikipedia.org/wiki/%D0%97%D1%8D%D1%80%D1%8D%D0%B3%D1%86%D1%8D%D1%8D_%D1%82%D0%BE%D0%BE%D1%86%D0%BE%D0%BE%D0%BB%D0%BE%D0%BB" title="Зэрэгцээ тооцоолол – Mongolian" lang="mn" hreflang="mn" data-title="Зэрэгцээ тооцоолол" data-language-autonym="Монгол" data-language-local-name="Mongolian" class="interlanguage-link-target"><span>Монгол</span></a></li><li class="interlanguage-link interwiki-nl mw-list-item"><a href="https://nl.wikipedia.org/wiki/Parallelle_computer" title="Parallelle computer – Dutch" lang="nl" hreflang="nl" data-title="Parallelle computer" data-language-autonym="Nederlands" data-language-local-name="Dutch" class="interlanguage-link-target"><span>Nederlands</span></a></li><li class="interlanguage-link interwiki-ja mw-list-item"><a href="https://ja.wikipedia.org/wiki/%E4%B8%A6%E5%88%97%E8%A8%88%E7%AE%97" title="並列計算 
– Japanese" lang="ja" hreflang="ja" data-title="並列計算" data-language-autonym="日本語" data-language-local-name="Japanese" class="interlanguage-link-target"><span>日本語</span></a></li><li class="interlanguage-link interwiki-no mw-list-item"><a href="https://no.wikipedia.org/wiki/Parallellprosessering" title="Parallellprosessering – Norwegian Bokmål" lang="nb" hreflang="nb" data-title="Parallellprosessering" data-language-autonym="Norsk bokmål" data-language-local-name="Norwegian Bokmål" class="interlanguage-link-target"><span>Norsk bokmål</span></a></li><li class="interlanguage-link interwiki-pl badge-Q17437796 badge-featuredarticle mw-list-item" title="featured article badge"><a href="https://pl.wikipedia.org/wiki/Obliczenia_r%C3%B3wnoleg%C5%82e" title="Obliczenia równoległe – Polish" lang="pl" hreflang="pl" data-title="Obliczenia równoległe" data-language-autonym="Polski" data-language-local-name="Polish" class="interlanguage-link-target"><span>Polski</span></a></li><li class="interlanguage-link interwiki-pt mw-list-item"><a href="https://pt.wikipedia.org/wiki/Computa%C3%A7%C3%A3o_paralela" title="Computação paralela – Portuguese" lang="pt" hreflang="pt" data-title="Computação paralela" data-language-autonym="Português" data-language-local-name="Portuguese" class="interlanguage-link-target"><span>Português</span></a></li><li class="interlanguage-link interwiki-ro mw-list-item"><a href="https://ro.wikipedia.org/wiki/Calcul_paralel" title="Calcul paralel – Romanian" lang="ro" hreflang="ro" data-title="Calcul paralel" data-language-autonym="Română" data-language-local-name="Romanian" class="interlanguage-link-target"><span>Română</span></a></li><li class="interlanguage-link interwiki-ru mw-list-item"><a href="https://ru.wikipedia.org/wiki/%D0%9F%D0%B0%D1%80%D0%B0%D0%BB%D0%BB%D0%B5%D0%BB%D1%8C%D0%BD%D1%8B%D0%B5_%D0%B2%D1%8B%D1%87%D0%B8%D1%81%D0%BB%D0%B8%D1%82%D0%B5%D0%BB%D1%8C%D0%BD%D1%8B%D0%B5_%D1%81%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D1%8B" title="Параллельные вычислительные системы – Russian" lang="ru" hreflang="ru" data-title="Параллельные вычислительные системы" data-language-autonym="Русский" data-language-local-name="Russian" class="interlanguage-link-target"><span>Русский</span></a></li><li class="interlanguage-link interwiki-sq badge-Q17437798 badge-goodarticle mw-list-item" title="good article badge"><a href="https://sq.wikipedia.org/wiki/Informatika_paralele" title="Informatika paralele – Albanian" lang="sq" hreflang="sq" data-title="Informatika paralele" data-language-autonym="Shqip" data-language-local-name="Albanian" class="interlanguage-link-target"><span>Shqip</span></a></li><li class="interlanguage-link interwiki-si mw-list-item"><a href="https://si.wikipedia.org/wiki/%E0%B7%83%E0%B6%B8%E0%B7%8F%E0%B6%B1%E0%B7%8A%E0%B6%AD%E0%B6%BB_%E0%B6%B4%E0%B6%BB%E0%B7%92%E0%B6%9C%E0%B6%AB%E0%B6%B1%E0%B6%BA" title="සමාන්තර පරිගණනය – Sinhala" lang="si" hreflang="si" data-title="සමාන්තර පරිගණනය" data-language-autonym="සිංහල" data-language-local-name="Sinhala" class="interlanguage-link-target"><span>සිංහල</span></a></li><li class="interlanguage-link interwiki-simple mw-list-item"><a href="https://simple.wikipedia.org/wiki/Parallel_computing" title="Parallel computing – Simple English" lang="en-simple" hreflang="en-simple" data-title="Parallel computing" data-language-autonym="Simple English" data-language-local-name="Simple English" class="interlanguage-link-target"><span>Simple English</span></a></li><li class="interlanguage-link interwiki-sl mw-list-item"><a 
href="https://sl.wikipedia.org/wiki/Vzporedna_obdelava" title="Vzporedna obdelava – Slovenian" lang="sl" hreflang="sl" data-title="Vzporedna obdelava" data-language-autonym="Slovenščina" data-language-local-name="Slovenian" class="interlanguage-link-target"><span>Slovenščina</span></a></li><li class="interlanguage-link interwiki-sr mw-list-item"><a href="https://sr.wikipedia.org/wiki/%D0%9F%D0%B0%D1%80%D0%B0%D0%BB%D0%B5%D0%BB%D0%BD%D0%B0_%D0%BE%D0%B1%D1%80%D0%B0%D0%B4%D0%B0" title="Паралелна обрада – Serbian" lang="sr" hreflang="sr" data-title="Паралелна обрада" data-language-autonym="Српски / srpski" data-language-local-name="Serbian" class="interlanguage-link-target"><span>Српски / srpski</span></a></li><li class="interlanguage-link interwiki-fi mw-list-item"><a href="https://fi.wikipedia.org/wiki/Rinnakkaislaskenta" title="Rinnakkaislaskenta – Finnish" lang="fi" hreflang="fi" data-title="Rinnakkaislaskenta" data-language-autonym="Suomi" data-language-local-name="Finnish" class="interlanguage-link-target"><span>Suomi</span></a></li><li class="interlanguage-link interwiki-sv mw-list-item"><a href="https://sv.wikipedia.org/wiki/Parallelldator" title="Parallelldator – Swedish" lang="sv" hreflang="sv" data-title="Parallelldator" data-language-autonym="Svenska" data-language-local-name="Swedish" class="interlanguage-link-target"><span>Svenska</span></a></li><li class="interlanguage-link interwiki-ta mw-list-item"><a href="https://ta.wikipedia.org/wiki/%E0%AE%87%E0%AE%A3%E0%AF%88%E0%AE%95%E0%AF%8D_%E0%AE%95%E0%AE%A3%E0%AE%BF%E0%AE%AA%E0%AF%8D%E0%AE%AA%E0%AF%80%E0%AE%9F%E0%AF%81" title="இணைக் கணிப்பீடு – Tamil" lang="ta" hreflang="ta" data-title="இணைக் கணிப்பீடு" data-language-autonym="தமிழ்" data-language-local-name="Tamil" class="interlanguage-link-target"><span>தமிழ்</span></a></li><li class="interlanguage-link interwiki-tr mw-list-item"><a href="https://tr.wikipedia.org/wiki/Paralel_hesaplama" title="Paralel hesaplama – Turkish" lang="tr" hreflang="tr" data-title="Paralel hesaplama" data-language-autonym="Türkçe" data-language-local-name="Turkish" class="interlanguage-link-target"><span>Türkçe</span></a></li><li class="interlanguage-link interwiki-uk mw-list-item"><a href="https://uk.wikipedia.org/wiki/%D0%9F%D0%B0%D1%80%D0%B0%D0%BB%D0%B5%D0%BB%D1%8C%D0%BD%D1%96_%D0%BE%D0%B1%D1%87%D0%B8%D1%81%D0%BB%D0%B5%D0%BD%D0%BD%D1%8F" title="Паралельні обчислення – Ukrainian" lang="uk" hreflang="uk" data-title="Паралельні обчислення" data-language-autonym="Українська" data-language-local-name="Ukrainian" class="interlanguage-link-target"><span>Українська</span></a></li><li class="interlanguage-link interwiki-ur mw-list-item"><a href="https://ur.wikipedia.org/wiki/%D9%85%D8%AA%D9%88%D8%A7%D8%B2%DB%8C_%DA%A9%D9%85%D9%BE%DB%8C%D9%88%D9%B9%D9%86%DA%AF" title="متوازی کمپیوٹنگ – Urdu" lang="ur" hreflang="ur" data-title="متوازی کمپیوٹنگ" data-language-autonym="اردو" data-language-local-name="Urdu" class="interlanguage-link-target"><span>اردو</span></a></li><li class="interlanguage-link interwiki-vi mw-list-item"><a href="https://vi.wikipedia.org/wiki/T%C3%ADnh_to%C3%A1n_song_song" title="Tính toán song song – Vietnamese" lang="vi" hreflang="vi" data-title="Tính toán song song" data-language-autonym="Tiếng Việt" data-language-local-name="Vietnamese" class="interlanguage-link-target"><span>Tiếng Việt</span></a></li><li class="interlanguage-link interwiki-wuu mw-list-item"><a href="https://wuu.wikipedia.org/wiki/%E5%B9%B6%E8%A1%8C%E8%AE%A1%E7%AE%97" title="并行计算 – Wu" lang="wuu" hreflang="wuu" data-title="并行计算" 
data-language-autonym="吴语" data-language-local-name="Wu" class="interlanguage-link-target"><span>吴语</span></a></li><li class="interlanguage-link interwiki-zh-yue mw-list-item"><a href="https://zh-yue.wikipedia.org/wiki/%E5%B9%B3%E8%A1%8C%E9%81%8B%E7%AE%97" title="平行運算 – Cantonese" lang="yue" hreflang="yue" data-title="平行運算" data-language-autonym="粵語" data-language-local-name="Cantonese" class="interlanguage-link-target"><span>粵語</span></a></li><li class="interlanguage-link interwiki-zh mw-list-item"><a href="https://zh.wikipedia.org/wiki/%E5%B9%B6%E8%A1%8C%E8%AE%A1%E7%AE%97" title="并行计算 – Chinese" lang="zh" hreflang="zh" data-title="并行计算" data-language-autonym="中文" data-language-local-name="Chinese" class="interlanguage-link-target"><span>中文</span></a></li> </ul> <div class="after-portlet after-portlet-lang"><span class="wb-langlinks-edit wb-langlinks-link"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q232661#sitelinks-wikipedia" title="Edit interlanguage links" class="wbc-editpage">Edit links</a></span></div> </div> </div> </div> </header> <div class="vector-page-toolbar"> <div class="vector-page-toolbar-container"> <div id="left-navigation"> <nav aria-label="Namespaces"> <div id="p-associated-pages" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-associated-pages" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-nstab-main" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/Parallel_computing" title="View the content page [c]" accesskey="c"><span>Article</span></a></li><li id="ca-talk" class="vector-tab-noicon mw-list-item"><a href="/wiki/Talk:Parallel_computing" rel="discussion" title="Discuss improvements to the content page [t]" accesskey="t"><span>Talk</span></a></li> </ul> </div> </div> <div id="vector-variants-dropdown" class="vector-dropdown emptyPortlet" > <input type="checkbox" id="vector-variants-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-variants-dropdown" class="vector-dropdown-checkbox " aria-label="Change language variant" > <label id="vector-variants-dropdown-label" for="vector-variants-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">English</span> </label> <div class="vector-dropdown-content"> <div id="p-variants" class="vector-menu mw-portlet mw-portlet-variants emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> </div> </div> </nav> </div> <div id="right-navigation" class="vector-collapsible"> <nav aria-label="Views"> <div id="p-views" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-views" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-view" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/Parallel_computing"><span>Read</span></a></li><li id="ca-edit" class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=Parallel_computing&action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-history" class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=Parallel_computing&action=history" title="Past revisions of this page [h]" accesskey="h"><span>View history</span></a></li> </ul> </div> </div> </nav> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-dropdown" class="vector-dropdown 
vector-page-tools-dropdown" > <input type="checkbox" id="vector-page-tools-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-tools-dropdown" class="vector-dropdown-checkbox " aria-label="Tools" > <label id="vector-page-tools-dropdown-label" for="vector-page-tools-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">Tools</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-tools-unpinned-container" class="vector-unpinned-container"> <div id="vector-page-tools" class="vector-page-tools vector-pinnable-element"> <div class="vector-pinnable-header vector-page-tools-pinnable-header vector-pinnable-header-unpinned" data-feature-name="page-tools-pinned" data-pinnable-element-id="vector-page-tools" data-pinned-container-id="vector-page-tools-pinned-container" data-unpinned-container-id="vector-page-tools-unpinned-container" > <div class="vector-pinnable-header-label">Tools</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-page-tools.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-page-tools.unpin">hide</button> </div> <div id="p-cactions" class="vector-menu mw-portlet mw-portlet-cactions emptyPortlet vector-has-collapsible-items" title="More options" > <div class="vector-menu-heading"> Actions </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-more-view" class="selected vector-more-collapsible-item mw-list-item"><a href="/wiki/Parallel_computing"><span>Read</span></a></li><li id="ca-more-edit" class="vector-more-collapsible-item mw-list-item"><a href="/w/index.php?title=Parallel_computing&action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-more-history" class="vector-more-collapsible-item mw-list-item"><a href="/w/index.php?title=Parallel_computing&action=history"><span>View history</span></a></li> </ul> </div> </div> <div id="p-tb" class="vector-menu mw-portlet mw-portlet-tb" > <div class="vector-menu-heading"> General </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="t-whatlinkshere" class="mw-list-item"><a href="/wiki/Special:WhatLinksHere/Parallel_computing" title="List of all English Wikipedia pages containing links to this page [j]" accesskey="j"><span>What links here</span></a></li><li id="t-recentchangeslinked" class="mw-list-item"><a href="/wiki/Special:RecentChangesLinked/Parallel_computing" rel="nofollow" title="Recent changes in pages linked from this page [k]" accesskey="k"><span>Related changes</span></a></li><li id="t-upload" class="mw-list-item"><a href="/wiki/Wikipedia:File_Upload_Wizard" title="Upload files [u]" accesskey="u"><span>Upload file</span></a></li><li id="t-specialpages" class="mw-list-item"><a href="/wiki/Special:SpecialPages" title="A list of all special pages [q]" accesskey="q"><span>Special pages</span></a></li><li id="t-permalink" class="mw-list-item"><a href="/w/index.php?title=Parallel_computing&oldid=1259252331" title="Permanent link to this revision of this page"><span>Permanent link</span></a></li><li id="t-info" class="mw-list-item"><a href="/w/index.php?title=Parallel_computing&action=info" title="More information about this 
page"><span>Page information</span></a></li><li id="t-cite" class="mw-list-item"><a href="/w/index.php?title=Special:CiteThisPage&page=Parallel_computing&id=1259252331&wpFormIdentifier=titleform" title="Information on how to cite this page"><span>Cite this page</span></a></li><li id="t-urlshortener" class="mw-list-item"><a href="/w/index.php?title=Special:UrlShortener&url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FParallel_computing"><span>Get shortened URL</span></a></li><li id="t-urlshortener-qrcode" class="mw-list-item"><a href="/w/index.php?title=Special:QrCode&url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FParallel_computing"><span>Download QR code</span></a></li> </ul> </div> </div> <div id="p-coll-print_export" class="vector-menu mw-portlet mw-portlet-coll-print_export" > <div class="vector-menu-heading"> Print/export </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="coll-download-as-rl" class="mw-list-item"><a href="/w/index.php?title=Special:DownloadAsPdf&page=Parallel_computing&action=show-download-screen" title="Download this page as a PDF file"><span>Download as PDF</span></a></li><li id="t-print" class="mw-list-item"><a href="/w/index.php?title=Parallel_computing&printable=yes" title="Printable version of this page [p]" accesskey="p"><span>Printable version</span></a></li> </ul> </div> </div> <div id="p-wikibase-otherprojects" class="vector-menu mw-portlet mw-portlet-wikibase-otherprojects" > <div class="vector-menu-heading"> In other projects </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li class="wb-otherproject-link wb-otherproject-commons mw-list-item"><a href="https://commons.wikimedia.org/wiki/Category:Parallel_computing" hreflang="en"><span>Wikimedia Commons</span></a></li><li id="t-wikibase" class="wb-otherproject-link wb-otherproject-wikibase-dataitem mw-list-item"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q232661" title="Structured data on this page hosted by Wikidata [g]" accesskey="g"><span>Wikidata item</span></a></li> </ul> </div> </div> </div> </div> </div> </div> </nav> </div> </div> </div> <div class="vector-column-end"> <div class="vector-sticky-pinned-container"> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-pinned-container" class="vector-pinned-container"> </div> </nav> <nav class="vector-appearance-landmark" aria-label="Appearance"> <div id="vector-appearance-pinned-container" class="vector-pinned-container"> <div id="vector-appearance" class="vector-appearance vector-pinnable-element"> <div class="vector-pinnable-header vector-appearance-pinnable-header vector-pinnable-header-pinned" data-feature-name="appearance-pinned" data-pinnable-element-id="vector-appearance" data-pinned-container-id="vector-appearance-pinned-container" data-unpinned-container-id="vector-appearance-unpinned-container" > <div class="vector-pinnable-header-label">Appearance</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-appearance.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-appearance.unpin">hide</button> </div> </div> </div> </nav> </div> </div> <div id="bodyContent" class="vector-body" aria-labelledby="firstHeading" data-mw-ve-target-container> <div class="vector-body-before-content"> <div class="mw-indicators"> <div id="mw-indicator-featured-star" 
class="mw-indicator"><div class="mw-parser-output"><span typeof="mw:File"><a href="/wiki/Wikipedia:Featured_articles*" title="This is a featured article. Click here for more information."><img alt="Featured article" src="//upload.wikimedia.org/wikipedia/en/thumb/e/e7/Cscr-featured.svg/20px-Cscr-featured.svg.png" decoding="async" width="20" height="19" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/e/e7/Cscr-featured.svg/30px-Cscr-featured.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/e/e7/Cscr-featured.svg/40px-Cscr-featured.svg.png 2x" data-file-width="466" data-file-height="443" /></a></span></div></div> <div id="mw-indicator-spoken-icon" class="mw-indicator"><div class="mw-parser-output"><span typeof="mw:File"><a href="/wiki/File:En-Parallel_computing.ogg" title="Listen to this article"><img alt="Listen to this article" src="//upload.wikimedia.org/wikipedia/commons/thumb/4/47/Sound-icon.svg/20px-Sound-icon.svg.png" decoding="async" width="20" height="15" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/47/Sound-icon.svg/30px-Sound-icon.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/4/47/Sound-icon.svg/40px-Sound-icon.svg.png 2x" data-file-width="128" data-file-height="96" /></a></span></div></div> </div> <div id="siteSub" class="noprint">From Wikipedia, the free encyclopedia</div> </div> <div id="contentSub"><div id="mw-content-subtitle"></div></div> <div id="mw-content-text" class="mw-body-content"><div class="mw-content-ltr mw-parser-output" lang="en" dir="ltr"><style data-mw-deduplicate="TemplateStyles:r1236090951">.mw-parser-output .hatnote{font-style:italic}.mw-parser-output div.hatnote{padding-left:1.6em;margin-bottom:0.5em}.mw-parser-output .hatnote i{font-style:normal}.mw-parser-output .hatnote+link+.hatnote{margin-top:-0.5em}@media print{body.ns-0 .mw-parser-output .hatnote{display:none!important}}</style><div role="note" class="hatnote navigation-not-searchable">"Parallelization" redirects here. For parallelization of manifolds, see <a href="/wiki/Parallelization_(mathematics)" title="Parallelization (mathematics)">Parallelization (mathematics)</a>.</div> <div class="shortdescription nomobile noexcerpt noprint searchaux" style="display:none">Programming paradigm in which many processes are executed simultaneously</div> <figure typeof="mw:File/Thumb"><a href="/wiki/File:IBM_Blue_Gene_P_supercomputer.jpg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/d/d3/IBM_Blue_Gene_P_supercomputer.jpg/300px-IBM_Blue_Gene_P_supercomputer.jpg" decoding="async" width="300" height="199" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/d/d3/IBM_Blue_Gene_P_supercomputer.jpg/450px-IBM_Blue_Gene_P_supercomputer.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/d/d3/IBM_Blue_Gene_P_supercomputer.jpg/600px-IBM_Blue_Gene_P_supercomputer.jpg 2x" data-file-width="2100" data-file-height="1391" /></a><figcaption>Large <a href="/wiki/Supercomputer" title="Supercomputer">supercomputers</a> such as IBM's <a href="/wiki/Blue_Gene" class="mw-redirect" title="Blue Gene">Blue Gene/P</a> are designed to heavily exploit parallelism. 
Parallel computing is a type of computation in which many calculations or processes are carried out simultaneously.[1] Large problems can often be divided into smaller ones, which can then be solved at the same time. There are several different forms of parallel computing: bit-level, instruction-level, data, and task parallelism. Parallelism has long been employed in high-performance computing, but has gained broader interest due to the physical constraints preventing frequency scaling.[2] As power consumption (and consequently heat generation) by computers has become a concern in recent years,[3] parallel computing has become the dominant paradigm in computer architecture, mainly in the form of multi-core processors.[4]

Figure: Parallelism vs concurrency

In computer science, parallelism and concurrency are distinct concepts: a parallel program uses multiple CPU cores, each core performing a task independently. Concurrency, by contrast, enables a program to deal with multiple tasks even on a single CPU core; the core switches between tasks (i.e. threads) without necessarily completing each one.
A program can have both, neither, or a combination of parallelism and concurrency characteristics.[5]

Parallel computers can be roughly classified according to the level at which the hardware supports parallelism: multi-core and multi-processor computers have multiple processing elements within a single machine, while clusters, MPPs, and grids use multiple computers to work on the same task. Specialized parallel computer architectures are sometimes used alongside traditional processors to accelerate specific tasks.

In some cases parallelism is transparent to the programmer, as in bit-level or instruction-level parallelism, but explicitly parallel algorithms, particularly those that use concurrency, are more difficult to write than sequential ones,[6] because concurrency introduces several new classes of potential software bugs, of which race conditions are the most common. Communication and synchronization between the different subtasks are typically some of the greatest obstacles to optimal parallel program performance.

A theoretical upper bound on the speed-up of a single program as a result of parallelization is given by Amdahl's law, which states that the speed-up is limited by the fraction of the program that can be parallelized.
Background

Traditionally, computer software has been written for serial computation. To solve a problem, an algorithm is constructed and implemented as a serial stream of instructions. These instructions are executed on a central processing unit on one computer. Only one instruction may execute at a time; after that instruction is finished, the next one is executed.[7]

Parallel computing, on the other hand, uses multiple processing elements simultaneously to solve a problem. This is accomplished by breaking the problem into independent parts so that each processing element can execute its part of the algorithm simultaneously with the others. The processing elements can be diverse and include resources such as a single computer with multiple processors, several networked computers, specialized hardware, or any combination of the above.[7] Historically, parallel computing was used for scientific computing and the simulation of scientific problems, particularly in the natural and engineering sciences such as meteorology. This led to the design of parallel hardware and software, as well as high performance computing.[8]

Frequency scaling was the dominant reason for improvements in computer performance from the mid-1980s until 2004. The runtime of a program is equal to the number of instructions multiplied by the average time per instruction. Holding everything else constant, increasing the clock frequency decreases the average time it takes to execute an instruction.
An increase in frequency thus decreases runtime for all compute-bound programs.[9] However, power consumption P by a chip is given by the equation P = C × V² × F, where C is the capacitance being switched per clock cycle (proportional to the number of transistors whose inputs change), V is voltage, and F is the processor frequency (cycles per second).[10] Increases in frequency increase the amount of power used in a processor. Increasing processor power consumption led ultimately to Intel's May 8, 2004 cancellation of its Tejas and Jayhawk processors, which is generally cited as the end of frequency scaling as the dominant computer architecture paradigm.[11]
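To see why this pushed designers away from frequency scaling, one can plug illustrative numbers into the equation. The capacitance, voltage, and frequency figures in the following sketch are invented for illustration, not measurements of any real chip:

def dynamic_power(c_farads, v_volts, f_hertz):
    """Dynamic power in watts per the classic CMOS model P = C * V^2 * F."""
    return c_farads * v_volts**2 * f_hertz

base = dynamic_power(c_farads=1e-9, v_volts=1.2, f_hertz=2.0e9)  # ~2.9 W

# Raising frequency alone scales power linearly...
faster = dynamic_power(1e-9, 1.2, 3.0e9)                         # ~4.3 W

# ...but higher clock rates typically also require higher voltage, and the
# V^2 term then makes power grow far faster than the clock rate itself.
faster_hot = dynamic_power(1e-9, 1.5, 3.0e9)                     # ~6.8 W

print(f"{base:.1f} W -> {faster:.1f} W -> {faster_hot:.1f} W")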
To deal with the problem of power consumption and overheating, the major central processing unit (CPU or processor) manufacturers started to produce power-efficient processors with multiple cores. The core is the computing unit of the processor, and in multi-core processors each core is independent and can access the same memory concurrently. Multi-core processors have brought parallel computing to desktop computers, and parallelization of serial programs has become a mainstream programming task. In 2012 quad-core processors became standard for desktop computers, while servers had 10+ core processors. From Moore's law it can be predicted that the number of cores per processor will double every 18–24 months. This could mean that after 2020 a typical processor would have dozens or hundreds of cores; in reality, however, the standard is somewhere in the region of 4 to 16 cores, with some designs having a mix of performance and efficiency cores (such as ARM's big.LITTLE design) due to thermal and design constraints.[12][citation needed]

An operating system can ensure that different tasks and user programs are run in parallel on the available cores. However, for a serial software program to take full advantage of the multi-core architecture, the programmer needs to restructure and parallelize the code. A speed-up of application software runtime will no longer be achieved through frequency scaling; instead, programmers will need to parallelize their software code to take advantage of the increasing computing power of multicore architectures.[13]

Relevant laws

Figure: A graphical representation of Amdahl's law. The law demonstrates the theoretical maximum speedup of an overall system and the concept of diminishing returns. If exactly 50% of the work can be parallelized, the best possible speedup is 2 times. If 95% of the work can be parallelized, the best possible speedup is 20 times. According to the law, even with an infinite number of processors, the speedup is constrained by the unparallelizable portion.

Figure: Assume that a task has two independent parts, A and B. Part B takes roughly 25% of the time of the whole computation. By working very hard, one may be able to make this part 5 times faster, but this only reduces the time for the whole computation by a little. In contrast, one may need to perform less work to make part A twice as fast. This will make the computation much faster than by optimizing part B, even though part B's speedup is greater by ratio (5 times versus 2 times).

Main article: Amdahl's law

Optimally, the speedup from parallelization would be linear: doubling the number of processing elements should halve the runtime, and doubling it a second time should again halve the runtime.
However, very few parallel algorithms achieve optimal speedup. Most of them have a near-linear speedup for small numbers of processing elements, which flattens out into a constant value for large numbers of processing elements.

The maximum potential speedup of an overall system can be calculated by Amdahl's law.[14] Amdahl's law indicates that optimal performance improvement is achieved by balancing enhancements to both parallelizable and non-parallelizable components of a task. Furthermore, it reveals that increasing the number of processors yields diminishing returns, with negligible speedup gains beyond a certain point.[15][16]

Amdahl's law has limitations, including its assumptions of a fixed workload and negligible inter-process communication and synchronization overheads, and its primary focus on the computational aspect, ignoring extrinsic factors such as data persistence, I/O operations, and memory access overheads.[17][18][19]
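In its usual form the law bounds the speedup by S(N) = 1 / ((1 − p) + p/N), where p is the fraction of the work that can be parallelized and N is the number of processors. A short sketch evaluating the bound for the fractions used in the figure above:

def amdahl_speedup(p, n):
    """Upper bound on speedup when a fraction p of the work is
    parallelizable and runs on n processing elements."""
    return 1.0 / ((1.0 - p) + p / n)

for p in (0.50, 0.95):
    for n in (2, 16, 1024):
        print(f"p={p:.2f}, n={n:>4}: speedup <= {amdahl_speedup(p, n):.2f}")

# Even with unlimited processors the serial fraction dominates:
# as n grows, S approaches 1 / (1 - p), i.e. 2x for p=0.5 and 20x for p=0.95.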
<sup id="cite_ref-20" class="reference"><a href="#cite_note-20"><span class="cite-bracket">[</span>20<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-21" class="reference"><a href="#cite_note-21"><span class="cite-bracket">[</span>21<span class="cite-bracket">]</span></a></sup></p><figure class="mw-halign-right" typeof="mw:File/Thumb"><a href="/wiki/File:Gustafson.png" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/d/d7/Gustafson.png/300px-Gustafson.png" decoding="async" width="300" height="211" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/d/d7/Gustafson.png/450px-Gustafson.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/d/d7/Gustafson.png/600px-Gustafson.png 2x" data-file-width="976" data-file-height="685" /></a><figcaption>A graphical representation of <a href="/wiki/Gustafson%27s_law" title="Gustafson's law">Gustafson's law</a></figcaption></figure> <div class="mw-heading mw-heading3"><h3 id="Dependencies">Dependencies</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Parallel_computing&action=edit&section=3" title="Edit section: Dependencies"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Understanding <a href="/wiki/Data_dependency" title="Data dependency">data dependencies</a> is fundamental in implementing <a href="/wiki/Parallel_algorithm" title="Parallel algorithm">parallel algorithms</a>. No program can run more quickly than the longest chain of dependent calculations (known as the <a href="/wiki/Critical_path_method" title="Critical path method">critical path</a>), since calculations that depend upon prior calculations in the chain must be executed in order. However, most algorithms do not consist of just a long chain of dependent calculations; there are usually opportunities to execute independent calculations in parallel. </p><p>Let <i>P</i><sub><i>i</i></sub> and <i>P</i><sub><i>j</i></sub> be two program segments. Bernstein's conditions<sup id="cite_ref-22" class="reference"><a href="#cite_note-22"><span class="cite-bracket">[</span>22<span class="cite-bracket">]</span></a></sup> describe when the two are independent and can be executed in parallel. For <i>P</i><sub><i>i</i></sub>, let <i>I</i><sub><i>i</i></sub> be all of the input variables and <i>O</i><sub><i>i</i></sub> the output variables, and likewise for <i>P</i><sub><i>j</i></sub>. 
Let Pᵢ and Pⱼ be two program segments. Bernstein's conditions[22] describe when the two are independent and can be executed in parallel. For Pᵢ, let Iᵢ be all of the input variables and Oᵢ the output variables, and likewise for Pⱼ. Pᵢ and Pⱼ are independent if they satisfy

    Iⱼ ∩ Oᵢ = ∅,
    Iᵢ ∩ Oⱼ = ∅,
    Oᵢ ∩ Oⱼ = ∅.

Violation of the first condition introduces a flow dependency, corresponding to the first segment producing a result used by the second segment. The second condition represents an anti-dependency, when the second segment produces a variable needed by the first segment.
The third and final condition represents an output dependency: when two segments write to the same location, the result comes from the logically last executed segment.[23]

Consider the following functions, which demonstrate several kinds of dependencies:

1: function Dep(a, b)
2:    c := a * b
3:    d := 3 * c
4: end function

In this example, instruction 3 cannot be executed before (or even in parallel with) instruction 2, because instruction 3 uses a result from instruction 2. It violates condition 1, and thus introduces a flow dependency.

1: function NoDep(a, b)
2:    c := a * b
3:    d := 3 * b
4:    e := a + b
5: end function

In this example, there are no dependencies between the instructions, so they can all be run in parallel.

Bernstein's conditions do not allow memory to be shared between different processes. For that, some means of enforcing an ordering between accesses is necessary, such as semaphores, barriers or some other synchronization method.
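Because the conditions are stated purely in terms of read and write sets, they are easy to check mechanically. A small sketch, using the Dep and NoDep fragments above as data (the set encodings are mine, not part of the original pseudocode):

def independent(reads_i, writes_i, reads_j, writes_j):
    """Bernstein's conditions: segments i and j may run in parallel iff
    neither reads what the other writes and their writes are disjoint."""
    return (not (reads_j & writes_i)        # condition 1: no flow dependency
            and not (reads_i & writes_j)    # condition 2: no anti-dependency
            and not (writes_i & writes_j))  # condition 3: no output dependency

# Dep: segment i is "c := a * b", segment j is "d := 3 * c".
print(independent({"a", "b"}, {"c"}, {"c"}, {"d"}))       # False: flow dependency on c

# NoDep: "c := a * b" versus "e := a + b".
print(independent({"a", "b"}, {"c"}, {"a", "b"}, {"e"}))  # True: independent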
Race conditions, mutual exclusion, synchronization, and parallel slowdown

Subtasks in a parallel program are often called threads. Some parallel computer architectures use smaller, lightweight versions of threads known as fibers, while others use bigger versions known as processes. However, "threads" is generally accepted as a generic term for subtasks.[24] Threads will often need synchronized access to an object or other resource, for example when they must update a variable that is shared between them. Without synchronization, the instructions between the two threads may be interleaved in any order. For example, consider the following program:

Thread A                         Thread B
1A: Read variable V              1B: Read variable V
2A: Add 1 to variable V          2B: Add 1 to variable V
3A: Write back to variable V     3B: Write back to variable V

If instruction 1B is executed between 1A and 3A, or if instruction 1A is executed between 1B and 3B, the program will produce incorrect data. This is known as a race condition. The programmer must use a lock to provide mutual exclusion. A lock is a programming language construct that allows one thread to take control of a variable and prevent other threads from reading or writing it, until that variable is unlocked. The thread holding the lock is free to execute its critical section (the section of a program that requires exclusive access to some variable), and to unlock the data when it is finished. Therefore, to guarantee correct program execution, the above program can be rewritten to use locks:

Thread A                         Thread B
1A: Lock variable V              1B: Lock variable V
2A: Read variable V              2B: Read variable V
3A: Add 1 to variable V          3B: Add 1 to variable V
4A: Write back to variable V     4B: Write back to variable V
5A: Unlock variable V            5B: Unlock variable V

One thread will successfully lock variable V, while the other thread will be locked out, unable to proceed until V is unlocked again. This guarantees correct execution of the program. Locks may be necessary to ensure correct program execution when threads must serialize access to resources, but their use can greatly slow a program and may affect its reliability.[25]

Locking multiple variables using non-atomic locks introduces the possibility of program deadlock. An atomic lock locks multiple variables all at once. If it cannot lock all of them, it does not lock any of them. If two threads each need to lock the same two variables using non-atomic locks, it is possible that one thread will lock one of them and the second thread will lock the second variable. In such a case, neither thread can complete, and deadlock results.[26]
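The race in the first table above, and its lock-based fix, can be reproduced with Python's threading module. Whether the unsynchronized version actually loses updates in a given run depends on how the interpreter happens to schedule the threads, so this is a sketch of the failure mode rather than a deterministic demonstration:

import threading

ITERATIONS = 200_000
counter = 0                 # shared variable V, updated without a lock
safe_counter = 0            # shared variable V, updated under a lock
lock = threading.Lock()

def unsafe_worker():
    global counter
    for _ in range(ITERATIONS):
        counter += 1        # read V, add 1, write back: three interleavable steps

def safe_worker():
    global safe_counter
    for _ in range(ITERATIONS):
        with lock:          # lock V, update, unlock: the critical section
            safe_counter += 1

def run(worker):
    threads = [threading.Thread(target=worker) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

run(unsafe_worker)
run(safe_worker)
# safe_counter is always 800000; counter may fall short when updates are lost.
print(counter, safe_counter)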
Many parallel programs require that their subtasks act in synchrony. This requires the use of a barrier. Barriers are typically implemented using a lock or a semaphore.[27] One class of algorithms, known as lock-free and wait-free algorithms, altogether avoids the use of locks and barriers. However, this approach is generally difficult to implement and requires correctly designed data structures.[28]

Not all parallelization results in speed-up. Generally, as a task is split up into more and more threads, those threads spend an ever-increasing portion of their time communicating with each other or waiting on each other for access to resources.[29][30] Once the overhead from resource contention or communication dominates the time spent on other computation, further parallelization (that is, splitting the workload over even more threads) increases rather than decreases the amount of time required to finish. This problem, known as parallel slowdown,[31] can be improved in some cases by software analysis and redesign.[32]

Fine-grained, coarse-grained, and embarrassing parallelism

Applications are often classified according to how often their subtasks need to synchronize or communicate with each other. An application exhibits fine-grained parallelism if its subtasks must communicate many times per second; it exhibits coarse-grained parallelism if they do not communicate many times per second, and it exhibits embarrassing parallelism if they rarely or never have to communicate. Embarrassingly parallel applications are considered the easiest to parallelize.
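What makes embarrassingly parallel workloads easy is the absence of shared state: each work item can be handed to any processing element, with no synchronization beyond collecting the results. A sketch using Python's multiprocessing module, with a toy function standing in for a real work item:

import multiprocessing

def simulate(seed):
    """Stand-in for an independent work item (e.g. one Monte Carlo run);
    it needs no data from any other work item."""
    x = seed
    for _ in range(10_000):
        x = (1103515245 * x + 12345) % 2**31   # toy linear congruential step
    return x

if __name__ == "__main__":
    # Each input is processed independently, so the pool can distribute the
    # items across worker processes without any locks or barriers.
    with multiprocessing.Pool() as pool:
        results = pool.map(simulate, range(32))
    print(len(results), "independent results")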
</p> <div class="mw-heading mw-heading3"><h3 id="Flynn's_taxonomy"><span id="Flynn.27s_taxonomy"></span>Flynn's taxonomy</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Parallel_computing&action=edit&section=6" title="Edit section: Flynn's taxonomy"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Flynn%27s_taxonomy" title="Flynn's taxonomy">Flynn's taxonomy</a></div> <p><a href="/wiki/Michael_J._Flynn" title="Michael J. Flynn">Michael J. Flynn</a> created one of the earliest classification systems for parallel (and sequential) computers and programs, now known as <a href="/wiki/Flynn%27s_taxonomy" title="Flynn's taxonomy">Flynn's taxonomy</a>. Flynn classified programs and computers by whether they were operating using a single set or multiple sets of instructions, and whether or not those instructions were using a single set or multiple sets of data. </p> <style data-mw-deduplicate="TemplateStyles:r1129693374">.mw-parser-output .hlist dl,.mw-parser-output .hlist ol,.mw-parser-output .hlist ul{margin:0;padding:0}.mw-parser-output .hlist dd,.mw-parser-output .hlist dt,.mw-parser-output .hlist li{margin:0;display:inline}.mw-parser-output .hlist.inline,.mw-parser-output .hlist.inline dl,.mw-parser-output .hlist.inline ol,.mw-parser-output .hlist.inline ul,.mw-parser-output .hlist dl dl,.mw-parser-output .hlist dl ol,.mw-parser-output .hlist dl ul,.mw-parser-output .hlist ol dl,.mw-parser-output .hlist ol ol,.mw-parser-output .hlist ol ul,.mw-parser-output .hlist ul dl,.mw-parser-output .hlist ul ol,.mw-parser-output .hlist ul ul{display:inline}.mw-parser-output .hlist .mw-empty-li{display:none}.mw-parser-output .hlist dt::after{content:": "}.mw-parser-output .hlist dd::after,.mw-parser-output .hlist li::after{content:" · ";font-weight:bold}.mw-parser-output .hlist dd:last-child::after,.mw-parser-output .hlist dt:last-child::after,.mw-parser-output .hlist li:last-child::after{content:none}.mw-parser-output .hlist dd dd:first-child::before,.mw-parser-output .hlist dd dt:first-child::before,.mw-parser-output .hlist dd li:first-child::before,.mw-parser-output .hlist dt dd:first-child::before,.mw-parser-output .hlist dt dt:first-child::before,.mw-parser-output .hlist dt li:first-child::before,.mw-parser-output .hlist li dd:first-child::before,.mw-parser-output .hlist li dt:first-child::before,.mw-parser-output .hlist li li:first-child::before{content:" (";font-weight:normal}.mw-parser-output .hlist dd dd:last-child::after,.mw-parser-output .hlist dd dt:last-child::after,.mw-parser-output .hlist dd li:last-child::after,.mw-parser-output .hlist dt dd:last-child::after,.mw-parser-output .hlist dt dt:last-child::after,.mw-parser-output .hlist dt li:last-child::after,.mw-parser-output .hlist li dd:last-child::after,.mw-parser-output .hlist li dt:last-child::after,.mw-parser-output .hlist li li:last-child::after{content:")";font-weight:normal}.mw-parser-output .hlist ol{counter-reset:listitem}.mw-parser-output .hlist ol>li{counter-increment:listitem}.mw-parser-output .hlist ol>li::before{content:" "counter(listitem)"\a0 "}.mw-parser-output .hlist dd ol>li:first-child::before,.mw-parser-output .hlist dt ol>li:first-child::before,.mw-parser-output .hlist li ol>li:first-child::before{content:" ("counter(listitem)"\a0 "}</style><style 
data-mw-deduplicate="TemplateStyles:r1246091330">.mw-parser-output .sidebar{width:22em;float:right;clear:right;margin:0.5em 0 1em 1em;background:var(--background-color-neutral-subtle,#f8f9fa);border:1px solid var(--border-color-base,#a2a9b1);padding:0.2em;text-align:center;line-height:1.4em;font-size:88%;border-collapse:collapse;display:table}body.skin-minerva .mw-parser-output .sidebar{display:table!important;float:right!important;margin:0.5em 0 1em 1em!important}.mw-parser-output .sidebar-subgroup{width:100%;margin:0;border-spacing:0}.mw-parser-output .sidebar-left{float:left;clear:left;margin:0.5em 1em 1em 0}.mw-parser-output .sidebar-none{float:none;clear:both;margin:0.5em 1em 1em 0}.mw-parser-output .sidebar-outer-title{padding:0 0.4em 0.2em;font-size:125%;line-height:1.2em;font-weight:bold}.mw-parser-output .sidebar-top-image{padding:0.4em}.mw-parser-output .sidebar-top-caption,.mw-parser-output .sidebar-pretitle-with-top-image,.mw-parser-output .sidebar-caption{padding:0.2em 0.4em 0;line-height:1.2em}.mw-parser-output .sidebar-pretitle{padding:0.4em 0.4em 0;line-height:1.2em}.mw-parser-output .sidebar-title,.mw-parser-output .sidebar-title-with-pretitle{padding:0.2em 0.8em;font-size:145%;line-height:1.2em}.mw-parser-output .sidebar-title-with-pretitle{padding:0.1em 0.4em}.mw-parser-output .sidebar-image{padding:0.2em 0.4em 0.4em}.mw-parser-output .sidebar-heading{padding:0.1em 0.4em}.mw-parser-output .sidebar-content{padding:0 0.5em 0.4em}.mw-parser-output .sidebar-content-with-subgroup{padding:0.1em 0.4em 0.2em}.mw-parser-output .sidebar-above,.mw-parser-output .sidebar-below{padding:0.3em 0.8em;font-weight:bold}.mw-parser-output .sidebar-collapse .sidebar-above,.mw-parser-output .sidebar-collapse .sidebar-below{border-top:1px solid #aaa;border-bottom:1px solid #aaa}.mw-parser-output .sidebar-navbar{text-align:right;font-size:115%;padding:0 0.4em 0.4em}.mw-parser-output .sidebar-list-title{padding:0 0.4em;text-align:left;font-weight:bold;line-height:1.6em;font-size:105%}.mw-parser-output .sidebar-list-title-c{padding:0 0.4em;text-align:center;margin:0 3.3em}@media(max-width:640px){body.mediawiki .mw-parser-output .sidebar{width:100%!important;clear:both;float:none!important;margin-left:0!important;margin-right:0!important}}body.skin--responsive .mw-parser-output .sidebar a>img{max-width:none!important}@media screen{html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-list-title,html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle{background:transparent!important}html.skin-theme-clientpref-night .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle a{color:var(--color-progressive)!important}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-list-title,html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle{background:transparent!important}html.skin-theme-clientpref-os .mw-parser-output .sidebar:not(.notheme) .sidebar-title-with-pretitle a{color:var(--color-progressive)!important}}@media print{body.ns-0 .mw-parser-output .sidebar{display:none!important}}</style><table class="sidebar nomobile nowraplinks hlist"><tbody><tr><th class="sidebar-title"><a href="/wiki/Flynn%27s_taxonomy" title="Flynn's taxonomy">Flynn's taxonomy</a></th></tr><tr><th class="sidebar-heading"> Single data stream</th></tr><tr><td class="sidebar-content"> <ul><li><a 
href="/wiki/Single_instruction,_single_data" title="Single instruction, single data">SISD</a></li> <li><a href="/wiki/Multiple_instruction,_single_data" title="Multiple instruction, single data">MISD</a></li></ul></td> </tr><tr><th class="sidebar-heading"> Multiple data streams</th></tr><tr><td class="sidebar-content"> <ul><li><a href="/wiki/Single_instruction,_multiple_data" title="Single instruction, multiple data">SIMD</a></li> <li><a href="/wiki/Multiple_instruction,_multiple_data" title="Multiple instruction, multiple data">MIMD</a></li></ul></td> </tr><tr><th class="sidebar-heading"> SIMD subcategories<sup id="cite_ref-flynn-1972_33-0" class="reference"><a href="#cite_note-flynn-1972-33"><span class="cite-bracket">[</span>33<span class="cite-bracket">]</span></a></sup></th></tr><tr><td class="sidebar-content"> <ul><li><a href="/wiki/Flynn%27s_taxonomy#Array_processor" title="Flynn's taxonomy">Array processing (SIMT)</a></li> <li><a href="/wiki/Flynn%27s_taxonomy#Pipelined_processor" title="Flynn's taxonomy">Pipelined processing (packed SIMD)</a></li> <li><a href="/wiki/Flynn%27s_taxonomy#Associative_processor" title="Flynn's taxonomy">Associative processing (predicated/masked SIMD)</a></li></ul></td> </tr><tr><th class="sidebar-heading"> See also</th></tr><tr><td class="sidebar-content"> <ul><li><a href="/wiki/Single_program,_multiple_data" title="Single program, multiple data">SPMD</a></li> <li><a href="/wiki/MPMD" class="mw-redirect" title="MPMD">MPMD</a></li></ul></td> </tr></tbody></table> <p>The single-instruction-single-data (SISD) classification is equivalent to an entirely sequential program. The single-instruction-multiple-data (SIMD) classification is analogous to doing the same operation repeatedly over a large data set. This is commonly done in <a href="/wiki/Signal_processing" title="Signal processing">signal processing</a> applications. Multiple-instruction-single-data (MISD) is a rarely used classification. While computer architectures to deal with this were devised (such as <a href="/wiki/Systolic_array" title="Systolic array">systolic arrays</a>), few applications that fit this class materialized. Multiple-instruction-multiple-data (MIMD) programs are by far the most common type of parallel programs. </p><p>According to <a href="/wiki/David_A._Patterson_(scientist)" class="mw-redirect" title="David A. Patterson (scientist)">David A. Patterson</a> and <a href="/wiki/John_L._Hennessy" title="John L. Hennessy">John L. Hennessy</a>, "Some machines are hybrids of these categories, of course, but this classic model has survived because it is simple, easy to understand, and gives a good first approximation. It is also—perhaps because of its understandability—the most widely used scheme."<sup id="cite_ref-34" class="reference"><a href="#cite_note-34"><span class="cite-bracket">[</span>34<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Disadvantages">Disadvantages</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Parallel_computing&action=edit&section=7" title="Edit section: Disadvantages"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Parallel computing can incur significant overhead in practice, primarily due to the costs associated with merging data from multiple processes. 
According to David A. Patterson and John L. Hennessy, "Some machines are hybrids of these categories, of course, but this classic model has survived because it is simple, easy to understand, and gives a good first approximation. It is also—perhaps because of its understandability—the most widely used scheme."[34]

Disadvantages

Parallel computing can incur significant overhead in practice, primarily due to the costs associated with merging data from multiple processes. Specifically, inter-process communication and synchronization can lead to overheads that are substantially higher—often by two or more orders of magnitude—than processing the same data on a single thread.[35][36][37] Therefore, the overall improvement should be carefully evaluated.

Granularity

Bit-level parallelism

Main article: Bit-level parallelism

[Figure: Taiwania 3 of Taiwan, a parallel supercomputing device that joined COVID-19 research]

From the advent of very-large-scale integration (VLSI) computer-chip fabrication technology in the 1970s until about 1986, speed-up in computer architecture was driven by doubling computer word size—the amount of information the processor can manipulate per cycle.[38] Increasing the word size reduces the number of instructions the processor must execute to perform an operation on variables whose sizes are greater than the length of the word.
For example, where an 8-bit processor must add two 16-bit integers, it must first add the 8 lower-order bits from each integer using the standard addition instruction, then add the 8 higher-order bits using an add-with-carry instruction and the carry bit from the lower-order addition; thus, an 8-bit processor requires two instructions to complete a single operation, where a 16-bit processor can complete the operation with a single instruction.
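A small C sketch of that carry chain (the function name is illustrative; an 8-bit CPU would do this in two machine instructions, an add followed by an add-with-carry):

```c
#include <stdint.h>

/* What an 8-bit processor must do to add two 16-bit integers:
 * add the low bytes, then add the high bytes plus the carry. */
uint16_t add16_on_8bit(uint16_t a, uint16_t b)
{
    uint8_t lo    = (uint8_t)a + (uint8_t)b;   /* standard ADD            */
    uint8_t carry = lo < (uint8_t)a;           /* carry out of the low add */
    uint8_t hi    = (uint8_t)((a >> 8) + (b >> 8) + carry); /* ADC        */
    return ((uint16_t)hi << 8) | lo;
}
```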
Historically, 4-bit microprocessors were replaced with 8-bit, then 16-bit, then 32-bit microprocessors. This trend generally came to an end with the introduction of 32-bit processors, which remained the standard in general-purpose computing for two decades. Not until the early 2000s, with the advent of x86-64 architectures, did 64-bit processors become commonplace.

Instruction-level parallelism

Main article: Instruction-level parallelism

[Figure: A canonical processor without a pipeline. It takes five clock cycles to complete one instruction, so the processor delivers subscalar performance (IPC = 0.2 < 1).]

A computer program is, in essence, a stream of instructions executed by a processor. Without instruction-level parallelism, a processor can issue less than one instruction per clock cycle (IPC < 1). These processors are known as subscalar processors. Instructions can be re-ordered and combined into groups which are then executed in parallel without changing the result of the program. This is known as instruction-level parallelism. Advances in instruction-level parallelism dominated computer architecture from the mid-1980s until the mid-1990s.[39]

[Figure: A canonical five-stage pipelined processor. In the best case, it takes one clock cycle to complete one instruction, so the processor delivers scalar performance (IPC = 1).]

All modern processors have multi-stage instruction pipelines. Each stage in the pipeline corresponds to a different action the processor performs on the instruction in that stage; a processor with an N-stage pipeline can have up to N different instructions at different stages of completion and thus can issue one instruction per clock cycle (IPC = 1). These processors are known as scalar processors. The canonical example of a pipelined processor is a RISC processor, with five stages: instruction fetch (IF), instruction decode (ID), execute (EX), memory access (MEM), and register write back (WB). The Pentium 4 processor had a 35-stage pipeline.[40]
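The IPC values in these captions follow from a standard pipeline-throughput calculation (a textbook identity, not taken from the cited sources): once full, an N-stage pipeline completes k instructions in N + k − 1 cycles, while an unpipelined processor needs N·k cycles.

$$
\mathrm{IPC}_{\text{unpipelined}} = \frac{k}{Nk} = \frac{1}{N},
\qquad
\mathrm{IPC}_{\text{pipelined}} = \frac{k}{N + k - 1} \longrightarrow 1
\quad \text{as } k \to \infty .
$$

For the five-stage example, 1/N = 0.2, matching the subscalar figure, and the pipelined throughput approaches the scalar figure's IPC = 1.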
[Figure: A canonical five-stage pipelined processor with two execution units. In the best case, it takes one clock cycle to complete two instructions, so the processor delivers superscalar performance (IPC = 2 > 1).]

Most modern processors also have multiple execution units. They usually combine this feature with pipelining and thus can issue more than one instruction per clock cycle (IPC > 1). These processors are known as superscalar processors. Superscalar processors differ from multi-core processors in that the several execution units are not entire processors (i.e. processing units). Instructions can be grouped together only if there is no data dependency between them. Scoreboarding and the Tomasulo algorithm (which is similar to scoreboarding but makes use of register renaming) are two of the most common techniques for implementing out-of-order execution and instruction-level parallelism.

Task parallelism

Main article: Task parallelism

Task parallelism is the characteristic of a parallel program that "entirely different calculations can be performed on either the same or different sets of data".[41] This contrasts with data parallelism, where the same calculation is performed on the same or different sets of data. Task parallelism involves the decomposition of a task into sub-tasks and then allocating each sub-task to a processor for execution. The processors then execute these sub-tasks concurrently and often cooperatively. Task parallelism does not usually scale with the size of a problem.[42]
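A minimal POSIX Threads sketch of task parallelism, with hypothetical sub-task names: two entirely different calculations run concurrently over the same data set.

```c
#include <pthread.h>
#include <stdio.h>

/* Two entirely different calculations (hypothetical sub-tasks)
 * operating concurrently on the same data. */
static void *summarize(void *arg) { (void)arg; /* e.g. compute statistics */ return NULL; }
static void *compress (void *arg) { (void)arg; /* e.g. compress the data  */ return NULL; }

int main(void)
{
    double data[1024] = {0};
    pthread_t t1, t2;

    /* Decompose the job into sub-tasks and allocate each to a thread. */
    pthread_create(&t1, NULL, summarize, data);
    pthread_create(&t2, NULL, compress,  data);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    puts("both sub-tasks finished");   /* build with: cc file.c -lpthread */
    return 0;
}
```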
Superword level parallelism

Superword level parallelism is a vectorization technique based on loop unrolling and basic block vectorization. It is distinct from loop vectorization algorithms in that it can exploit parallelism of inline code, such as manipulating coordinates, color channels or in loops unrolled by hand.[43]
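A hand-written illustration of the straight-line code this technique targets (the function is hypothetical): the four statements are isomorphic, so a superword-level-parallelism pass can pack them into a single vector operation even though there is no loop for a loop vectorizer to work on.

```c
/* Four isomorphic statements on adjacent memory: an SLP pass can pack
 * them into one 4-wide vector add. */
void brighten_pixel(float *px, float k)
{
    px[0] += k;  /* red   */
    px[1] += k;  /* green */
    px[2] += k;  /* blue  */
    px[3] += k;  /* alpha */
}
```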
Hardware

Memory and communication

Main memory in a parallel computer is either shared memory (shared between all processing elements in a single address space), or distributed memory (in which each processing element has its own local address space).[44] Distributed memory refers to the fact that the memory is logically distributed, but often implies that it is physically distributed as well. Distributed shared memory and memory virtualization combine the two approaches, where the processing element has its own local memory and access to the memory on non-local processors. Accesses to local memory are typically faster than accesses to non-local memory. On supercomputers, a distributed shared memory space can be implemented using a programming model such as PGAS. This model allows processes on one compute node to transparently access the remote memory of another compute node. All compute nodes are also connected to an external shared memory system via a high-speed interconnect such as InfiniBand; this external shared memory system, known as a burst buffer, is typically built from arrays of non-volatile memory physically distributed across multiple I/O nodes.

[Figure: A logical view of a non-uniform memory access (NUMA) architecture. Processors in one directory can access that directory's memory with lower latency than memory in the other directory.]

Computer architectures in which each element of main memory can be accessed with equal latency and bandwidth are known as uniform memory access (UMA) systems. Typically, that can be achieved only by a shared memory system, in which the memory is not physically distributed. A system that does not have this property is known as a non-uniform memory access (NUMA) architecture. Distributed memory systems have non-uniform memory access.

Computer systems make use of caches—small and fast memories located close to the processor which store temporary copies of memory values (nearby in both the physical and logical sense). Parallel computer systems have difficulties with caches that may store the same value in more than one location, with the possibility of incorrect program execution. These computers require a cache coherency system, which keeps track of cached values and strategically purges them, thus ensuring correct program execution. Bus snooping is one of the most common methods for keeping track of which values are being accessed (and thus should be purged). Designing large, high-performance cache coherence systems is a very difficult problem in computer architecture.
As a result, shared memory computer architectures do not scale as well as distributed memory systems do.[44]

Processor–processor and processor–memory communication can be implemented in hardware in several ways, including via shared (either multiported or multiplexed) memory, a crossbar switch, a shared bus, or an interconnect network of a myriad of topologies including star, ring, tree, hypercube, fat hypercube (a hypercube with more than one processor at a node), or n-dimensional mesh.

Parallel computers based on interconnect networks need some kind of routing to enable the passing of messages between nodes that are not directly connected. The medium used for communication between the processors is likely to be hierarchical in large multiprocessor machines.

Classes of parallel computers

Parallel computers can be roughly classified according to the level at which the hardware supports parallelism. This classification is broadly analogous to the distance between basic computing nodes. The classes are not mutually exclusive; for example, clusters of symmetric multiprocessors are relatively common.

Multi-core computing

Main article: Multi-core processor

A multi-core processor is a processor that includes multiple processing units (called "cores") on the same chip. It differs from a superscalar processor, which includes multiple execution units and can issue multiple instructions per clock cycle from one instruction stream (thread); in contrast, a multi-core processor can issue multiple instructions per clock cycle from multiple instruction streams.
<a href="/wiki/IBM" title="IBM">IBM</a>'s <a href="/wiki/Cell_(microprocessor)" class="mw-redirect" title="Cell (microprocessor)">Cell microprocessor</a>, designed for use in the <a href="/wiki/Sony" title="Sony">Sony</a> <a href="/wiki/PlayStation_3" title="PlayStation 3">PlayStation 3</a>, is a prominent multi-core processor. Each core in a multi-core processor can potentially be superscalar as well—that is, on every clock cycle, each core can issue multiple instructions from one thread. </p><p><a href="/wiki/Simultaneous_multithreading" title="Simultaneous multithreading">Simultaneous multithreading</a> (of which Intel's <a href="/wiki/Hyper-Threading" class="mw-redirect" title="Hyper-Threading">Hyper-Threading</a> is the best known) was an early form of pseudo-multi-coreism. A processor capable of concurrent multithreading includes multiple execution units in the same processing unit—that is it has a superscalar architecture—and can issue multiple instructions per clock cycle from <i>multiple</i> threads. <a href="/wiki/Temporal_multithreading" title="Temporal multithreading">Temporal multithreading</a> on the other hand includes a single execution unit in the same processing unit and can issue one instruction at a time from <i>multiple</i> threads. </p> <div class="mw-heading mw-heading4"><h4 id="Symmetric_multiprocessing">Symmetric multiprocessing</h4><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Parallel_computing&action=edit&section=17" title="Edit section: Symmetric multiprocessing"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Symmetric_multiprocessing" title="Symmetric multiprocessing">Symmetric multiprocessing</a></div> <p>A symmetric multiprocessor (SMP) is a computer system with multiple identical processors that share memory and connect via a <a href="/wiki/Bus_(computing)" title="Bus (computing)">bus</a>.<sup id="cite_ref-HP549_45-0" class="reference"><a href="#cite_note-HP549-45"><span class="cite-bracket">[</span>45<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Bus_contention" title="Bus contention">Bus contention</a> prevents bus architectures from scaling. 
As a result, SMPs generally do not comprise more than 32 processors.[46] Because of the small size of the processors and the significant reduction in the requirements for bus bandwidth achieved by large caches, such symmetric multiprocessors are extremely cost-effective, provided that a sufficient amount of memory bandwidth exists.[45]

Distributed computing

Main article: Distributed computing

A distributed computer (also known as a distributed memory multiprocessor) is a distributed memory computer system in which the processing elements are connected by a network. Distributed computers are highly scalable. The terms "concurrent computing", "parallel computing", and "distributed computing" have a lot of overlap, and no clear distinction exists between them.[47] The same system may be characterized both as "parallel" and "distributed"; the processors in a typical distributed system run concurrently in parallel.[48]
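A minimal MPI sketch of the distributed-memory model just described (the per-rank computation is illustrative): each process owns private memory, and partial results move between nodes only through explicit message passing.

```c
#include <mpi.h>
#include <stdio.h>

/* Each process (rank) computes on its private memory; an explicit
 * message-passing reduction combines the results on rank 0.
 * Build with mpicc, run with mpirun. */
int main(int argc, char **argv)
{
    int rank, size, local, total;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    local = rank * rank;   /* some per-node computation (illustrative) */
    MPI_Reduce(&local, &total, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0)
        printf("sum of squares over %d ranks = %d\n", size, total);
    MPI_Finalize();
    return 0;
}
```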
Cluster computing

Main article: Computer cluster

[Figure: A Beowulf cluster]

A cluster is a group of loosely coupled computers that work together closely, so that in some respects they can be regarded as a single computer.[49] Clusters are composed of multiple standalone machines connected by a network. While machines in a cluster do not have to be symmetric, load balancing is more difficult if they are not. The most common type of cluster is the Beowulf cluster, which is a cluster implemented on multiple identical commercial off-the-shelf computers connected with a TCP/IP Ethernet local area network.[50] Beowulf technology was originally developed by Thomas Sterling and Donald Becker. 87% of all Top500 supercomputers are clusters;[51] the remainder are massively parallel processors, explained below.

Because grid computing systems (described below) can easily handle embarrassingly parallel problems, modern clusters are typically designed to handle more difficult problems—problems that require nodes to share intermediate results with each other more often. This requires a high-bandwidth and, more importantly, a low-latency interconnection network. Many historic and current supercomputers use customized high-performance network hardware specifically designed for cluster computing, such as the Cray Gemini network.[52] As of 2014, most current supercomputers use some off-the-shelf standard network hardware, often Myrinet, InfiniBand, or Gigabit Ethernet.
Massively parallel computing

Main article: Massively parallel (computing)

[Figure: A cabinet from IBM's Blue Gene/L massively parallel supercomputer]

A massively parallel processor (MPP) is a single computer with many networked processors. MPPs have many of the same characteristics as clusters, but MPPs have specialized interconnect networks (whereas clusters use commodity hardware for networking). MPPs also tend to be larger than clusters, typically having "far more" than 100 processors.[53] In an MPP, "each CPU contains its own memory and copy of the operating system and application. Each subsystem communicates with the others via a high-speed interconnect."[54]

IBM's Blue Gene/L, the fifth fastest supercomputer in the world according to the June 2009 TOP500 ranking, is an MPP.

Grid computing

Main article: Grid computing

Grid computing is the most distributed form of parallel computing. It makes use of computers communicating over the Internet to work on a given problem.
Because of the low bandwidth and extremely high latency available on the Internet, distributed computing typically deals only with embarrassingly parallel problems.

Most grid computing applications use middleware (software that sits between the operating system and the application to manage network resources and standardize the software interface). The most common grid computing middleware is the Berkeley Open Infrastructure for Network Computing (BOINC). Often volunteer computing software makes use of "spare cycles", performing computations at times when a computer is idling.[55]

Cloud computing

Main article: Cloud computing

The ubiquity of the Internet has brought the possibility of large-scale cloud computing.

Specialized parallel computers

Within parallel computing, there are specialized parallel devices that remain niche areas of interest. While not domain-specific, they tend to be applicable to only a few classes of parallel problems.

Reconfigurable computing with field-programmable gate arrays

Reconfigurable computing is the use of a field-programmable gate array (FPGA) as a co-processor to a general-purpose computer. An FPGA is, in essence, a computer chip that can rewire itself for a given task.
FPGAs can be programmed with hardware description languages such as VHDL[56] or Verilog.[57] Several vendors have created C to HDL languages that attempt to emulate the syntax and semantics of the C programming language, with which most programmers are familiar. The best known C to HDL languages are Mitrion-C, Impulse C, and Handel-C. Specific subsets of SystemC based on C++ can also be used for this purpose.

AMD's decision to open its HyperTransport technology to third-party vendors has become the enabling technology for high-performance reconfigurable computing.[58] According to Michael R. D'Amour, chief operating officer of DRC Computer Corporation, "when we first walked into AMD, they called us 'the socket stealers.'
Now they call us their partners."[58]

General-purpose computing on graphics processing units (GPGPU)

Main article: GPGPU

[Figure: Nvidia's Tesla GPGPU card]

General-purpose computing on graphics processing units (GPGPU) is a fairly recent trend in computer engineering research. GPUs are co-processors that have been heavily optimized for computer graphics processing.[59] Computer graphics processing is a field dominated by data parallel operations—particularly linear algebra matrix operations.

In the early days, GPGPU programs used the normal graphics APIs for executing programs. However, several new programming languages and platforms have been built to do general purpose computation on GPUs, with both Nvidia and AMD releasing programming environments with CUDA and Stream SDK respectively. Other GPU programming languages include BrookGPU, PeakStream, and RapidMind. Nvidia has also released specific products for computation in their Tesla series. The technology consortium Khronos Group has released the OpenCL specification, which is a framework for writing programs that execute across platforms consisting of CPUs and GPUs.
<a href="/wiki/AMD" title="AMD">AMD</a>, <a href="/wiki/Apple_Inc." title="Apple Inc.">Apple</a>, <a href="/wiki/Intel" title="Intel">Intel</a>, <a href="/wiki/Nvidia" title="Nvidia">Nvidia</a> and others are supporting <a href="/wiki/OpenCL" title="OpenCL">OpenCL</a>. </p> <div class="mw-heading mw-heading5"><h5 id="Application-specific_integrated_circuits">Application-specific integrated circuits</h5><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Parallel_computing&action=edit&section=26" title="Edit section: Application-specific integrated circuits"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Main article: <a href="/wiki/Application-specific_integrated_circuit" title="Application-specific integrated circuit">Application-specific integrated circuit</a></div> <p>Several <a href="/wiki/Application-specific_integrated_circuit" title="Application-specific integrated circuit">application-specific integrated circuit</a> (ASIC) approaches have been devised for dealing with parallel applications.<sup id="cite_ref-60" class="reference"><a href="#cite_note-60"><span class="cite-bracket">[</span>60<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-61" class="reference"><a href="#cite_note-61"><span class="cite-bracket">[</span>61<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-62" class="reference"><a href="#cite_note-62"><span class="cite-bracket">[</span>62<span class="cite-bracket">]</span></a></sup> </p><p>Because an ASIC is (by definition) specific to a given application, it can be fully optimized for that application. As a result, for a given application, an ASIC tends to outperform a general-purpose computer. However, ASICs are created by <a href="/wiki/Photolithography" title="Photolithography">UV photolithography</a>. This process requires a mask set, which can be extremely expensive. A mask set can cost over a million US dollars.<sup id="cite_ref-63" class="reference"><a href="#cite_note-63"><span class="cite-bracket">[</span>63<span class="cite-bracket">]</span></a></sup> (The smaller the transistors required for the chip, the more expensive the mask will be.) Meanwhile, performance increases in general-purpose computing over time (as described by <a href="/wiki/Moore%27s_law" title="Moore's law">Moore's law</a>) tend to wipe out these gains in only one or two chip generations.<sup id="cite_ref-DAmour_58-2" class="reference"><a href="#cite_note-DAmour-58"><span class="cite-bracket">[</span>58<span class="cite-bracket">]</span></a></sup> High initial cost, and the tendency to be overtaken by Moore's-law-driven general-purpose computing, has rendered ASICs unfeasible for most parallel computing applications. However, some have been built. One example is the PFLOPS <a href="/wiki/RIKEN_MDGRAPE-3" title="RIKEN MDGRAPE-3">RIKEN MDGRAPE-3</a> machine which uses custom ASICs for <a href="/wiki/Molecular_dynamics" title="Molecular dynamics">molecular dynamics</a> simulation. 
Vector processors

Main article: Vector processor

[Figure: The Cray-1 is a vector processor.]

A vector processor is a CPU or computer system that can execute the same instruction on large sets of data. Vector processors have high-level operations that work on linear arrays of numbers or vectors. An example vector operation is A = B × C, where A, B, and C are each 64-element vectors of 64-bit floating-point numbers.[64] They are closely related to Flynn's SIMD classification.[64]

Cray computers became famous for their vector-processing computers in the 1970s and 1980s. However, vector processors—both as CPUs and as full computer systems—have generally disappeared. Modern processor instruction sets do include some vector processing instructions, such as Freescale Semiconductor's AltiVec and Intel's Streaming SIMD Extensions (SSE).
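A small example of the SSE instructions mentioned above, using the standard intrinsics from <xmmintrin.h> (the values are arbitrary): a single instruction multiplies four packed single-precision floats at once.

```c
#include <xmmintrin.h>   /* SSE intrinsics */
#include <stdio.h>

int main(void)
{
    /* Pack four floats per register; _mm_mul_ps compiles to one MULPS
     * instruction that applies the same operation to all four lanes. */
    __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
    __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);
    __m128 c = _mm_mul_ps(a, b);   /* c = a * b, element-wise */

    float out[4];
    _mm_storeu_ps(out, c);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 5 12 21 32 */
    return 0;
}
```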
Software

Parallel programming languages

Main article: List of concurrent and parallel programming languages

Concurrent programming languages, libraries, APIs, and parallel programming models (such as algorithmic skeletons) have been created for programming parallel computers. These can generally be divided into classes based on the assumptions they make about the underlying memory architecture—shared memory, distributed memory, or shared distributed memory. Shared memory programming languages communicate by manipulating shared memory variables. Distributed memory uses message passing. POSIX Threads and OpenMP are two of the most widely used shared memory APIs, whereas Message Passing Interface (MPI) is the most widely used message-passing system API.[65] One concept used in programming parallel programs is the future concept, where one part of a program promises to deliver a required datum to another part of a program at some future time.
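A minimal OpenMP sketch of the shared-memory model just described (the computation itself is illustrative): threads communicate through the shared variable sum, which the reduction clause combines safely without explicit locking.

```c
#include <stdio.h>

/* Shared-memory parallelism with OpenMP: iterations are divided among
 * threads, and each thread's partial sum is merged by the reduction.
 * Build with: cc -fopenmp file.c */
int main(void)
{
    double sum = 0.0;

    #pragma omp parallel for reduction(+:sum)
    for (int i = 1; i <= 1000000; i++)
        sum += 1.0 / i;

    printf("harmonic(1e6) ~= %f\n", sum);
    return 0;
}
```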
Efforts to standardize parallel programming include an open standard called OpenHMPP for hybrid multi-core parallel programming. The OpenHMPP directive-based programming model offers a syntax to efficiently offload computations on hardware accelerators and to optimize data movement to/from the hardware memory using remote procedure calls.

The rise of consumer GPUs has led to support for compute kernels, either in graphics APIs (referred to as compute shaders), in dedicated APIs (such as OpenCL), or in other language extensions.

Automatic parallelization

Main article: Automatic parallelization

Automatic parallelization of a sequential program by a compiler is the "holy grail" of parallel computing, especially with the aforementioned limit of processor frequency. Despite decades of work by compiler researchers, automatic parallelization has had only limited success.[66]

Mainstream parallel programming languages remain either explicitly parallel or (at best) partially implicit, in which a programmer gives the compiler directives for parallelization. A few fully implicit parallel programming languages exist—SISAL, Parallel Haskell, SequenceL, SystemC (for FPGAs), Mitrion-C, VHDL, and Verilog.
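A sketch of the core obstacle (the functions are hypothetical): a parallelizing compiler must prove that loop iterations are independent, and a single loop-carried dependency forces serial execution.

```c
/* The first loop has no loop-carried dependency, so a parallelizing
 * compiler may distribute its iterations across cores. In the second,
 * iteration i needs the result of iteration i-1 and must stay serial. */
void independent(float *a, const float *b, int n)
{
    for (int i = 0; i < n; i++)
        a[i] = 2.0f * b[i];        /* iterations independent */
}

void dependent(float *a, int n)
{
    for (int i = 1; i < n; i++)
        a[i] = a[i - 1] + 1.0f;    /* loop-carried dependency */
}
```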
<a href="/wiki/Application_checkpointing" title="Application checkpointing">Application checkpointing</a> is a technique whereby the computer system takes a "snapshot" of the application—a record of all current resource allocations and variable states, akin to a <a href="/wiki/Core_dump" title="Core dump">core dump</a>—; this information can be used to restore the program if the computer should fail. Application checkpointing means that the program has to restart from only its last checkpoint rather than the beginning. While checkpointing provides benefits in a variety of situations, it is especially useful in highly parallel systems with a large number of processors used in <a href="/wiki/High_performance_computing" class="mw-redirect" title="High performance computing">high performance computing</a>.<sup id="cite_ref-67" class="reference"><a href="#cite_note-67"><span class="cite-bracket">[</span>67<span class="cite-bracket">]</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Algorithmic_methods">Algorithmic methods</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Parallel_computing&action=edit&section=32" title="Edit section: Algorithmic methods"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>As parallel computers become larger and faster, we are now able to solve problems that had previously taken too long to run. Fields as varied as <a href="/wiki/Bioinformatics" title="Bioinformatics">bioinformatics</a> (for <a href="/wiki/Protein_folding" title="Protein folding">protein folding</a> and <a href="/wiki/Sequence_analysis" title="Sequence analysis">sequence analysis</a>) and economics have taken advantage of parallel computing. Common types of problems in parallel computing applications include:<sup id="cite_ref-68" class="reference"><a href="#cite_note-68"><span class="cite-bracket">[</span>68<span class="cite-bracket">]</span></a></sup> </p> <ul><li>Dense <a href="/wiki/Linear_algebra" title="Linear algebra">linear algebra</a></li> <li>Sparse linear algebra</li> <li>Spectral methods (such as <a href="/wiki/Cooley%E2%80%93Tukey_FFT_algorithm" title="Cooley–Tukey FFT algorithm">Cooley–Tukey fast Fourier transform</a>)</li> <li><a href="/wiki/N-body_problem" title="N-body problem"><i>N</i>-body problems</a> (such as <a href="/wiki/Barnes%E2%80%93Hut_simulation" title="Barnes–Hut simulation">Barnes–Hut simulation</a>)</li> <li><a href="/wiki/Regular_grid" title="Regular grid">Structured grid</a> problems (such as <a href="/wiki/Lattice_Boltzmann_methods" title="Lattice Boltzmann methods">Lattice Boltzmann methods</a>)</li> <li><a href="/wiki/Unstructured_grid" title="Unstructured grid">Unstructured grid</a> problems (such as found in <a href="/wiki/Finite_element_analysis" class="mw-redirect" title="Finite element analysis">finite element analysis</a>)</li> <li><a href="/wiki/Monte_Carlo_method" title="Monte Carlo method">Monte Carlo method</a></li> <li><a href="/wiki/Combinational_logic" title="Combinational logic">Combinational logic</a> (such as <a href="/wiki/Brute_force_attack" class="mw-redirect" title="Brute force attack">brute-force cryptographic techniques</a>)</li> <li><a href="/wiki/Graph_traversal" title="Graph traversal">Graph traversal</a> (such as <a href="/wiki/Sorting_algorithm" title="Sorting algorithm">sorting algorithms</a>)</li> <li><a href="/wiki/Dynamic_programming" title="Dynamic programming">Dynamic programming</a></li> <li><a href="/wiki/Branch_and_bound" 
title="Branch and bound">Branch and bound</a> methods</li> <li><a href="/wiki/Graphical_model" title="Graphical model">Graphical models</a> (such as detecting <a href="/wiki/Hidden_Markov_model" title="Hidden Markov model">hidden Markov models</a> and constructing <a href="/wiki/Bayesian_network" title="Bayesian network">Bayesian networks</a>)</li> <li><a href="/wiki/HBJ_model" title="HBJ model">HBJ model</a>, a concise message-passing model<sup id="cite_ref-69" class="reference"><a href="#cite_note-69"><span class="cite-bracket">[</span>69<span class="cite-bracket">]</span></a></sup></li> <li><a href="/wiki/Finite-state_machine" title="Finite-state machine">Finite-state machine</a> simulation</li></ul> <div class="mw-heading mw-heading2"><h2 id="Fault_tolerance">Fault tolerance</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Parallel_computing&action=edit&section=33" title="Edit section: Fault tolerance"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">Further information: <a href="/wiki/Fault-tolerant_computer_system" class="mw-redirect" title="Fault-tolerant computer system">Fault-tolerant computer system</a></div> <p>Parallel computing can also be applied to the design of <a href="/wiki/Fault-tolerant_computer_system" class="mw-redirect" title="Fault-tolerant computer system">fault-tolerant computer systems</a>, particularly via <a href="/wiki/Lockstep_(computing)" title="Lockstep (computing)">lockstep</a> systems performing the same operation in parallel. This provides <a href="/wiki/Redundancy_(engineering)" title="Redundancy (engineering)">redundancy</a> in case one component fails, and also allows automatic <a href="/wiki/Error_detection" class="mw-redirect" title="Error detection">error detection</a> and <a href="/wiki/Error_correction" class="mw-redirect" title="Error correction">error correction</a> if the results differ. These methods can be used to help prevent single-event upsets caused by transient errors.<sup id="cite_ref-70" class="reference"><a href="#cite_note-70"><span class="cite-bracket">[</span>70<span class="cite-bracket">]</span></a></sup> Although additional measures may be required in embedded or specialized systems, this method can provide a cost-effective approach to achieve n-modular redundancy in commercial off-the-shelf systems. 
</p> <div class="mw-heading mw-heading2"><h2 id="History">History</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Parallel_computing&action=edit&section=34" title="Edit section: History"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236090951"><div role="note" class="hatnote navigation-not-searchable">For broader coverage of this topic, see <a href="/wiki/History_of_computing" title="History of computing">History of computing</a>.</div> <figure class="mw-default-size mw-halign-right" typeof="mw:File/Thumb"><a href="/wiki/File:ILLIAC_4_parallel_computer.jpg" class="mw-file-description"><img src="//upload.wikimedia.org/wikipedia/commons/thumb/9/91/ILLIAC_4_parallel_computer.jpg/220px-ILLIAC_4_parallel_computer.jpg" decoding="async" width="220" height="177" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/91/ILLIAC_4_parallel_computer.jpg/330px-ILLIAC_4_parallel_computer.jpg 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/91/ILLIAC_4_parallel_computer.jpg/440px-ILLIAC_4_parallel_computer.jpg 2x" data-file-width="2393" data-file-height="1920" /></a><figcaption><a href="/wiki/ILLIAC_IV" title="ILLIAC IV">ILLIAC IV</a>, "the most infamous of supercomputers"<sup id="cite_ref-infamous_71-0" class="reference"><a href="#cite_note-infamous-71"><span class="cite-bracket">[</span>71<span class="cite-bracket">]</span></a></sup></figcaption></figure> <p>The origins of true (MIMD) parallelism go back to <a href="/wiki/Luigi_Federico_Menabrea" title="Luigi Federico Menabrea">Luigi Federico Menabrea</a> and his <i>Sketch of the <a href="/wiki/Analytic_Engine" class="mw-redirect" title="Analytic Engine">Analytic Engine</a> Invented by <a href="/wiki/Charles_Babbage" title="Charles Babbage">Charles Babbage</a></i>.<sup id="cite_ref-72" class="reference"><a href="#cite_note-72"><span class="cite-bracket">[</span>72<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-PH753_73-0" class="reference"><a href="#cite_note-PH753-73"><span class="cite-bracket">[</span>73<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-74" class="reference"><a href="#cite_note-74"><span class="cite-bracket">[</span>74<span class="cite-bracket">]</span></a></sup> </p><p>In 1957, <a href="/wiki/Compagnie_des_Machines_Bull" class="mw-redirect" title="Compagnie des Machines Bull">Compagnie des Machines Bull</a> announced the first computer architecture specifically designed for parallelism, the <a href="/wiki/Bull_Gamma_60" title="Bull Gamma 60">Gamma 60</a>.<sup id="cite_ref-75" class="reference"><a href="#cite_note-75"><span class="cite-bracket">[</span>75<span class="cite-bracket">]</span></a></sup> It utilized a <a href="/wiki/Fork%E2%80%93join_model" title="Fork–join model">fork-join model</a> and a "Program Distributor" to dispatch and collect data to and from independent processing units connected to a central memory.<sup id="cite_ref-76" class="reference"><a href="#cite_note-76"><span class="cite-bracket">[</span>76<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-77" class="reference"><a href="#cite_note-77"><span class="cite-bracket">[</span>77<span class="cite-bracket">]</span></a></sup> </p><p>In April 1958, Stanley Gill (Ferranti) discussed parallel programming and the need for branching and waiting.<sup id="cite_ref-78" class="reference"><a href="#cite_note-78"><span 
class="cite-bracket">[</span>78<span class="cite-bracket">]</span></a></sup> Also in 1958, IBM researchers <a href="/wiki/John_Cocke_(computer_scientist)" title="John Cocke (computer scientist)">John Cocke</a> and <a href="/wiki/Daniel_Slotnick" title="Daniel Slotnick">Daniel Slotnick</a> discussed the use of parallelism in numerical calculations for the first time.<sup id="cite_ref-G_Wilson_79-0" class="reference"><a href="#cite_note-G_Wilson-79"><span class="cite-bracket">[</span>79<span class="cite-bracket">]</span></a></sup> <a href="/wiki/Burroughs_Corporation" title="Burroughs Corporation">Burroughs Corporation</a> introduced the D825 in 1962, a four-processor computer that accessed up to 16 memory modules through a <a href="/wiki/Crossbar_switch" title="Crossbar switch">crossbar switch</a>.<sup id="cite_ref-80" class="reference"><a href="#cite_note-80"><span class="cite-bracket">[</span>80<span class="cite-bracket">]</span></a></sup> In 1967, Amdahl and Slotnick published a debate about the feasibility of parallel processing at American Federation of Information Processing Societies Conference.<sup id="cite_ref-G_Wilson_79-1" class="reference"><a href="#cite_note-G_Wilson-79"><span class="cite-bracket">[</span>79<span class="cite-bracket">]</span></a></sup> It was during this debate that <a href="/wiki/Amdahl%27s_law" title="Amdahl's law">Amdahl's law</a> was coined to define the limit of speed-up due to parallelism. </p><p>In 1969, <a href="/wiki/Honeywell" title="Honeywell">Honeywell</a> introduced its first <a href="/wiki/Multics" title="Multics">Multics</a> system, a symmetric multiprocessor system capable of running up to eight processors in parallel.<sup id="cite_ref-G_Wilson_79-2" class="reference"><a href="#cite_note-G_Wilson-79"><span class="cite-bracket">[</span>79<span class="cite-bracket">]</span></a></sup> <a href="/wiki/C.mmp" title="C.mmp">C.mmp</a>, a multi-processor project at <a href="/wiki/Carnegie_Mellon_University" title="Carnegie Mellon University">Carnegie Mellon University</a> in the 1970s, was among the first multiprocessors with more than a few processors. The first bus-connected multiprocessor with snooping caches was the Synapse N+1 in 1984.<sup id="cite_ref-PH753_73-1" class="reference"><a href="#cite_note-PH753-73"><span class="cite-bracket">[</span>73<span class="cite-bracket">]</span></a></sup> </p><p>SIMD parallel computers can be traced back to the 1970s. 
The motivation behind early SIMD computers was to amortize the <a href="/wiki/Propagation_delay" title="Propagation delay">gate delay</a> of the processor's <a href="/wiki/Control_unit" title="Control unit">control unit</a> over multiple instructions.<sup id="cite_ref-81" class="reference"><a href="#cite_note-81"><span class="cite-bracket">[</span>81<span class="cite-bracket">]</span></a></sup> In 1964, Slotnick had proposed building a massively parallel computer for the <a href="/wiki/Lawrence_Livermore_National_Laboratory" title="Lawrence Livermore National Laboratory">Lawrence Livermore National Laboratory</a>.<sup id="cite_ref-G_Wilson_79-3" class="reference"><a href="#cite_note-G_Wilson-79"><span class="cite-bracket">[</span>79<span class="cite-bracket">]</span></a></sup> His design, funded by the <a href="/wiki/US_Air_Force" class="mw-redirect" title="US Air Force">US Air Force</a>, became the earliest SIMD parallel-computing effort, <a href="/wiki/ILLIAC_IV" title="ILLIAC IV">ILLIAC IV</a>.<sup id="cite_ref-G_Wilson_79-4" class="reference"><a href="#cite_note-G_Wilson-79"><span class="cite-bracket">[</span>79<span class="cite-bracket">]</span></a></sup> The key to its design was a fairly high degree of parallelism, with up to 256 processors, which allowed the machine to work on large datasets in what would later be known as <a href="/wiki/Vector_processor" title="Vector processor">vector processing</a>. However, ILLIAC IV was called "the most infamous of supercomputers" because the project was only one-fourth completed, yet took 11 years and cost almost four times the original estimate.<sup id="cite_ref-infamous_71-1" class="reference"><a href="#cite_note-infamous-71"><span class="cite-bracket">[</span>71<span class="cite-bracket">]</span></a></sup> When it was finally ready to run its first real application in 1976, it was outperformed by existing commercial supercomputers such as the <a href="/wiki/Cray-1" title="Cray-1">Cray-1</a>. </p> <div class="mw-heading mw-heading2"><h2 id="Biological_brain_as_massively_parallel_computer">Biological brain as massively parallel computer</h2></div> <p>In the early 1970s, at the <a href="/wiki/MIT_Computer_Science_and_Artificial_Intelligence_Laboratory" title="MIT Computer Science and Artificial Intelligence Laboratory">MIT Computer Science and Artificial Intelligence Laboratory</a>, <a href="/wiki/Marvin_Minsky" title="Marvin Minsky">Marvin Minsky</a> and <a href="/wiki/Seymour_Papert" title="Seymour Papert">Seymour Papert</a> started developing the <i><a href="/wiki/Society_of_Mind" title="Society of Mind">Society of Mind</a></i> theory, which views the biological brain as a <a href="/wiki/Massively_parallel" title="Massively parallel">massively parallel computer</a>. In 1986, Minsky published <i>The Society of Mind</i>, which claims that "mind is formed from many little agents, each mindless by itself".<sup id="cite_ref-82" class="reference"><a href="#cite_note-82"><span class="cite-bracket">[</span>82<span class="cite-bracket">]</span></a></sup> The theory attempts to explain how what we call intelligence could be a product of the interaction of non-intelligent parts.
Minsky says that the biggest source of ideas about the theory came from his work in trying to create a machine that uses a robotic arm, a video camera, and a computer to build with children's blocks.<sup id="cite_ref-83" class="reference"><a href="#cite_note-83"><span class="cite-bracket">[</span>83<span class="cite-bracket">]</span></a></sup> </p><p>Similar models (which also view the biological brain as a massively parallel computer, i.e., the brain is made up of a constellation of independent or semi-independent agents) were also described by: </p> <ul><li>Thomas R. Blakeslee,<sup id="cite_ref-84" class="reference"><a href="#cite_note-84"><span class="cite-bracket">[</span>84<span class="cite-bracket">]</span></a></sup></li> <li><a href="/wiki/Michael_Gazzaniga" title="Michael Gazzaniga">Michael S. Gazzaniga</a>,<sup id="cite_ref-85" class="reference"><a href="#cite_note-85"><span class="cite-bracket">[</span>85<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-86" class="reference"><a href="#cite_note-86"><span class="cite-bracket">[</span>86<span class="cite-bracket">]</span></a></sup></li> <li><a href="/wiki/Robert_E._Ornstein" title="Robert E. Ornstein">Robert E. Ornstein</a>,<sup id="cite_ref-87" class="reference"><a href="#cite_note-87"><span class="cite-bracket">[</span>87<span class="cite-bracket">]</span></a></sup></li> <li><a href="/wiki/Ernest_Hilgard" title="Ernest Hilgard">Ernest Hilgard</a>,<sup id="cite_ref-88" class="reference"><a href="#cite_note-88"><span class="cite-bracket">[</span>88<span class="cite-bracket">]</span></a></sup><sup id="cite_ref-89" class="reference"><a href="#cite_note-89"><span class="cite-bracket">[</span>89<span class="cite-bracket">]</span></a></sup></li> <li><a href="/wiki/Michio_Kaku" title="Michio Kaku">Michio Kaku</a>,<sup id="cite_ref-90" class="reference"><a href="#cite_note-90"><span class="cite-bracket">[</span>90<span class="cite-bracket">]</span></a></sup></li> <li><a href="/wiki/George_Gurdjieff" title="George Gurdjieff">George Ivanovich Gurdjieff</a>,<sup id="cite_ref-91" class="reference"><a href="#cite_note-91"><span class="cite-bracket">[</span>91<span class="cite-bracket">]</span></a></sup></li> <li>Neurocluster Brain Model.<sup id="cite_ref-92" class="reference"><a href="#cite_note-92"><span class="cite-bracket">[</span>92<span class="cite-bracket">]</span></a></sup></li></ul> <div class="mw-heading mw-heading2"><h2 id="See_also">See also</h2></div> <div class="div-col" style="column-width: 25em;"> <ul><li><a href="/wiki/Computer_multitasking" title="Computer multitasking">Computer multitasking</a></li> <li><a href="/wiki/Concurrency_(computer_science)" title="Concurrency (computer science)">Concurrency (computer science)</a></li> <li><a href="/wiki/Content_Addressable_Parallel_Processor" class="mw-redirect" title="Content Addressable Parallel Processor">Content Addressable Parallel Processor</a></li>
Parallel Processor">Content Addressable Parallel Processor</a></li> <li><a href="/wiki/List_of_distributed_computing_conferences" title="List of distributed computing conferences">List of distributed computing conferences</a></li> <li><a href="/wiki/Loop-level_parallelism" title="Loop-level parallelism">Loop-level parallelism</a></li> <li><a href="/wiki/Dataflow_architecture" title="Dataflow architecture">Manchester dataflow machine</a></li> <li><a href="/wiki/Manycore" class="mw-redirect" title="Manycore">Manycore</a></li> <li><a href="/wiki/Parallel_programming_model" title="Parallel programming model">Parallel programming model</a></li> <li><a href="/wiki/Parallelization_contract" title="Parallelization contract">Parallelization contract</a></li> <li><a href="/wiki/Serializability" class="mw-redirect" title="Serializability">Serializability</a></li> <li><a href="/wiki/Synchronous_programming" class="mw-redirect" title="Synchronous programming">Synchronous programming</a></li> <li><a href="/wiki/Transputer" title="Transputer">Transputer</a></li> <li><a href="/wiki/Vector_processing" class="mw-redirect" title="Vector processing">Vector processing</a></li></ul> </div> <div class="mw-heading mw-heading2"><h2 id="References">References</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Parallel_computing&action=edit&section=37" title="Edit section: References"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1239543626">.mw-parser-output .reflist{margin-bottom:0.5em;list-style-type:decimal}@media screen{.mw-parser-output .reflist{font-size:90%}}.mw-parser-output .reflist .references{font-size:100%;margin-bottom:0;list-style-type:inherit}.mw-parser-output .reflist-columns-2{column-width:30em}.mw-parser-output .reflist-columns-3{column-width:25em}.mw-parser-output .reflist-columns{margin-top:0.3em}.mw-parser-output .reflist-columns ol{margin-top:0}.mw-parser-output .reflist-columns li{page-break-inside:avoid;break-inside:avoid-column}.mw-parser-output .reflist-upper-alpha{list-style-type:upper-alpha}.mw-parser-output .reflist-upper-roman{list-style-type:upper-roman}.mw-parser-output .reflist-lower-alpha{list-style-type:lower-alpha}.mw-parser-output .reflist-lower-greek{list-style-type:lower-greek}.mw-parser-output .reflist-lower-roman{list-style-type:lower-roman}</style><div class="reflist reflist-columns references-column-width" style="column-width: 30em;"> <ol class="references"> <li id="cite_note-1"><span class="mw-cite-backlink"><b><a href="#cite_ref-1">^</a></b></span> <span class="reference-text"><style data-mw-deduplicate="TemplateStyles:r1238218222">.mw-parser-output cite.citation{font-style:inherit;word-wrap:break-word}.mw-parser-output .citation q{quotes:"\"""\"""'""'"}.mw-parser-output .citation:target{background-color:rgba(0,127,255,0.133)}.mw-parser-output .id-lock-free.id-lock-free a{background:url("//upload.wikimedia.org/wikipedia/commons/6/65/Lock-green.svg")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-limited.id-lock-limited a,.mw-parser-output .id-lock-registration.id-lock-registration a{background:url("//upload.wikimedia.org/wikipedia/commons/d/d6/Lock-gray-alt-2.svg")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-subscription.id-lock-subscription a{background:url("//upload.wikimedia.org/wikipedia/commons/a/aa/Lock-red-alt-2.svg")right 0.1em center/9px no-repeat}.mw-parser-output .cs1-ws-icon 
a{background:url("//upload.wikimedia.org/wikipedia/commons/4/4c/Wikisource-logo.svg")right 0.1em center/12px no-repeat}body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-free a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-limited a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-registration a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-subscription a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .cs1-ws-icon a{background-size:contain;padding:0 1em 0 0}.mw-parser-output .cs1-code{color:inherit;background:inherit;border:none;padding:inherit}.mw-parser-output .cs1-hidden-error{display:none;color:var(--color-error,#d33)}.mw-parser-output .cs1-visible-error{color:var(--color-error,#d33)}.mw-parser-output .cs1-maint{display:none;color:#085;margin-left:0.3em}.mw-parser-output .cs1-kern-left{padding-left:0.2em}.mw-parser-output .cs1-kern-right{padding-right:0.2em}.mw-parser-output .citation .mw-selflink{font-weight:inherit}@media screen{.mw-parser-output .cs1-format{font-size:95%}html.skin-theme-clientpref-night .mw-parser-output .cs1-maint{color:#18911f}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .cs1-maint{color:#18911f}}</style><cite id="CITEREFGottliebAlmasi,_George_S.1989" class="citation book cs1">Gottlieb, Allan; Almasi, George S. (1989). <a rel="nofollow" class="external text" href="http://dl.acm.org/citation.cfm?id=160438"><i>Highly parallel computing</i></a>. Redwood City, Calif.: Benjamin/Cummings. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-8053-0177-9" title="Special:BookSources/978-0-8053-0177-9"><bdi>978-0-8053-0177-9</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Highly+parallel+computing&rft.place=Redwood+City%2C+Calif.&rft.pub=Benjamin%2FCummings&rft.date=1989&rft.isbn=978-0-8053-0177-9&rft.aulast=Gottlieb&rft.aufirst=Allan&rft.au=Almasi%2C+George+S.&rft_id=http%3A%2F%2Fdl.acm.org%2Fcitation.cfm%3Fid%3D160438&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-:0-2"><span class="mw-cite-backlink"><b><a href="#cite_ref-:0_2-0">^</a></b></span> <span class="reference-text">S.V. Adve <i>et al.</i> (November 2008). <a rel="nofollow" class="external text" href="https://graphics.cs.illinois.edu/sites/default/files/upcrc-wp.pdf">"Parallel Computing Research at Illinois: The UPCRC Agenda"</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180111165735/https://graphics.cs.illinois.edu/sites/default/files/upcrc-wp.pdf">Archived</a> 2018-01-11 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a> (PDF). Parallel@Illinois, University of Illinois at Urbana-Champaign. "The main techniques for these performance benefits—increased clock frequency and smarter but increasingly complex architectures—are now hitting the so-called power wall. 
The <a href="/wiki/Computer_industry" class="mw-redirect" title="Computer industry">computer industry</a> has accepted that future performance increases must largely come from increasing the number of processors (or cores) on a die, rather than making a single core go faster."</span> </li> <li id="cite_note-3"><span class="mw-cite-backlink"><b><a href="#cite_ref-3">^</a></b></span> <span class="reference-text"><a href="/wiki/Krste_Asanovi%C4%87" title="Krste Asanović">Asanovic</a> <i>et al.</i> Old [conventional wisdom]: Power is free, but <a href="/wiki/Transistor" title="Transistor">transistors</a> are expensive. New [conventional wisdom] is [that] power is expensive, but transistors are "free".</span> </li> <li id="cite_note-View-Power-4"><span class="mw-cite-backlink"><b><a href="#cite_ref-View-Power_4-0">^</a></b></span> <span class="reference-text"><a href="/wiki/Asanovic,_Krste" class="mw-redirect" title="Asanovic, Krste">Asanovic, Krste</a> <i>et al.</i> (December 18, 2006). <a rel="nofollow" class="external text" href="http://www.eecs.berkeley.edu/Pubs/TechRpts/2006/EECS-2006-183.pdf">"The Landscape of Parallel Computing Research: A View from Berkeley"</a> (PDF). University of California, Berkeley. Technical Report No. UCB/EECS-2006-183. "Old [conventional wisdom]: Increasing clock frequency is the primary method of improving processor performance. New [conventional wisdom]: Increasing parallelism is the primary method of improving processor performance… Even representatives from Intel, a company generally associated with the 'higher clock-speed is better' position, warned that traditional approaches to maximizing performance through maximizing clock speed have been pushed to their limits."</span> </li> <li id="cite_note-5"><span class="mw-cite-backlink"><b><a href="#cite_ref-5">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation book cs1"><i>Parallel and Concurrent Programming in Haskell</i>. O'Reilly Media. 2013. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9781449335922" title="Special:BookSources/9781449335922"><bdi>9781449335922</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Parallel+and+Concurrent+Programming+in+Haskell&rft.pub=O%27Reilly+Media&rft.date=2013&rft.isbn=9781449335922&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-6"><span class="mw-cite-backlink"><b><a href="#cite_ref-6">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHennessyPatterson,_David_A.Larus,_James_R.1999" class="citation book cs1"><a href="/wiki/John_L._Hennessy" title="John L. Hennessy">Hennessy, John L.</a>; <a href="/wiki/David_Patterson_(computer_scientist)" title="David Patterson (computer scientist)">Patterson, David A.</a>; <a href="/wiki/James_Larus" title="James Larus">Larus, James R.</a> (1999). <a rel="nofollow" class="external text" href="https://archive.org/details/computerorganiz000henn"><i>Computer organization and design: the hardware/software interface</i></a> (2. ed., 3rd print. ed.). San Francisco: Kaufmann. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-55860-428-5" title="Special:BookSources/978-1-55860-428-5"><bdi>978-1-55860-428-5</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Computer+organization+and+design%3A+the+hardware%2Fsoftware+interface&rft.place=San+Francisco&rft.edition=2.+ed.%2C+3rd+print.&rft.pub=Kaufmann&rft.date=1999&rft.isbn=978-1-55860-428-5&rft.aulast=Hennessy&rft.aufirst=John+L.&rft.au=Patterson%2C+David+A.&rft.au=Larus%2C+James+R.&rft_id=https%3A%2F%2Farchive.org%2Fdetails%2Fcomputerorganiz000henn&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-llnltut-7"><span class="mw-cite-backlink">^ <a href="#cite_ref-llnltut_7-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-llnltut_7-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBarney,_Blaise" class="citation web cs1">Barney, Blaise. <a rel="nofollow" class="external text" href="http://www.llnl.gov/computing/tutorials/parallel_comp/">"Introduction to Parallel Computing"</a>. Lawrence Livermore National Laboratory<span class="reference-accessdate">. Retrieved <span class="nowrap">2007-11-09</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Introduction+to+Parallel+Computing&rft.pub=Lawrence+Livermore+National+Laboratory&rft.au=Barney%2C+Blaise&rft_id=http%3A%2F%2Fwww.llnl.gov%2Fcomputing%2Ftutorials%2Fparallel_comp%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-8"><span class="mw-cite-backlink"><b><a href="#cite_ref-8">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFThomas_RauberGudula_Rünger2013" class="citation book cs1">Thomas Rauber; Gudula Rünger (2013). <i>Parallel Programming: for Multicore and Cluster Systems</i>. Springer Science & Business Media. p. 1. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9783642378010" title="Special:BookSources/9783642378010"><bdi>9783642378010</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Parallel+Programming%3A+for+Multicore+and+Cluster+Systems&rft.pages=1&rft.pub=Springer+Science+%26+Business+Media&rft.date=2013&rft.isbn=9783642378010&rft.au=Thomas+Rauber&rft.au=Gudula+R%C3%BCnger&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-9"><span class="mw-cite-backlink"><b><a href="#cite_ref-9">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHennessyPatterson,_David_A.2002" class="citation book cs1">Hennessy, John L.; Patterson, David A. (2002). <i>Computer architecture / a quantitative approach</i> (3rd ed.). San Francisco, Calif.: International Thomson. p. 43. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-55860-724-8" title="Special:BookSources/978-1-55860-724-8"><bdi>978-1-55860-724-8</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Computer+architecture+%2F+a+quantitative+approach.&rft.place=San+Francisco%2C+Calif.&rft.pages=43&rft.edition=3rd&rft.pub=International+Thomson&rft.date=2002&rft.isbn=978-1-55860-724-8&rft.aulast=Hennessy&rft.aufirst=John+L.&rft.au=Patterson%2C+David+A.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-10"><span class="mw-cite-backlink"><b><a href="#cite_ref-10">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRabaey1996" class="citation book cs1">Rabaey, Jan M. (1996). <i>Digital integrated circuits : a design perspective</i>. Upper Saddle River, N.J.: Prentice-Hall. p. 235. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-13-178609-7" title="Special:BookSources/978-0-13-178609-7"><bdi>978-0-13-178609-7</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Digital+integrated+circuits+%3A+a+design+perspective&rft.place=Upper+Saddle+River%2C+N.J.&rft.pages=235&rft.pub=Prentice-Hall&rft.date=1996&rft.isbn=978-0-13-178609-7&rft.aulast=Rabaey&rft.aufirst=Jan+M.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-11"><span class="mw-cite-backlink"><b><a href="#cite_ref-11">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFlynn2004" class="citation news cs1">Flynn, Laurie J. (8 May 2004). <a rel="nofollow" class="external text" href="https://www.nytimes.com/2004/05/08/business/08chip.html?ex=1399348800&en=98cc44ca97b1a562&ei=5007">"Intel Halts Development Of 2 New Microprocessors"</a>. <i>New York Times</i><span class="reference-accessdate">. Retrieved <span class="nowrap">5 June</span> 2012</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=New+York+Times&rft.atitle=Intel+Halts+Development+Of+2+New+Microprocessors&rft.date=2004-05-08&rft.aulast=Flynn&rft.aufirst=Laurie+J.&rft_id=https%3A%2F%2Fwww.nytimes.com%2F2004%2F05%2F08%2Fbusiness%2F08chip.html%3Fex%3D1399348800%26en%3D98cc44ca97b1a562%26ei%3D5007&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-12"><span class="mw-cite-backlink"><b><a href="#cite_ref-12">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFThomas_RauberGudula_Rünger2013" class="citation book cs1">Thomas Rauber; Gudula Rünger (2013). <i>Parallel Programming: for Multicore and Cluster Systems</i>. Springer Science & Business Media. p. 2. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9783642378010" title="Special:BookSources/9783642378010"><bdi>9783642378010</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Parallel+Programming%3A+for+Multicore+and+Cluster+Systems&rft.pages=2&rft.pub=Springer+Science+%26+Business+Media&rft.date=2013&rft.isbn=9783642378010&rft.au=Thomas+Rauber&rft.au=Gudula+R%C3%BCnger&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-13"><span class="mw-cite-backlink"><b><a href="#cite_ref-13">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFThomas_RauberGudula_Rünger2013" class="citation book cs1">Thomas Rauber; Gudula Rünger (2013). <i>Parallel Programming: for Multicore and Cluster Systems</i>. Springer Science & Business Media. p. 3. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9783642378010" title="Special:BookSources/9783642378010"><bdi>9783642378010</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Parallel+Programming%3A+for+Multicore+and+Cluster+Systems&rft.pages=3&rft.pub=Springer+Science+%26+Business+Media&rft.date=2013&rft.isbn=9783642378010&rft.au=Thomas+Rauber&rft.au=Gudula+R%C3%BCnger&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-:02-14"><span class="mw-cite-backlink"><b><a href="#cite_ref-:02_14-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBakos2016" class="citation cs2">Bakos, Jason D. (2016-01-01), Bakos, Jason D. (ed.), <a rel="nofollow" class="external text" href="https://linkinghub.elsevier.com/retrieve/pii/B978012800342800002X">"Chapter 2 - Multicore and data-level optimization: OpenMP and SIMD"</a>, <i>Embedded Systems</i>, Boston: Morgan Kaufmann, pp. 
49–103, <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2Fb978-0-12-800342-8.00002-x">10.1016/b978-0-12-800342-8.00002-x</a>, <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-12-800342-8" title="Special:BookSources/978-0-12-800342-8"><bdi>978-0-12-800342-8</bdi></a><span class="reference-accessdate">, retrieved <span class="nowrap">2024-11-18</span></span></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Embedded+Systems&rft.atitle=Chapter+2+-+Multicore+and+data-level+optimization%3A+OpenMP+and+SIMD&rft.pages=49-103&rft.date=2016-01-01&rft_id=info%3Adoi%2F10.1016%2Fb978-0-12-800342-8.00002-x&rft.isbn=978-0-12-800342-8&rft.aulast=Bakos&rft.aufirst=Jason+D.&rft_id=https%3A%2F%2Flinkinghub.elsevier.com%2Fretrieve%2Fpii%2FB978012800342800002X&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-15"><span class="mw-cite-backlink"><b><a href="#cite_ref-15">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation book cs1"><i>The Art of Multiprocessor Programming, Revised Reprint</i>. Morgan Kaufmann. 22 May 2012. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9780123973375" title="Special:BookSources/9780123973375"><bdi>9780123973375</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=The+Art+of+Multiprocessor+Programming%2C+Revised+Reprint&rft.pub=Morgan+Kaufmann&rft.date=2012-05-22&rft.isbn=9780123973375&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-16"><span class="mw-cite-backlink"><b><a href="#cite_ref-16">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFVajda2011" class="citation book cs1">Vajda, András (10 June 2011). <i>Programming Many-Core Chips</i>. Springer. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9781441997395" title="Special:BookSources/9781441997395"><bdi>9781441997395</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Programming+Many-Core+Chips&rft.pub=Springer&rft.date=2011-06-10&rft.isbn=9781441997395&rft.aulast=Vajda&rft.aufirst=Andr%C3%A1s&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-17"><span class="mw-cite-backlink"><b><a href="#cite_ref-17">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAmdahl1967" class="citation book cs1">Amdahl, Gene M. (1967-04-18). <a rel="nofollow" class="external text" href="https://dl.acm.org/doi/10.1145/1465482.1465560">"Validity of the single processor approach to achieving large scale computing capabilities"</a>. <i>Proceedings of the April 18-20, 1967, spring joint computer conference on - AFIPS '67 (Spring)</i>. New York, NY, USA: Association for Computing Machinery. pp. 483–485. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F1465482.1465560">10.1145/1465482.1465560</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-4503-7895-6" title="Special:BookSources/978-1-4503-7895-6"><bdi>978-1-4503-7895-6</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Validity+of+the+single+processor+approach+to+achieving+large+scale+computing+capabilities&rft.btitle=Proceedings+of+the+April+18-20%2C+1967%2C+spring+joint+computer+conference+on+-+AFIPS+%2767+%28Spring%29&rft.place=New+York%2C+NY%2C+USA&rft.pages=483-485&rft.pub=Association+for+Computing+Machinery&rft.date=1967-04-18&rft_id=info%3Adoi%2F10.1145%2F1465482.1465560&rft.isbn=978-1-4503-7895-6&rft.aulast=Amdahl&rft.aufirst=Gene+M.&rft_id=https%3A%2F%2Fdl.acm.org%2Fdoi%2F10.1145%2F1465482.1465560&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-:1-18"><span class="mw-cite-backlink"><b><a href="#cite_ref-:1_18-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation book cs1"><i>Computer Architecture: A Quantitative Approach</i>. Morgan Kaufmann. 2003. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-8178672663" title="Special:BookSources/978-8178672663"><bdi>978-8178672663</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Computer+Architecture%3A+A+Quantitative+Approach&rft.pub=Morgan+Kaufmann&rft.date=2003&rft.isbn=978-8178672663&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-19"><span class="mw-cite-backlink"><b><a href="#cite_ref-19">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation book cs1"><i>Parallel Computer Architecture A Hardware/Software Approach</i>. Elsevier Science. 1999. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9781558603431" title="Special:BookSources/9781558603431"><bdi>9781558603431</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Parallel+Computer+Architecture+A+Hardware%2FSoftware+Approach&rft.pub=Elsevier+Science&rft.date=1999&rft.isbn=9781558603431&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-20"><span class="mw-cite-backlink"><b><a href="#cite_ref-20">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMcCoolReindersRobison2013" class="citation book cs1">McCool, Michael; Reinders, James; Robison, Arch (2013). <i>Structured Parallel Programming: Patterns for Efficient Computation</i>. Elsevier. p. 61. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-12-415993-8" title="Special:BookSources/978-0-12-415993-8"><bdi>978-0-12-415993-8</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Structured+Parallel+Programming%3A+Patterns+for+Efficient+Computation&rft.pages=61&rft.pub=Elsevier&rft.date=2013&rft.isbn=978-0-12-415993-8&rft.aulast=McCool&rft.aufirst=Michael&rft.au=Reinders%2C+James&rft.au=Robison%2C+Arch&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-21"><span class="mw-cite-backlink"><b><a href="#cite_ref-21">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGunther2007" class="citation book cs1">Gunther, Neil (2007). <i>Guerrilla Capacity Planning: A Tactical Approach to Planning for Highly Scalable Applications and Services</i>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-3540261384" title="Special:BookSources/978-3540261384"><bdi>978-3540261384</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Guerrilla+Capacity+Planning%3A+A+Tactical+Approach+to+Planning+for+Highly+Scalable+Applications+and+Services&rft.date=2007&rft.isbn=978-3540261384&rft.aulast=Gunther&rft.aufirst=Neil&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-22"><span class="mw-cite-backlink"><b><a href="#cite_ref-22">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBernstein1966" class="citation journal cs1 cs1-prop-long-vol">Bernstein, Arthur J. (1 October 1966). "Analysis of Programs for Parallel Processing". <i>IEEE Transactions on Electronic Computers</i>. EC-15 (5): 757–763. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FPGEC.1966.264565">10.1109/PGEC.1966.264565</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Transactions+on+Electronic+Computers&rft.atitle=Analysis+of+Programs+for+Parallel+Processing&rft.volume=EC-15&rft.issue=5&rft.pages=757-763&rft.date=1966-10-01&rft_id=info%3Adoi%2F10.1109%2FPGEC.1966.264565&rft.aulast=Bernstein&rft.aufirst=Arthur+J.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-23"><span class="mw-cite-backlink"><b><a href="#cite_ref-23">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRoosta2000" class="citation book cs1">Roosta, Seyed H. (2000). <i>Parallel processing and parallel algorithms : theory and computation</i>. New York, NY [u.a.]: Springer. p. 114. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-387-98716-3" title="Special:BookSources/978-0-387-98716-3"><bdi>978-0-387-98716-3</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Parallel+processing+and+parallel+algorithms+%3A+theory+and+computation&rft.place=New+York%2C+NY+%5Bu.a.%5D&rft.pages=114&rft.pub=Springer&rft.date=2000&rft.isbn=978-0-387-98716-3&rft.aulast=Roosta&rft.aufirst=Seyed+H.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-24"><span class="mw-cite-backlink"><b><a href="#cite_ref-24">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://msdn.microsoft.com/en-us/library/windows/desktop/ms684841(v=vs.85).aspx">"Processes and Threads"</a>. <i>Microsoft Developer Network</i>. Microsoft Corp. 2018<span class="reference-accessdate">. Retrieved <span class="nowrap">2018-05-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Microsoft+Developer+Network&rft.atitle=Processes+and+Threads&rft.date=2018&rft_id=https%3A%2F%2Fmsdn.microsoft.com%2Fen-us%2Flibrary%2Fwindows%2Fdesktop%2Fms684841%28v%3Dvs.85%29.aspx&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-25"><span class="mw-cite-backlink"><b><a href="#cite_ref-25">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKrauss2018" class="citation web cs1">Krauss, Kirk J (2018). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180513081315/http://www.developforperformance.com/ThreadSafetyForPerformance.html">"Thread Safety for Performance"</a>. <i>Develop for Performance</i>. Archived from <a rel="nofollow" class="external text" href="http://www.developforperformance.com/ThreadSafetyForPerformance.html">the original</a> on 2018-05-13<span class="reference-accessdate">. Retrieved <span class="nowrap">2018-05-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Develop+for+Performance&rft.atitle=Thread+Safety+for+Performance&rft.date=2018&rft.aulast=Krauss&rft.aufirst=Kirk+J&rft_id=http%3A%2F%2Fwww.developforperformance.com%2FThreadSafetyForPerformance.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-26"><span class="mw-cite-backlink"><b><a href="#cite_ref-26">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFTanenbaum2002" class="citation book cs1">Tanenbaum, Andrew S. (2002-02-01). <a rel="nofollow" class="external text" href="http://www.informit.com/articles/article.aspx?p=25193"><i>Introduction to Operating System Deadlocks</i></a>. Pearson Education, Informit<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2018-05-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Introduction+to+Operating+System+Deadlocks&rft.pub=Pearson+Education%2C+Informit&rft.date=2002-02-01&rft.aulast=Tanenbaum&rft.aufirst=Andrew+S.&rft_id=http%3A%2F%2Fwww.informit.com%2Farticles%2Farticle.aspx%3Fp%3D25193&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span> <span class="cs1-visible-error citation-comment"><code class="cs1-code">{{<a href="/wiki/Template:Cite_book" title="Template:Cite book">cite book</a>}}</code>: </span><span class="cs1-visible-error citation-comment"><code class="cs1-code">|website=</code> ignored (<a href="/wiki/Help:CS1_errors#periodical_ignored" title="Help:CS1 errors">help</a>)</span></span> </li> <li id="cite_note-27"><span class="mw-cite-backlink"><b><a href="#cite_ref-27">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFCecil2015" class="citation web cs1">Cecil, David (2015-11-03). <a rel="nofollow" class="external text" href="https://www.embedded.com/design/operating-systems/4440752/Synchronization-internals----the-semaphore">"Synchronization internals – the semaphore"</a>. <i>Embedded</i>. AspenCore<span class="reference-accessdate">. Retrieved <span class="nowrap">2018-05-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Embedded&rft.atitle=Synchronization+internals+%26ndash%3B+the+semaphore&rft.date=2015-11-03&rft.aulast=Cecil&rft.aufirst=David&rft_id=https%3A%2F%2Fwww.embedded.com%2Fdesign%2Foperating-systems%2F4440752%2FSynchronization-internals----the-semaphore&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-28"><span class="mw-cite-backlink"><b><a href="#cite_ref-28">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFPreshing2012" class="citation web cs1">Preshing, Jeff (2012-06-08). <a rel="nofollow" class="external text" href="http://preshing.com/20120612/an-introduction-to-lock-free-programming/">"An Introduction to Lock-Free Programming"</a>. <i>Preshing on Programming</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2018-05-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Preshing+on+Programming&rft.atitle=An+Introduction+to+Lock-Free+Programming&rft.date=2012-06-08&rft.aulast=Preshing&rft.aufirst=Jeff&rft_id=http%3A%2F%2Fpreshing.com%2F20120612%2Fan-introduction-to-lock-free-programming%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-29"><span class="mw-cite-backlink"><b><a href="#cite_ref-29">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://stackoverflow.com/questions/806569/whats-the-opposite-of-embarrassingly-parallel">"What's the opposite of "embarrassingly parallel"?"</a>. <i>StackOverflow</i><span class="reference-accessdate">. 
Retrieved <span class="nowrap">2018-05-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=StackOverflow&rft.atitle=What%27s+the+opposite+of+%22embarrassingly+parallel%22%3F&rft_id=https%3A%2F%2Fstackoverflow.com%2Fquestions%2F806569%2Fwhats-the-opposite-of-embarrassingly-parallel&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-30"><span class="mw-cite-backlink"><b><a href="#cite_ref-30">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSchwartz2011" class="citation web cs1">Schwartz, David (2011-08-15). <a rel="nofollow" class="external text" href="https://stackoverflow.com/questions/1970345/what-is-thread-contention">"What is thread contention?"</a>. <i>StackOverflow</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2018-05-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=StackOverflow&rft.atitle=What+is+thread+contention%3F&rft.date=2011-08-15&rft.aulast=Schwartz&rft.aufirst=David&rft_id=https%3A%2F%2Fstackoverflow.com%2Fquestions%2F1970345%2Fwhat-is-thread-contention&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-31"><span class="mw-cite-backlink"><b><a href="#cite_ref-31">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKukanov2008" class="citation web cs1">Kukanov, Alexey (2008-03-04). <a rel="nofollow" class="external text" href="https://software.intel.com/en-us/blogs/2008/03/04/why-a-simple-test-can-get-parallel-slowdown">"Why a simple test can get parallel slowdown"</a><span class="reference-accessdate">. Retrieved <span class="nowrap">2015-02-15</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Why+a+simple+test+can+get+parallel+slowdown&rft.date=2008-03-04&rft.aulast=Kukanov&rft.aufirst=Alexey&rft_id=https%3A%2F%2Fsoftware.intel.com%2Fen-us%2Fblogs%2F2008%2F03%2F04%2Fwhy-a-simple-test-can-get-parallel-slowdown&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-32"><span class="mw-cite-backlink"><b><a href="#cite_ref-32">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKrauss2018" class="citation web cs1">Krauss, Kirk J (2018). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20180513081501/http://www.developforperformance.com/ThreadingForPerformance.html">"Threading for Performance"</a>. <i>Develop for Performance</i>. Archived from <a rel="nofollow" class="external text" href="http://www.developforperformance.com/ThreadingForPerformance.html">the original</a> on 2018-05-13<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2018-05-10</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Develop+for+Performance&rft.atitle=Threading+for+Performance&rft.date=2018&rft.aulast=Krauss&rft.aufirst=Kirk+J&rft_id=http%3A%2F%2Fwww.developforperformance.com%2FThreadingForPerformance.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-flynn-1972-33"><span class="mw-cite-backlink"><b><a href="#cite_ref-flynn-1972_33-0">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFFlynn1972" class="citation journal cs1"><a href="/wiki/Michael_J._Flynn" title="Michael J. Flynn">Flynn, Michael J.</a> (September 1972). <a rel="nofollow" class="external text" href="https://www.cs.utah.edu/~hari/teaching/paralg/Flynn72.pdf">"Some Computer Organizations and Their Effectiveness"</a> <span class="cs1-format">(PDF)</span>. <i><a href="/wiki/IEEE_Transactions_on_Computers" title="IEEE Transactions on Computers">IEEE Transactions on Computers</a></i>. <b>C-21</b> (9): 948–960. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FTC.1972.5009071">10.1109/TC.1972.5009071</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=IEEE+Transactions+on+Computers&rft.atitle=Some+Computer+Organizations+and+Their+Effectiveness&rft.volume=C-21&rft.issue=9&rft.pages=948-960&rft.date=1972-09&rft_id=info%3Adoi%2F10.1109%2FTC.1972.5009071&rft.aulast=Flynn&rft.aufirst=Michael+J.&rft_id=https%3A%2F%2Fwww.cs.utah.edu%2F~hari%2Fteaching%2Fparalg%2FFlynn72.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-34"><span class="mw-cite-backlink"><b><a href="#cite_ref-34">^</a></b></span> <span class="reference-text">Patterson and Hennessy, p. 748.</span> </li> <li id="cite_note-35"><span class="mw-cite-backlink"><b><a href="#cite_ref-35">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSilberschatzGalvinGagne2008" class="citation book cs1">Silberschatz, Abraham; Galvin, Peter B.; Gagne, Greg (29 July 2008). <i>Operating System Concepts</i>. Wiley. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0470128725" title="Special:BookSources/978-0470128725"><bdi>978-0470128725</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Operating+System+Concepts&rft.pub=Wiley&rft.date=2008-07-29&rft.isbn=978-0470128725&rft.aulast=Silberschatz&rft.aufirst=Abraham&rft.au=Galvin%2C+Peter+B.&rft.au=Gagne%2C+Greg&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-36"><span class="mw-cite-backlink"><b><a href="#cite_ref-36">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation book cs1"><i>Computer Organization and Design MIPS Edition: The Hardware/Software Interface (The Morgan Kaufmann Series in Computer Architecture and Design)</i>. Morgan Kaufmann. 2013. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0124077263" title="Special:BookSources/978-0124077263"><bdi>978-0124077263</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Computer+Organization+and+Design+MIPS+Edition%3A+The+Hardware%2FSoftware+Interface+%28The+Morgan+Kaufmann+Series+in+Computer+Architecture+and+Design%29&rft.pub=Morgan+Kaufmann&rft.date=2013&rft.isbn=978-0124077263&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-37"><span class="mw-cite-backlink"><b><a href="#cite_ref-37">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation book cs1"><i>Parallel Programming: Techniques and Applications Using Networked Workstations and Parallel Computers</i>. Pearson. 2005. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0131405639" title="Special:BookSources/978-0131405639"><bdi>978-0131405639</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Parallel+Programming%3A+Techniques+and+Applications+Using+Networked+Workstations+and+Parallel+Computers&rft.pub=Pearson&rft.date=2005&rft.isbn=978-0131405639&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-38"><span class="mw-cite-backlink"><b><a href="#cite_ref-38">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSingh1997" class="citation book cs1">Singh, David Culler; J.P. (1997). <i>Parallel computer architecture</i> ([Nachdr.] ed.). San Francisco: Morgan Kaufmann Publ. p. 15. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-55860-343-1" title="Special:BookSources/978-1-55860-343-1"><bdi>978-1-55860-343-1</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Parallel+computer+architecture&rft.place=San+Francisco&rft.pages=15&rft.edition=%5BNachdr.%5D&rft.pub=Morgan+Kaufmann+Publ.&rft.date=1997&rft.isbn=978-1-55860-343-1&rft.aulast=Singh&rft.aufirst=David+Culler%3B+J.P.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span><span class="cs1-maint citation-comment"><code class="cs1-code">{{<a href="/wiki/Template:Cite_book" title="Template:Cite book">cite book</a>}}</code>: CS1 maint: multiple names: authors list (<a href="/wiki/Category:CS1_maint:_multiple_names:_authors_list" title="Category:CS1 maint: multiple names: authors list">link</a>)</span></span> </li> <li id="cite_note-39"><span class="mw-cite-backlink"><b><a href="#cite_ref-39">^</a></b></span> <span class="reference-text">Culler et al. p. 15.</span> </li> <li id="cite_note-40"><span class="mw-cite-backlink"><b><a href="#cite_ref-40">^</a></b></span> <span class="reference-text"><a href="/wiki/Yale_Patt" title="Yale Patt">Patt, Yale</a> (April 2004). 
"<a rel="nofollow" class="external text" href="http://users.ece.utexas.edu/~patt/Videos/talk_videos/cmu_04-29-04.wmv">The Microprocessor Ten Years From Now: What Are The Challenges, How Do We Meet Them?</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20080414141000/http://users.ece.utexas.edu/~patt/Videos/talk_videos/cmu_04-29-04.wmv">Archived</a> 2008-04-14 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a> (wmv). Distinguished Lecturer talk at <a href="/wiki/Carnegie_Mellon_University" title="Carnegie Mellon University">Carnegie Mellon University</a>. Retrieved on November 7, 2007.</span> </li> <li id="cite_note-Culler124-41"><span class="mw-cite-backlink"><b><a href="#cite_ref-Culler124_41-0">^</a></b></span> <span class="reference-text">Culler et al. p. 124.</span> </li> <li id="cite_note-Culler125-42"><span class="mw-cite-backlink"><b><a href="#cite_ref-Culler125_42-0">^</a></b></span> <span class="reference-text">Culler et al. p. 125.</span> </li> <li id="cite_note-43"><span class="mw-cite-backlink"><b><a href="#cite_ref-43">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSamuel_LarsenSaman_Amarasinghe" class="citation web cs1">Samuel Larsen; Saman Amarasinghe. <a rel="nofollow" class="external text" href="http://groups.csail.mit.edu/cag/slp/SLP-PLDI-2000.pdf">"Exploiting Superword Level Parallelism with Multimedia Instruction Sets"</a> <span class="cs1-format">(PDF)</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Exploiting+Superword+Level+Parallelism+with+Multimedia+Instruction+Sets&rft.au=Samuel+Larsen&rft.au=Saman+Amarasinghe&rft_id=http%3A%2F%2Fgroups.csail.mit.edu%2Fcag%2Fslp%2FSLP-PLDI-2000.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-PH713-44"><span class="mw-cite-backlink">^ <a href="#cite_ref-PH713_44-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-PH713_44-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text">Patterson and Hennessy, p. 713.</span> </li> <li id="cite_note-HP549-45"><span class="mw-cite-backlink">^ <a href="#cite_ref-HP549_45-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-HP549_45-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text">Hennessy and Patterson, p. 549.</span> </li> <li id="cite_note-46"><span class="mw-cite-backlink"><b><a href="#cite_ref-46">^</a></b></span> <span class="reference-text">Patterson and Hennessy, p. 714.</span> </li> <li id="cite_note-47"><span class="mw-cite-backlink"><b><a href="#cite_ref-47">^</a></b></span> <span class="reference-text"><a href="/wiki/Distributed_computing#CITEREFGhosh2007" title="Distributed computing">Ghosh (2007)</a>, p. 10. <a href="/wiki/Distributed_computing#CITEREFKeidar2008" title="Distributed computing">Keidar (2008)</a>.</span> </li> <li id="cite_note-48"><span class="mw-cite-backlink"><b><a href="#cite_ref-48">^</a></b></span> <span class="reference-text"><a href="/wiki/Distributed_computing#CITEREFLynch1996" title="Distributed computing">Lynch (1996)</a>, p. xix, 1–2. <a href="/wiki/Distributed_computing#CITEREFPeleg2000" title="Distributed computing">Peleg (2000)</a>, p. 
1.</span> </li> <li id="cite_note-49"><span class="mw-cite-backlink"><b><a href="#cite_ref-49">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://www.webopedia.com/TERM/c/clustering.html">What is clustering?</a> Webopedia computer dictionary. Retrieved on November 7, 2007.</span> </li> <li id="cite_note-50"><span class="mw-cite-backlink"><b><a href="#cite_ref-50">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="https://www.pcmag.com/encyclopedia_term/0,2542,t=Beowulf&i=38548,00.asp">Beowulf definition.</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20121010215231/https://www.pcmag.com/encyclopedia_term/0%2C2542%2Ct%3DBeowulf%26i%3D38548%2C00.asp">Archived</a> 2012-10-10 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a> <i>PC Magazine</i>. Retrieved on November 7, 2007.</span> </li> <li id="cite_note-51"><span class="mw-cite-backlink"><b><a href="#cite_ref-51">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.top500.org/statistics/list/">"List Statistics | TOP500 Supercomputer Sites"</a>. <i>www.top500.org</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2018-08-05</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=www.top500.org&rft.atitle=List+Statistics+%7C+TOP500+Supercomputer+Sites&rft_id=https%3A%2F%2Fwww.top500.org%2Fstatistics%2Flist%2F&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-52"><span class="mw-cite-backlink"><b><a href="#cite_ref-52">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="https://www.nersc.gov/users/computational-systems/hopper/configuration/interconnect/">"Interconnect"</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20150128133120/https://www.nersc.gov/users/computational-systems/hopper/configuration/interconnect/">Archived</a> 2015-01-28 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>.</span> </li> <li id="cite_note-53"><span class="mw-cite-backlink"><b><a href="#cite_ref-53">^</a></b></span> <span class="reference-text">Hennessy and Patterson, p. 537.</span> </li> <li id="cite_note-54"><span class="mw-cite-backlink"><b><a href="#cite_ref-54">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="https://www.pcmag.com/encyclopedia_term/0,,t=mpp&i=47310,00.asp">MPP Definition.</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20130511084523/https://www.pcmag.com/encyclopedia_term/0%2C%2Ct%3Dmpp%26i%3D47310%2C00.asp">Archived</a> 2013-05-11 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a> <i>PC Magazine</i>. Retrieved on November 7, 2007.</span> </li> <li id="cite_note-55"><span class="mw-cite-backlink"><b><a href="#cite_ref-55">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKirkpatrick2003" class="citation journal cs1">Kirkpatrick, Scott (2003). "COMPUTER SCIENCE: Rough Times Ahead". <i>Science</i>. <b>299</b> (5607): 668–669. 
<a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1126%2Fscience.1081623">10.1126/science.1081623</a>. <a href="/wiki/PMID_(identifier)" class="mw-redirect" title="PMID (identifier)">PMID</a> <a rel="nofollow" class="external text" href="https://pubmed.ncbi.nlm.nih.gov/12560537">12560537</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:60622095">60622095</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Science&rft.atitle=COMPUTER+SCIENCE%3A+Rough+Times+Ahead&rft.volume=299&rft.issue=5607&rft.pages=668-669&rft.date=2003&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A60622095%23id-name%3DS2CID&rft_id=info%3Apmid%2F12560537&rft_id=info%3Adoi%2F10.1126%2Fscience.1081623&rft.aulast=Kirkpatrick&rft.aufirst=Scott&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-56"><span class="mw-cite-backlink"><b><a href="#cite_ref-56">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFValuevaValuevSemyonovaLyakhov2019" class="citation journal cs1">Valueva, Maria; Valuev, Georgii; Semyonova, Nataliya; Lyakhov, Pavel; Chervyakov, Nikolay; Kaplun, Dmitry; Bogaevskiy, Danil (2019-06-20). <a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Felectronics8060694">"Construction of Residue Number System Using Hardware Efficient Diagonal Function"</a>. <i>Electronics</i>. <b>8</b> (6): 694. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://doi.org/10.3390%2Felectronics8060694">10.3390/electronics8060694</a></span>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/2079-9292">2079-9292</a>. <q>All simulated circuits were described in very high speed integrated circuit (VHSIC) hardware description language (VHDL). Hardware modeling was performed on Xilinx FPGA Artix 7 xc7a200tfbg484-2.</q></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Electronics&rft.atitle=Construction+of+Residue+Number+System+Using+Hardware+Efficient+Diagonal+Function&rft.volume=8&rft.issue=6&rft.pages=694&rft.date=2019-06-20&rft_id=info%3Adoi%2F10.3390%2Felectronics8060694&rft.issn=2079-9292&rft.aulast=Valueva&rft.aufirst=Maria&rft.au=Valuev%2C+Georgii&rft.au=Semyonova%2C+Nataliya&rft.au=Lyakhov%2C+Pavel&rft.au=Chervyakov%2C+Nikolay&rft.au=Kaplun%2C+Dmitry&rft.au=Bogaevskiy%2C+Danil&rft_id=https%3A%2F%2Fdoi.org%2F10.3390%252Felectronics8060694&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-57"><span class="mw-cite-backlink"><b><a href="#cite_ref-57">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGuptaSuneja2020" class="citation book cs1">Gupta, Ankit; Suneja, Kriti (May 2020). 
<a rel="nofollow" class="external text" href="https://ieeexplore.ieee.org/document/9121004">"Hardware Design of Approximate Matrix Multiplier based on FPGA in Verilog"</a>. <i>2020 4th International Conference on Intelligent Computing and Control Systems (ICICCS)</i>. Madurai, India: IEEE. pp. 496–498. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FICICCS48265.2020.9121004">10.1109/ICICCS48265.2020.9121004</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-1-7281-4876-2" title="Special:BookSources/978-1-7281-4876-2"><bdi>978-1-7281-4876-2</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:219990653">219990653</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Hardware+Design+of+Approximate+Matrix+Multiplier+based+on+FPGA+in+Verilog&rft.btitle=2020+4th+International+Conference+on+Intelligent+Computing+and+Control+Systems+%28ICICCS%29&rft.place=Madurai%2C+India&rft.pages=496-498&rft.pub=IEEE&rft.date=2020-05&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A219990653%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2FICICCS48265.2020.9121004&rft.isbn=978-1-7281-4876-2&rft.aulast=Gupta&rft.aufirst=Ankit&rft.au=Suneja%2C+Kriti&rft_id=https%3A%2F%2Fieeexplore.ieee.org%2Fdocument%2F9121004&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-DAmour-58"><span class="mw-cite-backlink">^ <a href="#cite_ref-DAmour_58-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-DAmour_58-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-DAmour_58-2"><sup><i><b>c</b></i></sup></a></span> <span class="reference-text">D'Amour, Michael R., Chief Operating Officer, DRC Computer Corporation. "Standard Reconfigurable Computing". Invited speaker at the University of Delaware, February 28, 2007.</span> </li> <li id="cite_note-59"><span class="mw-cite-backlink"><b><a href="#cite_ref-59">^</a></b></span> <span class="reference-text">Boggan, Sha'Kia and Daniel M. Pressel (August 2007). <a rel="nofollow" class="external text" href="http://www.arl.army.mil/arlreports/2007/ARL-SR-154.pdf">GPUs: An Emerging Platform for General-Purpose Computation</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20161225073248/http://www.arl.army.mil/arlreports/2007/ARL-SR-154.pdf">Archived</a> 2016-12-25 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a> (PDF). ARL-SR-154, U.S. Army Research Lab. Retrieved on November 7, 2007.</span> </li> <li id="cite_note-60"><span class="mw-cite-backlink"><b><a href="#cite_ref-60">^</a></b></span> <span class="reference-text">Maslennikov, Oleg (2002). <a rel="nofollow" class="external text" href="https://doi.org/10.1007%2F3-540-48086-2_30">"Systematic Generation of Executing Programs for Processor Elements in Parallel ASIC or FPGA-Based Systems and Their Transformation into VHDL-Descriptions of Processor Element Control Units".</a> <i>Lecture Notes in Computer Science</i>, <b>2328/2002:</b> p. 
272.</span> </li> <li id="cite_note-61"><span class="mw-cite-backlink"><b><a href="#cite_ref-61">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFShimokawaFuwa,_Y.Aramaki,_N.1991" class="citation book cs1">Shimokawa, Y.; Fuwa, Y.; Aramaki, N. (18–21 November 1991). "A parallel ASIC VLSI neurocomputer for a large number of neurons and billion connections per second speed". <i>[Proceedings] 1991 IEEE International Joint Conference on Neural Networks</i>. Vol. 3. pp. 2162–2167. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FIJCNN.1991.170708">10.1109/IJCNN.1991.170708</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-7803-0227-3" title="Special:BookSources/978-0-7803-0227-3"><bdi>978-0-7803-0227-3</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:61094111">61094111</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=A+parallel+ASIC+VLSI+neurocomputer+for+a+large+number+of+neurons+and+billion+connections+per+second+speed&rft.btitle=%26%2391%3BProceedings%26%2393%3B+1991+IEEE+International+Joint+Conference+on+Neural+Networks&rft.pages=2162-2167&rft.date=1991-11-18%2F1991-11-21&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A61094111%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1109%2FIJCNN.1991.170708&rft.isbn=978-0-7803-0227-3&rft.aulast=Shimokawa&rft.aufirst=Y.&rft.au=Fuwa%2C+Y.&rft.au=Aramaki%2C+N.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-62"><span class="mw-cite-backlink"><b><a href="#cite_ref-62">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAckenIrwin,_Mary_JaneOwens,_Robert_M.1998" class="citation journal cs1">Acken, Kevin P.; Irwin, Mary Jane; Owens, Robert M. (July 1998). "A Parallel ASIC Architecture for Efficient Fractal Image Coding". <i>The Journal of VLSI Signal Processing</i>. <b>19</b> (2): 97–113. <a href="/wiki/Bibcode_(identifier)" class="mw-redirect" title="Bibcode (identifier)">Bibcode</a>:<a rel="nofollow" class="external text" href="https://ui.adsabs.harvard.edu/abs/1998JSPSy..19...97A">1998JSPSy..19...97A</a>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1023%2FA%3A1008005616596">10.1023/A:1008005616596</a>. 
<a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:2976028">2976028</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=The+Journal+of+VLSI+Signal+Processing&rft.atitle=A+Parallel+ASIC+Architecture+for+Efficient+Fractal+Image+Coding&rft.volume=19&rft.issue=2&rft.pages=97-113&rft.date=1998-07&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A2976028%23id-name%3DS2CID&rft_id=info%3Adoi%2F10.1023%2FA%3A1008005616596&rft_id=info%3Abibcode%2F1998JSPSy..19...97A&rft.aulast=Acken&rft.aufirst=Kevin+P.&rft.au=Irwin%2C+Mary+Jane&rft.au=Owens%2C+Robert+M.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-63"><span class="mw-cite-backlink"><b><a href="#cite_ref-63">^</a></b></span> <span class="reference-text">Kahng, Andrew B. (June 21, 2004) "<a rel="nofollow" class="external text" href="http://www.future-fab.com/documents.asp?grID=353&d_ID=2596">Scoping the Problem of DFM in the Semiconductor Industry</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20080131221732/http://www.future-fab.com/documents.asp?grID=353&d_ID=2596">Archived</a> 2008-01-31 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a>." University of California, San Diego. "Future design for manufacturing (DFM) technology must reduce design [non-recoverable expenditure] cost and directly address manufacturing [non-recoverable expenditures]—the cost of a mask set and probe card—which is well over $1 million at the 90 nm technology node and creates a significant damper on semiconductor-based innovation."</span> </li> <li id="cite_note-PH751-64"><span class="mw-cite-backlink">^ <a href="#cite_ref-PH751_64-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-PH751_64-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text">Patterson and Hennessy, p. 751.</span> </li> <li id="cite_note-65"><span class="mw-cite-backlink"><b><a href="#cite_ref-65">^</a></b></span> <span class="reference-text">The <a rel="nofollow" class="external text" href="http://awards.computer.org/ana/award/viewPastRecipients.action?id=16">Sidney Fernbach Award given to MPI inventor Bill Gropp</a> <a rel="nofollow" class="external text" href="https://web.archive.org/web/20110725191103/http://awards.computer.org/ana/award/viewPastRecipients.action?id=16">Archived</a> 2011-07-25 at the <a href="/wiki/Wayback_Machine" title="Wayback Machine">Wayback Machine</a> refers to MPI as "the dominant HPC communications interface"</span> </li> <li id="cite_note-66"><span class="mw-cite-backlink"><b><a href="#cite_ref-66">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFShenMikko_H._Lipasti2004" class="citation book cs1">Shen, John Paul; Mikko H. Lipasti (2004). <i>Modern processor design : fundamentals of superscalar processors</i> (1st ed.). Dubuque, Iowa: McGraw-Hill. p. 561. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-07-057064-1" title="Special:BookSources/978-0-07-057064-1"><bdi>978-0-07-057064-1</bdi></a>. <q>However, the holy grail of such research—automated parallelization of serial programs—has yet to materialize. 
While automated parallelization of certain classes of algorithms has been demonstrated, such success has largely been limited to scientific and numeric applications with predictable flow control (e.g., nested loop structures with statically determined iteration counts) and statically analyzable memory access patterns. (e.g., walks over large multidimensional arrays of float-point data).</q></cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Modern+processor+design+%3A+fundamentals+of+superscalar+processors&rft.place=Dubuque%2C+Iowa&rft.pages=561&rft.edition=1st&rft.pub=McGraw-Hill&rft.date=2004&rft.isbn=978-0-07-057064-1&rft.aulast=Shen&rft.aufirst=John+Paul&rft.au=Mikko+H.+Lipasti&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-67"><span class="mw-cite-backlink"><b><a href="#cite_ref-67">^</a></b></span> <span class="reference-text"><i>Encyclopedia of Parallel Computing, Volume 4</i> by David Padua 2011 <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/0387097651" title="Special:BookSources/0387097651">0387097651</a> page 265</span> </li> <li id="cite_note-68"><span class="mw-cite-backlink"><b><a href="#cite_ref-68">^</a></b></span> <span class="reference-text"><a href="/wiki/Asanovic,_Krste" class="mw-redirect" title="Asanovic, Krste">Asanovic, Krste</a>, et al. (December 18, 2006). <a rel="nofollow" class="external text" href="http://www.eecs.berkeley.edu/Pubs/TechRpts/2006/EECS-2006-183.pdf">"The Landscape of Parallel Computing Research: A View from Berkeley"</a> (PDF). University of California, Berkeley. Technical Report No. UCB/EECS-2006-183. See table on pages 17–19.</span> </li> <li id="cite_note-69"><span class="mw-cite-backlink"><b><a href="#cite_ref-69">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFDavid_R.David_A.JaJa1998" class="citation journal cs1">David R., Helman; David A., Bader; JaJa, Joseph (1998). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20121119012835/http://www.cc.gatech.edu/~bader/papers/JPDC-981462.pdf">"A Randomized Parallel Sorting Algorithm with an Experimental Study"</a> <span class="cs1-format">(PDF)</span>. <i>Journal of Parallel and Distributed Computing</i>. <b>52</b>: 1–23. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1006%2Fjpdc.1998.1462">10.1006/jpdc.1998.1462</a>. <a href="/wiki/Hdl_(identifier)" class="mw-redirect" title="Hdl (identifier)">hdl</a>:<a rel="nofollow" class="external text" href="https://hdl.handle.net/1903%2F835">1903/835</a>. Archived from <a rel="nofollow" class="external text" href="http://www.cc.gatech.edu/~bader/papers/JPDC-981462.pdf">the original</a> <span class="cs1-format">(PDF)</span> on 19 November 2012<span class="reference-accessdate">. 
Retrieved <span class="nowrap">26 October</span> 2012</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Journal+of+Parallel+and+Distributed+Computing&rft.atitle=A+Randomized+Parallel+Sorting+Algorithm+with+an+Experimental+Study&rft.volume=52&rft.pages=1-23&rft.date=1998&rft_id=info%3Ahdl%2F1903%2F835&rft_id=info%3Adoi%2F10.1006%2Fjpdc.1998.1462&rft.aulast=David+R.&rft.aufirst=Helman&rft.au=David+A.%2C+Bader&rft.au=JaJa%2C+Joseph&rft_id=http%3A%2F%2Fwww.cc.gatech.edu%2F~bader%2Fpapers%2FJPDC-981462.pdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-70"><span class="mw-cite-backlink"><b><a href="#cite_ref-70">^</a></b></span> <span class="reference-text">Dobel, B., Hartig, H., & Engel, M. (2012) "Operating system support for redundant multithreading". <i>Proceedings of the Tenth ACM International Conference on Embedded Software</i>, 83–92. <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F2380356.2380375">10.1145/2380356.2380375</a></span> </li> <li id="cite_note-infamous-71"><span class="mw-cite-backlink">^ <a href="#cite_ref-infamous_71-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-infamous_71-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text">Patterson and Hennessy, pp. 749–50: "Although successful in pushing several technologies useful in later projects, the ILLIAC IV failed as a computer. Costs escalated from the $8 million estimated in 1966 to $31 million by 1972, despite the construction of only a quarter of the planned machine . It was perhaps the most infamous of supercomputers. The project started in 1965 and ran its first real application in 1976."</span> </li> <li id="cite_note-72"><span class="mw-cite-backlink"><b><a href="#cite_ref-72">^</a></b></span> <span class="reference-text"><a href="/wiki/Luigi_Federico_Menabrea" title="Luigi Federico Menabrea">Menabrea, L. F.</a> (1842). <a rel="nofollow" class="external text" href="http://www.fourmilab.ch/babbage/sketch.html"><i>Sketch of the Analytic Engine Invented by Charles Babbage</i></a>. Bibliothèque Universelle de Genève. Retrieved on November 7, 2007. quote: "when a long series of identical computations is to be performed, such as those required for the formation of numerical tables, the machine can be brought into play so as to give several results at the same time, which will greatly abridge the whole amount of the processes."</span> </li> <li id="cite_note-PH753-73"><span class="mw-cite-backlink">^ <a href="#cite_ref-PH753_73-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-PH753_73-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text">Patterson and Hennessy, p. 753.</span> </li> <li id="cite_note-74"><span class="mw-cite-backlink"><b><a href="#cite_ref-74">^</a></b></span> <span class="reference-text"> R.W. Hockney, C.R. Jesshope. <a rel="nofollow" class="external text" href="https://books.google.com/books?id=6HcBQ67-Fb4C"><i>Parallel Computers 2: Architecture, Programming and Algorithms, Volume 2</i></a>. 1988. p. 8 quote: "The earliest reference to parallelism in computer design is thought to be in General L. F. 
Menabrea's publication in… 1842, entitled <i>Sketch of the Analytical Engine Invented by Charles Babbage</i>".</span> </li> <li id="cite_note-75"><span class="mw-cite-backlink"><b><a href="#cite_ref-75">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBataille1972" class="citation journal cs1">Bataille, M. (1972-04-01). <a rel="nofollow" class="external text" href="https://dl.acm.org/doi/10.1145/641276.641278">"Something old: the Gamma 60 the computer that was ahead of its time"</a>. <i>ACM SIGARCH Computer Architecture News</i>. <b>1</b> (2): 10–15. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F641276.641278">10.1145/641276.641278</a>. <a href="/wiki/ISSN_(identifier)" class="mw-redirect" title="ISSN (identifier)">ISSN</a> <a rel="nofollow" class="external text" href="https://search.worldcat.org/issn/0163-5964">0163-5964</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:34642285">34642285</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=ACM+SIGARCH+Computer+Architecture+News&rft.atitle=Something+old%3A+the+Gamma+60+the+computer+that+was+ahead+of+its+time&rft.volume=1&rft.issue=2&rft.pages=10-15&rft.date=1972-04-01&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A34642285%23id-name%3DS2CID&rft.issn=0163-5964&rft_id=info%3Adoi%2F10.1145%2F641276.641278&rft.aulast=Bataille&rft.aufirst=M.&rft_id=https%3A%2F%2Fdl.acm.org%2Fdoi%2F10.1145%2F641276.641278&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-76"><span class="mw-cite-backlink"><b><a href="#cite_ref-76">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://www.feb-patrimoine.com/projet/gamma60/architecture_sketch_of_bull_gamma_60_jbourb_--_mark_smotherman.htm">"Architecture Sketch of Bull Gamma 60 -- Mark Smotherman"</a>. <i>www.feb-patrimoine.com</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2023-08-14</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=www.feb-patrimoine.com&rft.atitle=Architecture+Sketch+of+Bull+Gamma+60+--+Mark+Smotherman&rft_id=http%3A%2F%2Fwww.feb-patrimoine.com%2Fprojet%2Fgamma60%2Farchitecture_sketch_of_bull_gamma_60_jbourb_--_mark_smotherman.htm&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-77"><span class="mw-cite-backlink"><b><a href="#cite_ref-77">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFTumlin,_Smotherman2023" class="citation web cs1">Tumlin, Smotherman (2023-08-14). <a rel="nofollow" class="external text" href="https://db.aconit.org/dbaconit/medias.view.php?media=../dbmedia_0/pdf_10/10174.pdf&cotemedia=An%20Evaluation%20of%20the%20Design%20of%20the%20Gamma%2060.pdf&format=pdf">"An Evaluation of the Design of the Gamma 60"</a>. <i>ACONIT Computer History Museum</i>. 
Department of Computer Science, Clemson University<span class="reference-accessdate">. Retrieved <span class="nowrap">2023-08-14</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=ACONIT+Computer+History+Museum&rft.atitle=An+Evaluation+of+the+Design+of+the+Gamma+60&rft.date=2023-08-14&rft.au=Tumlin%2C+Smotherman&rft_id=https%3A%2F%2Fdb.aconit.org%2Fdbaconit%2Fmedias.view.php%3Fmedia%3D..%2Fdbmedia_0%2Fpdf_10%2F10174.pdf%26cotemedia%3DAn%2520Evaluation%2520of%2520the%2520Design%2520of%2520the%2520Gamma%252060.pdf%26format%3Dpdf&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-78"><span class="mw-cite-backlink"><b><a href="#cite_ref-78">^</a></b></span> <span class="reference-text">"Parallel Programming", S. Gill, <i>The Computer Journal</i> Vol. 1 #1, pp2-10, British Computer Society, April 1958.</span> </li> <li id="cite_note-G_Wilson-79"><span class="mw-cite-backlink">^ <a href="#cite_ref-G_Wilson_79-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-G_Wilson_79-1"><sup><i><b>b</b></i></sup></a> <a href="#cite_ref-G_Wilson_79-2"><sup><i><b>c</b></i></sup></a> <a href="#cite_ref-G_Wilson_79-3"><sup><i><b>d</b></i></sup></a> <a href="#cite_ref-G_Wilson_79-4"><sup><i><b>e</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWilson1994" class="citation web cs1">Wilson, Gregory V. (1994). <a rel="nofollow" class="external text" href="http://ei.cs.vt.edu/~history/Parallel.html">"The History of the Development of Parallel Computing"</a>. Virginia Tech/Norfolk State University, Interactive Learning with a Digital Library in Computer Science<span class="reference-accessdate">. Retrieved <span class="nowrap">2008-01-08</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=The+History+of+the+Development+of+Parallel+Computing&rft.pub=Virginia+Tech%2FNorfolk+State+University%2C+Interactive+Learning+with+a+Digital+Library+in+Computer+Science&rft.date=1994&rft.aulast=Wilson&rft.aufirst=Gregory+V.&rft_id=http%3A%2F%2Fei.cs.vt.edu%2F~history%2FParallel.html&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-80"><span class="mw-cite-backlink"><b><a href="#cite_ref-80">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFAnthes,_Gry2001" class="citation web cs1">Anthes, Gry (November 19, 2001). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20080131205427/http://www.computerworld.com/action/article.do?command=viewArticleBasic&articleId=65878">"The Power of Parallelism"</a>. <i><a href="/wiki/Computerworld" title="Computerworld">Computerworld</a></i>. Archived from <a rel="nofollow" class="external text" href="http://www.computerworld.com/action/article.do?command=viewArticleBasic&articleId=65878">the original</a> on January 31, 2008<span class="reference-accessdate">. 
Retrieved <span class="nowrap">2008-01-08</span></span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=unknown&rft.jtitle=Computerworld&rft.atitle=The+Power+of+Parallelism&rft.date=2001-11-19&rft.au=Anthes%2C+Gry&rft_id=http%3A%2F%2Fwww.computerworld.com%2Faction%2Farticle.do%3Fcommand%3DviewArticleBasic%26articleId%3D65878&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-81"><span class="mw-cite-backlink"><b><a href="#cite_ref-81">^</a></b></span> <span class="reference-text">Patterson and Hennessy, p. 749.</span> </li> <li id="cite_note-82"><span class="mw-cite-backlink"><b><a href="#cite_ref-82">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMinsky1986" class="citation book cs1">Minsky, Marvin (1986). <a rel="nofollow" class="external text" href="https://archive.org/details/societyofmind00marv/page/17"><i>The Society of Mind</i></a>. New York: Simon & Schuster. pp. <a rel="nofollow" class="external text" href="https://archive.org/details/societyofmind00marv/page/17">17</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-671-60740-1" title="Special:BookSources/978-0-671-60740-1"><bdi>978-0-671-60740-1</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=The+Society+of+Mind&rft.place=New+York&rft.pages=17&rft.pub=Simon+%26+Schuster&rft.date=1986&rft.isbn=978-0-671-60740-1&rft.aulast=Minsky&rft.aufirst=Marvin&rft_id=https%3A%2F%2Farchive.org%2Fdetails%2Fsocietyofmind00marv%2Fpage%2F17&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-83"><span class="mw-cite-backlink"><b><a href="#cite_ref-83">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFMinsky1986" class="citation book cs1">Minsky, Marvin (1986). <a rel="nofollow" class="external text" href="https://archive.org/details/societyofmind00marv/page/29"><i>The Society of Mind</i></a>. New York: Simon & Schuster. pp. <a rel="nofollow" class="external text" href="https://archive.org/details/societyofmind00marv/page/29">29</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-671-60740-1" title="Special:BookSources/978-0-671-60740-1"><bdi>978-0-671-60740-1</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=The+Society+of+Mind&rft.place=New+York&rft.pages=29&rft.pub=Simon+%26+Schuster&rft.date=1986&rft.isbn=978-0-671-60740-1&rft.aulast=Minsky&rft.aufirst=Marvin&rft_id=https%3A%2F%2Farchive.org%2Fdetails%2Fsocietyofmind00marv%2Fpage%2F29&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-84"><span class="mw-cite-backlink"><b><a href="#cite_ref-84">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFBlakeslee1996" class="citation book cs1">Blakeslee, Thomas (1996). 
<span class="id-lock-registration" title="Free registration required"><a rel="nofollow" class="external text" href="https://archive.org/details/beyondconsciousm00blak"><i>Beyond the Conscious Mind. Unlocking the Secrets of the Self</i></a></span>. Springer. pp. <a rel="nofollow" class="external text" href="https://archive.org/details/beyondconsciousm00blak/page/6">6–7</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9780306452628" title="Special:BookSources/9780306452628"><bdi>9780306452628</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Beyond+the+Conscious+Mind.+Unlocking+the+Secrets+of+the+Self&rft.pages=6-7&rft.pub=Springer&rft.date=1996&rft.isbn=9780306452628&rft.aulast=Blakeslee&rft.aufirst=Thomas&rft_id=https%3A%2F%2Farchive.org%2Fdetails%2Fbeyondconsciousm00blak&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-85"><span class="mw-cite-backlink"><b><a href="#cite_ref-85">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGazzanigaLeDoux1978" class="citation book cs1"><a href="/wiki/Michael_S._Gazzaniga" class="mw-redirect" title="Michael S. Gazzaniga">Gazzaniga, Michael</a>; <a href="/wiki/Joseph_E._LeDoux" title="Joseph E. LeDoux">LeDoux, Joseph</a> (1978). <i>The Integrated Mind</i>. pp. 132–161.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=The+Integrated+Mind&rft.pages=132-161&rft.date=1978&rft.aulast=Gazzaniga&rft.aufirst=Michael&rft.au=LeDoux%2C+Joseph&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-86"><span class="mw-cite-backlink"><b><a href="#cite_ref-86">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGazzaniga1985" class="citation book cs1"><a href="/wiki/Michael_S._Gazzaniga" class="mw-redirect" title="Michael S. Gazzaniga">Gazzaniga, Michael</a> (1985). <span class="id-lock-registration" title="Free registration required"><a rel="nofollow" class="external text" href="https://archive.org/details/socialbraindisco0000gazz"><i>The Social Brain. Discovering the Networks of the Mind</i></a></span>. Basic Books. pp. <a rel="nofollow" class="external text" href="https://archive.org/details/socialbraindisco0000gazz/page/77">77–79</a>. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/9780465078509" title="Special:BookSources/9780465078509"><bdi>9780465078509</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=The+Social+Brain.+Discovering+the+Networks+of+the+Mind&rft.pages=77-79&rft.pub=Basic+Books&rft.date=1985&rft.isbn=9780465078509&rft.aulast=Gazzaniga&rft.aufirst=Michael&rft_id=https%3A%2F%2Farchive.org%2Fdetails%2Fsocialbraindisco0000gazz&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-87"><span class="mw-cite-backlink"><b><a href="#cite_ref-87">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFOrnstein1992" class="citation book cs1"><a href="/wiki/Robert_Ornstein" class="mw-redirect" title="Robert Ornstein">Ornstein, Robert</a> (1992). <span class="id-lock-registration" title="Free registration required"><a rel="nofollow" class="external text" href="https://archive.org/details/evolutionofconsc0000orns"><i>Evolution of Consciousness: The Origins of the Way We Think</i></a></span>. pp. <a rel="nofollow" class="external text" href="https://archive.org/details/evolutionofconsc0000orns/page/2">2</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Evolution+of+Consciousness%3A+The+Origins+of+the+Way+We+Think&rft.pages=2&rft.date=1992&rft.aulast=Ornstein&rft.aufirst=Robert&rft_id=https%3A%2F%2Farchive.org%2Fdetails%2Fevolutionofconsc0000orns&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-88"><span class="mw-cite-backlink"><b><a href="#cite_ref-88">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHilgard1977" class="citation book cs1"><a href="/wiki/Ernest_Hilgard" title="Ernest Hilgard">Hilgard, Ernest</a> (1977). <i>Divided consciousness: multiple controls in human thought and action</i>. New York: Wiley. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-471-39602-4" title="Special:BookSources/978-0-471-39602-4"><bdi>978-0-471-39602-4</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Divided+consciousness%3A+multiple+controls+in+human+thought+and+action.&rft.place=New+York&rft.pub=Wiley&rft.date=1977&rft.isbn=978-0-471-39602-4&rft.aulast=Hilgard&rft.aufirst=Ernest&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-89"><span class="mw-cite-backlink"><b><a href="#cite_ref-89">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFHilgard1986" class="citation book cs1"><a href="/wiki/Ernest_Hilgard" title="Ernest Hilgard">Hilgard, Ernest</a> (1986). <i>Divided consciousness: multiple controls in human thought and action (expanded edition)</i>. New York: Wiley. 
<a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a> <a href="/wiki/Special:BookSources/978-0-471-80572-4" title="Special:BookSources/978-0-471-80572-4"><bdi>978-0-471-80572-4</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=Divided+consciousness%3A+multiple+controls+in+human+thought+and+action+%28expanded+edition%29.&rft.place=New+York&rft.pub=Wiley&rft.date=1986&rft.isbn=978-0-471-80572-4&rft.aulast=Hilgard&rft.aufirst=Ernest&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-90"><span class="mw-cite-backlink"><b><a href="#cite_ref-90">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFKaku2014" class="citation book cs1"><a href="/wiki/Michio_Kaku" title="Michio Kaku">Kaku, Michio</a> (2014). <a href="/wiki/The_Future_of_the_Mind" title="The Future of the Mind"><i>The Future of the Mind</i></a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&rft.btitle=The+Future+of+the+Mind&rft.date=2014&rft.aulast=Kaku&rft.aufirst=Michio&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-91"><span class="mw-cite-backlink"><b><a href="#cite_ref-91">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFOuspenskii1992" class="citation book cs1"><a href="/wiki/Pyotr_Demianovich_Ouspenskii" class="mw-redirect" title="Pyotr Demianovich Ouspenskii">Ouspenskii, Pyotr</a> (1992). "Chapter 3". <i>In Search of the Miraculous. Fragments of an Unknown Teaching</i>. pp. 72–83.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=bookitem&rft.atitle=Chapter+3&rft.btitle=In+Search+of+the+Miraculous.+Fragments+of+an+Unknown+Teaching&rft.pages=72-83&rft.date=1992&rft.aulast=Ouspenskii&rft.aufirst=Pyotr&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> <li id="cite_note-92"><span class="mw-cite-backlink"><b><a href="#cite_ref-92">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://neuroclusterbrain.com">"Official Neurocluster Brain Model site"</a><span class="reference-accessdate">. Retrieved <span class="nowrap">July 22,</span> 2017</span>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=unknown&rft.btitle=Official+Neurocluster+Brain+Model+site&rft_id=http%3A%2F%2Fneuroclusterbrain.com&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></span> </li> </ol></div> <div class="mw-heading mw-heading2"><h2 id="Further_reading">Further reading</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Parallel_computing&action=edit&section=38" title="Edit section: Further reading"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <ul><li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFRodriguezVillagraBaran2008" class="citation journal cs1">Rodriguez, C.; Villagra, M.; Baran, B. (29 August 2008). 
"Asynchronous team algorithms for Boolean Satisfiability". <i>Bio-Inspired Models of Network, Information and Computing Systems, 2007. Bionetics 2007. 2nd</i>: 66–69. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FBIMNICS.2007.4610083">10.1109/BIMNICS.2007.4610083</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a> <a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:15185219">15185219</a>.</cite><span title="ctx_ver=Z39.88-2004&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&rft.genre=article&rft.jtitle=Bio-Inspired+Models+of+Network%2C+Information+and+Computing+Systems%2C+2007.+Bionetics+2007.+2nd&rft.atitle=Asynchronous+team+algorithms+for+Boolean+Satisfiability&rft.pages=66-69&rft.date=2008-08-29&rft_id=info%3Adoi%2F10.1109%2FBIMNICS.2007.4610083&rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A15185219%23id-name%3DS2CID&rft.aulast=Rodriguez&rft.aufirst=C.&rft.au=Villagra%2C+M.&rft.au=Baran%2C+B.&rfr_id=info%3Asid%2Fen.wikipedia.org%3AParallel+computing" class="Z3988"></span></li> <li>Sechin, A.; Parallel Computing in Photogrammetry. GIM International. #1, 2016, pp. 21–23.</li></ul> <div class="mw-heading mw-heading2"><h2 id="External_links">External links</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Parallel_computing&action=edit&section=39" title="Edit section: External links"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1235611614">.mw-parser-output .spoken-wikipedia{border:1px solid #a2a9b1;background-color:var(--background-color-interactive-subtle,#f8f9fa);margin:0.5em 0;padding:0.2em;line-height:1.5em;font-size:90%}.mw-parser-output .spoken-wikipedia-header{text-align:center}.mw-parser-output .spoken-wikipedia-listen-to{font-weight:bold}.mw-parser-output .spoken-wikipedia-files{text-align:center;margin-top:10px;margin-bottom:0.4em}.mw-parser-output .spoken-wikipedia-icon{float:left;margin-left:5px;margin-top:10px}.mw-parser-output .spoken-wikipedia-disclaimer{margin-left:60px;margin-top:10px;font-size:95%;line-height:1.4em}.mw-parser-output .spoken-wikipedia-footer{margin-top:10px;text-align:center}@media(min-width:720px){.mw-parser-output .spoken-wikipedia{width:20em;float:right;clear:right;margin-left:1em}}</style><div class="spoken-wikipedia noprint haudio"><div class="spoken-wikipedia-header"><span class="spoken-wikipedia-listen-to">Listen to this article</span> (<span class="duration"><span class="min">54</span> minutes</span>)</div><div class="spoken-wikipedia-files"><figure class="mw-halign-center" typeof="mw:File"><span><audio id="mwe_player_0" controls="" preload="none" data-mw-tmh="" class="mw-file-element" width="200" style="width:200px;" data-durationhint="3256" data-mwtitle="En-Parallel_computing.ogg" data-mwprovider="wikimediacommons"><source src="//upload.wikimedia.org/wikipedia/commons/3/3b/En-Parallel_computing.ogg" type="audio/ogg; codecs="vorbis"" data-width="0" data-height="0" /><source src="//upload.wikimedia.org/wikipedia/commons/transcoded/3/3b/En-Parallel_computing.ogg/En-Parallel_computing.ogg.mp3" type="audio/mpeg" data-transcodekey="mp3" data-width="0" data-height="0" /></audio></span><figcaption></figcaption></figure> </div><div class="spoken-wikipedia-icon"><span typeof="mw:File"><span title="Spoken Wikipedia"><img 
alt="Spoken Wikipedia icon" src="//upload.wikimedia.org/wikipedia/commons/thumb/4/47/Sound-icon.svg/45px-Sound-icon.svg.png" decoding="async" width="45" height="34" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/47/Sound-icon.svg/68px-Sound-icon.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/4/47/Sound-icon.svg/90px-Sound-icon.svg.png 2x" data-file-width="128" data-file-height="96" /></span></span></div><div class="spoken-wikipedia-disclaimer"><a href="/wiki/File:En-Parallel_computing.ogg" title="File:En-Parallel computing.ogg">This audio file</a> was created from a revision of this article dated 21 August 2013<span style="display:none"> (<span class="bday dtstart published updated itvstart">2013-08-21</span>)</span>, and does not reflect subsequent edits.</div><div class="spoken-wikipedia-footer">(<a href="/wiki/Wikipedia:Media_help" class="mw-redirect" title="Wikipedia:Media help">Audio help</a> · <a href="/wiki/Wikipedia:Spoken_articles" title="Wikipedia:Spoken articles">More spoken articles</a>)</div></div> <style data-mw-deduplicate="TemplateStyles:r1235681985">.mw-parser-output .side-box{margin:4px 0;box-sizing:border-box;border:1px solid #aaa;font-size:88%;line-height:1.25em;background-color:var(--background-color-interactive-subtle,#f8f9fa);display:flow-root}.mw-parser-output .side-box-abovebelow,.mw-parser-output .side-box-text{padding:0.25em 0.9em}.mw-parser-output .side-box-image{padding:2px 0 2px 0.9em;text-align:center}.mw-parser-output .side-box-imageright{padding:2px 0.9em 2px 0;text-align:center}@media(min-width:500px){.mw-parser-output .side-box-flex{display:flex;align-items:center}.mw-parser-output .side-box-text{flex:1;min-width:0}}@media(min-width:720px){.mw-parser-output .side-box{width:238px}.mw-parser-output .side-box-right{clear:right;float:right;margin-left:1em}.mw-parser-output .side-box-left{margin-right:1em}}</style><style data-mw-deduplicate="TemplateStyles:r1237033735">@media print{body.ns-0 .mw-parser-output .sistersitebox{display:none!important}}@media screen{html.skin-theme-clientpref-night .mw-parser-output .sistersitebox img[src*="Wiktionary-logo-en-v2.svg"]{background-color:white}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .sistersitebox img[src*="Wiktionary-logo-en-v2.svg"]{background-color:white}}</style><div class="side-box side-box-right plainlinks sistersitebox"><style data-mw-deduplicate="TemplateStyles:r1126788409">.mw-parser-output .plainlist ol,.mw-parser-output .plainlist ul{line-height:inherit;list-style:none;margin:0;padding:0}.mw-parser-output .plainlist ol li,.mw-parser-output .plainlist ul li{margin-bottom:0}</style> <div class="side-box-flex"> <div class="side-box-image"><span class="noviewer" typeof="mw:File"><span><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/d/df/Wikibooks-logo-en-noslogan.svg/40px-Wikibooks-logo-en-noslogan.svg.png" decoding="async" width="40" height="40" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/d/df/Wikibooks-logo-en-noslogan.svg/60px-Wikibooks-logo-en-noslogan.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/d/df/Wikibooks-logo-en-noslogan.svg/80px-Wikibooks-logo-en-noslogan.svg.png 2x" data-file-width="400" data-file-height="400" /></span></span></div> <div class="side-box-text plainlist">Wikibooks has a book on the topic of: <i><b><a href="https://en.wikibooks.org/wiki/Distributed_Systems" class="extiw" title="wikibooks:Distributed 
Systems">Distributed Systems</a></b></i></div></div> </div> <ul><li><a rel="nofollow" class="external text" href="http://www.llnl.gov/computing/tutorials/parallel_comp/">Lawrence Livermore National Laboratory: Introduction to Parallel Computing</a></li> <li><a rel="nofollow" class="external text" href="http://www-unix.mcs.anl.gov/dbpp/">Designing and Building Parallel Programs, by Ian Foster</a></li> <li><a rel="nofollow" class="external text" href="https://web.archive.org/web/20021012122919/http://wotug.ukc.ac.uk/parallel/">Internet Parallel Computing Archive</a></li></ul> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><style data-mw-deduplicate="TemplateStyles:r1236075235">.mw-parser-output .navbox{box-sizing:border-box;border:1px solid #a2a9b1;width:100%;clear:both;font-size:88%;text-align:center;padding:1px;margin:1em auto 0}.mw-parser-output .navbox .navbox{margin-top:0}.mw-parser-output .navbox+.navbox,.mw-parser-output .navbox+.navbox-styles+.navbox{margin-top:-1px}.mw-parser-output .navbox-inner,.mw-parser-output .navbox-subgroup{width:100%}.mw-parser-output .navbox-group,.mw-parser-output .navbox-title,.mw-parser-output .navbox-abovebelow{padding:0.25em 1em;line-height:1.5em;text-align:center}.mw-parser-output .navbox-group{white-space:nowrap;text-align:right}.mw-parser-output .navbox,.mw-parser-output .navbox-subgroup{background-color:#fdfdfd}.mw-parser-output .navbox-list{line-height:1.5em;border-color:#fdfdfd}.mw-parser-output .navbox-list-with-group{text-align:left;border-left-width:2px;border-left-style:solid}.mw-parser-output tr+tr>.navbox-abovebelow,.mw-parser-output tr+tr>.navbox-group,.mw-parser-output tr+tr>.navbox-image,.mw-parser-output tr+tr>.navbox-list{border-top:2px solid #fdfdfd}.mw-parser-output .navbox-title{background-color:#ccf}.mw-parser-output .navbox-abovebelow,.mw-parser-output .navbox-group,.mw-parser-output .navbox-subgroup .navbox-title{background-color:#ddf}.mw-parser-output .navbox-subgroup .navbox-group,.mw-parser-output .navbox-subgroup .navbox-abovebelow{background-color:#e6e6ff}.mw-parser-output .navbox-even{background-color:#f7f7f7}.mw-parser-output .navbox-odd{background-color:transparent}.mw-parser-output .navbox .hlist td dl,.mw-parser-output .navbox .hlist td ol,.mw-parser-output .navbox .hlist td ul,.mw-parser-output .navbox td.hlist dl,.mw-parser-output .navbox td.hlist ol,.mw-parser-output .navbox td.hlist ul{padding:0.125em 0}.mw-parser-output .navbox .navbar{display:block;font-size:100%}.mw-parser-output .navbox-title .navbar{float:left;text-align:left;margin-right:0.5em}body.skin--responsive .mw-parser-output .navbox-image img{max-width:none!important}@media print{body.ns-0 .mw-parser-output .navbox{display:none!important}}</style></div><div role="navigation" class="navbox" aria-labelledby="Parallel_computing" style="padding:3px"><table class="nowraplinks hlist mw-collapsible mw-collapsed navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="col" class="navbox-title" colspan="2"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><style data-mw-deduplicate="TemplateStyles:r1239400231">.mw-parser-output .navbar{display:inline;font-size:88%;font-weight:normal}.mw-parser-output .navbar-collapse{float:left;text-align:left}.mw-parser-output .navbar-boxtext{word-spacing:0}.mw-parser-output .navbar ul{display:inline-block;white-space:nowrap;line-height:inherit}.mw-parser-output 
Parallel computing topics

- General: Distributed computing · Parallel computing · Massively parallel · Cloud computing · High-performance computing · Multiprocessing · Manycore processor · GPGPU · Computer network · Systolic array
- Levels: Bit · Instruction · Thread · Task · Data · Memory · Loop · Pipeline
- Multithreading: Temporal · Simultaneous (SMT) · Simultaneous and heterogeneous · Speculative (SpMT) · Preemptive · Cooperative · Clustered multi-thread (CMT) · Hardware scout
- Theory: PRAM model · PEM model · Analysis of parallel algorithms · Amdahl's law (a worked form follows this outline) · Gustafson's law · Cost efficiency · Karp–Flatt metric · Slowdown · Speedup
- Elements: Process · Thread · Fiber · Instruction window · Array
- Coordination: Multiprocessing · Memory coherence · Cache coherence · Cache invalidation · Barrier · Synchronization · Application checkpointing
- Programming: Stream processing · Dataflow programming · Models (Implicit parallelism · Explicit parallelism · Concurrency) · Non-blocking algorithm
- Hardware: Flynn's taxonomy (SISD · SIMD, including array processing (SIMT), pipelined processing, and associative processing · MISD · MIMD) · Dataflow architecture · Pipelined processor · Superscalar processor · Vector processor · Multiprocessor (symmetric, asymmetric) · Memory (shared · distributed · distributed shared · UMA · NUMA · COMA) · Massively parallel computer · Computer cluster (Beowulf cluster) · Grid computer · Hardware acceleration
- APIs (a minimal OpenMP sketch appears below): Ateji PX · Boost · Chapel · HPX · Charm++ · Cilk · Coarray Fortran · CUDA · Dryad · C++ AMP · Global Arrays · GPUOpen · MPI · OpenMP · OpenCL · OpenHMPP · OpenACC · Parallel Extensions · PVM · pthreads · RaftLib · ROCm · UPC · TBB · ZPL
- Problems (see the race-condition sketch below): Automatic parallelization · Deadlock · Deterministic algorithm · Embarrassingly parallel · Parallel slowdown · Race condition · Software lockout · Scalability · Starvation
class="mw-selflink selflink">parallel</a></th><td class="navbox-list-with-group navbox-list navbox-odd hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Actor_model" title="Actor model">Actor-based</a></li> <li><a href="/wiki/Automatic_mutual_exclusion" title="Automatic mutual exclusion">Automatic mutual exclusion</a></li> <li><a href="/wiki/Choreographic_programming" title="Choreographic programming">Choreographic programming</a></li> <li><a href="/wiki/Concurrent_logic_programming" title="Concurrent logic programming">Concurrent logic</a> (<a href="/wiki/Concurrent_constraint_logic_programming" title="Concurrent constraint logic programming">Concurrent constraint logic</a>)</li> <li><a href="/wiki/Concurrent_object-oriented_programming" title="Concurrent object-oriented programming">Concurrent OO</a></li> <li><a href="/wiki/Macroprogramming" title="Macroprogramming">Macroprogramming</a></li> <li><a href="/wiki/Multitier_programming" title="Multitier programming">Multitier programming</a></li> <li><a href="/wiki/Organic_computing" title="Organic computing">Organic computing</a></li> <li><a href="/wiki/Parallel_programming_model" title="Parallel programming model">Parallel programming models</a></li> <li><a href="/wiki/Partitioned_global_address_space" title="Partitioned global address space">Partitioned global address space</a></li> <li><a href="/wiki/Process-oriented_programming" title="Process-oriented programming">Process-oriented</a></li> <li><a href="/wiki/Relativistic_programming" title="Relativistic programming">Relativistic programming</a></li> <li><a href="/wiki/Service-oriented_programming" title="Service-oriented programming">Service-oriented</a></li> <li><a href="/wiki/Structured_concurrency" title="Structured concurrency">Structured concurrency</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Metaprogramming" title="Metaprogramming">Metaprogramming</a></th><td class="navbox-list-with-group navbox-list navbox-even hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Attribute-oriented_programming" title="Attribute-oriented programming">Attribute-oriented</a></li> <li><a href="/wiki/Automatic_programming" title="Automatic programming">Automatic</a> (<a href="/wiki/Inductive_programming" title="Inductive programming">Inductive</a>)</li> <li><a href="/wiki/Dynamic_programming_language" title="Dynamic programming language">Dynamic</a></li> <li><a href="/wiki/Extensible_programming" title="Extensible programming">Extensible</a></li> <li><a href="/wiki/Generic_programming" title="Generic programming">Generic</a></li> <li><a href="/wiki/Homoiconicity" title="Homoiconicity">Homoiconicity</a></li> <li><a href="/wiki/Interactive_programming" title="Interactive programming">Interactive</a></li> <li><a href="/wiki/Macro_(computer_science)" title="Macro (computer science)">Macro</a> (<a href="/wiki/Hygienic_macro" title="Hygienic macro">Hygienic</a>)</li> <li><a href="/wiki/Metalinguistic_abstraction" title="Metalinguistic abstraction">Metalinguistic abstraction</a></li> <li><a href="/wiki/Multi-stage_programming" title="Multi-stage programming">Multi-stage</a></li> <li><a href="/wiki/Program_synthesis" title="Program synthesis">Program synthesis</a> (<a href="/wiki/Bayesian_program_synthesis" title="Bayesian program synthesis">Bayesian</a>, <a href="/wiki/Inferential_programming" title="Inferential programming">Inferential</a>, <a 
href="/wiki/Programming_by_demonstration" title="Programming by demonstration">by demonstration</a>, <a href="/wiki/Programming_by_example" title="Programming by example">by example</a>)</li> <li><a href="/wiki/Reflective_programming" title="Reflective programming">Reflective</a></li> <li><a href="/wiki/Self-modifying_code" title="Self-modifying code">Self-modifying code</a></li> <li><a href="/wiki/Symbolic_programming" title="Symbolic programming">Symbolic</a></li> <li><a href="/wiki/Template_metaprogramming" title="Template metaprogramming">Template</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Separation_of_concerns" title="Separation of concerns">Separation<br />of concerns</a></th><td class="navbox-list-with-group navbox-list navbox-odd hlist" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Aspect-oriented_programming" title="Aspect-oriented programming">Aspects</a></li> <li><a href="/wiki/Component-based_software_engineering" title="Component-based software engineering">Components</a></li> <li><a href="/wiki/Data-driven_programming" title="Data-driven programming">Data-driven</a></li> <li><a href="/wiki/Data-oriented_design" title="Data-oriented design">Data-oriented</a></li> <li><a href="/wiki/Event-driven_programming" title="Event-driven programming">Event-driven</a></li> <li><a href="/wiki/Feature-oriented_programming" title="Feature-oriented programming">Features</a></li> <li><a href="/wiki/Intentional_programming" title="Intentional programming">Intentional</a></li> <li><a href="/wiki/Literate_programming" title="Literate programming">Literate</a></li> <li><a href="/wiki/Role-oriented_programming" title="Role-oriented programming">Roles</a></li> <li><a href="/wiki/Subject-oriented_programming" title="Subject-oriented programming">Subjects</a></li></ul> </div></td></tr></tbody></table></div> <p class="mw-empty-elt"> </p> <div class="navbox-styles"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1236075235"><style data-mw-deduplicate="TemplateStyles:r1038841319">.mw-parser-output .tooltip-dotted{border-bottom:1px dotted;cursor:help}</style><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1038841319"></div><div role="navigation" class="navbox authority-control" aria-label="Navbox" style="padding:3px"><table class="nowraplinks hlist navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Help:Authority_control" title="Help:Authority control">Authority control databases</a>: National <span class="mw-valign-text-top noprint" typeof="mw:File/Frameless"><a href="https://www.wikidata.org/wiki/Q232661#identifiers" title="Edit this at Wikidata"><img alt="Edit this at Wikidata" src="//upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/10px-OOjs_UI_icon_edit-ltr-progressive.svg.png" decoding="async" width="10" height="10" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/15px-OOjs_UI_icon_edit-ltr-progressive.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/8/8a/OOjs_UI_icon_edit-ltr-progressive.svg/20px-OOjs_UI_icon_edit-ltr-progressive.svg.png 2x" data-file-width="20" data-file-height="20" /></a></span></th><td class="navbox-list-with-group navbox-list navbox-odd" 
style="width:100%;padding:0"><div style="padding:0 0.25em"><ul><li><span class="uid"><a rel="nofollow" class="external text" href="https://id.loc.gov/authorities/sh85097826">United States</a></span></li><li><span class="uid"><span class="rt-commentedText tooltip tooltip-dotted" title="Parallélisme (informatique)"><a rel="nofollow" class="external text" href="https://catalogue.bnf.fr/ark:/12148/cb11982441p">France</a></span></span></li><li><span class="uid"><span class="rt-commentedText tooltip tooltip-dotted" title="Parallélisme (informatique)"><a rel="nofollow" class="external text" href="https://data.bnf.fr/ark:/12148/cb11982441p">BnF data</a></span></span></li><li><span class="uid"><a rel="nofollow" class="external text" href="http://olduli.nli.org.il/F/?func=find-b&local_base=NLX10&find_code=UID&request=987007563162905171">Israel</a></span></li></ul></div></td></tr></tbody></table></div> <!-- NewPP limit report Parsed by mw‐api‐ext.codfw.main‐7556f8b5dd‐ltvzj Cached time: 20241124051946 Cache expiry: 2592000 Reduced expiry: false Complications: [vary‐revision‐sha1, show‐toc] CPU time usage: 1.085 seconds Real time usage: 1.343 seconds Preprocessor visited node count: 5648/1000000 Post‐expand include size: 193269/2097152 bytes Template argument size: 3003/2097152 bytes Highest expansion depth: 16/100 Expensive parser function count: 24/500 Unstrip recursion depth: 1/20 Unstrip post‐expand size: 273153/5000000 bytes Lua time usage: 0.647/10.000 seconds Lua memory usage: 7266704/52428800 bytes Number of Wikibase entities loaded: 1/400 --> <!-- Transclusion expansion time report (%,ms,calls,template) 100.00% 1066.796 1 -total 45.51% 485.541 1 Template:Reflist 24.07% 256.803 34 Template:Cite_book 8.98% 95.840 1 Template:Flynn's_taxonomy 8.63% 92.107 1 Template:Sidebar 6.96% 74.294 4 Template:Navbox 6.76% 72.070 16 Template:Cite_web 6.57% 70.106 1 Template:Parallel_Computing 5.43% 57.955 1 Template:Short_description 4.31% 46.014 1 Template:Redirect --> <!-- Saved in parser cache with key enwiki:pcache:idhash:145162-0!canonical and timestamp 20241124051946 and revision id 1259252331. 
Retrieved from "https://en.wikipedia.org/w/index.php?title=Parallel_computing&oldid=1259252331"

Categories: Parallel computing · Concurrent computing · Distributed computing

This page was last edited on 24 November 2024, at 05:19 (UTC). Text is available under the Creative Commons Attribution-ShareAlike 4.0 License; additional terms may apply.