CINXE.COM

Message Passing Interface - Wikipedia

<!DOCTYPE html> <html class="client-nojs vector-feature-language-in-header-enabled vector-feature-language-in-main-page-header-disabled vector-feature-sticky-header-disabled vector-feature-page-tools-pinned-disabled vector-feature-toc-pinned-clientpref-1 vector-feature-main-menu-pinned-disabled vector-feature-limited-width-clientpref-1 vector-feature-limited-width-content-enabled vector-feature-custom-font-size-clientpref-1 vector-feature-appearance-pinned-clientpref-1 vector-feature-night-mode-enabled skin-theme-clientpref-day vector-toc-available" lang="en" dir="ltr"> <head> <meta charset="UTF-8"> <title>Message Passing Interface - Wikipedia</title> <script>(function(){var className="client-js vector-feature-language-in-header-enabled vector-feature-language-in-main-page-header-disabled vector-feature-sticky-header-disabled vector-feature-page-tools-pinned-disabled vector-feature-toc-pinned-clientpref-1 vector-feature-main-menu-pinned-disabled vector-feature-limited-width-clientpref-1 vector-feature-limited-width-content-enabled vector-feature-custom-font-size-clientpref-1 vector-feature-appearance-pinned-clientpref-1 vector-feature-night-mode-enabled skin-theme-clientpref-day vector-toc-available";var cookie=document.cookie.match(/(?:^|; )enwikimwclientpreferences=([^;]+)/);if(cookie){cookie[1].split('%2C').forEach(function(pref){className=className.replace(new RegExp('(^| )'+pref.replace(/-clientpref-\w+$|[^\w-]+/g,'')+'-clientpref-\\w+( |$)'),'$1'+pref+'$2');});}document.documentElement.className=className;}());RLCONF={"wgBreakFrames":false,"wgSeparatorTransformTable":["",""],"wgDigitTransformTable":["",""],"wgDefaultDateFormat":"dmy", "wgMonthNames":["","January","February","March","April","May","June","July","August","September","October","November","December"],"wgRequestId":"b2fd8e56-fda8-4281-bf3d-23c18e19caa3","wgCanonicalNamespace":"","wgCanonicalSpecialPageName":false,"wgNamespaceNumber":0,"wgPageName":"Message_Passing_Interface","wgTitle":"Message Passing Interface","wgCurRevisionId":1258236261,"wgRevisionId":1258236261,"wgArticleId":221466,"wgIsArticle":true,"wgIsRedirect":false,"wgAction":"view","wgUserName":null,"wgUserGroups":["*"],"wgCategories":["Harv and Sfn no-target errors","Articles with short description","Short description matches Wikidata","Wikipedia articles in need of updating from October 2021","All Wikipedia articles in need of updating","Wikipedia articles in need of updating from May 2024","Articles with multiple maintenance issues","Wikipedia articles in need of updating from August 2022","Articles needing additional references from July 2021","All articles needing additional references", "Wikipedia articles needing clarification from April 2015","Articles to be expanded from June 2008","All articles to be expanded","All articles with unsourced statements","Articles with unsourced statements from January 2011","Articles with example C code","Application programming interfaces","Parallel computing"],"wgPageViewLanguage":"en","wgPageContentLanguage":"en","wgPageContentModel":"wikitext","wgRelevantPageName":"Message_Passing_Interface","wgRelevantArticleId":221466,"wgIsProbablyEditable":true,"wgRelevantPageIsProbablyEditable":true,"wgRestrictionEdit":[],"wgRestrictionMove":[],"wgNoticeProject":"wikipedia","wgCiteReferencePreviewsActive":false,"wgFlaggedRevsParams":{"tags":{"status":{"levels":1}}},"wgMediaViewerOnClick":true,"wgMediaViewerEnabledByDefault":true,"wgPopupsFlags":0,"wgVisualEditor":{"pageLanguageCode":"en","pageLanguageDir":"ltr","pageVariantFallbacks":"en"},"wgMFDisplayWikibaseDescriptions":{"search":true,"watchlist":true,"tagline":false,"nearby":true}, "wgWMESchemaEditAttemptStepOversample":false,"wgWMEPageLength":50000,"wgRelatedArticlesCompat":[],"wgCentralAuthMobileDomain":false,"wgEditSubmitButtonLabelPublish":true,"wgULSPosition":"interlanguage","wgULSisCompactLinksEnabled":false,"wgVector2022LanguageInHeader":true,"wgULSisLanguageSelectorEmpty":false,"wgWikibaseItemId":"Q127879","wgCheckUserClientHintsHeadersJsApi":["brands","architecture","bitness","fullVersionList","mobile","model","platform","platformVersion"],"GEHomepageSuggestedEditsEnableTopics":true,"wgGETopicsMatchModeEnabled":false,"wgGEStructuredTaskRejectionReasonTextInputEnabled":false,"wgGELevelingUpEnabledForUser":false};RLSTATE={"ext.globalCssJs.user.styles":"ready","site.styles":"ready","user.styles":"ready","ext.globalCssJs.user":"ready","user":"ready","user.options":"loading","ext.cite.styles":"ready","ext.pygments":"ready","skins.vector.search.codex.styles":"ready","skins.vector.styles":"ready","skins.vector.icons":"ready","jquery.makeCollapsible.styles": "ready","ext.wikimediamessages.styles":"ready","ext.visualEditor.desktopArticleTarget.noscript":"ready","ext.uls.interlanguage":"ready","wikibase.client.init":"ready","ext.wikimediaBadges":"ready"};RLPAGEMODULES=["ext.cite.ux-enhancements","ext.pygments.view","ext.scribunto.logs","site","mediawiki.page.ready","jquery.makeCollapsible","mediawiki.toc","skins.vector.js","ext.centralNotice.geoIP","ext.centralNotice.startUp","ext.gadget.ReferenceTooltips","ext.gadget.switcher","ext.urlShortener.toolbar","ext.centralauth.centralautologin","mmv.bootstrap","ext.popups","ext.visualEditor.desktopArticleTarget.init","ext.visualEditor.targetLoader","ext.echo.centralauth","ext.eventLogging","ext.wikimediaEvents","ext.navigationTiming","ext.uls.interface","ext.cx.eventlogging.campaigns","ext.cx.uls.quick.actions","wikibase.client.vector-2022","ext.checkUser.clientHints","ext.quicksurveys.init","ext.growthExperiments.SuggestedEditSession","wikibase.sidebar.tracking"];</script> <script>(RLQ=window.RLQ||[]).push(function(){mw.loader.impl(function(){return["user.options@12s5i",function($,jQuery,require,module){mw.user.tokens.set({"patrolToken":"+\\","watchToken":"+\\","csrfToken":"+\\"}); }];});});</script> <link rel="stylesheet" href="/w/load.php?lang=en&amp;modules=ext.cite.styles%7Cext.pygments%2CwikimediaBadges%7Cext.uls.interlanguage%7Cext.visualEditor.desktopArticleTarget.noscript%7Cext.wikimediamessages.styles%7Cjquery.makeCollapsible.styles%7Cskins.vector.icons%2Cstyles%7Cskins.vector.search.codex.styles%7Cwikibase.client.init&amp;only=styles&amp;skin=vector-2022"> <script async="" src="/w/load.php?lang=en&amp;modules=startup&amp;only=scripts&amp;raw=1&amp;skin=vector-2022"></script> <meta name="ResourceLoaderDynamicStyles" content=""> <link rel="stylesheet" href="/w/load.php?lang=en&amp;modules=site.styles&amp;only=styles&amp;skin=vector-2022"> <meta name="generator" content="MediaWiki 1.44.0-wmf.4"> <meta name="referrer" content="origin"> <meta name="referrer" content="origin-when-cross-origin"> <meta name="robots" content="max-image-preview:standard"> <meta name="format-detection" content="telephone=no"> <meta name="viewport" content="width=1120"> <meta property="og:title" content="Message Passing Interface - Wikipedia"> <meta property="og:type" content="website"> <link rel="preconnect" href="//upload.wikimedia.org"> <link rel="alternate" media="only screen and (max-width: 640px)" href="//en.m.wikipedia.org/wiki/Message_Passing_Interface"> <link rel="alternate" type="application/x-wiki" title="Edit this page" href="/w/index.php?title=Message_Passing_Interface&amp;action=edit"> <link rel="apple-touch-icon" href="/static/apple-touch/wikipedia.png"> <link rel="icon" href="/static/favicon/wikipedia.ico"> <link rel="search" type="application/opensearchdescription+xml" href="/w/rest.php/v1/search" title="Wikipedia (en)"> <link rel="EditURI" type="application/rsd+xml" href="//en.wikipedia.org/w/api.php?action=rsd"> <link rel="canonical" href="https://en.wikipedia.org/wiki/Message_Passing_Interface"> <link rel="license" href="https://creativecommons.org/licenses/by-sa/4.0/deed.en"> <link rel="alternate" type="application/atom+xml" title="Wikipedia Atom feed" href="/w/index.php?title=Special:RecentChanges&amp;feed=atom"> <link rel="dns-prefetch" href="//meta.wikimedia.org" /> <link rel="dns-prefetch" href="//login.wikimedia.org"> </head> <body class="skin--responsive skin-vector skin-vector-search-vue mediawiki ltr sitedir-ltr mw-hide-empty-elt ns-0 ns-subject mw-editable page-Message_Passing_Interface rootpage-Message_Passing_Interface skin-vector-2022 action-view"><a class="mw-jump-link" href="#bodyContent">Jump to content</a> <div class="vector-header-container"> <header class="vector-header mw-header"> <div class="vector-header-start"> <nav class="vector-main-menu-landmark" aria-label="Site"> <div id="vector-main-menu-dropdown" class="vector-dropdown vector-main-menu-dropdown vector-button-flush-left vector-button-flush-right" > <input type="checkbox" id="vector-main-menu-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-main-menu-dropdown" class="vector-dropdown-checkbox " aria-label="Main menu" > <label id="vector-main-menu-dropdown-label" for="vector-main-menu-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-menu mw-ui-icon-wikimedia-menu"></span> <span class="vector-dropdown-label-text">Main menu</span> </label> <div class="vector-dropdown-content"> <div id="vector-main-menu-unpinned-container" class="vector-unpinned-container"> <div id="vector-main-menu" class="vector-main-menu vector-pinnable-element"> <div class="vector-pinnable-header vector-main-menu-pinnable-header vector-pinnable-header-unpinned" data-feature-name="main-menu-pinned" data-pinnable-element-id="vector-main-menu" data-pinned-container-id="vector-main-menu-pinned-container" data-unpinned-container-id="vector-main-menu-unpinned-container" > <div class="vector-pinnable-header-label">Main menu</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-main-menu.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-main-menu.unpin">hide</button> </div> <div id="p-navigation" class="vector-menu mw-portlet mw-portlet-navigation" > <div class="vector-menu-heading"> Navigation </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="n-mainpage-description" class="mw-list-item"><a href="/wiki/Main_Page" title="Visit the main page [z]" accesskey="z"><span>Main page</span></a></li><li id="n-contents" class="mw-list-item"><a href="/wiki/Wikipedia:Contents" title="Guides to browsing Wikipedia"><span>Contents</span></a></li><li id="n-currentevents" class="mw-list-item"><a href="/wiki/Portal:Current_events" title="Articles related to current events"><span>Current events</span></a></li><li id="n-randompage" class="mw-list-item"><a href="/wiki/Special:Random" title="Visit a randomly selected article [x]" accesskey="x"><span>Random article</span></a></li><li id="n-aboutsite" class="mw-list-item"><a href="/wiki/Wikipedia:About" title="Learn about Wikipedia and how it works"><span>About Wikipedia</span></a></li><li id="n-contactpage" class="mw-list-item"><a href="//en.wikipedia.org/wiki/Wikipedia:Contact_us" title="How to contact Wikipedia"><span>Contact us</span></a></li> </ul> </div> </div> <div id="p-interaction" class="vector-menu mw-portlet mw-portlet-interaction" > <div class="vector-menu-heading"> Contribute </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="n-help" class="mw-list-item"><a href="/wiki/Help:Contents" title="Guidance on how to use and edit Wikipedia"><span>Help</span></a></li><li id="n-introduction" class="mw-list-item"><a href="/wiki/Help:Introduction" title="Learn how to edit Wikipedia"><span>Learn to edit</span></a></li><li id="n-portal" class="mw-list-item"><a href="/wiki/Wikipedia:Community_portal" title="The hub for editors"><span>Community portal</span></a></li><li id="n-recentchanges" class="mw-list-item"><a href="/wiki/Special:RecentChanges" title="A list of recent changes to Wikipedia [r]" accesskey="r"><span>Recent changes</span></a></li><li id="n-upload" class="mw-list-item"><a href="/wiki/Wikipedia:File_upload_wizard" title="Add images or other media for use on Wikipedia"><span>Upload file</span></a></li> </ul> </div> </div> </div> </div> </div> </div> </nav> <a href="/wiki/Main_Page" class="mw-logo"> <img class="mw-logo-icon" src="/static/images/icons/wikipedia.png" alt="" aria-hidden="true" height="50" width="50"> <span class="mw-logo-container skin-invert"> <img class="mw-logo-wordmark" alt="Wikipedia" src="/static/images/mobile/copyright/wikipedia-wordmark-en.svg" style="width: 7.5em; height: 1.125em;"> <img class="mw-logo-tagline" alt="The Free Encyclopedia" src="/static/images/mobile/copyright/wikipedia-tagline-en.svg" width="117" height="13" style="width: 7.3125em; height: 0.8125em;"> </span> </a> </div> <div class="vector-header-end"> <div id="p-search" role="search" class="vector-search-box-vue vector-search-box-collapses vector-search-box-show-thumbnail vector-search-box-auto-expand-width vector-search-box"> <a href="/wiki/Special:Search" class="cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only search-toggle" title="Search Wikipedia [f]" accesskey="f"><span class="vector-icon mw-ui-icon-search mw-ui-icon-wikimedia-search"></span> <span>Search</span> </a> <div class="vector-typeahead-search-container"> <div class="cdx-typeahead-search cdx-typeahead-search--show-thumbnail cdx-typeahead-search--auto-expand-width"> <form action="/w/index.php" id="searchform" class="cdx-search-input cdx-search-input--has-end-button"> <div id="simpleSearch" class="cdx-search-input__input-wrapper" data-search-loc="header-moved"> <div class="cdx-text-input cdx-text-input--has-start-icon"> <input class="cdx-text-input__input" type="search" name="search" placeholder="Search Wikipedia" aria-label="Search Wikipedia" autocapitalize="sentences" title="Search Wikipedia [f]" accesskey="f" id="searchInput" > <span class="cdx-text-input__icon cdx-text-input__start-icon"></span> </div> <input type="hidden" name="title" value="Special:Search"> </div> <button class="cdx-button cdx-search-input__end-button">Search</button> </form> </div> </div> </div> <nav class="vector-user-links vector-user-links-wide" aria-label="Personal tools"> <div class="vector-user-links-main"> <div id="p-vector-user-menu-preferences" class="vector-menu mw-portlet emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> <div id="p-vector-user-menu-userpage" class="vector-menu mw-portlet emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> <nav class="vector-appearance-landmark" aria-label="Appearance"> <div id="vector-appearance-dropdown" class="vector-dropdown " title="Change the appearance of the page&#039;s font size, width, and color" > <input type="checkbox" id="vector-appearance-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-appearance-dropdown" class="vector-dropdown-checkbox " aria-label="Appearance" > <label id="vector-appearance-dropdown-label" for="vector-appearance-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-appearance mw-ui-icon-wikimedia-appearance"></span> <span class="vector-dropdown-label-text">Appearance</span> </label> <div class="vector-dropdown-content"> <div id="vector-appearance-unpinned-container" class="vector-unpinned-container"> </div> </div> </div> </nav> <div id="p-vector-user-menu-notifications" class="vector-menu mw-portlet emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> <div id="p-vector-user-menu-overflow" class="vector-menu mw-portlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-sitesupport-2" class="user-links-collapsible-item mw-list-item user-links-collapsible-item"><a data-mw="interface" href="https://donate.wikimedia.org/wiki/Special:FundraiserRedirector?utm_source=donate&amp;utm_medium=sidebar&amp;utm_campaign=C13_en.wikipedia.org&amp;uselang=en" class=""><span>Donate</span></a> </li> <li id="pt-createaccount-2" class="user-links-collapsible-item mw-list-item user-links-collapsible-item"><a data-mw="interface" href="/w/index.php?title=Special:CreateAccount&amp;returnto=Message+Passing+Interface" title="You are encouraged to create an account and log in; however, it is not mandatory" class=""><span>Create account</span></a> </li> <li id="pt-login-2" class="user-links-collapsible-item mw-list-item user-links-collapsible-item"><a data-mw="interface" href="/w/index.php?title=Special:UserLogin&amp;returnto=Message+Passing+Interface" title="You&#039;re encouraged to log in; however, it&#039;s not mandatory. [o]" accesskey="o" class=""><span>Log in</span></a> </li> </ul> </div> </div> </div> <div id="vector-user-links-dropdown" class="vector-dropdown vector-user-menu vector-button-flush-right vector-user-menu-logged-out" title="Log in and more options" > <input type="checkbox" id="vector-user-links-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-user-links-dropdown" class="vector-dropdown-checkbox " aria-label="Personal tools" > <label id="vector-user-links-dropdown-label" for="vector-user-links-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-ellipsis mw-ui-icon-wikimedia-ellipsis"></span> <span class="vector-dropdown-label-text">Personal tools</span> </label> <div class="vector-dropdown-content"> <div id="p-personal" class="vector-menu mw-portlet mw-portlet-personal user-links-collapsible-item" title="User menu" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-sitesupport" class="user-links-collapsible-item mw-list-item"><a href="https://donate.wikimedia.org/wiki/Special:FundraiserRedirector?utm_source=donate&amp;utm_medium=sidebar&amp;utm_campaign=C13_en.wikipedia.org&amp;uselang=en"><span>Donate</span></a></li><li id="pt-createaccount" class="user-links-collapsible-item mw-list-item"><a href="/w/index.php?title=Special:CreateAccount&amp;returnto=Message+Passing+Interface" title="You are encouraged to create an account and log in; however, it is not mandatory"><span class="vector-icon mw-ui-icon-userAdd mw-ui-icon-wikimedia-userAdd"></span> <span>Create account</span></a></li><li id="pt-login" class="user-links-collapsible-item mw-list-item"><a href="/w/index.php?title=Special:UserLogin&amp;returnto=Message+Passing+Interface" title="You&#039;re encouraged to log in; however, it&#039;s not mandatory. [o]" accesskey="o"><span class="vector-icon mw-ui-icon-logIn mw-ui-icon-wikimedia-logIn"></span> <span>Log in</span></a></li> </ul> </div> </div> <div id="p-user-menu-anon-editor" class="vector-menu mw-portlet mw-portlet-user-menu-anon-editor" > <div class="vector-menu-heading"> Pages for logged out editors <a href="/wiki/Help:Introduction" aria-label="Learn more about editing"><span>learn more</span></a> </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="pt-anoncontribs" class="mw-list-item"><a href="/wiki/Special:MyContributions" title="A list of edits made from this IP address [y]" accesskey="y"><span>Contributions</span></a></li><li id="pt-anontalk" class="mw-list-item"><a href="/wiki/Special:MyTalk" title="Discussion about edits from this IP address [n]" accesskey="n"><span>Talk</span></a></li> </ul> </div> </div> </div> </div> </nav> </div> </header> </div> <div class="mw-page-container"> <div class="mw-page-container-inner"> <div class="vector-sitenotice-container"> <div id="siteNotice"><!-- CentralNotice --></div> </div> <div class="vector-column-start"> <div class="vector-main-menu-container"> <div id="mw-navigation"> <nav id="mw-panel" class="vector-main-menu-landmark" aria-label="Site"> <div id="vector-main-menu-pinned-container" class="vector-pinned-container"> </div> </nav> </div> </div> <div class="vector-sticky-pinned-container"> <nav id="mw-panel-toc" aria-label="Contents" data-event-name="ui.sidebar-toc" class="mw-table-of-contents-container vector-toc-landmark"> <div id="vector-toc-pinned-container" class="vector-pinned-container"> <div id="vector-toc" class="vector-toc vector-pinnable-element"> <div class="vector-pinnable-header vector-toc-pinnable-header vector-pinnable-header-pinned" data-feature-name="toc-pinned" data-pinnable-element-id="vector-toc" > <h2 class="vector-pinnable-header-label">Contents</h2> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-toc.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-toc.unpin">hide</button> </div> <ul class="vector-toc-contents" id="mw-panel-toc-list"> <li id="toc-mw-content-text" class="vector-toc-list-item vector-toc-level-1"> <a href="#" class="vector-toc-link"> <div class="vector-toc-text">(Top)</div> </a> </li> <li id="toc-History" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#History"> <div class="vector-toc-text"> <span class="vector-toc-numb">1</span> <span>History</span> </div> </a> <ul id="toc-History-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Overview" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Overview"> <div class="vector-toc-text"> <span class="vector-toc-numb">2</span> <span>Overview</span> </div> </a> <ul id="toc-Overview-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Functionality" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Functionality"> <div class="vector-toc-text"> <span class="vector-toc-numb">3</span> <span>Functionality</span> </div> </a> <ul id="toc-Functionality-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Concepts" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Concepts"> <div class="vector-toc-text"> <span class="vector-toc-numb">4</span> <span>Concepts</span> </div> </a> <button aria-controls="toc-Concepts-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Concepts subsection</span> </button> <ul id="toc-Concepts-sublist" class="vector-toc-list"> <li id="toc-Communicator" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Communicator"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.1</span> <span>Communicator</span> </div> </a> <ul id="toc-Communicator-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Point-to-point_basics" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Point-to-point_basics"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.2</span> <span>Point-to-point basics</span> </div> </a> <ul id="toc-Point-to-point_basics-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Collective_basics" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Collective_basics"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.3</span> <span>Collective basics</span> </div> </a> <ul id="toc-Collective_basics-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Derived_data_types" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Derived_data_types"> <div class="vector-toc-text"> <span class="vector-toc-numb">4.4</span> <span>Derived data types</span> </div> </a> <ul id="toc-Derived_data_types-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-MPI-2_concepts" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#MPI-2_concepts"> <div class="vector-toc-text"> <span class="vector-toc-numb">5</span> <span>MPI-2 concepts</span> </div> </a> <button aria-controls="toc-MPI-2_concepts-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle MPI-2 concepts subsection</span> </button> <ul id="toc-MPI-2_concepts-sublist" class="vector-toc-list"> <li id="toc-One-sided_communication" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#One-sided_communication"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.1</span> <span>One-sided communication</span> </div> </a> <ul id="toc-One-sided_communication-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Dynamic_process_management" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Dynamic_process_management"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.2</span> <span>Dynamic process management</span> </div> </a> <ul id="toc-Dynamic_process_management-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-I/O" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#I/O"> <div class="vector-toc-text"> <span class="vector-toc-numb">5.3</span> <span>I/O</span> </div> </a> <ul id="toc-I/O-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Official_implementations" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Official_implementations"> <div class="vector-toc-text"> <span class="vector-toc-numb">6</span> <span>Official implementations</span> </div> </a> <button aria-controls="toc-Official_implementations-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Official implementations subsection</span> </button> <ul id="toc-Official_implementations-sublist" class="vector-toc-list"> <li id="toc-Hardware" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Hardware"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.1</span> <span>Hardware</span> </div> </a> <ul id="toc-Hardware-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Compiler_wrappers" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Compiler_wrappers"> <div class="vector-toc-text"> <span class="vector-toc-numb">6.2</span> <span>Compiler wrappers</span> </div> </a> <ul id="toc-Compiler_wrappers-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Language_bindings" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Language_bindings"> <div class="vector-toc-text"> <span class="vector-toc-numb">7</span> <span>Language bindings</span> </div> </a> <button aria-controls="toc-Language_bindings-sublist" class="cdx-button cdx-button--weight-quiet cdx-button--icon-only vector-toc-toggle"> <span class="vector-icon mw-ui-icon-wikimedia-expand"></span> <span>Toggle Language bindings subsection</span> </button> <ul id="toc-Language_bindings-sublist" class="vector-toc-list"> <li id="toc-Common_Language_Infrastructure" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Common_Language_Infrastructure"> <div class="vector-toc-text"> <span class="vector-toc-numb">7.1</span> <span>Common Language Infrastructure</span> </div> </a> <ul id="toc-Common_Language_Infrastructure-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Java" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Java"> <div class="vector-toc-text"> <span class="vector-toc-numb">7.2</span> <span>Java</span> </div> </a> <ul id="toc-Java-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Julia" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Julia"> <div class="vector-toc-text"> <span class="vector-toc-numb">7.3</span> <span>Julia</span> </div> </a> <ul id="toc-Julia-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-MATLAB" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#MATLAB"> <div class="vector-toc-text"> <span class="vector-toc-numb">7.4</span> <span>MATLAB</span> </div> </a> <ul id="toc-MATLAB-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-OCaml" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#OCaml"> <div class="vector-toc-text"> <span class="vector-toc-numb">7.5</span> <span>OCaml</span> </div> </a> <ul id="toc-OCaml-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-PARI/GP" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#PARI/GP"> <div class="vector-toc-text"> <span class="vector-toc-numb">7.6</span> <span>PARI/GP</span> </div> </a> <ul id="toc-PARI/GP-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Python" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#Python"> <div class="vector-toc-text"> <span class="vector-toc-numb">7.7</span> <span>Python</span> </div> </a> <ul id="toc-Python-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-R" class="vector-toc-list-item vector-toc-level-2"> <a class="vector-toc-link" href="#R"> <div class="vector-toc-text"> <span class="vector-toc-numb">7.8</span> <span>R</span> </div> </a> <ul id="toc-R-sublist" class="vector-toc-list"> </ul> </li> </ul> </li> <li id="toc-Example_program" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Example_program"> <div class="vector-toc-text"> <span class="vector-toc-numb">8</span> <span>Example program</span> </div> </a> <ul id="toc-Example_program-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-MPI-2_adoption" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#MPI-2_adoption"> <div class="vector-toc-text"> <span class="vector-toc-numb">9</span> <span>MPI-2 adoption</span> </div> </a> <ul id="toc-MPI-2_adoption-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Future" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Future"> <div class="vector-toc-text"> <span class="vector-toc-numb">10</span> <span>Future</span> </div> </a> <ul id="toc-Future-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-See_also" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#See_also"> <div class="vector-toc-text"> <span class="vector-toc-numb">11</span> <span>See also</span> </div> </a> <ul id="toc-See_also-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-References" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#References"> <div class="vector-toc-text"> <span class="vector-toc-numb">12</span> <span>References</span> </div> </a> <ul id="toc-References-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-Further_reading" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#Further_reading"> <div class="vector-toc-text"> <span class="vector-toc-numb">13</span> <span>Further reading</span> </div> </a> <ul id="toc-Further_reading-sublist" class="vector-toc-list"> </ul> </li> <li id="toc-External_links" class="vector-toc-list-item vector-toc-level-1"> <a class="vector-toc-link" href="#External_links"> <div class="vector-toc-text"> <span class="vector-toc-numb">14</span> <span>External links</span> </div> </a> <ul id="toc-External_links-sublist" class="vector-toc-list"> </ul> </li> </ul> </div> </div> </nav> </div> </div> <div class="mw-content-container"> <main id="content" class="mw-body"> <header class="mw-body-header vector-page-titlebar"> <nav aria-label="Contents" class="vector-toc-landmark"> <div id="vector-page-titlebar-toc" class="vector-dropdown vector-page-titlebar-toc vector-button-flush-left" > <input type="checkbox" id="vector-page-titlebar-toc-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-titlebar-toc" class="vector-dropdown-checkbox " aria-label="Toggle the table of contents" > <label id="vector-page-titlebar-toc-label" for="vector-page-titlebar-toc-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--icon-only " aria-hidden="true" ><span class="vector-icon mw-ui-icon-listBullet mw-ui-icon-wikimedia-listBullet"></span> <span class="vector-dropdown-label-text">Toggle the table of contents</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-titlebar-toc-unpinned-container" class="vector-unpinned-container"> </div> </div> </div> </nav> <h1 id="firstHeading" class="firstHeading mw-first-heading"><span class="mw-page-title-main">Message Passing Interface</span></h1> <div id="p-lang-btn" class="vector-dropdown mw-portlet mw-portlet-lang" > <input type="checkbox" id="p-lang-btn-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-p-lang-btn" class="vector-dropdown-checkbox mw-interlanguage-selector" aria-label="Go to an article in another language. Available in 23 languages" > <label id="p-lang-btn-label" for="p-lang-btn-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet cdx-button--action-progressive mw-portlet-lang-heading-23" aria-hidden="true" ><span class="vector-icon mw-ui-icon-language-progressive mw-ui-icon-wikimedia-language-progressive"></span> <span class="vector-dropdown-label-text">23 languages</span> </label> <div class="vector-dropdown-content"> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li class="interlanguage-link interwiki-ar mw-list-item"><a href="https://ar.wikipedia.org/wiki/%D9%88%D8%A7%D8%AC%D9%87%D8%A9_%D8%AA%D9%85%D8%B1%D9%8A%D8%B1_%D8%A7%D9%84%D8%B1%D8%B3%D8%A7%D8%A6%D9%84" title="واجهة تمرير الرسائل – Arabic" lang="ar" hreflang="ar" data-title="واجهة تمرير الرسائل" data-language-autonym="العربية" data-language-local-name="Arabic" class="interlanguage-link-target"><span>العربية</span></a></li><li class="interlanguage-link interwiki-be-x-old mw-list-item"><a href="https://be-tarask.wikipedia.org/wiki/Message_Passing_Interface" title="Message Passing Interface – Belarusian (Taraškievica orthography)" lang="be-tarask" hreflang="be-tarask" data-title="Message Passing Interface" data-language-autonym="Беларуская (тарашкевіца)" data-language-local-name="Belarusian (Taraškievica orthography)" class="interlanguage-link-target"><span>Беларуская (тарашкевіца)</span></a></li><li class="interlanguage-link interwiki-ca mw-list-item"><a href="https://ca.wikipedia.org/wiki/Message_Passing_Interface" title="Message Passing Interface – Catalan" lang="ca" hreflang="ca" data-title="Message Passing Interface" data-language-autonym="Català" data-language-local-name="Catalan" class="interlanguage-link-target"><span>Català</span></a></li><li class="interlanguage-link interwiki-cs mw-list-item"><a href="https://cs.wikipedia.org/wiki/Message_Passing_Interface" title="Message Passing Interface – Czech" lang="cs" hreflang="cs" data-title="Message Passing Interface" data-language-autonym="Čeština" data-language-local-name="Czech" class="interlanguage-link-target"><span>Čeština</span></a></li><li class="interlanguage-link interwiki-de mw-list-item"><a href="https://de.wikipedia.org/wiki/Message_Passing_Interface" title="Message Passing Interface – German" lang="de" hreflang="de" data-title="Message Passing Interface" data-language-autonym="Deutsch" data-language-local-name="German" class="interlanguage-link-target"><span>Deutsch</span></a></li><li class="interlanguage-link interwiki-es mw-list-item"><a href="https://es.wikipedia.org/wiki/Interfaz_de_Paso_de_Mensajes" title="Interfaz de Paso de Mensajes – Spanish" lang="es" hreflang="es" data-title="Interfaz de Paso de Mensajes" data-language-autonym="Español" data-language-local-name="Spanish" class="interlanguage-link-target"><span>Español</span></a></li><li class="interlanguage-link interwiki-eu mw-list-item"><a href="https://eu.wikipedia.org/wiki/Mezu_Trukaketarako_Interfazea" title="Mezu Trukaketarako Interfazea – Basque" lang="eu" hreflang="eu" data-title="Mezu Trukaketarako Interfazea" data-language-autonym="Euskara" data-language-local-name="Basque" class="interlanguage-link-target"><span>Euskara</span></a></li><li class="interlanguage-link interwiki-fa mw-list-item"><a href="https://fa.wikipedia.org/wiki/%D8%B1%D8%A7%D8%A8%D8%B7_%D9%81%D8%B1%D8%B3%D8%AA%D8%A7%D8%AF%D9%86_%D9%BE%DB%8C%D8%A7%D9%85" title="رابط فرستادن پیام – Persian" lang="fa" hreflang="fa" data-title="رابط فرستادن پیام" data-language-autonym="فارسی" data-language-local-name="Persian" class="interlanguage-link-target"><span>فارسی</span></a></li><li class="interlanguage-link interwiki-fr mw-list-item"><a href="https://fr.wikipedia.org/wiki/Message_Passing_Interface" title="Message Passing Interface – French" lang="fr" hreflang="fr" data-title="Message Passing Interface" data-language-autonym="Français" data-language-local-name="French" class="interlanguage-link-target"><span>Français</span></a></li><li class="interlanguage-link interwiki-ko mw-list-item"><a href="https://ko.wikipedia.org/wiki/%EB%A9%94%EC%8B%9C%EC%A7%80_%EC%A0%84%EB%8B%AC_%EC%9D%B8%ED%84%B0%ED%8E%98%EC%9D%B4%EC%8A%A4" title="메시지 전달 인터페이스 – Korean" lang="ko" hreflang="ko" data-title="메시지 전달 인터페이스" data-language-autonym="한국어" data-language-local-name="Korean" class="interlanguage-link-target"><span>한국어</span></a></li><li class="interlanguage-link interwiki-it mw-list-item"><a href="https://it.wikipedia.org/wiki/Message_Passing_Interface" title="Message Passing Interface – Italian" lang="it" hreflang="it" data-title="Message Passing Interface" data-language-autonym="Italiano" data-language-local-name="Italian" class="interlanguage-link-target"><span>Italiano</span></a></li><li class="interlanguage-link interwiki-lt mw-list-item"><a href="https://lt.wikipedia.org/wiki/Message_Passing_Interface" title="Message Passing Interface – Lithuanian" lang="lt" hreflang="lt" data-title="Message Passing Interface" data-language-autonym="Lietuvių" data-language-local-name="Lithuanian" class="interlanguage-link-target"><span>Lietuvių</span></a></li><li class="interlanguage-link interwiki-nl mw-list-item"><a href="https://nl.wikipedia.org/wiki/Message_Passing_Interface" title="Message Passing Interface – Dutch" lang="nl" hreflang="nl" data-title="Message Passing Interface" data-language-autonym="Nederlands" data-language-local-name="Dutch" class="interlanguage-link-target"><span>Nederlands</span></a></li><li class="interlanguage-link interwiki-ja mw-list-item"><a href="https://ja.wikipedia.org/wiki/Message_Passing_Interface" title="Message Passing Interface – Japanese" lang="ja" hreflang="ja" data-title="Message Passing Interface" data-language-autonym="日本語" data-language-local-name="Japanese" class="interlanguage-link-target"><span>日本語</span></a></li><li class="interlanguage-link interwiki-no mw-list-item"><a href="https://no.wikipedia.org/wiki/Meldingsoverf%C3%B8ringsgrensesnitt" title="Meldingsoverføringsgrensesnitt – Norwegian Bokmål" lang="nb" hreflang="nb" data-title="Meldingsoverføringsgrensesnitt" data-language-autonym="Norsk bokmål" data-language-local-name="Norwegian Bokmål" class="interlanguage-link-target"><span>Norsk bokmål</span></a></li><li class="interlanguage-link interwiki-pl mw-list-item"><a href="https://pl.wikipedia.org/wiki/Message_Passing_Interface" title="Message Passing Interface – Polish" lang="pl" hreflang="pl" data-title="Message Passing Interface" data-language-autonym="Polski" data-language-local-name="Polish" class="interlanguage-link-target"><span>Polski</span></a></li><li class="interlanguage-link interwiki-pt mw-list-item"><a href="https://pt.wikipedia.org/wiki/Message_Passing_Interface" title="Message Passing Interface – Portuguese" lang="pt" hreflang="pt" data-title="Message Passing Interface" data-language-autonym="Português" data-language-local-name="Portuguese" class="interlanguage-link-target"><span>Português</span></a></li><li class="interlanguage-link interwiki-ru mw-list-item"><a href="https://ru.wikipedia.org/wiki/Message_Passing_Interface" title="Message Passing Interface – Russian" lang="ru" hreflang="ru" data-title="Message Passing Interface" data-language-autonym="Русский" data-language-local-name="Russian" class="interlanguage-link-target"><span>Русский</span></a></li><li class="interlanguage-link interwiki-sk mw-list-item"><a href="https://sk.wikipedia.org/wiki/Message_Passing_Interface" title="Message Passing Interface – Slovak" lang="sk" hreflang="sk" data-title="Message Passing Interface" data-language-autonym="Slovenčina" data-language-local-name="Slovak" class="interlanguage-link-target"><span>Slovenčina</span></a></li><li class="interlanguage-link interwiki-tr mw-list-item"><a href="https://tr.wikipedia.org/wiki/MPI" title="MPI – Turkish" lang="tr" hreflang="tr" data-title="MPI" data-language-autonym="Türkçe" data-language-local-name="Turkish" class="interlanguage-link-target"><span>Türkçe</span></a></li><li class="interlanguage-link interwiki-uk mw-list-item"><a href="https://uk.wikipedia.org/wiki/Message_Passing_Interface" title="Message Passing Interface – Ukrainian" lang="uk" hreflang="uk" data-title="Message Passing Interface" data-language-autonym="Українська" data-language-local-name="Ukrainian" class="interlanguage-link-target"><span>Українська</span></a></li><li class="interlanguage-link interwiki-vi mw-list-item"><a href="https://vi.wikipedia.org/wiki/Giao_th%E1%BB%A9c_truy%E1%BB%81n_th%C3%B4ng_%C4%91i%E1%BB%87p" title="Giao thức truyền thông điệp – Vietnamese" lang="vi" hreflang="vi" data-title="Giao thức truyền thông điệp" data-language-autonym="Tiếng Việt" data-language-local-name="Vietnamese" class="interlanguage-link-target"><span>Tiếng Việt</span></a></li><li class="interlanguage-link interwiki-zh mw-list-item"><a href="https://zh.wikipedia.org/wiki/%E8%A8%8A%E6%81%AF%E5%82%B3%E9%81%9E%E4%BB%8B%E9%9D%A2" title="訊息傳遞介面 – Chinese" lang="zh" hreflang="zh" data-title="訊息傳遞介面" data-language-autonym="中文" data-language-local-name="Chinese" class="interlanguage-link-target"><span>中文</span></a></li> </ul> <div class="after-portlet after-portlet-lang"><span class="wb-langlinks-edit wb-langlinks-link"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q127879#sitelinks-wikipedia" title="Edit interlanguage links" class="wbc-editpage">Edit links</a></span></div> </div> </div> </div> </header> <div class="vector-page-toolbar"> <div class="vector-page-toolbar-container"> <div id="left-navigation"> <nav aria-label="Namespaces"> <div id="p-associated-pages" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-associated-pages" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-nstab-main" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/Message_Passing_Interface" title="View the content page [c]" accesskey="c"><span>Article</span></a></li><li id="ca-talk" class="vector-tab-noicon mw-list-item"><a href="/wiki/Talk:Message_Passing_Interface" rel="discussion" title="Discuss improvements to the content page [t]" accesskey="t"><span>Talk</span></a></li> </ul> </div> </div> <div id="vector-variants-dropdown" class="vector-dropdown emptyPortlet" > <input type="checkbox" id="vector-variants-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-variants-dropdown" class="vector-dropdown-checkbox " aria-label="Change language variant" > <label id="vector-variants-dropdown-label" for="vector-variants-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">English</span> </label> <div class="vector-dropdown-content"> <div id="p-variants" class="vector-menu mw-portlet mw-portlet-variants emptyPortlet" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> </ul> </div> </div> </div> </div> </nav> </div> <div id="right-navigation" class="vector-collapsible"> <nav aria-label="Views"> <div id="p-views" class="vector-menu vector-menu-tabs mw-portlet mw-portlet-views" > <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-view" class="selected vector-tab-noicon mw-list-item"><a href="/wiki/Message_Passing_Interface"><span>Read</span></a></li><li id="ca-edit" class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-history" class="vector-tab-noicon mw-list-item"><a href="/w/index.php?title=Message_Passing_Interface&amp;action=history" title="Past revisions of this page [h]" accesskey="h"><span>View history</span></a></li> </ul> </div> </div> </nav> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-dropdown" class="vector-dropdown vector-page-tools-dropdown" > <input type="checkbox" id="vector-page-tools-dropdown-checkbox" role="button" aria-haspopup="true" data-event-name="ui.dropdown-vector-page-tools-dropdown" class="vector-dropdown-checkbox " aria-label="Tools" > <label id="vector-page-tools-dropdown-label" for="vector-page-tools-dropdown-checkbox" class="vector-dropdown-label cdx-button cdx-button--fake-button cdx-button--fake-button--enabled cdx-button--weight-quiet" aria-hidden="true" ><span class="vector-dropdown-label-text">Tools</span> </label> <div class="vector-dropdown-content"> <div id="vector-page-tools-unpinned-container" class="vector-unpinned-container"> <div id="vector-page-tools" class="vector-page-tools vector-pinnable-element"> <div class="vector-pinnable-header vector-page-tools-pinnable-header vector-pinnable-header-unpinned" data-feature-name="page-tools-pinned" data-pinnable-element-id="vector-page-tools" data-pinned-container-id="vector-page-tools-pinned-container" data-unpinned-container-id="vector-page-tools-unpinned-container" > <div class="vector-pinnable-header-label">Tools</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-page-tools.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-page-tools.unpin">hide</button> </div> <div id="p-cactions" class="vector-menu mw-portlet mw-portlet-cactions emptyPortlet vector-has-collapsible-items" title="More options" > <div class="vector-menu-heading"> Actions </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="ca-more-view" class="selected vector-more-collapsible-item mw-list-item"><a href="/wiki/Message_Passing_Interface"><span>Read</span></a></li><li id="ca-more-edit" class="vector-more-collapsible-item mw-list-item"><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit" title="Edit this page [e]" accesskey="e"><span>Edit</span></a></li><li id="ca-more-history" class="vector-more-collapsible-item mw-list-item"><a href="/w/index.php?title=Message_Passing_Interface&amp;action=history"><span>View history</span></a></li> </ul> </div> </div> <div id="p-tb" class="vector-menu mw-portlet mw-portlet-tb" > <div class="vector-menu-heading"> General </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="t-whatlinkshere" class="mw-list-item"><a href="/wiki/Special:WhatLinksHere/Message_Passing_Interface" title="List of all English Wikipedia pages containing links to this page [j]" accesskey="j"><span>What links here</span></a></li><li id="t-recentchangeslinked" class="mw-list-item"><a href="/wiki/Special:RecentChangesLinked/Message_Passing_Interface" rel="nofollow" title="Recent changes in pages linked from this page [k]" accesskey="k"><span>Related changes</span></a></li><li id="t-upload" class="mw-list-item"><a href="/wiki/Wikipedia:File_Upload_Wizard" title="Upload files [u]" accesskey="u"><span>Upload file</span></a></li><li id="t-specialpages" class="mw-list-item"><a href="/wiki/Special:SpecialPages" title="A list of all special pages [q]" accesskey="q"><span>Special pages</span></a></li><li id="t-permalink" class="mw-list-item"><a href="/w/index.php?title=Message_Passing_Interface&amp;oldid=1258236261" title="Permanent link to this revision of this page"><span>Permanent link</span></a></li><li id="t-info" class="mw-list-item"><a href="/w/index.php?title=Message_Passing_Interface&amp;action=info" title="More information about this page"><span>Page information</span></a></li><li id="t-cite" class="mw-list-item"><a href="/w/index.php?title=Special:CiteThisPage&amp;page=Message_Passing_Interface&amp;id=1258236261&amp;wpFormIdentifier=titleform" title="Information on how to cite this page"><span>Cite this page</span></a></li><li id="t-urlshortener" class="mw-list-item"><a href="/w/index.php?title=Special:UrlShortener&amp;url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FMessage_Passing_Interface"><span>Get shortened URL</span></a></li><li id="t-urlshortener-qrcode" class="mw-list-item"><a href="/w/index.php?title=Special:QrCode&amp;url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FMessage_Passing_Interface"><span>Download QR code</span></a></li> </ul> </div> </div> <div id="p-coll-print_export" class="vector-menu mw-portlet mw-portlet-coll-print_export" > <div class="vector-menu-heading"> Print/export </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="coll-download-as-rl" class="mw-list-item"><a href="/w/index.php?title=Special:DownloadAsPdf&amp;page=Message_Passing_Interface&amp;action=show-download-screen" title="Download this page as a PDF file"><span>Download as PDF</span></a></li><li id="t-print" class="mw-list-item"><a href="/w/index.php?title=Message_Passing_Interface&amp;printable=yes" title="Printable version of this page [p]" accesskey="p"><span>Printable version</span></a></li> </ul> </div> </div> <div id="p-wikibase-otherprojects" class="vector-menu mw-portlet mw-portlet-wikibase-otherprojects" > <div class="vector-menu-heading"> In other projects </div> <div class="vector-menu-content"> <ul class="vector-menu-content-list"> <li id="t-wikibase" class="wb-otherproject-link wb-otherproject-wikibase-dataitem mw-list-item"><a href="https://www.wikidata.org/wiki/Special:EntityPage/Q127879" title="Structured data on this page hosted by Wikidata [g]" accesskey="g"><span>Wikidata item</span></a></li> </ul> </div> </div> </div> </div> </div> </div> </nav> </div> </div> </div> <div class="vector-column-end"> <div class="vector-sticky-pinned-container"> <nav class="vector-page-tools-landmark" aria-label="Page tools"> <div id="vector-page-tools-pinned-container" class="vector-pinned-container"> </div> </nav> <nav class="vector-appearance-landmark" aria-label="Appearance"> <div id="vector-appearance-pinned-container" class="vector-pinned-container"> <div id="vector-appearance" class="vector-appearance vector-pinnable-element"> <div class="vector-pinnable-header vector-appearance-pinnable-header vector-pinnable-header-pinned" data-feature-name="appearance-pinned" data-pinnable-element-id="vector-appearance" data-pinned-container-id="vector-appearance-pinned-container" data-unpinned-container-id="vector-appearance-unpinned-container" > <div class="vector-pinnable-header-label">Appearance</div> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-pin-button" data-event-name="pinnable-header.vector-appearance.pin">move to sidebar</button> <button class="vector-pinnable-header-toggle-button vector-pinnable-header-unpin-button" data-event-name="pinnable-header.vector-appearance.unpin">hide</button> </div> </div> </div> </nav> </div> </div> <div id="bodyContent" class="vector-body" aria-labelledby="firstHeading" data-mw-ve-target-container> <div class="vector-body-before-content"> <div class="mw-indicators"> </div> <div id="siteSub" class="noprint">From Wikipedia, the free encyclopedia</div> </div> <div id="contentSub"><div id="mw-content-subtitle"></div></div> <div id="mw-content-text" class="mw-body-content"><div class="mw-content-ltr mw-parser-output" lang="en" dir="ltr"><div class="shortdescription nomobile noexcerpt noprint searchaux" style="display:none">Message-passing system for parallel computers</div> <style data-mw-deduplicate="TemplateStyles:r1251242444">.mw-parser-output .ambox{border:1px solid #a2a9b1;border-left:10px solid #36c;background-color:#fbfbfb;box-sizing:border-box}.mw-parser-output .ambox+link+.ambox,.mw-parser-output .ambox+link+style+.ambox,.mw-parser-output .ambox+link+link+.ambox,.mw-parser-output .ambox+.mw-empty-elt+link+.ambox,.mw-parser-output .ambox+.mw-empty-elt+link+style+.ambox,.mw-parser-output .ambox+.mw-empty-elt+link+link+.ambox{margin-top:-1px}html body.mediawiki .mw-parser-output .ambox.mbox-small-left{margin:4px 1em 4px 0;overflow:hidden;width:238px;border-collapse:collapse;font-size:88%;line-height:1.25em}.mw-parser-output .ambox-speedy{border-left:10px solid #b32424;background-color:#fee7e6}.mw-parser-output .ambox-delete{border-left:10px solid #b32424}.mw-parser-output .ambox-content{border-left:10px solid #f28500}.mw-parser-output .ambox-style{border-left:10px solid #fc3}.mw-parser-output .ambox-move{border-left:10px solid #9932cc}.mw-parser-output .ambox-protection{border-left:10px solid #a2a9b1}.mw-parser-output .ambox .mbox-text{border:none;padding:0.25em 0.5em;width:100%}.mw-parser-output .ambox .mbox-image{border:none;padding:2px 0 2px 0.5em;text-align:center}.mw-parser-output .ambox .mbox-imageright{border:none;padding:2px 0.5em 2px 0;text-align:center}.mw-parser-output .ambox .mbox-empty-cell{border:none;padding:0;width:1px}.mw-parser-output .ambox .mbox-image-div{width:52px}@media(min-width:720px){.mw-parser-output .ambox{margin:0 10%}}@media print{body.ns-0 .mw-parser-output .ambox{display:none!important}}</style><style data-mw-deduplicate="TemplateStyles:r1248332772">.mw-parser-output .multiple-issues-text{width:95%;margin:0.2em 0}.mw-parser-output .multiple-issues-text>.mw-collapsible-content{margin-top:0.3em}.mw-parser-output .compact-ambox .ambox{border:none;border-collapse:collapse;background-color:transparent;margin:0 0 0 1.6em!important;padding:0!important;width:auto;display:block}body.mediawiki .mw-parser-output .compact-ambox .ambox.mbox-small-left{font-size:100%;width:auto;margin:0}.mw-parser-output .compact-ambox .ambox .mbox-text{padding:0!important;margin:0!important}.mw-parser-output .compact-ambox .ambox .mbox-text-span{display:list-item;line-height:1.5em;list-style-type:disc}body.skin-minerva .mw-parser-output .multiple-issues-text>.mw-collapsible-toggle,.mw-parser-output .compact-ambox .ambox .mbox-image,.mw-parser-output .compact-ambox .ambox .mbox-imageright,.mw-parser-output .compact-ambox .ambox .mbox-empty-cell,.mw-parser-output .compact-ambox .hide-when-compact{display:none}</style><table class="box-Multiple_issues plainlinks metadata ambox ambox-content ambox-multiple_issues compact-ambox" role="presentation"><tbody><tr><td class="mbox-image"><div class="mbox-image-div"><span typeof="mw:File"><span><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/b/b4/Ambox_important.svg/40px-Ambox_important.svg.png" decoding="async" width="40" height="40" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/b/b4/Ambox_important.svg/60px-Ambox_important.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/b/b4/Ambox_important.svg/80px-Ambox_important.svg.png 2x" data-file-width="40" data-file-height="40" /></span></span></div></td><td class="mbox-text"><div class="mbox-text-span"><div class="multiple-issues-text mw-collapsible"><b>This article has multiple issues.</b> Please help <b><a href="/wiki/Special:EditPage/Message_Passing_Interface" title="Special:EditPage/Message Passing Interface">improve it</a></b> or discuss these issues on the <b><a href="/wiki/Talk:Message_Passing_Interface" title="Talk:Message Passing Interface">talk page</a></b>. <small><i>(<a href="/wiki/Help:Maintenance_template_removal" title="Help:Maintenance template removal">Learn how and when to remove these messages</a>)</i></small> <div class="mw-collapsible-content"> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1251242444"><table class="box-Update plainlinks metadata ambox ambox-content ambox-Update" role="presentation"><tbody><tr><td class="mbox-image"><div class="mbox-image-div"><span typeof="mw:File"><span><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/5/53/Ambox_current_red_Americas.svg/42px-Ambox_current_red_Americas.svg.png" decoding="async" width="42" height="34" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/5/53/Ambox_current_red_Americas.svg/63px-Ambox_current_red_Americas.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/5/53/Ambox_current_red_Americas.svg/84px-Ambox_current_red_Americas.svg.png 2x" data-file-width="360" data-file-height="290" /></span></span></div></td><td class="mbox-text"><div class="mbox-text-span">This article needs to be <b>updated</b>. The reason given is: MPI-4.0 was approved by the MPI Forum in June 2021.<span class="hide-when-compact"> Please help update this article to reflect recent events or newly available information.</span> <span class="date-container"><i>(<span class="date">October 2021</span>)</i></span></div></td></tr></tbody></table> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1251242444"><table class="box-Update plainlinks metadata ambox ambox-content ambox-Update" role="presentation"><tbody><tr><td class="mbox-image"><div class="mbox-image-div"><span typeof="mw:File"><span><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/5/53/Ambox_current_red_Americas.svg/42px-Ambox_current_red_Americas.svg.png" decoding="async" width="42" height="34" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/5/53/Ambox_current_red_Americas.svg/63px-Ambox_current_red_Americas.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/5/53/Ambox_current_red_Americas.svg/84px-Ambox_current_red_Americas.svg.png 2x" data-file-width="360" data-file-height="290" /></span></span></div></td><td class="mbox-text"><div class="mbox-text-span">This article needs to be <b>updated</b>. The reason given is: MPI-4.1 was approved by the MPI Forum in November 2023 (<a rel="nofollow" class="external free" href="https://www.mpi-forum.org/docs/">https://www.mpi-forum.org/docs/</a>).<span class="hide-when-compact"> Please help update this article to reflect recent events or newly available information.</span> <span class="date-container"><i>(<span class="date">May 2024</span>)</i></span></div></td></tr></tbody></table> </div> </div><span class="hide-when-compact"><i> (<small><a href="/wiki/Help:Maintenance_template_removal" title="Help:Maintenance template removal">Learn how and when to remove this message</a></small>)</i></span></div></td></tr></tbody></table> <p>The <b>Message Passing Interface</b> (<b>MPI</b>) is a standardized and portable <a href="/wiki/Message-passing" class="mw-redirect" title="Message-passing">message-passing</a> standard designed to function on <a href="/wiki/Parallel_computing" title="Parallel computing">parallel computing</a> <a href="/wiki/Computer_architecture" title="Computer architecture">architectures</a>.<sup id="cite_ref-1" class="reference"><a href="#cite_note-1"><span class="cite-bracket">&#91;</span>1<span class="cite-bracket">&#93;</span></a></sup> The MPI standard defines the <a href="/wiki/Syntax_(programming_languages)" title="Syntax (programming languages)">syntax</a> and <a href="/wiki/Semantics" title="Semantics">semantics</a> of <a href="/wiki/Library_routine" class="mw-redirect" title="Library routine">library routines</a> that are useful to a wide range of users writing <a href="/wiki/Software_portability" title="Software portability">portable</a> message-passing programs in <a href="/wiki/C_(programming_language)" title="C (programming language)">C</a>, <a href="/wiki/C%2B%2B" title="C++">C++</a>, and <a href="/wiki/Fortran" title="Fortran">Fortran</a>. There are several <a href="/wiki/Open-source" class="mw-redirect" title="Open-source">open-source</a> MPI <a href="/wiki/Programming_language_implementation" title="Programming language implementation">implementations</a>, which fostered the development of a parallel <a href="/wiki/Software_industry" title="Software industry">software industry</a>, and encouraged development of portable and scalable large-scale parallel applications. </p> <meta property="mw:PageProp/toc" /> <div class="mw-heading mw-heading2"><h2 id="History">History</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=1" title="Edit section: History"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The message passing interface effort began in the summer of 1991 when a small group of researchers started discussions at a mountain retreat in Austria. Out of that discussion came a Workshop on Standards for Message Passing in a Distributed Memory Environment, held on April 29–30, 1992 in <a href="/wiki/Williamsburg,_Virginia" title="Williamsburg, Virginia">Williamsburg, Virginia</a>.<sup id="cite_ref-2" class="reference"><a href="#cite_note-2"><span class="cite-bracket">&#91;</span>2<span class="cite-bracket">&#93;</span></a></sup> Attendees at Williamsburg discussed the basic features essential to a standard message-passing interface and established a working group to continue the standardization process. <a href="/wiki/Jack_Dongarra" title="Jack Dongarra">Jack Dongarra</a>, <a href="/wiki/Tony_Hey" title="Tony Hey">Tony Hey</a>, and David W. Walker put forward a preliminary draft proposal, "MPI1", in November 1992. In November 1992 a meeting of the MPI working group took place in Minneapolis and decided to place the standardization process on a more formal footing. The MPI working group met every 6 weeks throughout the first 9 months of 1993. The draft MPI standard was presented at the Supercomputing '93 conference in November 1993.<sup id="cite_ref-3" class="reference"><a href="#cite_note-3"><span class="cite-bracket">&#91;</span>3<span class="cite-bracket">&#93;</span></a></sup> After a period of public comments, which resulted in some changes in MPI, version 1.0 of MPI was released in June 1994. These meetings and the email discussion together constituted the MPI Forum, membership of which has been open to all members of the <a href="/wiki/High-performance_computing" title="High-performance computing">high-performance-computing</a> community. </p><p>The MPI effort involved about 80 people from 40 organizations, mainly in the United States and Europe. Most of the major vendors of <a href="/wiki/Concurrent_computer" class="mw-redirect" title="Concurrent computer">concurrent computers</a> were involved in the MPI effort, collaborating with researchers from universities, government laboratories, and <a href="/wiki/Private_industry" class="mw-redirect" title="Private industry">industry</a>. </p><p>MPI provides parallel hardware vendors with a clearly defined base set of routines that can be efficiently implemented. As a result, hardware vendors can build upon this collection of standard <a href="/wiki/Low-level_programming_language" title="Low-level programming language">low-level</a> routines to create <a href="/wiki/High-level_programming_language" title="High-level programming language">higher-level</a> routines for the distributed-memory communication environment supplied with their <a href="/wiki/Parallel_machine" class="mw-redirect" title="Parallel machine">parallel machines</a>. MPI provides a simple-to-use portable interface for the basic user, yet one powerful enough to allow programmers to use the high-performance message passing operations available on advanced machines. </p><p>In an effort to create a universal standard for message passing, researchers did not base it off of a single system but it incorporated the most useful features of several systems, including those designed by IBM, <a href="/wiki/Intel" title="Intel">Intel</a>, <a href="/wiki/NCUBE" title="NCUBE">nCUBE</a>, PVM, Express, P4 and PARMACS. The message-passing paradigm is attractive because of wide portability and can be used in communication for distributed-memory and shared-memory multiprocessors, networks of workstations, and a combination of these elements. The paradigm can apply in multiple settings, independent of network speed or memory architecture. </p><p>Support for MPI meetings came in part from <a href="/wiki/DARPA" title="DARPA">DARPA</a> and from the U.S. <a href="/wiki/National_Science_Foundation" title="National Science Foundation">National Science Foundation</a> (NSF) under grant ASC-9310330, NSF Science and Technology Center Cooperative agreement number CCR-8809615, and from the <a href="/wiki/European_Commission" title="European Commission">European Commission</a> through Esprit Project P6643. The <a href="/wiki/University_of_Tennessee" title="University of Tennessee">University of Tennessee</a> also made financial contributions to the MPI Forum. </p> <div class="mw-heading mw-heading2"><h2 id="Overview">Overview</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=2" title="Edit section: Overview"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1251242444"><table class="box-Update plainlinks metadata ambox ambox-content ambox-Update" role="presentation"><tbody><tr><td class="mbox-image"><div class="mbox-image-div"><span typeof="mw:File"><span><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/5/53/Ambox_current_red_Americas.svg/42px-Ambox_current_red_Americas.svg.png" decoding="async" width="42" height="34" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/5/53/Ambox_current_red_Americas.svg/63px-Ambox_current_red_Americas.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/5/53/Ambox_current_red_Americas.svg/84px-Ambox_current_red_Americas.svg.png 2x" data-file-width="360" data-file-height="290" /></span></span></div></td><td class="mbox-text"><div class="mbox-text-span">This section needs to be <b>updated</b>. The reason given is: The new features of the MPI-3 are not well described. According to the specification: "MPI-3 standard contains significant extensions to MPI functionality, including nonblocking collectives, new one-sided communication operations, and Fortran 2008 bindings.".<span class="hide-when-compact"> Please help update this article to reflect recent events or newly available information.</span> <span class="date-container"><i>(<span class="date">August 2022</span>)</i></span></div></td></tr></tbody></table> <p>MPI is a <a href="/wiki/Communication_protocol" title="Communication protocol">communication protocol</a> for programming<sup id="cite_ref-4" class="reference"><a href="#cite_note-4"><span class="cite-bracket">&#91;</span>4<span class="cite-bracket">&#93;</span></a></sup> <a href="/wiki/Parallel_computers" class="mw-redirect" title="Parallel computers">parallel computers</a>. Both point-to-point and collective communication are supported. MPI "is a message-passing application programmer interface, together with protocol and semantic specifications for how its features must behave in any implementation."<sup id="cite_ref-5" class="reference"><a href="#cite_note-5"><span class="cite-bracket">&#91;</span>5<span class="cite-bracket">&#93;</span></a></sup> MPI's goals are high performance, scalability, and portability. MPI remains the dominant model used in <a href="/wiki/High-performance_computing" title="High-performance computing">high-performance computing</a> today.<sup id="cite_ref-6" class="reference"><a href="#cite_note-6"><span class="cite-bracket">&#91;</span>6<span class="cite-bracket">&#93;</span></a></sup> </p><p>MPI is not sanctioned by any major standards body; nevertheless, it has become a <a href="/wiki/De_facto_standard" title="De facto standard"><i>de facto</i> standard</a> for <a href="/wiki/Communication" title="Communication">communication</a> among processes that model a <a href="/wiki/Parallel_programming" class="mw-redirect" title="Parallel programming">parallel program</a> running on a <a href="/wiki/Distributed_memory" title="Distributed memory">distributed memory</a> system. Actual distributed memory supercomputers such as computer clusters often run such programs. </p><p>The principal MPI-1 model has no <a href="/wiki/Shared_memory" title="Shared memory">shared memory</a> concept, and MPI-2 has only a limited <a href="/wiki/Distributed_shared_memory" title="Distributed shared memory">distributed shared memory</a> concept. Nonetheless, MPI programs are regularly run on shared memory computers, and both <a href="/wiki/MPICH" title="MPICH">MPICH</a> and <a href="/wiki/Open_MPI" title="Open MPI">Open MPI</a> can use shared memory for message transfer if it is available.<sup id="cite_ref-7" class="reference"><a href="#cite_note-7"><span class="cite-bracket">&#91;</span>7<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-8" class="reference"><a href="#cite_note-8"><span class="cite-bracket">&#91;</span>8<span class="cite-bracket">&#93;</span></a></sup> Designing programs around the MPI model (contrary to explicit <a href="/wiki/Shared_memory_(interprocess_communication)" class="mw-redirect" title="Shared memory (interprocess communication)">shared memory</a> models) has advantages when running on <a href="/wiki/Non-Uniform_Memory_Access" class="mw-redirect" title="Non-Uniform Memory Access">NUMA</a> architectures since MPI encourages <a href="/wiki/Locality_of_reference" title="Locality of reference">memory locality</a>. Explicit shared memory programming was introduced in MPI-3.<sup id="cite_ref-9" class="reference"><a href="#cite_note-9"><span class="cite-bracket">&#91;</span>9<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-10" class="reference"><a href="#cite_note-10"><span class="cite-bracket">&#91;</span>10<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-11" class="reference"><a href="#cite_note-11"><span class="cite-bracket">&#91;</span>11<span class="cite-bracket">&#93;</span></a></sup> </p><p>Although MPI belongs in layers 5 and higher of the <a href="/wiki/OSI_Reference_Model" class="mw-redirect" title="OSI Reference Model">OSI Reference Model</a>, implementations may cover most layers, with <a href="/wiki/Internet_socket" class="mw-redirect" title="Internet socket">sockets</a> and <a href="/wiki/Transmission_Control_Protocol" title="Transmission Control Protocol">Transmission Control Protocol</a> (TCP) used in the transport layer. </p><p>Most MPI implementations consist of a specific set of routines directly callable from <a href="/wiki/C_(programming_language)" title="C (programming language)">C</a>, <a href="/wiki/C%2B%2B" title="C++">C++</a>, <a href="/wiki/Fortran" title="Fortran">Fortran</a> (i.e., an API) and any language able to interface with such libraries, including <a href="/wiki/C_Sharp_(programming_language)" title="C Sharp (programming language)">C#</a>, <a href="/wiki/Java_(programming_language)" title="Java (programming language)">Java</a> or <a href="/wiki/Python_(programming_language)" title="Python (programming language)">Python</a>. The advantages of MPI over older message passing libraries are portability (because MPI has been implemented for almost every distributed memory architecture) and speed (because each implementation is in principle optimized for the hardware on which it runs). </p><p>MPI uses <a href="/wiki/Language_Independent_Specification" class="mw-redirect" title="Language Independent Specification">Language Independent Specifications</a> (LIS) for calls and language bindings. The first MPI standard specified <a href="/wiki/ANSI_C" title="ANSI C">ANSI C</a> and Fortran-77 bindings together with the LIS. The draft was presented at Supercomputing 1994 (November 1994)<sup id="cite_ref-SC94_12-0" class="reference"><a href="#cite_note-SC94-12"><span class="cite-bracket">&#91;</span>12<span class="cite-bracket">&#93;</span></a></sup> and finalized soon thereafter. About 128 functions constitute the MPI-1.3 standard which was released as the final end of the MPI-1 series in 2008.<sup id="cite_ref-MPI_Docs_13-0" class="reference"><a href="#cite_note-MPI_Docs-13"><span class="cite-bracket">&#91;</span>13<span class="cite-bracket">&#93;</span></a></sup> </p><p><span class="anchor" id="VERSIONS"></span> At present, the standard has several versions: version 1.3 (commonly abbreviated <i>MPI-1</i>), which emphasizes message passing and has a static runtime environment, MPI-2.2 (MPI-2), which includes new features such as parallel I/O, dynamic process management and remote memory operations,<sup id="cite_ref-Gropp99adv-pp4-5_14-0" class="reference"><a href="#cite_note-Gropp99adv-pp4-5-14"><span class="cite-bracket">&#91;</span>14<span class="cite-bracket">&#93;</span></a></sup> and MPI-3.1 (MPI-3), which includes extensions to the collective operations with non-blocking versions and extensions to the one-sided operations.<sup id="cite_ref-MPI_3.1_15-0" class="reference"><a href="#cite_note-MPI_3.1-15"><span class="cite-bracket">&#91;</span>15<span class="cite-bracket">&#93;</span></a></sup> MPI-2's LIS specifies over 500 functions and provides language bindings for ISO <a href="/wiki/C_(programming_language)" title="C (programming language)">C</a>, ISO <a href="/wiki/C%2B%2B" title="C++">C++</a>, and <a href="/wiki/Fortran_90" class="mw-redirect" title="Fortran 90">Fortran 90</a>. Object interoperability was also added to allow easier mixed-language message passing programming. A side-effect of standardizing MPI-2, completed in 1996, was clarifying the MPI-1 standard, creating the MPI-1.2. </p><p><i>MPI-2</i> is mostly a superset of MPI-1, although some functions have been deprecated. MPI-1.3 programs still work under MPI implementations compliant with the MPI-2 standard. </p><p><i>MPI-3.0</i> introduces significant updates to the MPI standard, including nonblocking versions of collective operations, enhancements to one-sided operations, and a Fortran 2008 binding. It removes deprecated C++ bindings and various obsolete routines and objects. Importantly, any valid MPI-2.2 program that avoids the removed elements is also valid in MPI-3.0. </p><p><i>MPI-3.1</i> is a minor update focused on corrections and clarifications, particularly for Fortran bindings. It introduces new functions for manipulating MPI_Aint values, nonblocking collective I/O routines, and methods for retrieving index values by name for MPI_T performance variables. Additionally, a general index was added. All valid MPI-3.0 programs are also valid in MPI-3.1. </p><p><i>MPI-4.0</i> is a major update that introduces large-count versions of many routines, persistent collective operations, partitioned communications, and a new MPI initialization method. It also adds application info assertions and improves error handling definitions, along with various smaller enhancements. Any valid MPI-3.1 program is compatible with MPI-4.0. </p><p>MPI-4.1 is a minor update focused on corrections and clarifications to the MPI-4.0 standard. It deprecates several routines, the MPI_HOST attribute key, and the mpif.h Fortran include file. A new routine has been added to inquire about the hardware running the MPI program. Any valid MPI-4.0 program remains valid in MPI-4.1. </p><p>MPI is often compared with <a href="/wiki/Parallel_Virtual_Machine" title="Parallel Virtual Machine">Parallel Virtual Machine</a> (PVM), which is a popular distributed environment and message passing system developed in 1989, and which was one of the systems that motivated the need for standard parallel message passing. Threaded shared memory programming models (such as <a href="/wiki/Pthreads" title="Pthreads">Pthreads</a> and <a href="/wiki/OpenMP" title="OpenMP">OpenMP</a>) and message passing programming (MPI/PVM) can be considered complementary and have been used together on occasion in, for example, servers with multiple large shared-memory nodes. </p> <div class="mw-heading mw-heading2"><h2 id="Functionality">Functionality</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=3" title="Edit section: Functionality"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1251242444"><table class="box-Unreferenced_section plainlinks metadata ambox ambox-content ambox-Unreferenced" role="presentation"><tbody><tr><td class="mbox-image"><div class="mbox-image-div"><span typeof="mw:File"><a href="/wiki/File:Question_book-new.svg" class="mw-file-description"><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/9/99/Question_book-new.svg/50px-Question_book-new.svg.png" decoding="async" width="50" height="39" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/9/99/Question_book-new.svg/75px-Question_book-new.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/9/99/Question_book-new.svg/100px-Question_book-new.svg.png 2x" data-file-width="512" data-file-height="399" /></a></span></div></td><td class="mbox-text"><div class="mbox-text-span">This section <b>does not <a href="/wiki/Wikipedia:Citing_sources" title="Wikipedia:Citing sources">cite</a> any <a href="/wiki/Wikipedia:Verifiability" title="Wikipedia:Verifiability">sources</a></b>.<span class="hide-when-compact"> Please help <a href="/wiki/Special:EditPage/Message_Passing_Interface" title="Special:EditPage/Message Passing Interface">improve this section</a> by <a href="/wiki/Help:Referencing_for_beginners" title="Help:Referencing for beginners">adding citations to reliable sources</a>. Unsourced material may be challenged and <a href="/wiki/Wikipedia:Verifiability#Burden_of_evidence" title="Wikipedia:Verifiability">removed</a>.</span> <span class="date-container"><i>(<span class="date">July 2021</span>)</i></span><span class="hide-when-compact"><i> (<small><a href="/wiki/Help:Maintenance_template_removal" title="Help:Maintenance template removal">Learn how and when to remove this message</a></small>)</i></span></div></td></tr></tbody></table> <p>The MPI interface is meant to provide essential virtual topology, <a href="/wiki/Synchronization" title="Synchronization">synchronization</a>, and communication functionality between a set of processes (that have been mapped to nodes/servers/computer instances) in a language-independent way, with language-specific syntax (bindings), plus a few language-specific features. MPI programs always work with processes, but programmers commonly refer to the processes as processors. Typically, for maximum performance, each <a href="/wiki/CPU" class="mw-redirect" title="CPU">CPU</a> (or <a href="/wiki/Multi-core_(computing)" class="mw-redirect" title="Multi-core (computing)">core</a> in a multi-core machine) will be assigned just a single process. This assignment happens at runtime through the agent that starts the MPI program, normally called mpirun or mpiexec. </p><p>MPI library functions include, but are not limited to, point-to-point rendezvous-type send/receive operations, choosing between a <a href="/wiki/Cartesian_tree" title="Cartesian tree">Cartesian</a> or <a href="/wiki/Graph_(data_structure)" class="mw-redirect" title="Graph (data structure)">graph</a>-like logical process topology, exchanging data between process pairs (send/receive operations), combining partial results of computations (gather and reduce operations), synchronizing nodes (barrier operation) as well as obtaining network-related information such as the number of processes in the computing session, current processor identity that a process is mapped to, neighboring processes accessible in a logical topology, and so on. Point-to-point operations come in <a href="/wiki/Synchronization_(computer_science)" title="Synchronization (computer science)">synchronous</a>, <a href="/wiki/Asynchronous_i/o" class="mw-redirect" title="Asynchronous i/o">asynchronous</a>, buffered, and <i>ready</i> forms, to allow both relatively stronger and weaker <a href="/wiki/Semantics" title="Semantics">semantics</a> for the synchronization aspects of a rendezvous-send. Many outstanding<sup class="noprint Inline-Template" style="margin-left:0.1em; white-space:nowrap;">&#91;<i><a href="/wiki/Wikipedia:Please_clarify" title="Wikipedia:Please clarify"><span title="What is an &quot;outstanding&quot; operation? (April 2015)">clarification needed</span></a></i>&#93;</sup> operations are possible in asynchronous mode, in most implementations. </p><p>MPI-1 and MPI-2 both enable implementations that overlap communication and computation, but practice and theory differ. MPI also specifies <i><a href="/wiki/Thread_safe" class="mw-redirect" title="Thread safe">thread safe</a></i> interfaces, which have <a href="/wiki/Cohesion_(computer_science)" title="Cohesion (computer science)">cohesion</a> and <a href="/wiki/Coupling_(computer_science)" class="mw-redirect" title="Coupling (computer science)">coupling</a> strategies that help avoid hidden state within the interface. It is relatively easy to write multithreaded point-to-point MPI code, and some implementations support such code. <a href="/wiki/Multithreading_(computer_architecture)" title="Multithreading (computer architecture)">Multithreaded</a> collective communication is best accomplished with multiple copies of Communicators, as described below. </p> <div class="mw-heading mw-heading2"><h2 id="Concepts">Concepts</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=4" title="Edit section: Concepts"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>MPI provides several features. The following concepts provide context for all of those abilities and help the programmer to decide what functionality to use in their application programs. Four of MPI's eight basic concepts are unique to MPI-2. </p> <div class="mw-heading mw-heading3"><h3 id="Communicator">Communicator</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=5" title="Edit section: Communicator"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Communicator objects connect groups of processes in the MPI session. Each communicator gives each contained process an independent identifier and arranges its contained processes in an ordered <a href="/wiki/Topology_(disambiguation)" class="mw-disambig" title="Topology (disambiguation)">topology</a>. MPI also has explicit groups, but these are mainly good for organizing and reorganizing groups of processes before another communicator is made. MPI understands single group intracommunicator operations, and bilateral intercommunicator communication. In MPI-1, single group operations are most prevalent. <a href="/wiki/Bilateral_synchronization" title="Bilateral synchronization">Bilateral</a> operations mostly appear in MPI-2 where they include collective communication and dynamic in-process management. </p><p>Communicators can be partitioned using several MPI commands. These commands include <code>MPI_COMM_SPLIT</code>, where each process joins one of several colored sub-communicators by declaring itself to have that color. </p> <div class="mw-heading mw-heading3"><h3 id="Point-to-point_basics">Point-to-point basics</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=6" title="Edit section: Point-to-point basics"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>A number of important MPI functions involve communication between two specific processes. A popular example is <code>MPI_Send</code>, which allows one specified process to send a message to a second specified process. Point-to-point operations, as these are called, are particularly useful in patterned or irregular communication, for example, a <a href="/wiki/Data_parallelism" title="Data parallelism">data-parallel</a> architecture in which each processor routinely swaps regions of data with specific other processors between calculation steps, or a <a href="/wiki/Master/slave_(technology)" class="mw-redirect" title="Master/slave (technology)">master–slave</a> architecture in which the master sends new task data to a slave whenever the prior task is completed. </p><p>MPI-1 specifies mechanisms for both <a href="/wiki/Blocking_(computing)" title="Blocking (computing)">blocking</a> and non-blocking point-to-point communication mechanisms, as well as the so-called 'ready-send' mechanism whereby a send request can be made only when the matching receive request has already been made. </p> <div class="mw-heading mw-heading3"><h3 id="Collective_basics">Collective basics</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=7" title="Edit section: Collective basics"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p><a href="/wiki/Collective_operation" title="Collective operation">Collective functions</a> involve communication among all processes in a process group (which can mean the entire process pool or a program-defined subset). A typical function is the <code>MPI_Bcast</code> call (short for "<a href="/wiki/Broadcasting_(computing)" class="mw-redirect" title="Broadcasting (computing)">broadcast</a>"). This function takes data from one node and sends it to all processes in the process group. A reverse operation is the <code>MPI_Reduce</code> call, which takes data from all processes in a group, performs an operation (such as summing), and stores the results on one node. <code>MPI_Reduce</code> is often useful at the start or end of a large distributed calculation, where each processor operates on a part of the data and then combines it into a result. </p><p>Other operations perform more sophisticated tasks, such as <code>MPI_Alltoall</code> which rearranges <i>n</i> items of data such that the <i>n</i>th node gets the <i>n</i>th item of data from each. </p> <div class="mw-heading mw-heading3"><h3 id="Derived_data_types">Derived data types</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=8" title="Edit section: Derived data types"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Many MPI functions require that you specify the type of data which is sent between processes. This is because MPI aims to support heterogeneous environments where types might be represented differently on the different nodes<sup id="cite_ref-node37_16-0" class="reference"><a href="#cite_note-node37-16"><span class="cite-bracket">&#91;</span>16<span class="cite-bracket">&#93;</span></a></sup> (for example they might be running different CPU architectures that have different <a href="/wiki/Endianness" title="Endianness">endianness</a>), in which case MPI implementations can perform <i>data conversion</i>.<sup id="cite_ref-node37_16-1" class="reference"><a href="#cite_note-node37-16"><span class="cite-bracket">&#91;</span>16<span class="cite-bracket">&#93;</span></a></sup> Since the C language does not allow a type itself to be passed as a parameter, MPI predefines the constants <code>MPI_INT</code>, <code>MPI_CHAR</code>, <code>MPI_DOUBLE</code> to correspond with <code>int</code>, <code>char</code>, <code>double</code>, etc. </p><p>Here is an example in C that passes arrays of <code>int</code>s from all processes to one. The one receiving process is called the "root" process, and it can be any designated process but normally it will be process 0. All the processes ask to send their arrays to the root with <code>MPI_Gather</code>, which is equivalent to having each process (including the root itself) call <code>MPI_Send</code> and the root make the corresponding number of ordered <code>MPI_Recv</code> calls to assemble all of these arrays into a larger one:<sup id="cite_ref-17" class="reference"><a href="#cite_note-17"><span class="cite-bracket">&#91;</span>17<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-highlight mw-highlight-lang-c mw-content-ltr" dir="ltr"><pre><span></span><span class="kt">int</span><span class="w"> </span><span class="n">send_array</span><span class="p">[</span><span class="mi">100</span><span class="p">];</span> <span class="kt">int</span><span class="w"> </span><span class="n">root</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">0</span><span class="p">;</span><span class="w"> </span><span class="cm">/* or whatever */</span> <span class="kt">int</span><span class="w"> </span><span class="n">num_procs</span><span class="p">,</span><span class="w"> </span><span class="o">*</span><span class="n">recv_array</span><span class="p">;</span> <span class="n">MPI_Comm_size</span><span class="p">(</span><span class="n">comm</span><span class="p">,</span><span class="w"> </span><span class="o">&amp;</span><span class="n">num_procs</span><span class="p">);</span> <span class="n">recv_array</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">malloc</span><span class="p">(</span><span class="n">num_procs</span><span class="w"> </span><span class="o">*</span><span class="w"> </span><span class="k">sizeof</span><span class="p">(</span><span class="n">send_array</span><span class="p">));</span> <span class="n">MPI_Gather</span><span class="p">(</span><span class="n">send_array</span><span class="p">,</span><span class="w"> </span><span class="k">sizeof</span><span class="p">(</span><span class="n">send_array</span><span class="p">)</span><span class="w"> </span><span class="o">/</span><span class="w"> </span><span class="k">sizeof</span><span class="p">(</span><span class="o">*</span><span class="n">send_array</span><span class="p">),</span><span class="w"> </span><span class="n">MPI_INT</span><span class="p">,</span> <span class="w"> </span><span class="n">recv_array</span><span class="p">,</span><span class="w"> </span><span class="k">sizeof</span><span class="p">(</span><span class="n">send_array</span><span class="p">)</span><span class="w"> </span><span class="o">/</span><span class="w"> </span><span class="k">sizeof</span><span class="p">(</span><span class="o">*</span><span class="n">send_array</span><span class="p">),</span><span class="w"> </span><span class="n">MPI_INT</span><span class="p">,</span> <span class="w"> </span><span class="n">root</span><span class="p">,</span><span class="w"> </span><span class="n">comm</span><span class="p">);</span> </pre></div> <p>However, you may instead wish to send data as one block as opposed to 100 <code>int</code>s. To do this define a "contiguous block" derived data type: </p> <div class="mw-highlight mw-highlight-lang-c mw-content-ltr" dir="ltr"><pre><span></span><span class="n">MPI_Datatype</span><span class="w"> </span><span class="n">newtype</span><span class="p">;</span> <span class="n">MPI_Type_contiguous</span><span class="p">(</span><span class="mi">100</span><span class="p">,</span><span class="w"> </span><span class="n">MPI_INT</span><span class="p">,</span><span class="w"> </span><span class="o">&amp;</span><span class="n">newtype</span><span class="p">);</span> <span class="n">MPI_Type_commit</span><span class="p">(</span><span class="o">&amp;</span><span class="n">newtype</span><span class="p">);</span> <span class="n">MPI_Gather</span><span class="p">(</span><span class="n">array</span><span class="p">,</span><span class="w"> </span><span class="mi">1</span><span class="p">,</span><span class="w"> </span><span class="n">newtype</span><span class="p">,</span><span class="w"> </span><span class="n">receive_array</span><span class="p">,</span><span class="w"> </span><span class="mi">1</span><span class="p">,</span><span class="w"> </span><span class="n">newtype</span><span class="p">,</span><span class="w"> </span><span class="n">root</span><span class="p">,</span><span class="w"> </span><span class="n">comm</span><span class="p">);</span> </pre></div> <p>For passing a class or a data structure, <code>MPI_Type_create_struct</code> creates an MPI derived data type from <code>MPI_predefined</code> data types, as follows: </p> <div class="mw-highlight mw-highlight-lang-c mw-content-ltr" dir="ltr"><pre><span></span><span class="kt">int</span><span class="w"> </span><span class="n">MPI_Type_create_struct</span><span class="p">(</span><span class="kt">int</span><span class="w"> </span><span class="n">count</span><span class="p">,</span> <span class="w"> </span><span class="kt">int</span><span class="w"> </span><span class="o">*</span><span class="n">blocklen</span><span class="p">,</span> <span class="w"> </span><span class="n">MPI_Aint</span><span class="w"> </span><span class="o">*</span><span class="n">disp</span><span class="p">,</span> <span class="w"> </span><span class="n">MPI_Datatype</span><span class="w"> </span><span class="o">*</span><span class="n">type</span><span class="p">,</span> <span class="w"> </span><span class="n">MPI_Datatype</span><span class="w"> </span><span class="o">*</span><span class="n">newtype</span><span class="p">)</span> </pre></div> <p>where: </p> <ul><li><code>count</code> is a number of blocks, and specifies the length (in elements) of the arrays <code>blocklen</code>, <code>disp</code>, and <code>type</code>.</li> <li><code>blocklen</code> contains numbers of elements in each block,</li> <li><code>disp</code> contains byte displacements of each block,</li> <li><code>type</code> contains types of element in each block.</li> <li><code>newtype</code> (an output) contains the new derived type created by this function</li></ul> <p>The <code>disp</code> (displacements) array is needed for <a href="/wiki/Data_structure_alignment" title="Data structure alignment">data structure alignment</a>, since the compiler may pad the variables in a class or data structure. The safest way to find the distance between different fields is by obtaining their addresses in memory. This is done with <code>MPI_Get_address</code>, which is normally the same as C's <code>&amp;</code> operator but that might not be true when dealing with <a href="/wiki/Memory_segmentation" title="Memory segmentation">memory segmentation</a>.<sup id="cite_ref-18" class="reference"><a href="#cite_note-18"><span class="cite-bracket">&#91;</span>18<span class="cite-bracket">&#93;</span></a></sup> </p><p>Passing a data structure as one block is significantly faster than passing one item at a time, especially if the operation is to be repeated. This is because fixed-size blocks do not require <a href="/wiki/Serialization" title="Serialization">serialization</a> during transfer.<sup id="cite_ref-19" class="reference"><a href="#cite_note-19"><span class="cite-bracket">&#91;</span>19<span class="cite-bracket">&#93;</span></a></sup> </p><p>Given the following data structures: </p> <div class="mw-highlight mw-highlight-lang-c mw-content-ltr" dir="ltr"><pre><span></span><span class="k">struct</span><span class="w"> </span><span class="nc">A</span><span class="w"> </span><span class="p">{</span> <span class="w"> </span><span class="kt">int</span><span class="w"> </span><span class="n">f</span><span class="p">;</span> <span class="w"> </span><span class="kt">short</span><span class="w"> </span><span class="n">p</span><span class="p">;</span> <span class="p">};</span> <span class="k">struct</span><span class="w"> </span><span class="nc">B</span><span class="w"> </span><span class="p">{</span> <span class="w"> </span><span class="k">struct</span><span class="w"> </span><span class="nc">A</span><span class="w"> </span><span class="n">a</span><span class="p">;</span> <span class="w"> </span><span class="kt">int</span><span class="w"> </span><span class="n">pp</span><span class="p">,</span><span class="w"> </span><span class="n">vp</span><span class="p">;</span> <span class="p">};</span> </pre></div> <p>Here's the C code for building an MPI-derived data type: </p> <div class="mw-highlight mw-highlight-lang-c mw-content-ltr" dir="ltr"><pre><span></span><span class="k">static</span><span class="w"> </span><span class="k">const</span><span class="w"> </span><span class="kt">int</span><span class="w"> </span><span class="n">blocklen</span><span class="p">[]</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="p">{</span><span class="mi">1</span><span class="p">,</span><span class="w"> </span><span class="mi">1</span><span class="p">,</span><span class="w"> </span><span class="mi">1</span><span class="p">,</span><span class="w"> </span><span class="mi">1</span><span class="p">};</span> <span class="k">static</span><span class="w"> </span><span class="k">const</span><span class="w"> </span><span class="n">MPI_Aint</span><span class="w"> </span><span class="n">disp</span><span class="p">[]</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="p">{</span> <span class="w"> </span><span class="n">offsetof</span><span class="p">(</span><span class="k">struct</span><span class="w"> </span><span class="nc">B</span><span class="p">,</span><span class="w"> </span><span class="n">a</span><span class="p">)</span><span class="w"> </span><span class="o">+</span><span class="w"> </span><span class="n">offsetof</span><span class="p">(</span><span class="k">struct</span><span class="w"> </span><span class="nc">A</span><span class="p">,</span><span class="w"> </span><span class="n">f</span><span class="p">),</span> <span class="w"> </span><span class="n">offsetof</span><span class="p">(</span><span class="k">struct</span><span class="w"> </span><span class="nc">B</span><span class="p">,</span><span class="w"> </span><span class="n">a</span><span class="p">)</span><span class="w"> </span><span class="o">+</span><span class="w"> </span><span class="n">offsetof</span><span class="p">(</span><span class="k">struct</span><span class="w"> </span><span class="nc">A</span><span class="p">,</span><span class="w"> </span><span class="n">p</span><span class="p">),</span> <span class="w"> </span><span class="n">offsetof</span><span class="p">(</span><span class="k">struct</span><span class="w"> </span><span class="nc">B</span><span class="p">,</span><span class="w"> </span><span class="n">pp</span><span class="p">),</span> <span class="w"> </span><span class="n">offsetof</span><span class="p">(</span><span class="k">struct</span><span class="w"> </span><span class="nc">B</span><span class="p">,</span><span class="w"> </span><span class="n">vp</span><span class="p">)</span> <span class="p">};</span> <span class="k">static</span><span class="w"> </span><span class="n">MPI_Datatype</span><span class="w"> </span><span class="n">type</span><span class="p">[]</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="p">{</span><span class="n">MPI_INT</span><span class="p">,</span><span class="w"> </span><span class="n">MPI_SHORT</span><span class="p">,</span><span class="w"> </span><span class="n">MPI_INT</span><span class="p">,</span><span class="w"> </span><span class="n">MPI_INT</span><span class="p">};</span> <span class="n">MPI_Datatype</span><span class="w"> </span><span class="n">newtype</span><span class="p">;</span> <span class="n">MPI_Type_create_struct</span><span class="p">(</span><span class="k">sizeof</span><span class="p">(</span><span class="n">type</span><span class="p">)</span><span class="w"> </span><span class="o">/</span><span class="w"> </span><span class="k">sizeof</span><span class="p">(</span><span class="o">*</span><span class="n">type</span><span class="p">),</span><span class="w"> </span><span class="n">blocklen</span><span class="p">,</span><span class="w"> </span><span class="n">disp</span><span class="p">,</span><span class="w"> </span><span class="n">type</span><span class="p">,</span><span class="w"> </span><span class="o">&amp;</span><span class="n">newtype</span><span class="p">);</span> <span class="n">MPI_Type_commit</span><span class="p">(</span><span class="o">&amp;</span><span class="n">newtype</span><span class="p">);</span> </pre></div> <div class="mw-heading mw-heading2"><h2 id="MPI-2_concepts">MPI-2 concepts</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=9" title="Edit section: MPI-2 concepts"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <div class="mw-heading mw-heading3"><h3 id="One-sided_communication">One-sided communication</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=10" title="Edit section: One-sided communication"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>MPI-2 defines three one-sided communications operations, <code>MPI_Put</code>, <code>MPI_Get</code>, and <code>MPI_Accumulate</code>, being a write to remote memory, a read from remote memory, and a reduction operation on the same memory across a number of tasks, respectively. Also defined are three different methods to synchronize this communication (global, pairwise, and remote locks) as the specification does not guarantee that these operations have taken place until a synchronization point. </p><p>These types of call can often be useful for algorithms in which synchronization would be inconvenient (e.g. distributed <a href="/wiki/Matrix_multiplication" title="Matrix multiplication">matrix multiplication</a>), or where it is desirable for tasks to be able to balance their load while other processors are operating on data. </p> <div class="mw-heading mw-heading3"><h3 id="Dynamic_process_management">Dynamic process management</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=11" title="Edit section: Dynamic process management"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1251242444"><table class="box-Expand_section plainlinks metadata ambox mbox-small-left ambox-content" role="presentation"><tbody><tr><td class="mbox-image"><span typeof="mw:File"><a href="/wiki/File:Wiki_letter_w_cropped.svg" class="mw-file-description"><img alt="[icon]" src="//upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Wiki_letter_w_cropped.svg/20px-Wiki_letter_w_cropped.svg.png" decoding="async" width="20" height="14" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Wiki_letter_w_cropped.svg/30px-Wiki_letter_w_cropped.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Wiki_letter_w_cropped.svg/40px-Wiki_letter_w_cropped.svg.png 2x" data-file-width="44" data-file-height="31" /></a></span></td><td class="mbox-text"><div class="mbox-text-span">This section <b>needs expansion</b>. You can help by <a class="external text" href="https://en.wikipedia.org/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=">adding to it</a>. <span class="date-container"><i>(<span class="date">June 2008</span>)</i></span></div></td></tr></tbody></table> <p>The key aspect is "the ability of an MPI process to participate in the creation of new MPI processes or to establish communication with MPI processes that have been started separately." The MPI-2 specification describes three main interfaces by which MPI processes can dynamically establish communications, <code>MPI_Comm_spawn</code>, <code>MPI_Comm_accept</code>/<code>MPI_Comm_connect</code> and <code>MPI_Comm_join</code>. The <code>MPI_Comm_spawn</code> interface allows an MPI process to spawn a number of instances of the named MPI process. The newly spawned set of MPI processes form a new <code>MPI_COMM_WORLD</code> intracommunicator but can communicate with the parent and the intercommunicator the function returns. <code>MPI_Comm_spawn_multiple</code> is an alternate interface that allows the different instances spawned to be different binaries with different arguments.<sup id="cite_ref-Gropp99adv-p7_20-0" class="reference"><a href="#cite_note-Gropp99adv-p7-20"><span class="cite-bracket">&#91;</span>20<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="I/O"><span id="I.2FO"></span>I/O</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=12" title="Edit section: I/O"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1251242444"><table class="box-Expand_section plainlinks metadata ambox mbox-small-left ambox-content" role="presentation"><tbody><tr><td class="mbox-image"><span typeof="mw:File"><a href="/wiki/File:Wiki_letter_w_cropped.svg" class="mw-file-description"><img alt="[icon]" src="//upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Wiki_letter_w_cropped.svg/20px-Wiki_letter_w_cropped.svg.png" decoding="async" width="20" height="14" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Wiki_letter_w_cropped.svg/30px-Wiki_letter_w_cropped.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Wiki_letter_w_cropped.svg/40px-Wiki_letter_w_cropped.svg.png 2x" data-file-width="44" data-file-height="31" /></a></span></td><td class="mbox-text"><div class="mbox-text-span">This section <b>needs expansion</b>. You can help by <a class="external text" href="https://en.wikipedia.org/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=">adding to it</a>. <span class="date-container"><i>(<span class="date">June 2008</span>)</i></span></div></td></tr></tbody></table> <p>The parallel I/O feature is sometimes called MPI-IO,<sup id="cite_ref-Gropp99adv-pp5-6_21-0" class="reference"><a href="#cite_note-Gropp99adv-pp5-6-21"><span class="cite-bracket">&#91;</span>21<span class="cite-bracket">&#93;</span></a></sup> and refers to a set of functions designed to abstract I/O management on distributed systems to MPI, and allow files to be easily accessed in a patterned way using the existing derived datatype functionality. </p><p>The little research that has been done on this feature indicates that it may not be trivial to get high performance gains by using MPI-IO. For example, an implementation of sparse <a href="/wiki/Matrix_multiplication" title="Matrix multiplication">matrix-vector multiplications</a> using the MPI I/O library shows a general behavior of minor performance gain, but these results are inconclusive.<sup id="cite_ref-22" class="reference"><a href="#cite_note-22"><span class="cite-bracket">&#91;</span>22<span class="cite-bracket">&#93;</span></a></sup> It was not until the idea of collective I/O<sup id="cite_ref-23" class="reference"><a href="#cite_note-23"><span class="cite-bracket">&#91;</span>23<span class="cite-bracket">&#93;</span></a></sup> implemented into MPI-IO that MPI-IO started to reach widespread adoption. Collective I/O substantially boosts applications' I/O bandwidth by having processes collectively transform the small and noncontiguous I/O operations into large and contiguous ones, thereby reducing the <a href="/wiki/Record_locking" title="Record locking">locking</a> and disk seek overhead. Due to its vast performance benefits, MPI-IO also became the underlying I/O layer for many state-of-the-art I/O libraries, such as <a href="/wiki/HDF5" class="mw-redirect" title="HDF5">HDF5</a> and <a href="/wiki/NetCDF" title="NetCDF">Parallel NetCDF</a>. Its popularity also triggered research on collective I/O optimizations, such as layout-aware I/O<sup id="cite_ref-24" class="reference"><a href="#cite_note-24"><span class="cite-bracket">&#91;</span>24<span class="cite-bracket">&#93;</span></a></sup> and cross-file aggregation.<sup id="cite_ref-25" class="reference"><a href="#cite_note-25"><span class="cite-bracket">&#91;</span>25<span class="cite-bracket">&#93;</span></a></sup><sup id="cite_ref-26" class="reference"><a href="#cite_note-26"><span class="cite-bracket">&#91;</span>26<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Official_implementations">Official implementations</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=13" title="Edit section: Official implementations"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <ul><li>The initial implementation of the MPI 1.x standard was <a href="/wiki/MPICH" title="MPICH">MPICH</a>, from <a href="/wiki/Argonne_National_Laboratory" title="Argonne National Laboratory">Argonne National Laboratory</a> (ANL) and <a href="/wiki/Mississippi_State_University" title="Mississippi State University">Mississippi State University</a>. <a href="/wiki/IBM" title="IBM">IBM</a> also was an early implementor, and most early 90s <a href="/wiki/Supercomputer" title="Supercomputer">supercomputer</a> companies either commercialized MPICH, or built their own implementation. <a href="/wiki/LAM/MPI" title="LAM/MPI">LAM/MPI</a> from <a href="/wiki/Ohio_Supercomputer_Center" title="Ohio Supercomputer Center">Ohio Supercomputer Center</a> was another early open implementation. ANL has continued developing MPICH for over a decade, and now offers MPICH-3.2, implementing the MPI-3.1 standard.</li> <li><a href="/wiki/Open_MPI" title="Open MPI">Open MPI</a> (not to be confused with <a href="/wiki/OpenMP" title="OpenMP">OpenMP</a>) was formed by the merging FT-MPI, LA-MPI, <a href="/wiki/LAM/MPI" title="LAM/MPI">LAM/MPI</a>, and PACX-MPI, and is found in many <a href="/wiki/TOP-500" class="mw-redirect" title="TOP-500">TOP-500</a> <a href="/wiki/Supercomputer" title="Supercomputer">supercomputers</a>.</li></ul> <p>Many other efforts are derivatives of MPICH, LAM, and other works, including, but not limited to, commercial implementations from <a href="/wiki/Hewlett_Packard_Enterprise" title="Hewlett Packard Enterprise">HPE</a>, <a href="/wiki/Intel" title="Intel">Intel</a>, <a href="/wiki/Microsoft" title="Microsoft">Microsoft</a>, and <a href="/wiki/NEC" title="NEC">NEC</a>. </p><p>While the specifications mandate a C and Fortran interface, the language used to implement MPI is not constrained to match the language or languages it seeks to support at runtime. Most implementations combine C, C++ and assembly language, and target C, C++, and Fortran programmers. Bindings are available for many other languages, including Perl, Python, R, Ruby, Java, and <a href="/wiki/Control_Language" title="Control Language">CL</a> (see <a href="#Language_bindings">#Language bindings</a>). </p><p>The <a href="/wiki/Application_binary_interface" title="Application binary interface">ABI</a> of MPI implementations are roughly split between <a href="/wiki/MPICH" title="MPICH">MPICH</a> and <a href="/wiki/Open_MPI" title="Open MPI">Open MPI</a> derivatives, so that a library from one family works as a drop-in replacement of one from the same family, but direct replacement across families is impossible. The French <a href="/wiki/French_Alternative_Energies_and_Atomic_Energy_Commission" title="French Alternative Energies and Atomic Energy Commission">CEA</a> maintains a wrapper interface to facilitate such switches.<sup id="cite_ref-27" class="reference"><a href="#cite_note-27"><span class="cite-bracket">&#91;</span>27<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="Hardware">Hardware</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=14" title="Edit section: Hardware"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>MPI hardware research focuses on implementing MPI directly in hardware, for example via <a href="/wiki/Processor-in-memory" class="mw-redirect" title="Processor-in-memory">processor-in-memory</a>, building MPI operations into the microcircuitry of the <a href="/wiki/Random-access_memory" title="Random-access memory">RAM</a> chips in each node. By implication, this approach is independent of language, operating system, and CPU, but cannot be readily updated or removed. </p><p>Another approach has been to add hardware acceleration to one or more parts of the operation, including hardware processing of MPI queues and using <a href="/wiki/Remote_direct_memory_access" title="Remote direct memory access">RDMA</a> to directly transfer data between memory and the <a href="/wiki/Network_interface_controller" title="Network interface controller">network interface controller</a> without CPU or OS kernel intervention. </p> <div class="mw-heading mw-heading3"><h3 id="Compiler_wrappers">Compiler wrappers</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=15" title="Edit section: Compiler wrappers"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p><b>mpicc</b> (and similarly <b>mpic++</b>, <b>mpif90</b>, etc.) is a program that wraps over an existing compiler to set the necessary command-line flags when compiling code that uses MPI. Typically, it adds a few flags that enable the code to be the compiled and linked against the MPI library.<sup id="cite_ref-28" class="reference"><a href="#cite_note-28"><span class="cite-bracket">&#91;</span>28<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading2"><h2 id="Language_bindings">Language bindings</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=16" title="Edit section: Language bindings"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p><a href="/wiki/Language_binding" title="Language binding">Bindings</a> are libraries that extend MPI support to other languages by wrapping an existing MPI implementation such as MPICH or Open MPI. </p> <div class="mw-heading mw-heading3"><h3 id="Common_Language_Infrastructure">Common Language Infrastructure</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=17" title="Edit section: Common Language Infrastructure"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The two managed <a href="/wiki/Common_Language_Infrastructure" title="Common Language Infrastructure">Common Language Infrastructure</a> <a href="/wiki/.NET_Framework" title=".NET Framework">.NET</a> implementations are Pure Mpi.NET<sup id="cite_ref-29" class="reference"><a href="#cite_note-29"><span class="cite-bracket">&#91;</span>29<span class="cite-bracket">&#93;</span></a></sup> and MPI.NET,<sup id="cite_ref-30" class="reference"><a href="#cite_note-30"><span class="cite-bracket">&#91;</span>30<span class="cite-bracket">&#93;</span></a></sup> a research effort at <a href="/wiki/Indiana_University" title="Indiana University">Indiana University</a> licensed under a <a href="/wiki/BSD" class="mw-redirect" title="BSD">BSD</a>-style license. It is compatible with <a href="/wiki/Mono_(software)" title="Mono (software)">Mono</a>, and can make full use of underlying low-latency MPI network fabrics. </p> <div class="mw-heading mw-heading3"><h3 id="Java">Java</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=18" title="Edit section: Java"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Although <a href="/wiki/Java_(programming_language)" title="Java (programming language)">Java</a> does not have an official MPI binding, several groups attempt to bridge the two, with different degrees of success and compatibility. One of the first attempts was Bryan Carpenter's mpiJava,<sup id="cite_ref-31" class="reference"><a href="#cite_note-31"><span class="cite-bracket">&#91;</span>31<span class="cite-bracket">&#93;</span></a></sup> essentially a set of <a href="/wiki/Java_Native_Interface" title="Java Native Interface">Java Native Interface</a> (JNI) wrappers to a local C MPI library, resulting in a hybrid implementation with limited portability, which also has to be compiled against the specific MPI library being used. </p><p>However, this original project also defined the mpiJava API<sup id="cite_ref-32" class="reference"><a href="#cite_note-32"><span class="cite-bracket">&#91;</span>32<span class="cite-bracket">&#93;</span></a></sup> (a <a href="/wiki/De_facto" title="De facto">de facto</a> MPI <a href="/wiki/API" title="API">API</a> for Java that closely followed the equivalent C++ bindings) which other subsequent Java MPI projects adopted. One less-used API is MPJ API, which was designed to be more <a href="/wiki/Object-oriented_programming" title="Object-oriented programming">object-oriented</a> and closer to <a href="/wiki/Sun_Microsystems" title="Sun Microsystems">Sun Microsystems</a>' coding conventions.<sup id="cite_ref-33" class="reference"><a href="#cite_note-33"><span class="cite-bracket">&#91;</span>33<span class="cite-bracket">&#93;</span></a></sup> Beyond the API, Java MPI libraries can be either dependent on a local MPI library, or implement the message passing functions in Java, while some like <a href="/w/index.php?title=P2P-MPI&amp;action=edit&amp;redlink=1" class="new" title="P2P-MPI (page does not exist)">P2P-MPI</a> also provide <a href="/wiki/Peer-to-peer" title="Peer-to-peer">peer-to-peer</a> functionality and allow mixed-platform operation. </p><p>Some of the most challenging parts of Java/MPI arise from Java characteristics such as the lack of explicit <a href="/wiki/Data_pointer" class="mw-redirect" title="Data pointer">pointers</a> and the <a href="/wiki/Flat_memory_model" title="Flat memory model">linear memory</a> address space for its objects, which make transferring multidimensional arrays and complex objects inefficient. Workarounds usually involve transferring one line at a time and/or performing explicit de-<a href="/wiki/Serialization" title="Serialization">serialization</a> and <a href="/wiki/Cast_(computer_science)" class="mw-redirect" title="Cast (computer science)">casting</a> at both the sending and receiving ends, simulating C or Fortran-like arrays by the use of a one-dimensional array, and pointers to primitive types by the use of single-element arrays, thus resulting in programming styles quite far from Java conventions. </p><p>Another Java message passing system is MPJ Express.<sup id="cite_ref-34" class="reference"><a href="#cite_note-34"><span class="cite-bracket">&#91;</span>34<span class="cite-bracket">&#93;</span></a></sup> Recent versions can be executed in cluster and multicore configurations. In the cluster configuration, it can execute parallel Java applications on clusters and clouds. Here Java sockets or specialized I/O interconnects like <a href="/wiki/Myrinet" title="Myrinet">Myrinet</a> can support messaging between MPJ Express processes. It can also utilize native C implementation of MPI using its native device. In the multicore configuration, a parallel Java application is executed on multicore processors. In this mode, MPJ Express processes are represented by Java threads. </p> <div class="mw-heading mw-heading3"><h3 id="Julia">Julia</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=19" title="Edit section: Julia"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>There is a <a href="/wiki/Julia_(programming_language)" title="Julia (programming language)">Julia</a> language wrapper for MPI.<sup id="cite_ref-35" class="reference"><a href="#cite_note-35"><span class="cite-bracket">&#91;</span>35<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="MATLAB">MATLAB</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=20" title="Edit section: MATLAB"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>There are a few academic implementations of MPI using <a href="/wiki/MATLAB" title="MATLAB">MATLAB</a>. MATLAB has its own parallel extension library implemented using MPI and <a href="/wiki/Parallel_Virtual_Machine" title="Parallel Virtual Machine">PVM</a>. </p> <div class="mw-heading mw-heading3"><h3 id="OCaml">OCaml</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=21" title="Edit section: OCaml"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>The OCamlMPI Module<sup id="cite_ref-36" class="reference"><a href="#cite_note-36"><span class="cite-bracket">&#91;</span>36<span class="cite-bracket">&#93;</span></a></sup> implements a large subset of MPI functions and is in active use in scientific computing. An 11,000-line <a href="/wiki/OCaml" title="OCaml">OCaml</a> program was "MPI-ified" using the module, with an additional 500 lines of code and slight restructuring and ran with excellent results on up to 170 nodes in a supercomputer.<sup id="cite_ref-37" class="reference"><a href="#cite_note-37"><span class="cite-bracket">&#91;</span>37<span class="cite-bracket">&#93;</span></a></sup> </p> <div class="mw-heading mw-heading3"><h3 id="PARI/GP"><span id="PARI.2FGP"></span>PARI/GP</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=22" title="Edit section: PARI/GP"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p><a href="/wiki/PARI/GP" title="PARI/GP">PARI/GP</a> can be built<sup id="cite_ref-38" class="reference"><a href="#cite_note-38"><span class="cite-bracket">&#91;</span>38<span class="cite-bracket">&#93;</span></a></sup> to use MPI as its multi-thread engine, allowing to run parallel PARI and GP programs on MPI clusters unmodified. </p> <div class="mw-heading mw-heading3"><h3 id="Python">Python</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=23" title="Edit section: Python"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Actively maintained MPI wrappers for <a href="/wiki/Python_(programming_language)" title="Python (programming language)">Python</a> include: mpi4py,<sup id="cite_ref-39" class="reference"><a href="#cite_note-39"><span class="cite-bracket">&#91;</span>39<span class="cite-bracket">&#93;</span></a></sup> numba-mpi<sup id="cite_ref-40" class="reference"><a href="#cite_note-40"><span class="cite-bracket">&#91;</span>40<span class="cite-bracket">&#93;</span></a></sup> and numba-jax.<sup id="cite_ref-41" class="reference"><a href="#cite_note-41"><span class="cite-bracket">&#91;</span>41<span class="cite-bracket">&#93;</span></a></sup> </p><p>Discontinued developments include: <a href="/wiki/PyMPI" title="PyMPI">pyMPI</a>, pypar,<sup id="cite_ref-42" class="reference"><a href="#cite_note-42"><span class="cite-bracket">&#91;</span>42<span class="cite-bracket">&#93;</span></a></sup> MYMPI<sup id="cite_ref-43" class="reference"><a href="#cite_note-43"><span class="cite-bracket">&#91;</span>43<span class="cite-bracket">&#93;</span></a></sup> and the MPI submodule in <a href="/wiki/ScientificPython" class="mw-redirect" title="ScientificPython">ScientificPython</a>. </p> <div class="mw-heading mw-heading3"><h3 id="R">R</h3><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=24" title="Edit section: R"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p><a href="/wiki/R_(programming_language)" title="R (programming language)">R</a> bindings of MPI include <a href="/w/index.php?title=Rmpi&amp;action=edit&amp;redlink=1" class="new" title="Rmpi (page does not exist)">Rmpi</a><sup id="cite_ref-44" class="reference"><a href="#cite_note-44"><span class="cite-bracket">&#91;</span>44<span class="cite-bracket">&#93;</span></a></sup> and <a href="/wiki/Programming_with_Big_Data_in_R" title="Programming with Big Data in R">pbdMPI</a>,<sup id="cite_ref-45" class="reference"><a href="#cite_note-45"><span class="cite-bracket">&#91;</span>45<span class="cite-bracket">&#93;</span></a></sup> where Rmpi focuses on <a href="/wiki/Master/slave_(technology)" class="mw-redirect" title="Master/slave (technology)">manager-workers</a> parallelism while pbdMPI focuses on <a href="/wiki/SPMD" class="mw-redirect" title="SPMD">SPMD</a> parallelism. Both implementations fully support <a href="/wiki/Open_MPI" title="Open MPI">Open MPI</a> or <a href="/wiki/MPICH2" class="mw-redirect" title="MPICH2">MPICH2</a>. </p> <div class="mw-heading mw-heading2"><h2 id="Example_program">Example program</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=25" title="Edit section: Example program"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Here is a <a href="/wiki/%22Hello,_World!%22_program" title="&quot;Hello, World!&quot; program">"Hello, World!" program</a> in MPI written in C. In this example, we send a "hello" message to each processor, manipulate it trivially, return the results to the main process, and print the messages. </p> <div class="mw-highlight mw-highlight-lang-c mw-content-ltr" dir="ltr"><pre><span></span><span class="cm">/*</span> <span class="cm"> &quot;Hello World&quot; MPI Test Program</span> <span class="cm">*/</span> <span class="cp">#include</span><span class="w"> </span><span class="cpf">&lt;assert.h&gt;</span> <span class="cp">#include</span><span class="w"> </span><span class="cpf">&lt;stdio.h&gt;</span> <span class="cp">#include</span><span class="w"> </span><span class="cpf">&lt;string.h&gt;</span> <span class="cp">#include</span><span class="w"> </span><span class="cpf">&lt;mpi.h&gt;</span> <span class="kt">int</span><span class="w"> </span><span class="nf">main</span><span class="p">(</span><span class="kt">int</span><span class="w"> </span><span class="n">argc</span><span class="p">,</span><span class="w"> </span><span class="kt">char</span><span class="w"> </span><span class="o">**</span><span class="n">argv</span><span class="p">)</span> <span class="p">{</span> <span class="w"> </span><span class="kt">char</span><span class="w"> </span><span class="n">buf</span><span class="p">[</span><span class="mi">256</span><span class="p">];</span> <span class="w"> </span><span class="kt">int</span><span class="w"> </span><span class="n">my_rank</span><span class="p">,</span><span class="w"> </span><span class="n">num_procs</span><span class="p">;</span> <span class="w"> </span><span class="cm">/* Initialize the infrastructure necessary for communication */</span> <span class="w"> </span><span class="n">MPI_Init</span><span class="p">(</span><span class="o">&amp;</span><span class="n">argc</span><span class="p">,</span><span class="w"> </span><span class="o">&amp;</span><span class="n">argv</span><span class="p">);</span> <span class="w"> </span><span class="cm">/* Identify this process */</span> <span class="w"> </span><span class="n">MPI_Comm_rank</span><span class="p">(</span><span class="n">MPI_COMM_WORLD</span><span class="p">,</span><span class="w"> </span><span class="o">&amp;</span><span class="n">my_rank</span><span class="p">);</span> <span class="w"> </span><span class="cm">/* Find out how many total processes are active */</span> <span class="w"> </span><span class="n">MPI_Comm_size</span><span class="p">(</span><span class="n">MPI_COMM_WORLD</span><span class="p">,</span><span class="w"> </span><span class="o">&amp;</span><span class="n">num_procs</span><span class="p">);</span> <span class="w"> </span><span class="cm">/* Until this point, all programs have been doing exactly the same.</span> <span class="cm"> Here, we check the rank to distinguish the roles of the programs */</span> <span class="w"> </span><span class="k">if</span><span class="w"> </span><span class="p">(</span><span class="n">my_rank</span><span class="w"> </span><span class="o">==</span><span class="w"> </span><span class="mi">0</span><span class="p">)</span><span class="w"> </span><span class="p">{</span> <span class="w"> </span><span class="kt">int</span><span class="w"> </span><span class="n">other_rank</span><span class="p">;</span> <span class="w"> </span><span class="n">printf</span><span class="p">(</span><span class="s">&quot;We have %i processes.</span><span class="se">\n</span><span class="s">&quot;</span><span class="p">,</span><span class="w"> </span><span class="n">num_procs</span><span class="p">);</span> <span class="w"> </span><span class="cm">/* Send messages to all other processes */</span> <span class="w"> </span><span class="k">for</span><span class="w"> </span><span class="p">(</span><span class="n">other_rank</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">1</span><span class="p">;</span><span class="w"> </span><span class="n">other_rank</span><span class="w"> </span><span class="o">&lt;</span><span class="w"> </span><span class="n">num_procs</span><span class="p">;</span><span class="w"> </span><span class="n">other_rank</span><span class="o">++</span><span class="p">)</span> <span class="w"> </span><span class="p">{</span> <span class="w"> </span><span class="n">sprintf</span><span class="p">(</span><span class="n">buf</span><span class="p">,</span><span class="w"> </span><span class="s">&quot;Hello %i!&quot;</span><span class="p">,</span><span class="w"> </span><span class="n">other_rank</span><span class="p">);</span> <span class="w"> </span><span class="n">MPI_Send</span><span class="p">(</span><span class="n">buf</span><span class="p">,</span><span class="w"> </span><span class="mi">256</span><span class="p">,</span><span class="w"> </span><span class="n">MPI_CHAR</span><span class="p">,</span><span class="w"> </span><span class="n">other_rank</span><span class="p">,</span> <span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="n">MPI_COMM_WORLD</span><span class="p">);</span> <span class="w"> </span><span class="p">}</span> <span class="w"> </span><span class="cm">/* Receive messages from all other processes */</span> <span class="w"> </span><span class="k">for</span><span class="w"> </span><span class="p">(</span><span class="n">other_rank</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">1</span><span class="p">;</span><span class="w"> </span><span class="n">other_rank</span><span class="w"> </span><span class="o">&lt;</span><span class="w"> </span><span class="n">num_procs</span><span class="p">;</span><span class="w"> </span><span class="n">other_rank</span><span class="o">++</span><span class="p">)</span> <span class="w"> </span><span class="p">{</span> <span class="w"> </span><span class="n">MPI_Recv</span><span class="p">(</span><span class="n">buf</span><span class="p">,</span><span class="w"> </span><span class="mi">256</span><span class="p">,</span><span class="w"> </span><span class="n">MPI_CHAR</span><span class="p">,</span><span class="w"> </span><span class="n">other_rank</span><span class="p">,</span> <span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="n">MPI_COMM_WORLD</span><span class="p">,</span><span class="w"> </span><span class="n">MPI_STATUS_IGNORE</span><span class="p">);</span> <span class="w"> </span><span class="n">printf</span><span class="p">(</span><span class="s">&quot;%s</span><span class="se">\n</span><span class="s">&quot;</span><span class="p">,</span><span class="w"> </span><span class="n">buf</span><span class="p">);</span> <span class="w"> </span><span class="p">}</span> <span class="w"> </span><span class="p">}</span><span class="w"> </span><span class="k">else</span><span class="w"> </span><span class="p">{</span> <span class="w"> </span><span class="cm">/* Receive message from process #0 */</span> <span class="w"> </span><span class="n">MPI_Recv</span><span class="p">(</span><span class="n">buf</span><span class="p">,</span><span class="w"> </span><span class="mi">256</span><span class="p">,</span><span class="w"> </span><span class="n">MPI_CHAR</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span> <span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="n">MPI_COMM_WORLD</span><span class="p">,</span><span class="w"> </span><span class="n">MPI_STATUS_IGNORE</span><span class="p">);</span> <span class="w"> </span><span class="n">assert</span><span class="p">(</span><span class="n">memcmp</span><span class="p">(</span><span class="n">buf</span><span class="p">,</span><span class="w"> </span><span class="s">&quot;Hello &quot;</span><span class="p">,</span><span class="w"> </span><span class="mi">6</span><span class="p">)</span><span class="w"> </span><span class="o">==</span><span class="w"> </span><span class="mi">0</span><span class="p">);</span> <span class="w"> </span><span class="cm">/* Send message to process #0 */</span> <span class="w"> </span><span class="n">sprintf</span><span class="p">(</span><span class="n">buf</span><span class="p">,</span><span class="w"> </span><span class="s">&quot;Process %i reporting for duty.&quot;</span><span class="p">,</span><span class="w"> </span><span class="n">my_rank</span><span class="p">);</span> <span class="w"> </span><span class="n">MPI_Send</span><span class="p">(</span><span class="n">buf</span><span class="p">,</span><span class="w"> </span><span class="mi">256</span><span class="p">,</span><span class="w"> </span><span class="n">MPI_CHAR</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span> <span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="n">MPI_COMM_WORLD</span><span class="p">);</span> <span class="w"> </span><span class="p">}</span> <span class="w"> </span><span class="cm">/* Tear down the communication infrastructure */</span> <span class="w"> </span><span class="n">MPI_Finalize</span><span class="p">();</span> <span class="w"> </span><span class="k">return</span><span class="w"> </span><span class="mi">0</span><span class="p">;</span> <span class="p">}</span> </pre></div> <p>When run with 4 processes, it should produce the following output:<sup id="cite_ref-46" class="reference"><a href="#cite_note-46"><span class="cite-bracket">&#91;</span>46<span class="cite-bracket">&#93;</span></a></sup> </p> <pre>$ mpicc example.c &amp;&amp; mpiexec -n 4 ./a.out We have 4 processes. Process 1 reporting for duty. Process 2 reporting for duty. Process 3 reporting for duty. </pre> <p>Here, <code>mpiexec</code> is a command used to execute the example program with 4 <a href="/wiki/Process_(computing)" title="Process (computing)">processes</a>, each of which is an independent instance of the program at run time and assigned ranks (i.e. numeric IDs) 0, 1, 2, and 3. The name <code>mpiexec</code> is recommended by the MPI standard, although some implementations provide a similar command under the name <code>mpirun</code>. The <code>MPI_COMM_WORLD</code> is the communicator that consists of all the processes. </p><p>A single program, multiple data (<a href="/wiki/SPMD" class="mw-redirect" title="SPMD">SPMD</a>) programming model is thereby facilitated, but not required; many MPI implementations allow multiple, different, executables to be started in the same MPI job. Each process has its own rank, the total number of processes in the world, and the ability to communicate between them either with point-to-point (send/receive) communication, or by collective communication among the group. It is enough for MPI to provide an SPMD-style program with <code>MPI_COMM_WORLD</code>, its own rank, and the size of the world to allow algorithms to decide what to do. In more realistic situations, I/O is more carefully managed than in this example. MPI does not stipulate how standard I/O (stdin, stdout, stderr) should work on a given system. It generally works as expected on the rank-0 process, and some implementations also capture and funnel the output from other processes. </p><p>MPI uses the notion of process rather than processor. Program copies are <i>mapped</i> to processors by the MPI <a href="/wiki/Runtime_system" title="Runtime system">runtime</a>. In that sense, the parallel machine can map to one physical processor, or to <i>N</i> processors, where <i>N</i> is the number of available processors, or even something in between. For maximum parallel speedup, more physical processors are used. This example adjusts its behavior to the size of the world <i>N</i>, so it also seeks to scale to the runtime configuration without compilation for each size variation, although runtime decisions might vary depending on that absolute amount of concurrency available. </p> <div class="mw-heading mw-heading2"><h2 id="MPI-2_adoption">MPI-2 adoption</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=26" title="Edit section: MPI-2 adoption"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Adoption of MPI-1.2 has been universal, particularly in cluster computing, but acceptance of MPI-2.1 has been more limited. Issues include: </p> <ol><li>MPI-2 implementations include I/O and dynamic process management, and the size of the middleware is substantially larger. Most sites that use batch scheduling systems cannot support dynamic process management. MPI-2's parallel I/O is well accepted.<sup class="noprint Inline-Template Template-Fact" style="white-space:nowrap;">&#91;<i><a href="/wiki/Wikipedia:Citation_needed" title="Wikipedia:Citation needed"><span title="This claim needs references to reliable sources. (January 2011)">citation needed</span></a></i>&#93;</sup></li> <li>Many MPI-1.2 programs were developed before MPI-2. Portability concerns initially slowed adoption, although wider support has lessened this.</li> <li>Many MPI-1.2 applications use only a subset of that standard (16-25 functions) with no real need for MPI-2 functionality.</li></ol> <div class="mw-heading mw-heading2"><h2 id="Future">Future</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=27" title="Edit section: Future"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <p>Some aspects of the MPI's future appear solid; others less so. The MPI Forum reconvened in 2007 to clarify some MPI-2 issues and explore developments for a possible MPI-3, which resulted in versions MPI-3.0 (September 2012)<sup id="cite_ref-47" class="reference"><a href="#cite_note-47"><span class="cite-bracket">&#91;</span>47<span class="cite-bracket">&#93;</span></a></sup> and MPI-3.1 (June 2015)<sup id="cite_ref-48" class="reference"><a href="#cite_note-48"><span class="cite-bracket">&#91;</span>48<span class="cite-bracket">&#93;</span></a></sup>.The development continued with the approval of MPI-4.0 on June 9, 2021<sup id="cite_ref-49" class="reference"><a href="#cite_note-49"><span class="cite-bracket">&#91;</span>49<span class="cite-bracket">&#93;</span></a></sup>, and most recently, MPI-4.1 was approved on November 2, 2023.<sup id="cite_ref-50" class="reference"><a href="#cite_note-50"><span class="cite-bracket">&#91;</span>50<span class="cite-bracket">&#93;</span></a></sup> </p><p>Architectures are changing, with greater internal concurrency (<a href="/wiki/Multi-core_processor" title="Multi-core processor">multi-core</a>), better fine-grained concurrency control (threading, affinity), and more levels of <a href="/wiki/Memory_hierarchy" title="Memory hierarchy">memory hierarchy</a>. <a href="/wiki/Multithreading_(computer_architecture)" title="Multithreading (computer architecture)">Multithreaded</a> programs can take advantage of these developments more easily than single-threaded applications. This has already yielded separate, complementary standards for <a href="/wiki/Symmetric_multiprocessing" title="Symmetric multiprocessing">symmetric multiprocessing</a>, namely <a href="/wiki/OpenMP" title="OpenMP">OpenMP</a>. MPI-2 defines how standard-conforming implementations should deal with multithreaded issues, but does not require that implementations be multithreaded, or even thread-safe. MPI-3 adds the ability to use shared-memory parallelism within a node. Implementations of MPI such as Adaptive MPI, Hybrid MPI, Fine-Grained MPI, MPC and others offer extensions to the MPI standard that address different challenges in MPI. </p><p>Astrophysicist Jonathan Dursi wrote an opinion piece calling MPI obsolescent, pointing to newer technologies like the <a href="/wiki/Chapel_(programming_language)" title="Chapel (programming language)">Chapel</a> language, <a href="/wiki/Unified_Parallel_C" title="Unified Parallel C">Unified Parallel C</a>, <a href="/wiki/Apache_Hadoop" title="Apache Hadoop">Hadoop</a>, <a href="/wiki/Apache_Spark" title="Apache Spark">Spark</a> and <a href="/wiki/Apache_Flink" title="Apache Flink">Flink</a>.<sup id="cite_ref-51" class="reference"><a href="#cite_note-51"><span class="cite-bracket">&#91;</span>51<span class="cite-bracket">&#93;</span></a></sup> At the same time, nearly all of the projects in the <a href="/wiki/Exascale_computing" title="Exascale computing">Exascale Computing Project</a> build explicitly on MPI; MPI has been shown to scale to the largest machines as of the early 2020s and is widely considered to stay relevant for a long time to come. </p> <div class="mw-heading mw-heading2"><h2 id="See_also">See also</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=28" title="Edit section: See also"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1184024115">.mw-parser-output .div-col{margin-top:0.3em;column-width:30em}.mw-parser-output .div-col-small{font-size:90%}.mw-parser-output .div-col-rules{column-rule:1px solid #aaa}.mw-parser-output .div-col dl,.mw-parser-output .div-col ol,.mw-parser-output .div-col ul{margin-top:0}.mw-parser-output .div-col li,.mw-parser-output .div-col dd{page-break-inside:avoid;break-inside:avoid-column}</style><div class="div-col" style="column-width: 25em;"> <ul><li><a href="/wiki/Actor_model" title="Actor model">Actor model</a></li> <li><a href="/wiki/Bulk_synchronous_parallel" title="Bulk synchronous parallel">Bulk synchronous parallel</a></li> <li><a href="/wiki/Caltech_Cosmic_Cube" title="Caltech Cosmic Cube">Caltech Cosmic Cube</a></li> <li><a href="/wiki/Charm%2B%2B" title="Charm++">Charm++</a></li> <li><a href="/wiki/Co-array_Fortran" class="mw-redirect" title="Co-array Fortran">Co-array Fortran</a></li> <li><a href="/wiki/Global_Arrays" title="Global Arrays">Global Arrays</a></li> <li><a href="/wiki/Microsoft_Messaging_Passing_Interface" class="mw-redirect" title="Microsoft Messaging Passing Interface">Microsoft Messaging Passing Interface</a></li> <li><a href="/wiki/MVAPICH" title="MVAPICH">MVAPICH</a></li> <li><a href="/wiki/OpenHMPP" title="OpenHMPP">OpenHMPP</a></li> <li><a href="/wiki/Parallel_Virtual_Machine" title="Parallel Virtual Machine">Parallel Virtual Machine</a> (PVM)</li> <li><a href="/wiki/Partitioned_global_address_space" title="Partitioned global address space">Partitioned global address space</a></li> <li><a href="/wiki/Unified_Parallel_C" title="Unified Parallel C">Unified Parallel C</a></li> <li><a href="/wiki/X10_(programming_language)" title="X10 (programming language)">X10 (programming language)</a></li></ul> </div> <div class="mw-heading mw-heading2"><h2 id="References">References</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=29" title="Edit section: References"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1239543626">.mw-parser-output .reflist{margin-bottom:0.5em;list-style-type:decimal}@media screen{.mw-parser-output .reflist{font-size:90%}}.mw-parser-output .reflist .references{font-size:100%;margin-bottom:0;list-style-type:inherit}.mw-parser-output .reflist-columns-2{column-width:30em}.mw-parser-output .reflist-columns-3{column-width:25em}.mw-parser-output .reflist-columns{margin-top:0.3em}.mw-parser-output .reflist-columns ol{margin-top:0}.mw-parser-output .reflist-columns li{page-break-inside:avoid;break-inside:avoid-column}.mw-parser-output .reflist-upper-alpha{list-style-type:upper-alpha}.mw-parser-output .reflist-upper-roman{list-style-type:upper-roman}.mw-parser-output .reflist-lower-alpha{list-style-type:lower-alpha}.mw-parser-output .reflist-lower-greek{list-style-type:lower-greek}.mw-parser-output .reflist-lower-roman{list-style-type:lower-roman}</style><div class="reflist"> <div class="mw-references-wrap mw-references-columns"><ol class="references"> <li id="cite_note-1"><span class="mw-cite-backlink"><b><a href="#cite_ref-1">^</a></b></span> <span class="reference-text"><style data-mw-deduplicate="TemplateStyles:r1238218222">.mw-parser-output cite.citation{font-style:inherit;word-wrap:break-word}.mw-parser-output .citation q{quotes:"\"""\"""'""'"}.mw-parser-output .citation:target{background-color:rgba(0,127,255,0.133)}.mw-parser-output .id-lock-free.id-lock-free a{background:url("//upload.wikimedia.org/wikipedia/commons/6/65/Lock-green.svg")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-limited.id-lock-limited a,.mw-parser-output .id-lock-registration.id-lock-registration a{background:url("//upload.wikimedia.org/wikipedia/commons/d/d6/Lock-gray-alt-2.svg")right 0.1em center/9px no-repeat}.mw-parser-output .id-lock-subscription.id-lock-subscription a{background:url("//upload.wikimedia.org/wikipedia/commons/a/aa/Lock-red-alt-2.svg")right 0.1em center/9px no-repeat}.mw-parser-output .cs1-ws-icon a{background:url("//upload.wikimedia.org/wikipedia/commons/4/4c/Wikisource-logo.svg")right 0.1em center/12px no-repeat}body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-free a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-limited a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-registration a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .id-lock-subscription a,body:not(.skin-timeless):not(.skin-minerva) .mw-parser-output .cs1-ws-icon a{background-size:contain;padding:0 1em 0 0}.mw-parser-output .cs1-code{color:inherit;background:inherit;border:none;padding:inherit}.mw-parser-output .cs1-hidden-error{display:none;color:var(--color-error,#d33)}.mw-parser-output .cs1-visible-error{color:var(--color-error,#d33)}.mw-parser-output .cs1-maint{display:none;color:#085;margin-left:0.3em}.mw-parser-output .cs1-kern-left{padding-left:0.2em}.mw-parser-output .cs1-kern-right{padding-right:0.2em}.mw-parser-output .citation .mw-selflink{font-weight:inherit}@media screen{.mw-parser-output .cs1-format{font-size:95%}html.skin-theme-clientpref-night .mw-parser-output .cs1-maint{color:#18911f}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .cs1-maint{color:#18911f}}</style><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://hpc.nmsu.edu/discovery/mpi/introduction/">"Message Passing Interface&#160;:: High Performance Computing"</a>. <i>hpc.nmsu.edu</i><span class="reference-accessdate">. Retrieved <span class="nowrap">2022-08-06</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=hpc.nmsu.edu&amp;rft.atitle=Message+Passing+Interface+%3A%3A+High+Performance+Computing&amp;rft_id=https%3A%2F%2Fhpc.nmsu.edu%2Fdiscovery%2Fmpi%2Fintroduction%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-2"><span class="mw-cite-backlink"><b><a href="#cite_ref-2">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWalker_DW1992" class="citation report cs1">Walker DW (August 1992). <a rel="nofollow" class="external text" href="https://web.archive.org/web/20231115183232/https://technicalreports.ornl.gov/1992/3445603661204.pdf">Standards for message-passing in a distributed memory environment</a> <span class="cs1-format">(PDF)</span> (Report). Oak Ridge National Lab., TN (United States), Center for Research on Parallel Computing (CRPC). p.&#160;25. <a href="/wiki/OSTI_(identifier)" class="mw-redirect" title="OSTI (identifier)">OSTI</a>&#160;<a rel="nofollow" class="external text" href="https://www.osti.gov/biblio/10170156">10170156</a>. ORNL/TM-12147. Archived from <a rel="nofollow" class="external text" href="https://technicalreports.ornl.gov/1992/3445603661204.pdf">the original</a> <span class="cs1-format">(PDF)</span> on 2023-11-15<span class="reference-accessdate">. Retrieved <span class="nowrap">2019-08-18</span></span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=report&amp;rft.btitle=Standards+for+message-passing+in+a+distributed+memory+environment&amp;rft.pages=25&amp;rft.pub=Oak+Ridge+National+Lab.%2C+TN+%28United+States%29%2C+Center+for+Research+on+Parallel+Computing+%28CRPC%29&amp;rft.date=1992-08&amp;rft_id=https%3A%2F%2Fwww.osti.gov%2Fbiblio%2F10170156%23id-name%3DOSTI&amp;rft.au=Walker+DW&amp;rft_id=https%3A%2F%2Ftechnicalreports.ornl.gov%2F1992%2F3445603661204.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-3"><span class="mw-cite-backlink"><b><a href="#cite_ref-3">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFThe_MPI_Forum,_CORPORATE1993" class="citation conference cs1">The MPI Forum, CORPORATE (November 15–19, 1993). "MPI: A Message Passing Interface". <i>Proceedings of the 1993 ACM/IEEE conference on Supercomputing</i>. <a rel="nofollow" class="external text" href="http://supercomputing.org/">Supercomputing '93</a>. Portland, Oregon, USA: ACM. pp.&#160;878–883. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F169627.169855">10.1145/169627.169855</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/0-8186-4340-4" title="Special:BookSources/0-8186-4340-4"><bdi>0-8186-4340-4</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=conference&amp;rft.atitle=MPI%3A+A+Message+Passing+Interface&amp;rft.btitle=Proceedings+of+the+1993+ACM%2FIEEE+conference+on+Supercomputing&amp;rft.place=Portland%2C+Oregon%2C+USA&amp;rft.pages=878-883&amp;rft.pub=ACM&amp;rft.date=1993-11-15%2F1993-11-19&amp;rft_id=info%3Adoi%2F10.1145%2F169627.169855&amp;rft.isbn=0-8186-4340-4&amp;rft.au=The+MPI+Forum%2C+CORPORATE&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-4"><span class="mw-cite-backlink"><b><a href="#cite_ref-4">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFNielsen2016" class="citation book cs1">Nielsen, Frank (2016). <a rel="nofollow" class="external text" href="https://www.researchgate.net/publication/314626214">"2. Introduction to MPI: The MessagePassing Interface"</a>. <a rel="nofollow" class="external text" href="https://franknielsen.github.io/HPC4DS/index.html"><i>Introduction to HPC with MPI for Data Science</i></a>. Springer. pp.&#160;195–211. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-3-319-21903-5" title="Special:BookSources/978-3-319-21903-5"><bdi>978-3-319-21903-5</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=2.+Introduction+to+MPI%3A+The+MessagePassing+Interface&amp;rft.btitle=Introduction+to+HPC+with+MPI+for+Data+Science&amp;rft.pages=195-211&amp;rft.pub=Springer&amp;rft.date=2016&amp;rft.isbn=978-3-319-21903-5&amp;rft.aulast=Nielsen&amp;rft.aufirst=Frank&amp;rft_id=https%3A%2F%2Fwww.researchgate.net%2Fpublication%2F314626214&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-5"><span class="mw-cite-backlink"><b><a href="#cite_ref-5">^</a></b></span> <span class="reference-text"><a href="#CITEREFGroppLuskSkjellum1996">Gropp, Lusk &amp; Skjellum 1996</a>, p.&#160;3</span> </li> <li id="cite_note-6"><span class="mw-cite-backlink"><b><a href="#cite_ref-6">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFSurKoopPanda2017" class="citation book cs1">Sur, Sayantan; Koop, Matthew J.; Panda, Dhabaleswar K. (4 August 2017). "MPI and communication---High-performance and scalable MPI over InfiniBand with reduced memory usage: An in-depth performance analysis". <i>Proceedings of the 2006 ACM/IEEE conference on Supercomputing - SC '06</i>. ACM. p.&#160;105. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1145%2F1188455.1188565">10.1145/1188455.1188565</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0769527000" title="Special:BookSources/978-0769527000"><bdi>978-0769527000</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:818662">818662</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=MPI+and+communication---High-performance+and+scalable+MPI+over+InfiniBand+with+reduced+memory+usage%3A+An+in-depth+performance+analysis&amp;rft.btitle=Proceedings+of+the+2006+ACM%2FIEEE+conference+on+Supercomputing+-+SC+%2706&amp;rft.pages=105&amp;rft.pub=ACM&amp;rft.date=2017-08-04&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A818662%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1145%2F1188455.1188565&amp;rft.isbn=978-0769527000&amp;rft.aulast=Sur&amp;rft.aufirst=Sayantan&amp;rft.au=Koop%2C+Matthew+J.&amp;rft.au=Panda%2C+Dhabaleswar+K.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-7"><span class="mw-cite-backlink"><b><a href="#cite_ref-7">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://knem.gforge.inria.fr/">KNEM: High-Performance Intra-Node MPI Communication</a> "MPICH2 (since release 1.1.1) uses KNEM in the DMA LMT to improve large message performance within a single node. Open MPI also includes KNEM support in its SM BTL component since release 1.5. Additionally, NetPIPE includes a KNEM backend since version 3.7.2."</span> </li> <li id="cite_note-8"><span class="mw-cite-backlink"><b><a href="#cite_ref-8">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.open-mpi.org/faq/?category=sm">"FAQ: Tuning the run-time characteristics of MPI sm communications"</a>. <i>www.open-mpi.org</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.open-mpi.org&amp;rft.atitle=FAQ%3A+Tuning+the+run-time+characteristics+of+MPI+sm+communications&amp;rft_id=https%3A%2F%2Fwww.open-mpi.org%2Ffaq%2F%3Fcategory%3Dsm&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-9"><span class="mw-cite-backlink"><b><a href="#cite_ref-9">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external free" href="https://software.intel.com/en-us/articles/an-introduction-to-mpi-3-shared-memory-programming?language=en">https://software.intel.com/en-us/articles/an-introduction-to-mpi-3-shared-memory-programming?language=en</a> "The MPI-3 standard introduces another approach to hybrid programming that uses the new MPI Shared Memory (SHM) model"</span> </li> <li id="cite_note-10"><span class="mw-cite-backlink"><b><a href="#cite_ref-10">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://insidehpc.com/2016/01/shared-memory-mpi-3-0/">Shared Memory and MPI 3.0</a> "Various benchmarks can be run to determine which method is best for a particular application, whether using MPI + OpenMP or the MPI SHM extensions. On a fairly simple test case, speedups over a base version that used point to point communication were up to 5X, depending on the message."</span> </li> <li id="cite_note-11"><span class="mw-cite-backlink"><b><a href="#cite_ref-11">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://www.caam.rice.edu/~mk51/presentations/SIAMPP2016_4.pdf">Using MPI-3 Shared Memory As a Multicore Programming System</a> (PDF presentation slides)</span> </li> <li id="cite_note-SC94-12"><span class="mw-cite-backlink"><b><a href="#cite_ref-SC94_12-0">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://hpc.sagepub.com/content/8/3-4.toc">Table of Contents — September 1994, 8 (3-4)</a>. Hpc.sagepub.com. Retrieved on 2014-03-24.</span> </li> <li id="cite_note-MPI_Docs-13"><span class="mw-cite-backlink"><b><a href="#cite_ref-MPI_Docs_13-0">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://www.mpi-forum.org/docs/">MPI Documents</a>. Mpi-forum.org. Retrieved on 2014-03-24.</span> </li> <li id="cite_note-Gropp99adv-pp4-5-14"><span class="mw-cite-backlink"><b><a href="#cite_ref-Gropp99adv-pp4-5_14-0">^</a></b></span> <span class="reference-text"><a href="#CITEREFGroppLuskSkjellum1999b">Gropp, Lusk &amp; Skjellum 1999b</a>, pp.&#160;4–5</span> </li> <li id="cite_note-MPI_3.1-15"><span class="mw-cite-backlink"><b><a href="#cite_ref-MPI_3.1_15-0">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://www.mpi-forum.org/docs/mpi-3.1/mpi31-report.pdf">MPI: A Message-Passing Interface Standard<br />Version 3.1, Message Passing Interface Forum, June 4, 2015</a>. <a rel="nofollow" class="external free" href="http://www.mpi-forum.org">http://www.mpi-forum.org</a>. Retrieved on 2015-06-16.</span> </li> <li id="cite_note-node37-16"><span class="mw-cite-backlink">^ <a href="#cite_ref-node37_16-0"><sup><i><b>a</b></i></sup></a> <a href="#cite_ref-node37_16-1"><sup><i><b>b</b></i></sup></a></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://mpi-forum.org/docs/mpi-1.1/mpi-11-html/node37.html">"Type matching rules"</a>. <i>mpi-forum.org</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=mpi-forum.org&amp;rft.atitle=Type+matching+rules&amp;rft_id=http%3A%2F%2Fmpi-forum.org%2Fdocs%2Fmpi-1.1%2Fmpi-11-html%2Fnode37.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-17"><span class="mw-cite-backlink"><b><a href="#cite_ref-17">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.open-mpi.org/doc/v1.8/man3/MPI_Gather.3.php">"MPI_Gather(3) man page (version 1.8.8)"</a>. <i>www.open-mpi.org</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.open-mpi.org&amp;rft.atitle=MPI_Gather%283%29+man+page+%28version+1.8.8%29&amp;rft_id=https%3A%2F%2Fwww.open-mpi.org%2Fdoc%2Fv1.8%2Fman3%2FMPI_Gather.3.php&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-18"><span class="mw-cite-backlink"><b><a href="#cite_ref-18">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://www.mpich.org/static/docs/v3.1/www3/MPI_Get_address.html">"MPI_Get_address"</a>. <i>www.mpich.org</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.mpich.org&amp;rft.atitle=MPI_Get_address&amp;rft_id=http%3A%2F%2Fwww.mpich.org%2Fstatic%2Fdocs%2Fv3.1%2Fwww3%2FMPI_Get_address.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-19"><span class="mw-cite-backlink"><b><a href="#cite_ref-19">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://www.boost.org/doc/libs/1_55_0/doc/html/mpi/python.html#mpi.python_skeleton_content">Boost.MPI Skeleton/Content Mechanism rationale</a> (performance comparison graphs were produced using <a href="/wiki/NetPIPE" title="NetPIPE">NetPIPE</a>)</span> </li> <li id="cite_note-Gropp99adv-p7-20"><span class="mw-cite-backlink"><b><a href="#cite_ref-Gropp99adv-p7_20-0">^</a></b></span> <span class="reference-text"><a href="#CITEREFGroppLuskSkjelling1999b">Gropp, Lusk &amp; Skjelling 1999b</a>, p.&#160;7<span class="error harv-error" style="display: none; font-size:100%"> harvnb error: no target: CITEREFGroppLuskSkjelling1999b (<a href="/wiki/Category:Harv_and_Sfn_template_errors" title="Category:Harv and Sfn template errors">help</a>)</span></span> </li> <li id="cite_note-Gropp99adv-pp5-6-21"><span class="mw-cite-backlink"><b><a href="#cite_ref-Gropp99adv-pp5-6_21-0">^</a></b></span> <span class="reference-text"><a href="#CITEREFGroppLuskSkjelling1999b">Gropp, Lusk &amp; Skjelling 1999b</a>, pp.&#160;5–6<span class="error harv-error" style="display: none; font-size:100%"> harvnb error: no target: CITEREFGroppLuskSkjelling1999b (<a href="/wiki/Category:Harv_and_Sfn_template_errors" title="Category:Harv and Sfn template errors">help</a>)</span></span> </li> <li id="cite_note-22"><span class="mw-cite-backlink"><b><a href="#cite_ref-22">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://marcovan.hulten.org/report.pdf">"Sparse matrix-vector multiplications using the MPI I/O library"</a> <span class="cs1-format">(PDF)</span>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Sparse+matrix-vector+multiplications+using+the+MPI+I%2FO+library&amp;rft_id=http%3A%2F%2Fmarcovan.hulten.org%2Freport.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-23"><span class="mw-cite-backlink"><b><a href="#cite_ref-23">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://www.mcs.anl.gov/~thakur/papers/romio-coll.pdf">"Data Sieving and Collective I/O in ROMIO"</a> <span class="cs1-format">(PDF)</span>. IEEE. Feb 1999.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=Data+Sieving+and+Collective+I%2FO+in+ROMIO&amp;rft.pub=IEEE&amp;rft.date=1999-02&amp;rft_id=http%3A%2F%2Fwww.mcs.anl.gov%2F~thakur%2Fpapers%2Fromio-coll.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-24"><span class="mw-cite-backlink"><b><a href="#cite_ref-24">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFChenSunThakurRoth2011" class="citation book cs1">Chen, Yong; Sun, Xian-He; Thakur, Rajeev; Roth, Philip C.; Gropp, William D. (Sep 2011). "LACIO: A New Collective I/O Strategy for Parallel I/O Systems". <i>2011 IEEE International Parallel &amp; Distributed Processing Symposium</i>. IEEE. pp.&#160;794–804. <a href="/wiki/CiteSeerX_(identifier)" class="mw-redirect" title="CiteSeerX (identifier)">CiteSeerX</a>&#160;<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.699.8972">10.1.1.699.8972</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FIPDPS.2011.79">10.1109/IPDPS.2011.79</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-1-61284-372-8" title="Special:BookSources/978-1-61284-372-8"><bdi>978-1-61284-372-8</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:7110094">7110094</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=LACIO%3A+A+New+Collective+I%2FO+Strategy+for+Parallel+I%2FO+Systems&amp;rft.btitle=2011+IEEE+International+Parallel+%26+Distributed+Processing+Symposium&amp;rft.pages=794-804&amp;rft.pub=IEEE&amp;rft.date=2011-09&amp;rft_id=https%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.699.8972%23id-name%3DCiteSeerX&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A7110094%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1109%2FIPDPS.2011.79&amp;rft.isbn=978-1-61284-372-8&amp;rft.aulast=Chen&amp;rft.aufirst=Yong&amp;rft.au=Sun%2C+Xian-He&amp;rft.au=Thakur%2C+Rajeev&amp;rft.au=Roth%2C+Philip+C.&amp;rft.au=Gropp%2C+William+D.&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-25"><span class="mw-cite-backlink"><b><a href="#cite_ref-25">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFTeng_WangKevin_VaskoZhuo_LiuHui_Chen2016" class="citation journal cs1">Teng Wang; Kevin Vasko; Zhuo Liu; Hui Chen; Weikuan Yu (2016). "Enhance parallel input/output with cross-bundle aggregation". <i>The International Journal of High Performance Computing Applications</i>. <b>30</b> (2): 241–256. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1177%2F1094342015618017">10.1177/1094342015618017</a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:12067366">12067366</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=The+International+Journal+of+High+Performance+Computing+Applications&amp;rft.atitle=Enhance+parallel+input%2Foutput+with+cross-bundle+aggregation&amp;rft.volume=30&amp;rft.issue=2&amp;rft.pages=241-256&amp;rft.date=2016&amp;rft_id=info%3Adoi%2F10.1177%2F1094342015618017&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A12067366%23id-name%3DS2CID&amp;rft.au=Teng+Wang&amp;rft.au=Kevin+Vasko&amp;rft.au=Zhuo+Liu&amp;rft.au=Hui+Chen&amp;rft.au=Weikuan+Yu&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-26"><span class="mw-cite-backlink"><b><a href="#cite_ref-26">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFWangVaskoLiuChen2014" class="citation book cs1">Wang, Teng; Vasko, Kevin; Liu, Zhuo; Chen, Hui; Yu, Weikuan (Nov 2014). "BPAR: A Bundle-Based Parallel Aggregation Framework for Decoupled I/O Execution". <i>2014 International Workshop on Data Intensive Scalable Computing Systems</i>. IEEE. pp.&#160;25–32. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1109%2FDISCS.2014.6">10.1109/DISCS.2014.6</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-1-4673-6750-9" title="Special:BookSources/978-1-4673-6750-9"><bdi>978-1-4673-6750-9</bdi></a>. <a href="/wiki/S2CID_(identifier)" class="mw-redirect" title="S2CID (identifier)">S2CID</a>&#160;<a rel="nofollow" class="external text" href="https://api.semanticscholar.org/CorpusID:2402391">2402391</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=bookitem&amp;rft.atitle=BPAR%3A+A+Bundle-Based+Parallel+Aggregation+Framework+for+Decoupled+I%2FO+Execution&amp;rft.btitle=2014+International+Workshop+on+Data+Intensive+Scalable+Computing+Systems&amp;rft.pages=25-32&amp;rft.pub=IEEE&amp;rft.date=2014-11&amp;rft_id=https%3A%2F%2Fapi.semanticscholar.org%2FCorpusID%3A2402391%23id-name%3DS2CID&amp;rft_id=info%3Adoi%2F10.1109%2FDISCS.2014.6&amp;rft.isbn=978-1-4673-6750-9&amp;rft.aulast=Wang&amp;rft.aufirst=Teng&amp;rft.au=Vasko%2C+Kevin&amp;rft.au=Liu%2C+Zhuo&amp;rft.au=Chen%2C+Hui&amp;rft.au=Yu%2C+Weikuan&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-27"><span class="mw-cite-backlink"><b><a href="#cite_ref-27">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFcea-hpc" class="citation web cs1">cea-hpc. <a rel="nofollow" class="external text" href="https://github.com/cea-hpc/wi4mpi">"cea-hpc/wi4mpi: Wrapper interface for MPI"</a>. <i>GitHub</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=GitHub&amp;rft.atitle=cea-hpc%2Fwi4mpi%3A+Wrapper+interface+for+MPI&amp;rft.au=cea-hpc&amp;rft_id=https%3A%2F%2Fgithub.com%2Fcea-hpc%2Fwi4mpi&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-28"><span class="mw-cite-backlink"><b><a href="#cite_ref-28">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://www.mpich.org/static/docs/latest/www1/mpicc.html">mpicc</a>. Mpich.org. Retrieved on 2014-03-24.</span> </li> <li id="cite_note-29"><span class="mw-cite-backlink"><b><a href="#cite_ref-29">^</a></b></span> <span class="reference-text"> <a rel="nofollow" class="external text" href="http://www.purempi.net">Pure Mpi.NET</a></span> </li> <li id="cite_note-30"><span class="mw-cite-backlink"><b><a href="#cite_ref-30">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://www.osl.iu.edu/research/mpi.net/">"MPI.NET: High-Performance C# Library for Message Passing"</a>. <i>www.osl.iu.edu</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.osl.iu.edu&amp;rft.atitle=MPI.NET%3A+High-Performance+C%23+Library+for+Message+Passing&amp;rft_id=http%3A%2F%2Fwww.osl.iu.edu%2Fresearch%2Fmpi.net%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-31"><span class="mw-cite-backlink"><b><a href="#cite_ref-31">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://www.hpjava.org/mpiJava.html">"mpiJava Home Page"</a>. <i>www.hpjava.org</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.hpjava.org&amp;rft.atitle=mpiJava+Home+Page&amp;rft_id=http%3A%2F%2Fwww.hpjava.org%2FmpiJava.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-32"><span class="mw-cite-backlink"><b><a href="#cite_ref-32">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://www.hpjava.org/theses/shko/thesis_paper/node33.html">"Introduction to the mpiJava API"</a>. <i>www.hpjava.org</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.hpjava.org&amp;rft.atitle=Introduction+to+the+mpiJava+API&amp;rft_id=http%3A%2F%2Fwww.hpjava.org%2Ftheses%2Fshko%2Fthesis_paper%2Fnode33.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-33"><span class="mw-cite-backlink"><b><a href="#cite_ref-33">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://www.hpjava.org/papers/MPJ-CPE/cpempi/node6.html">"The MPJ API Specification"</a>. <i>www.hpjava.org</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.hpjava.org&amp;rft.atitle=The+MPJ+API+Specification&amp;rft_id=http%3A%2F%2Fwww.hpjava.org%2Fpapers%2FMPJ-CPE%2Fcpempi%2Fnode6.html&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-34"><span class="mw-cite-backlink"><b><a href="#cite_ref-34">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://mpj-express.org/">"MPJ Express Project"</a>. <i>mpj-express.org</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=mpj-express.org&amp;rft.atitle=MPJ+Express+Project&amp;rft_id=http%3A%2F%2Fmpj-express.org%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-35"><span class="mw-cite-backlink"><b><a href="#cite_ref-35">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation cs2"><a rel="nofollow" class="external text" href="https://github.com/JuliaParallel/MPI.jl"><i>JuliaParallel/MPI.jl</i></a>, Parallel Julia, 2019-10-03<span class="reference-accessdate">, retrieved <span class="nowrap">2019-10-08</span></span></cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=JuliaParallel%2FMPI.jl&amp;rft.pub=Parallel+Julia&amp;rft.date=2019-10-03&amp;rft_id=https%3A%2F%2Fgithub.com%2FJuliaParallel%2FMPI.jl&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-36"><span class="mw-cite-backlink"><b><a href="#cite_ref-36">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="http://cristal.inria.fr/~xleroy/software.html#ocamlmpi">"Xavier Leroy - Software"</a>. <i>cristal.inria.fr</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=cristal.inria.fr&amp;rft.atitle=Xavier+Leroy+-+Software&amp;rft_id=http%3A%2F%2Fcristal.inria.fr%2F~xleroy%2Fsoftware.html%23ocamlmpi&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-37"><span class="mw-cite-backlink"><b><a href="#cite_ref-37">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external text" href="http://caml.inria.fr/pub/ml-archives/caml-list/2003/07/155910c4eeb09e684f02ea4ae342873b.en.html">Archives of the Caml mailing list &gt; Message from Yaron M. Minsky</a>. Caml.inria.fr (2003-07-15). Retrieved on 2014-03-24.</span> </li> <li id="cite_note-38"><span class="mw-cite-backlink"><b><a href="#cite_ref-38">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://pari.math.u-bordeaux.fr/pub/pari/manuals/2.13.3/parallel.pdf">"Introduction to parallel GP"</a> <span class="cs1-format">(PDF)</span>. <i>pari.math.u-bordeaux.fr</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=pari.math.u-bordeaux.fr&amp;rft.atitle=Introduction+to+parallel+GP&amp;rft_id=https%3A%2F%2Fpari.math.u-bordeaux.fr%2Fpub%2Fpari%2Fmanuals%2F2.13.3%2Fparallel.pdf&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-39"><span class="mw-cite-backlink"><b><a href="#cite_ref-39">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://mpi4py.readthedocs.io/">"mpi4py"</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=mpi4py&amp;rft_id=https%3A%2F%2Fmpi4py.readthedocs.io%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-40"><span class="mw-cite-backlink"><b><a href="#cite_ref-40">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://pypi.org/p/numba-mpi">"numba-mpi"</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=numba-mpi&amp;rft_id=https%3A%2F%2Fpypi.org%2Fp%2Fnumba-mpi&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-41"><span class="mw-cite-backlink"><b><a href="#cite_ref-41">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://mpi4jax.readthedocs.io/">"mpi4jax"</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=mpi4jax&amp;rft_id=https%3A%2F%2Fmpi4jax.readthedocs.io%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-42"><span class="mw-cite-backlink"><b><a href="#cite_ref-42">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://code.google.com/p/pypar/">"Google Code Archive - Long-term storage for Google Code Project Hosting"</a>. <i>code.google.com</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=code.google.com&amp;rft.atitle=Google+Code+Archive+-+Long-term+storage+for+Google+Code+Project+Hosting.&amp;rft_id=https%3A%2F%2Fcode.google.com%2Fp%2Fpypar%2F&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-43"><span class="mw-cite-backlink"><b><a href="#cite_ref-43">^</a></b></span> <span class="reference-text">Now part of <a rel="nofollow" class="external text" href="http://sourceforge.net/projects/pydusa/">Pydusa</a></span> </li> <li id="cite_note-44"><span class="mw-cite-backlink"><b><a href="#cite_ref-44">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFYu2002" class="citation journal cs1">Yu, Hao (2002). <a rel="nofollow" class="external text" href="https://cran.r-project.org/package=Rmpi">"Rmpi: Parallel Statistical Computing in R"</a>. <i>R News</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=R+News&amp;rft.atitle=Rmpi%3A+Parallel+Statistical+Computing+in+R&amp;rft.date=2002&amp;rft.aulast=Yu&amp;rft.aufirst=Hao&amp;rft_id=https%3A%2F%2Fcran.r-project.org%2Fpackage%3DRmpi&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-45"><span class="mw-cite-backlink"><b><a href="#cite_ref-45">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFChenOstrouchovSchmidtPatel2012" class="citation web cs1">Chen, Wei-Chen; Ostrouchov, George; Schmidt, Drew; Patel, Pragneshkumar; Yu, Hao (2012). <a rel="nofollow" class="external text" href="https://cran.r-project.org/package=pbdMPI">"pbdMPI: Programming with Big Data -- Interface to MPI"</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=unknown&amp;rft.btitle=pbdMPI%3A+Programming+with+Big+Data+--+Interface+to+MPI&amp;rft.date=2012&amp;rft.aulast=Chen&amp;rft.aufirst=Wei-Chen&amp;rft.au=Ostrouchov%2C+George&amp;rft.au=Schmidt%2C+Drew&amp;rft.au=Patel%2C+Pragneshkumar&amp;rft.au=Yu%2C+Hao&amp;rft_id=https%3A%2F%2Fcran.r-project.org%2Fpackage%3DpbdMPI&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> <li id="cite_note-46"><span class="mw-cite-backlink"><b><a href="#cite_ref-46">^</a></b></span> <span class="reference-text">The output snippet was produced on an ordinary Linux desktop system with Open MPI installed. <a href="/wiki/Linux_distribution" title="Linux distribution">Distros</a> usually place the mpicc command into an openmpi-devel or libopenmpi-dev package, and sometimes make it necessary to run "module add mpi/openmpi-x86_64" or similar before mpicc and mpiexec are available.</span> </li> <li id="cite_note-47"><span class="mw-cite-backlink"><b><a href="#cite_ref-47">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external free" href="https://www.mpi-forum.org/docs/mpi-3.0/mpi30-report.pdf">https://www.mpi-forum.org/docs/mpi-3.0/mpi30-report.pdf</a></span> </li> <li id="cite_note-48"><span class="mw-cite-backlink"><b><a href="#cite_ref-48">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external free" href="https://www.mpi-forum.org/docs/mpi-3.1/mpi31-report.pdf">https://www.mpi-forum.org/docs/mpi-3.1/mpi31-report.pdf</a></span> </li> <li id="cite_note-49"><span class="mw-cite-backlink"><b><a href="#cite_ref-49">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external free" href="https://www.mpi-forum.org/docs/mpi-4.0/mpi40-report.pdf">https://www.mpi-forum.org/docs/mpi-4.0/mpi40-report.pdf</a></span> </li> <li id="cite_note-50"><span class="mw-cite-backlink"><b><a href="#cite_ref-50">^</a></b></span> <span class="reference-text"><a rel="nofollow" class="external free" href="https://www.mpi-forum.org/docs/mpi-4.1/mpi41-report.pdf">https://www.mpi-forum.org/docs/mpi-4.1/mpi41-report.pdf</a></span> </li> <li id="cite_note-51"><span class="mw-cite-backlink"><b><a href="#cite_ref-51">^</a></b></span> <span class="reference-text"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite class="citation web cs1"><a rel="nofollow" class="external text" href="https://www.dursi.ca/post/hpc-is-dying-and-mpi-is-killing-it">"HPC is dying, and MPI is killing it"</a>. <i>www.dursi.ca</i>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=unknown&amp;rft.jtitle=www.dursi.ca&amp;rft.atitle=HPC+is+dying%2C+and+MPI+is+killing+it&amp;rft_id=https%3A%2F%2Fwww.dursi.ca%2Fpost%2Fhpc-is-dying-and-mpi-is-killing-it&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></span> </li> </ol></div></div> <div class="mw-heading mw-heading2"><h2 id="Further_reading">Further reading</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=30" title="Edit section: Further reading"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1184024115"><div class="div-col" style="column-width: 30em;"> <ul><li><style data-mw-deduplicate="TemplateStyles:r1041539562">.mw-parser-output .citation{word-wrap:break-word}.mw-parser-output .citation:target{background-color:rgba(0,127,255,0.133)}</style><span class="citation foldoc">This article is based on material taken from <a rel="nofollow" class="external text" href="https://foldoc.org/Message+Passing+Interface">Message Passing Interface</a> at the <i><a href="/wiki/Free_On-line_Dictionary_of_Computing" title="Free On-line Dictionary of Computing">Free On-line Dictionary of Computing</a></i> &#32;prior to 1 November 2008 and incorporated under the "relicensing" terms of the <a href="/wiki/GNU_Free_Documentation_License" title="GNU Free Documentation License">GFDL</a>, version 1.3 or later.</span></li> <li>Aoyama, Yukiya; Nakano, Jun (1999) <i><a rel="nofollow" class="external text" href="https://web.archive.org/web/20080119023608/http://www.redbooks.ibm.com/abstracts/sg245380.html">RS/6000 SP: Practical MPI Programming</a></i>, ITSO</li> <li>Foster, Ian (1995) <i>Designing and Building Parallel Programs (Online)</i> Addison-Wesley <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/0-201-57594-9" title="Special:BookSources/0-201-57594-9">0-201-57594-9</a>, chapter 8 <i><a rel="nofollow" class="external text" href="http://www-unix.mcs.anl.gov/dbpp/text/node94.html#SECTION03500000000000000000">Message Passing Interface</a></i></li> <li>Wijesuriya, Viraj Brian (2010-12-29) <a rel="nofollow" class="external text" href="https://www.daniweb.com/forums/post1428830.html#post1428830"><i>Daniweb: Sample Code for Matrix Multiplication using MPI Parallel Programming Approach</i></a></li> <li><i>Using MPI</i> series: <ul><li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGroppLuskSkjellum1994" class="citation book cs1">Gropp, William; Lusk, Ewing; Skjellum, Anthony (1994). <a rel="nofollow" class="external text" href="https://archive.org/details/usingmpiportable00grop"><i>Using MPI: portable parallel programming with the message-passing interface</i></a>. Cambridge, MA, USA: <a href="/wiki/MIT_Press" title="MIT Press">MIT Press</a> Scientific And Engineering Computation Series. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-262-57104-3" title="Special:BookSources/978-0-262-57104-3"><bdi>978-0-262-57104-3</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Using+MPI%3A+portable+parallel+programming+with+the+message-passing+interface&amp;rft.place=Cambridge%2C+MA%2C+USA&amp;rft.pub=MIT+Press+Scientific+And+Engineering+Computation+Series&amp;rft.date=1994&amp;rft.isbn=978-0-262-57104-3&amp;rft.aulast=Gropp&amp;rft.aufirst=William&amp;rft.au=Lusk%2C+Ewing&amp;rft.au=Skjellum%2C+Anthony&amp;rft_id=https%3A%2F%2Farchive.org%2Fdetails%2Fusingmpiportable00grop&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGroppLuskSkjellum1999a" class="citation book cs1">Gropp, William; Lusk, Ewing; Skjellum, Anthony (1999a). <a rel="nofollow" class="external text" href="http://mitpress.mit.edu/books/using-mpi-second-edition"><i>Using MPI, 2nd Edition: Portable Parallel Programming with the Message Passing Interface</i></a>. Cambridge, MA, USA: <a href="/wiki/MIT_Press" title="MIT Press">MIT Press</a> Scientific And Engineering Computation Series. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-262-57132-6" title="Special:BookSources/978-0-262-57132-6"><bdi>978-0-262-57132-6</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Using+MPI%2C+2nd+Edition%3A+Portable+Parallel+Programming+with+the+Message+Passing+Interface&amp;rft.place=Cambridge%2C+MA%2C+USA&amp;rft.pub=MIT+Press+Scientific+And+Engineering+Computation+Series&amp;rft.date=1999&amp;rft.isbn=978-0-262-57132-6&amp;rft.aulast=Gropp&amp;rft.aufirst=William&amp;rft.au=Lusk%2C+Ewing&amp;rft.au=Skjellum%2C+Anthony&amp;rft_id=http%3A%2F%2Fmitpress.mit.edu%2Fbooks%2Fusing-mpi-second-edition&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGroppLuskSkjellum1999b" class="citation book cs1">Gropp, William; Lusk, Ewing; Skjellum, Anthony (1999b). <a rel="nofollow" class="external text" href="http://mitpress.mit.edu/books/using-mpi-2"><i>Using MPI-2: Advanced Features of the Message Passing Interface</i></a>. <a href="/wiki/MIT_Press" title="MIT Press">MIT Press</a>. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-262-57133-3" title="Special:BookSources/978-0-262-57133-3"><bdi>978-0-262-57133-3</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Using+MPI-2%3A+Advanced+Features+of+the+Message+Passing+Interface&amp;rft.pub=MIT+Press&amp;rft.date=1999&amp;rft.isbn=978-0-262-57133-3&amp;rft.aulast=Gropp&amp;rft.aufirst=William&amp;rft.au=Lusk%2C+Ewing&amp;rft.au=Skjellum%2C+Anthony&amp;rft_id=http%3A%2F%2Fmitpress.mit.edu%2Fbooks%2Fusing-mpi-2&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGroppLuskSkjellum2014" class="citation book cs1">Gropp, William; Lusk, Ewing; Skjellum, Anthony (2014). <a rel="nofollow" class="external text" href="http://mitpress.mit.edu/books/using-mpi-third-edition"><i>Using MPI, 3rd edition: Portable Parallel Programming with the Message-Passing Interface</i></a>. Cambridge, MA, USA: <a href="/wiki/MIT_Press" title="MIT Press">MIT Press</a> Scientific And Engineering Computation Series. <a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-262-52739-2" title="Special:BookSources/978-0-262-52739-2"><bdi>978-0-262-52739-2</bdi></a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&amp;rft.genre=book&amp;rft.btitle=Using+MPI%2C+3rd+edition%3A+Portable+Parallel+Programming+with+the+Message-Passing+Interface&amp;rft.place=Cambridge%2C+MA%2C+USA&amp;rft.pub=MIT+Press+Scientific+And+Engineering+Computation+Series&amp;rft.date=2014&amp;rft.isbn=978-0-262-52739-2&amp;rft.aulast=Gropp&amp;rft.aufirst=William&amp;rft.au=Lusk%2C+Ewing&amp;rft.au=Skjellum%2C+Anthony&amp;rft_id=http%3A%2F%2Fmitpress.mit.edu%2Fbooks%2Fusing-mpi-third-edition&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></li></ul></li> <li><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><cite id="CITEREFGroppLuskSkjellum1996" class="citation journal cs1">Gropp, William; Lusk, Ewing; Skjellum, Anthony (1996). "A High-Performance, Portable Implementation of the MPI Message Passing Interface". <i>Parallel Computing</i>. <b>22</b> (6): 789–828. <a href="/wiki/CiteSeerX_(identifier)" class="mw-redirect" title="CiteSeerX (identifier)">CiteSeerX</a>&#160;<span class="id-lock-free" title="Freely accessible"><a rel="nofollow" class="external text" href="https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.102.9485">10.1.1.102.9485</a></span>. <a href="/wiki/Doi_(identifier)" class="mw-redirect" title="Doi (identifier)">doi</a>:<a rel="nofollow" class="external text" href="https://doi.org/10.1016%2F0167-8191%2896%2900024-5">10.1016/0167-8191(96)00024-5</a>.</cite><span title="ctx_ver=Z39.88-2004&amp;rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Ajournal&amp;rft.genre=article&amp;rft.jtitle=Parallel+Computing&amp;rft.atitle=A+High-Performance%2C+Portable+Implementation+of+the+MPI+Message+Passing+Interface&amp;rft.volume=22&amp;rft.issue=6&amp;rft.pages=789-828&amp;rft.date=1996&amp;rft_id=https%3A%2F%2Fciteseerx.ist.psu.edu%2Fviewdoc%2Fsummary%3Fdoi%3D10.1.1.102.9485%23id-name%3DCiteSeerX&amp;rft_id=info%3Adoi%2F10.1016%2F0167-8191%2896%2900024-5&amp;rft.aulast=Gropp&amp;rft.aufirst=William&amp;rft.au=Lusk%2C+Ewing&amp;rft.au=Skjellum%2C+Anthony&amp;rfr_id=info%3Asid%2Fen.wikipedia.org%3AMessage+Passing+Interface" class="Z3988"></span></li> <li>Pacheco, Peter S. (1997) <i><a rel="nofollow" class="external text" href="https://books.google.com/books?&amp;id=tCVkM1z2aOoC">Parallel Programming with MPI</a></i>.<a rel="nofollow" class="external autonumber" href="http://www.cs.usfca.edu/mpi/">[1]</a> 500 pp. Morgan Kaufmann <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/1-55860-339-5" title="Special:BookSources/1-55860-339-5">1-55860-339-5</a>.</li> <li><i>MPI—The Complete Reference</i> series: <ul><li>Snir, Marc; Otto, Steve W.; Huss-Lederman, Steven; Walker, David W.; Dongarra, Jack J. (1995) <i><a rel="nofollow" class="external text" href="http://www.netlib.org/utk/papers/mpi-book/mpi-book.html">MPI: The Complete Reference</a></i>. MIT Press Cambridge, MA, USA. <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/0-262-69215-5" title="Special:BookSources/0-262-69215-5">0-262-69215-5</a></li> <li>Snir, Marc; Otto, Steve W.; Huss-Lederman, Steven; Walker, David W.; Dongarra, Jack J. (1998) <i>MPI—The Complete Reference: Volume 1, The MPI Core</i>. MIT Press, Cambridge, MA. <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/0-262-69215-5" title="Special:BookSources/0-262-69215-5">0-262-69215-5</a></li> <li>Gropp, William; Huss-Lederman, Steven; Lumsdaine, Andrew; Lusk, Ewing; Nitzberg, Bill; Saphir, William; and Snir, Marc (1998) <i><a rel="nofollow" class="external text" href="https://web.archive.org/web/20010803093058/http://mitpress.mit.edu/book-home.tcl?isbn=0262571234">MPI—The Complete Reference: Volume 2, The MPI-2 Extensions</a></i>. MIT Press, Cambridge, MA <link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1238218222"><a href="/wiki/ISBN_(identifier)" class="mw-redirect" title="ISBN (identifier)">ISBN</a>&#160;<a href="/wiki/Special:BookSources/978-0-262-57123-4" title="Special:BookSources/978-0-262-57123-4">978-0-262-57123-4</a></li></ul></li> <li>Firuziaan, Mohammad; Nommensen, O. (2002) <i>Parallel Processing via MPI &amp; OpenMP</i>, Linux Enterprise, 10/2002</li> <li>Vanneschi, Marco (1999) <i>Parallel paradigms for scientific computing</i> In Proceedings of the European School on Computational Chemistry (1999, Perugia, Italy), number 75 in <i><a rel="nofollow" class="external text" href="https://books.google.com/books?&amp;id=zMqVdFgVnrgC">Lecture Notes in Chemistry</a></i>, pages 170–183. Springer, 2000</li> <li>Bala, Bruck, Cypher, Elustondo, A Ho, CT Ho, Kipnis, Snir (1995) ″<a rel="nofollow" class="external text" href="https://ieeexplore.ieee.org/abstract/document/342126/">A portable and tunable collective communication library for scalable parallel computers</a>" in IEEE Transactions on Parallel and Distributed Systems,″ vol. 6, no. 2, pp.&#160;154–164, Feb 1995.</li></ul> </div> <div class="mw-heading mw-heading2"><h2 id="External_links">External links</h2><span class="mw-editsection"><span class="mw-editsection-bracket">[</span><a href="/w/index.php?title=Message_Passing_Interface&amp;action=edit&amp;section=31" title="Edit section: External links"><span>edit</span></a><span class="mw-editsection-bracket">]</span></span></div> <style data-mw-deduplicate="TemplateStyles:r1235681985">.mw-parser-output .side-box{margin:4px 0;box-sizing:border-box;border:1px solid #aaa;font-size:88%;line-height:1.25em;background-color:var(--background-color-interactive-subtle,#f8f9fa);display:flow-root}.mw-parser-output .side-box-abovebelow,.mw-parser-output .side-box-text{padding:0.25em 0.9em}.mw-parser-output .side-box-image{padding:2px 0 2px 0.9em;text-align:center}.mw-parser-output .side-box-imageright{padding:2px 0.9em 2px 0;text-align:center}@media(min-width:500px){.mw-parser-output .side-box-flex{display:flex;align-items:center}.mw-parser-output .side-box-text{flex:1;min-width:0}}@media(min-width:720px){.mw-parser-output .side-box{width:238px}.mw-parser-output .side-box-right{clear:right;float:right;margin-left:1em}.mw-parser-output .side-box-left{margin-right:1em}}</style><style data-mw-deduplicate="TemplateStyles:r1237033735">@media print{body.ns-0 .mw-parser-output .sistersitebox{display:none!important}}@media screen{html.skin-theme-clientpref-night .mw-parser-output .sistersitebox img[src*="Wiktionary-logo-en-v2.svg"]{background-color:white}}@media screen and (prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .sistersitebox img[src*="Wiktionary-logo-en-v2.svg"]{background-color:white}}</style><div class="side-box side-box-right plainlinks sistersitebox"><style data-mw-deduplicate="TemplateStyles:r1126788409">.mw-parser-output .plainlist ol,.mw-parser-output .plainlist ul{line-height:inherit;list-style:none;margin:0;padding:0}.mw-parser-output .plainlist ol li,.mw-parser-output .plainlist ul li{margin-bottom:0}</style> <div class="side-box-flex"> <div class="side-box-image"><span class="noviewer" typeof="mw:File"><span><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/d/df/Wikibooks-logo-en-noslogan.svg/40px-Wikibooks-logo-en-noslogan.svg.png" decoding="async" width="40" height="40" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/d/df/Wikibooks-logo-en-noslogan.svg/60px-Wikibooks-logo-en-noslogan.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/d/df/Wikibooks-logo-en-noslogan.svg/80px-Wikibooks-logo-en-noslogan.svg.png 2x" data-file-width="400" data-file-height="400" /></span></span></div> <div class="side-box-text plainlist">Wikibooks has a book on the topic of: <i><b><a href="https://en.wikibooks.org/wiki/Message-Passing_Interface" class="extiw" title="wikibooks:Message-Passing Interface">Message-Passing Interface</a></b></i></div></div> </div> <ul><li><span class="official-website"><span class="url"><a rel="nofollow" class="external text" href="https://www.mpi-forum.org/">Official website</a></span></span></li> <li><a rel="nofollow" class="external text" href="https://www.mpi-forum.org/docs/mpi-3.1/mpi31-report.pdf">Official MPI-3.1 standard</a> (<a rel="nofollow" class="external text" href="https://www.mpi-forum.org/docs/mpi-3.1/mpi31-report/mpi31-report.htm">unofficial HTML version</a>)</li> <li><a rel="nofollow" class="external text" href="http://polaris.cs.uiuc.edu/~padua/cs320/mpi/tutorial.pdf">Tutorial on MPI: The Message-Passing Interface</a></li> <li><a rel="nofollow" class="external text" href="http://moss.csc.ncsu.edu/~mueller/cluster/mpi.guide.pdf">A User's Guide to MPI</a></li> <li><a rel="nofollow" class="external text" href="https://www.citutor.org/bounce.php?course=21">Tutorial: Introduction to MPI (self-paced, includes self-tests and exercises)</a></li></ul> <div class="navbox-styles"><style data-mw-deduplicate="TemplateStyles:r1129693374">.mw-parser-output .hlist dl,.mw-parser-output .hlist ol,.mw-parser-output .hlist ul{margin:0;padding:0}.mw-parser-output .hlist dd,.mw-parser-output .hlist dt,.mw-parser-output .hlist li{margin:0;display:inline}.mw-parser-output .hlist.inline,.mw-parser-output .hlist.inline dl,.mw-parser-output .hlist.inline ol,.mw-parser-output .hlist.inline ul,.mw-parser-output .hlist dl dl,.mw-parser-output .hlist dl ol,.mw-parser-output .hlist dl ul,.mw-parser-output .hlist ol dl,.mw-parser-output .hlist ol ol,.mw-parser-output .hlist ol ul,.mw-parser-output .hlist ul dl,.mw-parser-output .hlist ul ol,.mw-parser-output .hlist ul ul{display:inline}.mw-parser-output .hlist .mw-empty-li{display:none}.mw-parser-output .hlist dt::after{content:": "}.mw-parser-output .hlist dd::after,.mw-parser-output .hlist li::after{content:" · ";font-weight:bold}.mw-parser-output .hlist dd:last-child::after,.mw-parser-output .hlist dt:last-child::after,.mw-parser-output .hlist li:last-child::after{content:none}.mw-parser-output .hlist dd dd:first-child::before,.mw-parser-output .hlist dd dt:first-child::before,.mw-parser-output .hlist dd li:first-child::before,.mw-parser-output .hlist dt dd:first-child::before,.mw-parser-output .hlist dt dt:first-child::before,.mw-parser-output .hlist dt li:first-child::before,.mw-parser-output .hlist li dd:first-child::before,.mw-parser-output .hlist li dt:first-child::before,.mw-parser-output .hlist li li:first-child::before{content:" (";font-weight:normal}.mw-parser-output .hlist dd dd:last-child::after,.mw-parser-output .hlist dd dt:last-child::after,.mw-parser-output .hlist dd li:last-child::after,.mw-parser-output .hlist dt dd:last-child::after,.mw-parser-output .hlist dt dt:last-child::after,.mw-parser-output .hlist dt li:last-child::after,.mw-parser-output .hlist li dd:last-child::after,.mw-parser-output .hlist li dt:last-child::after,.mw-parser-output .hlist li li:last-child::after{content:")";font-weight:normal}.mw-parser-output .hlist ol{counter-reset:listitem}.mw-parser-output .hlist ol>li{counter-increment:listitem}.mw-parser-output .hlist ol>li::before{content:" "counter(listitem)"\a0 "}.mw-parser-output .hlist dd ol>li:first-child::before,.mw-parser-output .hlist dt ol>li:first-child::before,.mw-parser-output .hlist li ol>li:first-child::before{content:" ("counter(listitem)"\a0 "}</style><style data-mw-deduplicate="TemplateStyles:r1236075235">.mw-parser-output .navbox{box-sizing:border-box;border:1px solid #a2a9b1;width:100%;clear:both;font-size:88%;text-align:center;padding:1px;margin:1em auto 0}.mw-parser-output .navbox .navbox{margin-top:0}.mw-parser-output .navbox+.navbox,.mw-parser-output .navbox+.navbox-styles+.navbox{margin-top:-1px}.mw-parser-output .navbox-inner,.mw-parser-output .navbox-subgroup{width:100%}.mw-parser-output .navbox-group,.mw-parser-output .navbox-title,.mw-parser-output .navbox-abovebelow{padding:0.25em 1em;line-height:1.5em;text-align:center}.mw-parser-output .navbox-group{white-space:nowrap;text-align:right}.mw-parser-output .navbox,.mw-parser-output .navbox-subgroup{background-color:#fdfdfd}.mw-parser-output .navbox-list{line-height:1.5em;border-color:#fdfdfd}.mw-parser-output .navbox-list-with-group{text-align:left;border-left-width:2px;border-left-style:solid}.mw-parser-output tr+tr>.navbox-abovebelow,.mw-parser-output tr+tr>.navbox-group,.mw-parser-output tr+tr>.navbox-image,.mw-parser-output tr+tr>.navbox-list{border-top:2px solid #fdfdfd}.mw-parser-output .navbox-title{background-color:#ccf}.mw-parser-output .navbox-abovebelow,.mw-parser-output .navbox-group,.mw-parser-output .navbox-subgroup .navbox-title{background-color:#ddf}.mw-parser-output .navbox-subgroup .navbox-group,.mw-parser-output .navbox-subgroup .navbox-abovebelow{background-color:#e6e6ff}.mw-parser-output .navbox-even{background-color:#f7f7f7}.mw-parser-output .navbox-odd{background-color:transparent}.mw-parser-output .navbox .hlist td dl,.mw-parser-output .navbox .hlist td ol,.mw-parser-output .navbox .hlist td ul,.mw-parser-output .navbox td.hlist dl,.mw-parser-output .navbox td.hlist ol,.mw-parser-output .navbox td.hlist ul{padding:0.125em 0}.mw-parser-output .navbox .navbar{display:block;font-size:100%}.mw-parser-output .navbox-title .navbar{float:left;text-align:left;margin-right:0.5em}body.skin--responsive .mw-parser-output .navbox-image img{max-width:none!important}@media print{body.ns-0 .mw-parser-output .navbox{display:none!important}}</style></div><div role="navigation" class="navbox" aria-labelledby="Parallel_computing" style="padding:3px"><table class="nowraplinks hlist mw-collapsible mw-collapsed navbox-inner" style="border-spacing:0;background:transparent;color:inherit"><tbody><tr><th scope="col" class="navbox-title" colspan="2"><link rel="mw-deduplicated-inline-style" href="mw-data:TemplateStyles:r1129693374"><style data-mw-deduplicate="TemplateStyles:r1239400231">.mw-parser-output .navbar{display:inline;font-size:88%;font-weight:normal}.mw-parser-output .navbar-collapse{float:left;text-align:left}.mw-parser-output .navbar-boxtext{word-spacing:0}.mw-parser-output .navbar ul{display:inline-block;white-space:nowrap;line-height:inherit}.mw-parser-output .navbar-brackets::before{margin-right:-0.125em;content:"[ "}.mw-parser-output .navbar-brackets::after{margin-left:-0.125em;content:" ]"}.mw-parser-output .navbar li{word-spacing:-0.125em}.mw-parser-output .navbar a>span,.mw-parser-output .navbar a>abbr{text-decoration:inherit}.mw-parser-output .navbar-mini abbr{font-variant:small-caps;border-bottom:none;text-decoration:none;cursor:inherit}.mw-parser-output .navbar-ct-full{font-size:114%;margin:0 7em}.mw-parser-output .navbar-ct-mini{font-size:114%;margin:0 4em}html.skin-theme-clientpref-night .mw-parser-output .navbar li a abbr{color:var(--color-base)!important}@media(prefers-color-scheme:dark){html.skin-theme-clientpref-os .mw-parser-output .navbar li a abbr{color:var(--color-base)!important}}@media print{.mw-parser-output .navbar{display:none!important}}</style><div class="navbar plainlinks hlist navbar-mini"><ul><li class="nv-view"><a href="/wiki/Template:Parallel_computing" title="Template:Parallel computing"><abbr title="View this template">v</abbr></a></li><li class="nv-talk"><a href="/wiki/Template_talk:Parallel_computing" title="Template talk:Parallel computing"><abbr title="Discuss this template">t</abbr></a></li><li class="nv-edit"><a href="/wiki/Special:EditPage/Template:Parallel_computing" title="Special:EditPage/Template:Parallel computing"><abbr title="Edit this template">e</abbr></a></li></ul></div><div id="Parallel_computing" style="font-size:114%;margin:0 4em"><a href="/wiki/Parallel_computing" title="Parallel computing">Parallel computing</a></div></th></tr><tr><th scope="row" class="navbox-group" style="width:1%">General</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Distributed_computing" title="Distributed computing">Distributed computing</a></li> <li><a href="/wiki/Parallel_computing" title="Parallel computing">Parallel computing</a></li> <li><a href="/wiki/Massively_parallel" title="Massively parallel">Massively parallel</a></li> <li><a href="/wiki/Cloud_computing" title="Cloud computing">Cloud computing</a></li> <li><a href="/wiki/High-performance_computing" title="High-performance computing">High-performance computing</a></li> <li><a href="/wiki/Multiprocessing" title="Multiprocessing">Multiprocessing</a></li> <li><a href="/wiki/Manycore_processor" title="Manycore processor">Manycore processor</a></li> <li><a href="/wiki/General-purpose_computing_on_graphics_processing_units" title="General-purpose computing on graphics processing units">GPGPU</a></li> <li><a href="/wiki/Computer_network" title="Computer network">Computer network</a></li> <li><a href="/wiki/Systolic_array" title="Systolic array">Systolic array</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Levels</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Bit-level_parallelism" title="Bit-level parallelism">Bit</a></li> <li><a href="/wiki/Instruction-level_parallelism" title="Instruction-level parallelism">Instruction</a></li> <li><a href="/wiki/Task_parallelism" title="Task parallelism">Thread</a></li> <li><a href="/wiki/Task_parallelism" title="Task parallelism">Task</a></li> <li><a href="/wiki/Data_parallelism" title="Data parallelism">Data</a></li> <li><a href="/wiki/Memory-level_parallelism" title="Memory-level parallelism">Memory</a></li> <li><a href="/wiki/Loop-level_parallelism" title="Loop-level parallelism">Loop</a></li> <li><a href="/wiki/Pipeline_(computing)" title="Pipeline (computing)">Pipeline</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Multithreading_(computer_architecture)" title="Multithreading (computer architecture)">Multithreading</a></th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Temporal_multithreading" title="Temporal multithreading">Temporal</a></li> <li><a href="/wiki/Simultaneous_multithreading" title="Simultaneous multithreading">Simultaneous</a> (SMT)</li> <li><a href="/wiki/Simultaneous_and_heterogeneous_multithreading" title="Simultaneous and heterogeneous multithreading">Simultaneous and heterogenous</a></li> <li><a href="/wiki/Speculative_multithreading" title="Speculative multithreading">Speculative</a> (SpMT)</li> <li><a href="/wiki/Preemption_(computing)" title="Preemption (computing)">Preemptive</a></li> <li><a href="/wiki/Computer_multitasking#Cooperative_multitasking" title="Computer multitasking">Cooperative</a></li> <li><a href="/wiki/Bulldozer_(microarchitecture)#Bulldozer_core" title="Bulldozer (microarchitecture)">Clustered multi-thread</a> (CMT)</li> <li><a href="/wiki/Hardware_scout" title="Hardware scout">Hardware scout</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Theory</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Parallel_RAM" title="Parallel RAM">PRAM model</a></li> <li><a href="/wiki/Parallel_external_memory" title="Parallel external memory">PEM model</a></li> <li><a href="/wiki/Analysis_of_parallel_algorithms" title="Analysis of parallel algorithms">Analysis of parallel algorithms</a></li> <li><a href="/wiki/Amdahl%27s_law" title="Amdahl&#39;s law">Amdahl's law</a></li> <li><a href="/wiki/Gustafson%27s_law" title="Gustafson&#39;s law">Gustafson's law</a></li> <li><a href="/wiki/Cost_efficiency" title="Cost efficiency">Cost efficiency</a></li> <li><a href="/wiki/Karp%E2%80%93Flatt_metric" title="Karp–Flatt metric">Karp–Flatt metric</a></li> <li><a href="/wiki/Parallel_slowdown" title="Parallel slowdown">Slowdown</a></li> <li><a href="/wiki/Speedup" title="Speedup">Speedup</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Elements</th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Process_(computing)" title="Process (computing)">Process</a></li> <li><a href="/wiki/Thread_(computing)" title="Thread (computing)">Thread</a></li> <li><a href="/wiki/Fiber_(computer_science)" title="Fiber (computer science)">Fiber</a></li> <li><a href="/wiki/Instruction_window" title="Instruction window">Instruction window</a></li> <li><a href="/wiki/Array_(data_structure)" title="Array (data structure)">Array</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Coordination</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Multiprocessing" title="Multiprocessing">Multiprocessing</a></li> <li><a href="/wiki/Memory_coherence" title="Memory coherence">Memory coherence</a></li> <li><a href="/wiki/Cache_coherence" title="Cache coherence">Cache coherence</a></li> <li><a href="/wiki/Cache_invalidation" title="Cache invalidation">Cache invalidation</a></li> <li><a href="/wiki/Barrier_(computer_science)" title="Barrier (computer science)">Barrier</a></li> <li><a href="/wiki/Synchronization_(computer_science)" title="Synchronization (computer science)">Synchronization</a></li> <li><a href="/wiki/Application_checkpointing" title="Application checkpointing">Application checkpointing</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Computer_programming" title="Computer programming">Programming</a></th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Stream_processing" title="Stream processing">Stream processing</a></li> <li><a href="/wiki/Dataflow_programming" title="Dataflow programming">Dataflow programming</a></li> <li><a href="/wiki/Parallel_programming_model" title="Parallel programming model">Models</a> <ul><li><a href="/wiki/Implicit_parallelism" title="Implicit parallelism">Implicit parallelism</a></li> <li><a href="/wiki/Explicit_parallelism" title="Explicit parallelism">Explicit parallelism</a></li> <li><a href="/wiki/Concurrency_(computer_science)" title="Concurrency (computer science)">Concurrency</a></li></ul></li> <li><a href="/wiki/Non-blocking_algorithm" title="Non-blocking algorithm">Non-blocking algorithm</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/Computer_hardware" title="Computer hardware">Hardware</a></th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Flynn%27s_taxonomy" title="Flynn&#39;s taxonomy">Flynn's taxonomy</a> <ul><li><a href="/wiki/Single_instruction,_single_data" title="Single instruction, single data">SISD</a></li> <li><a href="/wiki/Single_instruction,_multiple_data" title="Single instruction, multiple data">SIMD</a> <ul><li><a href="/wiki/Single_instruction,_multiple_threads" title="Single instruction, multiple threads">Array processing</a> (SIMT)</li> <li><a href="/wiki/Flynn%27s_taxonomy#Pipelined_processor" title="Flynn&#39;s taxonomy">Pipelined processing</a></li> <li><a href="/wiki/Flynn%27s_taxonomy#Associative_processor" title="Flynn&#39;s taxonomy">Associative processing</a></li></ul></li> <li><a href="/wiki/Multiple_instruction,_single_data" title="Multiple instruction, single data">MISD</a></li> <li><a href="/wiki/Multiple_instruction,_multiple_data" title="Multiple instruction, multiple data">MIMD</a></li></ul></li> <li><a href="/wiki/Dataflow_architecture" title="Dataflow architecture">Dataflow architecture</a></li> <li><a href="/wiki/Instruction_pipelining" title="Instruction pipelining">Pipelined processor</a></li> <li><a href="/wiki/Superscalar_processor" title="Superscalar processor">Superscalar processor</a></li> <li><a href="/wiki/Vector_processor" title="Vector processor">Vector processor</a></li> <li><a href="/wiki/Multiprocessing" title="Multiprocessing">Multiprocessor</a> <ul><li><a href="/wiki/Symmetric_multiprocessing" title="Symmetric multiprocessing">symmetric</a></li> <li><a href="/wiki/Asymmetric_multiprocessing" title="Asymmetric multiprocessing">asymmetric</a></li></ul></li> <li><a href="/wiki/Semiconductor_memory" title="Semiconductor memory">Memory</a> <ul><li><a href="/wiki/Shared_memory" title="Shared memory">shared</a></li> <li><a href="/wiki/Distributed_memory" title="Distributed memory">distributed</a></li> <li><a href="/wiki/Distributed_shared_memory" title="Distributed shared memory">distributed shared</a></li> <li><a href="/wiki/Uniform_memory_access" title="Uniform memory access">UMA</a></li> <li><a href="/wiki/Non-uniform_memory_access" title="Non-uniform memory access">NUMA</a></li> <li><a href="/wiki/Cache-only_memory_architecture" title="Cache-only memory architecture">COMA</a></li></ul></li> <li><a href="/wiki/Massively_parallel" title="Massively parallel">Massively parallel</a> computer</li> <li><a href="/wiki/Computer_cluster" title="Computer cluster">Computer cluster</a> <ul><li><a href="/wiki/Beowulf_cluster" title="Beowulf cluster">Beowulf cluster</a></li></ul></li> <li><a href="/wiki/Grid_computing" title="Grid computing">Grid computer</a></li> <li><a href="/wiki/Hardware_acceleration" title="Hardware acceleration">Hardware acceleration</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%"><a href="/wiki/API" title="API">APIs</a></th><td class="navbox-list-with-group navbox-list navbox-odd" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Ateji_PX" title="Ateji PX">Ateji PX</a></li> <li><a href="/wiki/Boost_(C%2B%2B_libraries)" title="Boost (C++ libraries)">Boost</a></li> <li><a href="/wiki/Chapel_(programming_language)" title="Chapel (programming language)">Chapel</a></li> <li><a href="/wiki/HPX" title="HPX">HPX</a></li> <li><a href="/wiki/Charm%2B%2B" title="Charm++">Charm++</a></li> <li><a href="/wiki/Cilk" title="Cilk">Cilk</a></li> <li><a href="/wiki/Coarray_Fortran" title="Coarray Fortran">Coarray Fortran</a></li> <li><a href="/wiki/CUDA" title="CUDA">CUDA</a></li> <li><a href="/wiki/Dryad_(programming)" title="Dryad (programming)">Dryad</a></li> <li><a href="/wiki/C%2B%2B_AMP" title="C++ AMP">C++ AMP</a></li> <li><a href="/wiki/Global_Arrays" title="Global Arrays">Global Arrays</a></li> <li><a href="/wiki/GPUOpen" title="GPUOpen">GPUOpen</a></li> <li><a class="mw-selflink selflink">MPI</a></li> <li><a href="/wiki/OpenMP" title="OpenMP">OpenMP</a></li> <li><a href="/wiki/OpenCL" title="OpenCL">OpenCL</a></li> <li><a href="/wiki/OpenHMPP" title="OpenHMPP">OpenHMPP</a></li> <li><a href="/wiki/OpenACC" title="OpenACC">OpenACC</a></li> <li><a href="/wiki/Parallel_Extensions" title="Parallel Extensions">Parallel Extensions</a></li> <li><a href="/wiki/Parallel_Virtual_Machine" title="Parallel Virtual Machine">PVM</a></li> <li><a href="/wiki/Pthreads" title="Pthreads">pthreads</a></li> <li><a href="/wiki/RaftLib" title="RaftLib">RaftLib</a></li> <li><a href="/wiki/ROCm" title="ROCm">ROCm</a></li> <li><a href="/wiki/Unified_Parallel_C" title="Unified Parallel C">UPC</a></li> <li><a href="/wiki/Threading_Building_Blocks" title="Threading Building Blocks">TBB</a></li> <li><a href="/wiki/ZPL_(programming_language)" class="mw-redirect" title="ZPL (programming language)">ZPL</a></li></ul> </div></td></tr><tr><th scope="row" class="navbox-group" style="width:1%">Problems</th><td class="navbox-list-with-group navbox-list navbox-even" style="width:100%;padding:0"><div style="padding:0 0.25em"> <ul><li><a href="/wiki/Automatic_parallelization" title="Automatic parallelization">Automatic parallelization</a></li> <li><a href="/wiki/Deadlock_(computer_science)" title="Deadlock (computer science)">Deadlock</a></li> <li><a href="/wiki/Deterministic_algorithm" title="Deterministic algorithm">Deterministic algorithm</a></li> <li><a href="/wiki/Embarrassingly_parallel" title="Embarrassingly parallel">Embarrassingly parallel</a></li> <li><a href="/wiki/Parallel_slowdown" title="Parallel slowdown">Parallel slowdown</a></li> <li><a href="/wiki/Race_condition" title="Race condition">Race condition</a></li> <li><a href="/wiki/Software_lockout" title="Software lockout">Software lockout</a></li> <li><a href="/wiki/Scalability" title="Scalability">Scalability</a></li> <li><a href="/wiki/Starvation_(computer_science)" title="Starvation (computer science)">Starvation</a></li></ul> </div></td></tr><tr><td class="navbox-abovebelow" colspan="2"><div> <ul><li><span class="noviewer" typeof="mw:File"><span title="Category"><img alt="" src="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/16px-Symbol_category_class.svg.png" decoding="async" width="16" height="16" class="mw-file-element" srcset="//upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/23px-Symbol_category_class.svg.png 1.5x, //upload.wikimedia.org/wikipedia/en/thumb/9/96/Symbol_category_class.svg/31px-Symbol_category_class.svg.png 2x" data-file-width="180" data-file-height="185" /></span></span>&#160;<a href="/wiki/Category:Parallel_computing" title="Category:Parallel computing">Category: Parallel computing</a></li></ul> </div></td></tr></tbody></table></div> <!-- NewPP limit report Parsed by mw‐web.codfw.main‐6b7f745dd4‐xmtkp Cached time: 20241125144758 Cache expiry: 2592000 Reduced expiry: false Complications: [vary‐revision‐sha1, show‐toc] CPU time usage: 0.823 seconds Real time usage: 0.995 seconds Preprocessor visited node count: 4676/1000000 Post‐expand include size: 130806/2097152 bytes Template argument size: 12149/2097152 bytes Highest expansion depth: 18/100 Expensive parser function count: 14/500 Unstrip recursion depth: 1/20 Unstrip post‐expand size: 176987/5000000 bytes Lua time usage: 0.485/10.000 seconds Lua memory usage: 7628242/52428800 bytes Number of Wikibase entities loaded: 1/400 --> <!-- Transclusion expansion time report (%,ms,calls,template) 100.00% 840.276 1 -total 40.42% 339.650 1 Template:Reflist 23.07% 193.880 21 Template:Cite_web 11.76% 98.858 1 Template:Parallel_computing 11.26% 94.652 1 Template:Navbox 9.55% 80.234 1 Template:Multiple_issues 9.52% 79.961 1 Template:Short_description 8.40% 70.581 16 Template:Main_other 7.59% 63.736 6 Template:Ambox 7.14% 60.022 3 Template:Update --> <!-- Saved in parser cache with key enwiki:pcache:idhash:221466-0!canonical and timestamp 20241125144758 and revision id 1258236261. Rendering was triggered because: page-view --> </div><!--esi <esi:include src="/esitest-fa8a495983347898/content" /> --><noscript><img src="https://login.wikimedia.org/wiki/Special:CentralAutoLogin/start?type=1x1" alt="" width="1" height="1" style="border: none; position: absolute;"></noscript> <div class="printfooter" data-nosnippet="">Retrieved from "<a dir="ltr" href="https://en.wikipedia.org/w/index.php?title=Message_Passing_Interface&amp;oldid=1258236261">https://en.wikipedia.org/w/index.php?title=Message_Passing_Interface&amp;oldid=1258236261</a>"</div></div> <div id="catlinks" class="catlinks" data-mw="interface"><div id="mw-normal-catlinks" class="mw-normal-catlinks"><a href="/wiki/Help:Category" title="Help:Category">Categories</a>: <ul><li><a href="/wiki/Category:Application_programming_interfaces" title="Category:Application programming interfaces">Application programming interfaces</a></li><li><a href="/wiki/Category:Parallel_computing" title="Category:Parallel computing">Parallel computing</a></li></ul></div><div id="mw-hidden-catlinks" class="mw-hidden-catlinks mw-hidden-cats-hidden">Hidden categories: <ul><li><a href="/wiki/Category:Harv_and_Sfn_no-target_errors" title="Category:Harv and Sfn no-target errors">Harv and Sfn no-target errors</a></li><li><a href="/wiki/Category:Articles_with_short_description" title="Category:Articles with short description">Articles with short description</a></li><li><a href="/wiki/Category:Short_description_matches_Wikidata" title="Category:Short description matches Wikidata">Short description matches Wikidata</a></li><li><a href="/wiki/Category:Wikipedia_articles_in_need_of_updating_from_October_2021" title="Category:Wikipedia articles in need of updating from October 2021">Wikipedia articles in need of updating from October 2021</a></li><li><a href="/wiki/Category:All_Wikipedia_articles_in_need_of_updating" title="Category:All Wikipedia articles in need of updating">All Wikipedia articles in need of updating</a></li><li><a href="/wiki/Category:Wikipedia_articles_in_need_of_updating_from_May_2024" title="Category:Wikipedia articles in need of updating from May 2024">Wikipedia articles in need of updating from May 2024</a></li><li><a href="/wiki/Category:Articles_with_multiple_maintenance_issues" title="Category:Articles with multiple maintenance issues">Articles with multiple maintenance issues</a></li><li><a href="/wiki/Category:Wikipedia_articles_in_need_of_updating_from_August_2022" title="Category:Wikipedia articles in need of updating from August 2022">Wikipedia articles in need of updating from August 2022</a></li><li><a href="/wiki/Category:Articles_needing_additional_references_from_July_2021" title="Category:Articles needing additional references from July 2021">Articles needing additional references from July 2021</a></li><li><a href="/wiki/Category:All_articles_needing_additional_references" title="Category:All articles needing additional references">All articles needing additional references</a></li><li><a href="/wiki/Category:Wikipedia_articles_needing_clarification_from_April_2015" title="Category:Wikipedia articles needing clarification from April 2015">Wikipedia articles needing clarification from April 2015</a></li><li><a href="/wiki/Category:Articles_to_be_expanded_from_June_2008" title="Category:Articles to be expanded from June 2008">Articles to be expanded from June 2008</a></li><li><a href="/wiki/Category:All_articles_to_be_expanded" title="Category:All articles to be expanded">All articles to be expanded</a></li><li><a href="/wiki/Category:All_articles_with_unsourced_statements" title="Category:All articles with unsourced statements">All articles with unsourced statements</a></li><li><a href="/wiki/Category:Articles_with_unsourced_statements_from_January_2011" title="Category:Articles with unsourced statements from January 2011">Articles with unsourced statements from January 2011</a></li><li><a href="/wiki/Category:Articles_with_example_C_code" title="Category:Articles with example C code">Articles with example C code</a></li></ul></div></div> </div> </main> </div> <div class="mw-footer-container"> <footer id="footer" class="mw-footer" > <ul id="footer-info"> <li id="footer-info-lastmod"> This page was last edited on 18 November 2024, at 21:53<span class="anonymous-show">&#160;(UTC)</span>.</li> <li id="footer-info-copyright">Text is available under the <a href="/wiki/Wikipedia:Text_of_the_Creative_Commons_Attribution-ShareAlike_4.0_International_License" title="Wikipedia:Text of the Creative Commons Attribution-ShareAlike 4.0 International License">Creative Commons Attribution-ShareAlike 4.0 License</a>; additional terms may apply. By using this site, you agree to the <a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Terms_of_Use" class="extiw" title="foundation:Special:MyLanguage/Policy:Terms of Use">Terms of Use</a> and <a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Privacy_policy" class="extiw" title="foundation:Special:MyLanguage/Policy:Privacy policy">Privacy Policy</a>. Wikipedia® is a registered trademark of the <a rel="nofollow" class="external text" href="https://wikimediafoundation.org/">Wikimedia Foundation, Inc.</a>, a non-profit organization.</li> </ul> <ul id="footer-places"> <li id="footer-places-privacy"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Privacy_policy">Privacy policy</a></li> <li id="footer-places-about"><a href="/wiki/Wikipedia:About">About Wikipedia</a></li> <li id="footer-places-disclaimers"><a href="/wiki/Wikipedia:General_disclaimer">Disclaimers</a></li> <li id="footer-places-contact"><a href="//en.wikipedia.org/wiki/Wikipedia:Contact_us">Contact Wikipedia</a></li> <li id="footer-places-wm-codeofconduct"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Universal_Code_of_Conduct">Code of Conduct</a></li> <li id="footer-places-developers"><a href="https://developer.wikimedia.org">Developers</a></li> <li id="footer-places-statslink"><a href="https://stats.wikimedia.org/#/en.wikipedia.org">Statistics</a></li> <li id="footer-places-cookiestatement"><a href="https://foundation.wikimedia.org/wiki/Special:MyLanguage/Policy:Cookie_statement">Cookie statement</a></li> <li id="footer-places-mobileview"><a href="//en.m.wikipedia.org/w/index.php?title=Message_Passing_Interface&amp;mobileaction=toggle_view_mobile" class="noprint stopMobileRedirectToggle">Mobile view</a></li> </ul> <ul id="footer-icons" class="noprint"> <li id="footer-copyrightico"><a href="https://wikimediafoundation.org/" class="cdx-button cdx-button--fake-button cdx-button--size-large cdx-button--fake-button--enabled"><img src="/static/images/footer/wikimedia-button.svg" width="84" height="29" alt="Wikimedia Foundation" loading="lazy"></a></li> <li id="footer-poweredbyico"><a href="https://www.mediawiki.org/" class="cdx-button cdx-button--fake-button cdx-button--size-large cdx-button--fake-button--enabled"><img src="/w/resources/assets/poweredby_mediawiki.svg" alt="Powered by MediaWiki" width="88" height="31" loading="lazy"></a></li> </ul> </footer> </div> </div> </div> <div class="vector-settings" id="p-dock-bottom"> <ul></ul> </div><script>(RLQ=window.RLQ||[]).push(function(){mw.config.set({"wgHostname":"mw-web.codfw.main-694cf4987f-dvcv7","wgBackendResponseTime":165,"wgPageParseReport":{"limitreport":{"cputime":"0.823","walltime":"0.995","ppvisitednodes":{"value":4676,"limit":1000000},"postexpandincludesize":{"value":130806,"limit":2097152},"templateargumentsize":{"value":12149,"limit":2097152},"expansiondepth":{"value":18,"limit":100},"expensivefunctioncount":{"value":14,"limit":500},"unstrip-depth":{"value":1,"limit":20},"unstrip-size":{"value":176987,"limit":5000000},"entityaccesscount":{"value":1,"limit":400},"timingprofile":["100.00% 840.276 1 -total"," 40.42% 339.650 1 Template:Reflist"," 23.07% 193.880 21 Template:Cite_web"," 11.76% 98.858 1 Template:Parallel_computing"," 11.26% 94.652 1 Template:Navbox"," 9.55% 80.234 1 Template:Multiple_issues"," 9.52% 79.961 1 Template:Short_description"," 8.40% 70.581 16 Template:Main_other"," 7.59% 63.736 6 Template:Ambox"," 7.14% 60.022 3 Template:Update"]},"scribunto":{"limitreport-timeusage":{"value":"0.485","limit":"10.000"},"limitreport-memusage":{"value":7628242,"limit":52428800},"limitreport-logs":"anchor_id_list = table#1 {\n [\"CITEREFChenOstrouchovSchmidtPatel2012\"] = 1,\n [\"CITEREFChenSunThakurRoth2011\"] = 1,\n [\"CITEREFGroppLuskSkjellum1994\"] = 1,\n [\"CITEREFGroppLuskSkjellum1996\"] = 1,\n [\"CITEREFGroppLuskSkjellum1999a\"] = 1,\n [\"CITEREFGroppLuskSkjellum1999b\"] = 1,\n [\"CITEREFGroppLuskSkjellum2014\"] = 1,\n [\"CITEREFNielsen2016\"] = 1,\n [\"CITEREFSurKoopPanda2017\"] = 1,\n [\"CITEREFTeng_WangKevin_VaskoZhuo_LiuHui_Chen2016\"] = 1,\n [\"CITEREFThe_MPI_Forum,_CORPORATE1993\"] = 1,\n [\"CITEREFWalker_DW1992\"] = 1,\n [\"CITEREFWangVaskoLiuChen2014\"] = 1,\n [\"CITEREFYu2002\"] = 1,\n [\"CITEREFcea-hpc\"] = 1,\n [\"VERSIONS\"] = 1,\n}\ntemplate_list = table#1 {\n [\"Anchor\"] = 1,\n [\"Citation\"] = 1,\n [\"Citation needed\"] = 1,\n [\"Cite book\"] = 8,\n [\"Cite conference\"] = 1,\n [\"Cite journal\"] = 3,\n [\"Cite report\"] = 1,\n [\"Cite web\"] = 21,\n [\"Clarify\"] = 1,\n [\"Div col\"] = 2,\n [\"Div col end\"] = 2,\n [\"Expand section\"] = 2,\n [\"FOLDOC\"] = 1,\n [\"Harvnb\"] = 4,\n [\"ISBN\"] = 5,\n [\"Multiple issues\"] = 1,\n [\"Official website\"] = 1,\n [\"Parallel computing\"] = 1,\n [\"Reflist\"] = 1,\n [\"Short description\"] = 1,\n [\"Unreferenced section\"] = 1,\n [\"Update\"] = 3,\n [\"Wikibooks\"] = 1,\n}\narticle_whitelist = table#1 {\n}\n"},"cachereport":{"origin":"mw-web.codfw.main-6b7f745dd4-xmtkp","timestamp":"20241125144758","ttl":2592000,"transientcontent":false}}});});</script> <script type="application/ld+json">{"@context":"https:\/\/schema.org","@type":"Article","name":"Message Passing Interface","url":"https:\/\/en.wikipedia.org\/wiki\/Message_Passing_Interface","sameAs":"http:\/\/www.wikidata.org\/entity\/Q127879","mainEntity":"http:\/\/www.wikidata.org\/entity\/Q127879","author":{"@type":"Organization","name":"Contributors to Wikimedia projects"},"publisher":{"@type":"Organization","name":"Wikimedia Foundation, Inc.","logo":{"@type":"ImageObject","url":"https:\/\/www.wikimedia.org\/static\/images\/wmf-hor-googpub.png"}},"datePublished":"2003-05-04T18:55:49Z","dateModified":"2024-11-18T21:53:38Z","headline":"message-passing system for parallel computers"}</script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10