Bayesian statistics

--> <div id="siteSub">From Scholarpedia</div> <!-- /tagline --> <!-- subtitle --> <div id="contentSub"><span class="subpages"><table class="cp-citation-subtitle" width="100%" cellpadding="0" cellspacing="0" border="0"> <tr valign="bottom"> <td align="left">David Spiegelhalter and Kenneth Rice (2009), Scholarpedia, 4(8):5230.</td> <td align="center"><a href="http://dx.doi.org/10.4249/scholarpedia.5230">doi:10.4249/scholarpedia.5230</a></td> <td align="right">revision #185711 [<a href="/w/index.php?title=Bayesian_statistics&amp;action=cite&amp;rev=185711" title="Bayesian statistics">link to/cite this article</a>]</td> </tr> </table> </span></div> <!-- /subtitle --> <!-- jumpto --> <div id="jump-to-nav" class="mw-jump"> Jump to: <a href="#mw-head">navigation</a>, <a href="#p-search">search</a> </div> <!-- /jumpto --> <!-- bodycontent --> <div id="mw-content-text" lang="en" dir="ltr" class="mw-content-ltr"><div class="cp-box-container"><div class="cp-curator-box noprint"><b><u>Post-publication activity</u></b><br /><button class="cp-button btn"></button><p><span class="cp-title-label">Curator:</span> <a href="/article/User:David_Spiegelhalter" title="User:David Spiegelhalter">David Spiegelhalter</a> </p><div class="cp-assistants hidden"><div><span class="cp-title-label">Contributors:</span><p>&nbsp;</p></div><div><span>0.50 - </span><p><a href="/article/User:Kenneth_Rice" title="User:Kenneth Rice">Kenneth Rice</a> </p></div><div><span>0.33 - </span><p><a href="/article/User:Eugene_M._Izhikevich" title="User:Eugene M. Izhikevich">Eugene M. Izhikevich</a> </p></div><div><span>0.17 - </span><p><a href="/article/User:David_Regis" title="User:David Regis">David Regis</a> </p></div><div><span></span><p><a href="/article/User:Nick_Orbeck" title="User:Nick Orbeck">Nick Orbeck</a> </p></div><div><span></span><p><a href="/article/User:Michael_Hardy" title="User:Michael Hardy">Michael Hardy</a> </p></div><div><span></span><p><a href="/article/User:Andrew_Gelman" title="User:Andrew Gelman">Andrew Gelman</a> </p></div><div><span></span><p><a href="/article/User:Robert_MacKay" title="User:Robert MacKay">Robert MacKay</a> </p></div></div></div></div><div class="cp-author-order"><ul id="sp_authors"><li id="sort-1"><p><a href="/article/User:David_Spiegelhalter" title="User:David Spiegelhalter"><span class="bold">Dr. David Spiegelhalter</span>, Cambridge University, UK</a> </p></li><li id="sort-2"><p><a href="/article/User:Kenneth_Rice" title="User:Kenneth Rice"><span class="bold">Kenneth Rice</span>, University of Washington, Seattle, WA, USA</a> </p></li></ul></div><p><strong><span class="tex2jax_ignore">Bayesian statistics</span></strong> is a system for describing epistemological uncertainty using the mathematical <span class="tex2jax_ignore">language</span> of <a href="/w/index.php?title=Catgeory:Probability_Theory&amp;action=edit&amp;redlink=1" class="new" title="Catgeory:Probability Theory (page does not exist)">probability</a>. In the 'Bayesian paradigm,' degrees of belief in states of nature are specified; these are non-negative, and the total belief in all states of nature is fixed to be one. Bayesian statistical methods start with existing 'prior' beliefs, and update these using data to give 'posterior' beliefs, which may be used as the basis for inferential decisions. 
</p> <table id="toc" class="toc"><tr><td><div id="toctitle"><h2>Contents</h2></div> <ul> <li class="toclevel-1 tocsection-1"><a href="#Background"><span class="tocnumber">1</span> <span class="toctext">Background</span></a></li> <li class="toclevel-1 tocsection-2"><a href="#Bayes.27_Theorem"><span class="tocnumber">2</span> <span class="toctext">Bayes' Theorem</span></a></li> <li class="toclevel-1 tocsection-3"><a href="#Use_of_Bayes.27_Theorem:_a_simple_example"><span class="tocnumber">3</span> <span class="toctext">Use of Bayes' Theorem: a simple example</span></a></li> <li class="toclevel-1 tocsection-4"><a href="#Prior_distributions"><span class="tocnumber">4</span> <span class="toctext">Prior distributions</span></a></li> <li class="toclevel-1 tocsection-5"><a href="#Prediction"><span class="tocnumber">5</span> <span class="toctext">Prediction</span></a></li> <li class="toclevel-1 tocsection-6"><a href="#Making_Bayesian_Decisions"><span class="tocnumber">6</span> <span class="toctext">Making Bayesian Decisions</span></a></li> <li class="toclevel-1 tocsection-7"><a href="#Computation_for_Bayesian_statistics"><span class="tocnumber">7</span> <span class="toctext">Computation for Bayesian statistics</span></a> <ul> <li class="toclevel-2 tocsection-8"><a href="#The_Monte_Carlo_method"><span class="tocnumber">7.1</span> <span class="toctext">The Monte Carlo method</span></a></li> <li class="toclevel-2 tocsection-9"><a href="#Markov_Chain_Monte_Carlo_.28MCMC.29"><span class="tocnumber">7.2</span> <span class="toctext">Markov Chain Monte Carlo (MCMC)</span></a></li> </ul> </li> <li class="toclevel-1 tocsection-10"><a href="#Applications_of_Bayesian_statistical_methods"><span class="tocnumber">8</span> <span class="toctext">Applications of Bayesian statistical methods</span></a></li> <li class="toclevel-1 tocsection-11"><a href="#Open_Areas_in_Bayesian_Statistics"><span class="tocnumber">9</span> <span class="toctext">Open Areas in Bayesian Statistics</span></a> <ul> <li class="toclevel-2 tocsection-12"><a href="#Hypothesis_testing_and_model_choice"><span class="tocnumber">9.1</span> <span class="toctext">Hypothesis testing and model choice</span></a></li> <li class="toclevel-2 tocsection-13"><a href="#Robustness_and_reporting"><span class="tocnumber">9.2</span> <span class="toctext">Robustness and reporting</span></a></li> <li class="toclevel-2 tocsection-14"><a href="#Model_criticism"><span class="tocnumber">9.3</span> <span class="toctext">Model criticism</span></a></li> </ul> </li> <li class="toclevel-1 tocsection-15"><a href="#Connections_and_comparisons_with_other_schools_of_statistical_inference"><span class="tocnumber">10</span> <span class="toctext">Connections and comparisons with other schools of statistical inference</span></a></li> <li class="toclevel-1 tocsection-16"><a href="#References"><span class="tocnumber">11</span> <span class="toctext">References</span></a></li> <li class="toclevel-1 tocsection-17"><a href="#Further_reading"><span class="tocnumber">12</span> <span class="toctext">Further reading</span></a></li> <li class="toclevel-1 tocsection-18"><a href="#See_also"><span class="tocnumber">13</span> <span class="toctext">See also</span></a></li> </ul> </td></tr></table> <h3> <span class="mw-headline" id="Background"> Background </span></h3> <p>In 1763, Thomas Bayes published a paper on the problem of <i>induction</i>, that is, arguing from the specific to the general. 
In modern language and notation, Bayes wanted to use Binomial data comprising \(r\) successes out of \(n\) attempts to learn about the underlying chance \(\theta\) of each attempt succeeding. Bayes' key contribution was to use a probability distribution to represent uncertainty about \(\theta\ .\) This distribution represents 'epistemological' uncertainty, due to lack of knowledge about the world, rather than 'aleatory' probability arising from the essential unpredictability of future events, as may be familiar from games of chance.

Modern 'Bayesian statistics' is still based on formulating probability distributions to express uncertainty about unknown quantities. These can be underlying parameters of a system (induction) or future observations (prediction).

Bayes' Theorem

In its raw form, Bayes' Theorem is a result in conditional probability, stating that for two random quantities \(y\) and \(\theta\ ,\)
\[ p(\theta|y) = p(y|\theta) p(\theta) / p(y),\]
where \(p(\cdot)\) denotes a probability distribution, and \(p(\cdot|\cdot)\) a conditional distribution. When \(y\) represents data and \(\theta\) represents parameters in a statistical model, Bayes' Theorem provides the basis for Bayesian inference. The 'prior' distribution \(p(\theta)\) (epistemological uncertainty) is combined with the 'likelihood' \(p(y|\theta)\) to give a 'posterior' distribution \(p(\theta|y)\) (updated epistemological uncertainty): the likelihood is derived from an aleatory sampling model \(p(y|\theta)\ ,\) but considered as a function of \(\theta\) for fixed \(y\ .\)

While the theorem itself is innocuous, practical use of the Bayesian approach requires consideration of complex practical issues, including the source of the prior distribution, the choice of a likelihood function, computation and summary of the posterior distribution in high-dimensional problems, and making a convincing presentation of the analysis.

Bayes' Theorem can be thought of as a way of coherently updating our uncertainty in the light of new evidence. The use of a probability distribution as a 'language' to express our uncertainty is not an arbitrary choice: it can in fact be determined from deeper principles of logical reasoning or rational behavior; see Jaynes (2003) or Lindley (1953). In particular, De Finetti (1937) showed that making a qualitative assumption of exchangeability of binary observations (i.e. that their joint distribution is unaffected by label-permutation) is equivalent to assuming they are each independent conditional on some unknown parameter \(\theta\ ,\) where \(\theta\) has a prior distribution and is the limiting frequency with which the events occur.
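The mechanics of the theorem are easy to demonstrate on a discrete parameter space. The following minimal Python sketch, with toy numbers of our own rather than anything from the article, updates a three-point prior for a Poisson rate:

    # Bayes' Theorem on a discrete parameter space (toy numbers for illustration).
    from math import exp, factorial

    thetas = [2.0, 5.0, 10.0]   # three candidate rates per 10,000 bed-days
    prior = [0.3, 0.4, 0.3]     # p(theta); non-negative and summing to one
    y, N = 20, 4.0              # observed count and exposure (in 10,000 bed-days)

    def poisson_pmf(k, mean):
        return mean**k * exp(-mean) / factorial(k)

    likelihood = [poisson_pmf(y, th * N) for th in thetas]             # p(y|theta)
    evidence = sum(l * p for l, p in zip(likelihood, prior))           # p(y)
    posterior = [l * p / evidence for l, p in zip(likelihood, prior)]  # p(theta|y)
    print(posterior)  # belief concentrates on theta = 5, the value best supported by y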
Use of Bayes' Theorem: a simple example

[Figure 1: Prior, likelihood and posterior distributions for \(\theta\ ,\) the rate of infections per 10,000 bed-days. The posterior distribution is a formal compromise between the likelihood, summarizing the evidence in the data alone, and the prior distribution, which summarizes external evidence suggesting higher rates.]

Suppose a hospital has around 200 beds occupied each day, and we want to know the underlying risk that a patient will be infected by MRSA (methicillin-resistant Staphylococcus aureus). Looking back at the first six months of the year, we count \(y=\) 20 infections in 40,000 bed-days. A simple estimate of the underlying risk \(\theta\) would be 20/40,000 \(=\) 5 infections per 10,000 bed-days. This is also the maximum-likelihood estimate, if we assume that the observation \(y\) is drawn from a Poisson distribution with mean \(\theta N\ ,\) where \(N = 4\) is the number of bed-days/10,000, so that
\[p(y|\theta) = (\theta N)^y e^{-\theta N}/y!\ .\]

However, other evidence about the underlying risk may exist, such as the previous year's rates or rates in similar hospitals, which may be included as part of a hierarchical model (see below). Suppose this other information, on its own, suggests plausible values of \(\theta\) of around 10 per 10,000, with 95% of the support for \(\theta\) lying between 5 and 17. This judgement about \(\theta\) may be expressed as a prior probability distribution. Say, for convenience, the Gamma\((a,b)\) family of distributions is chosen to formally describe our knowledge about \(\theta\ .\) This family has density
\[p(\theta) = b^a \theta^{a-1}e^{-b\theta}/\Gamma(a)\ ;\]
choosing \(a=10\) and \(b=1\) gives a prior distribution with appropriate properties, as shown in Figure 1.

Figure 1 also shows a density proportional to the likelihood function, under the assumed Poisson model. Using Bayes' Theorem, the posterior distribution \(p(\theta|y)\) is
\[\propto \theta^y e^{-\theta N} \theta^{a-1}e^{-b\theta} \propto \theta^{y+a-1}e^{-\theta (N+b)}\ ,\]
i.e. a Gamma\((y+a,N+b)\) distribution. This closed-form posterior, within the same parametric family as the prior, is an example of a conjugate Bayesian analysis. Figure 1 shows that this posterior is primarily influenced by the likelihood function but is 'shrunk' towards the prior distribution, to reflect that the expectation based on external evidence was of a higher rate than that actually observed. This can be thought of as an automatic adjustment for 'regression to the mean', in that the prior distribution will tend to counteract chance highs or lows in the data.
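This conjugate update is easy to verify numerically. A minimal sketch in Python (assuming scipy is available; the variable names are ours) computes the Gamma posterior for the article's values \(a=10\ ,\) \(b=1\ ,\) \(y=20\ ,\) \(N=4\ :\)

    # Conjugate Gamma-Poisson update for the MRSA example.
    from scipy import stats

    a, b = 10.0, 1.0   # Gamma(a, b) prior: mean a/b = 10 per 10,000 bed-days
    y, N = 20, 4.0     # 20 infections in N = 4 units of 10,000 bed-days

    prior = stats.gamma(a, scale=1.0 / b)                # p(theta)
    posterior = stats.gamma(a + y, scale=1.0 / (b + N))  # Gamma(y + a, N + b)

    print("prior mean:    ", prior.mean())       # 10.0
    print("posterior mean:", posterior.mean())   # 6.0
    print("posterior 95% interval:", posterior.interval(0.95))

The posterior mean of 6 per 10,000 bed-days lies between the maximum-likelihood estimate of 5 and the prior mean of 10, displaying the shrinkage described above.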
Prior distributions

The prior distribution is central to Bayesian statistics and yet remains controversial unless there is a physical sampling mechanism to justify a choice of \(p(\theta)\ .\) One option is to seek 'objective' prior distributions that can be used in situations where judgemental input is supposed to be minimized, such as in scientific publications. While progress has been made in objective Bayes methods for simple situations, a universal theory of priors that represent zero or minimal information has been elusive.

A complete alternative is the fully subjectivist position, which compels one to elicit priors on all parameters based on the personal judgement of appropriate individuals. A pragmatic compromise recognizes that Bayesian statistical analyses must usually be justified to external bodies, and that the prior distribution should therefore, as far as possible, be based on convincing external evidence or at least be guaranteed to be weakly informative: of course, exactly the same holds for the choice of functional form for the sampling distribution, which is also a subject of judgement and needs to be justified. Bayesian analysis is perhaps best seen as a process for obtaining posterior distributions or predictions based on a range of assumptions about both prior distributions and likelihoods: arguing in this way, sensitivity analysis and reasoned justification for both prior and likelihood become vital.

Sets of prior distributions can themselves share unknown parameters, forming hierarchical models. These feature strongly within applied Bayesian analysis and provide a powerful basis for pooling evidence from multiple sources in order to reach more precise conclusions. Essentially, a compromise is reached between the two extremes of assuming the sources are estimating (a) precisely the same, or (b) totally unrelated, parameters. The degree of pooling is itself estimated from the data according to the similarity of the sources, but this does not avoid the need for careful judgement about whether the sources are indeed exchangeable, in the sense that we have no external reasons to believe that certain sources are systematically different from others; a small numerical sketch of this pooling follows.
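To make the pooling effect concrete, here is an empirical-Bayes sketch with invented data for five hospitals: the shared Gamma prior's parameters are estimated from the crude rates by moment-matching, whereas a full hierarchical analysis would place a prior on \((a,b)\) and integrate it out, typically by MCMC.

    # Partial pooling of hospital infection rates via a shared Gamma prior
    # (empirical-Bayes approximation to a hierarchical model; data invented).
    ys = [20, 35, 12, 41, 18]       # infection counts
    Ns = [4.0, 4.0, 2.0, 5.0, 3.0]  # exposures, in units of 10,000 bed-days

    rates = [y / n for y, n in zip(ys, Ns)]            # crude per-hospital rates
    m = sum(rates) / len(rates)                        # match Gamma(a, b) moments:
    v = sum((r - m) ** 2 for r in rates) / len(rates)  # mean a/b and variance a/b^2
    b = m / v
    a = m * b

    # Each hospital's posterior mean (y_i + a) / (N_i + b) is shrunk towards m.
    for y, n, r in zip(ys, Ns, rates):
        print(f"crude rate {r:5.2f} -> pooled estimate {(y + a) / (n + b):5.2f}")

The moment-matching here crudely ignores the Poisson sampling noise in the crude rates; it is meant only to display the shrinkage behaviour described above.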
Prediction

One of the strengths of the Bayesian paradigm is its ease in making predictions. If current uncertainty about \(\theta\) is summarized by a posterior distribution \(p(\theta|y)\ ,\) a predictive distribution for any quantity \(z\) that depends on \(\theta\) through a sampling distribution \(p(z|\theta)\) can be obtained as
\[p(z|y) = \int p(z|\theta) p(\theta|y)\,\,d\theta\ ,\]
provided that \(y\) and \(z\) are conditionally independent given \(\theta\ ,\) which will generally hold except in time series or spatial models.

In the MRSA example above, suppose we wanted to predict the number of infections \(z\) over the next six months, or 40,000 bed-days. This prediction is given by
\[p(z|y) = \int \frac{(\theta N)^z e^{-\theta N}}{z!} \,\,\, \frac{(N+b)^{y+a} \theta^{y+a-1} e^{-\theta (N+b)}}{\Gamma(y+a)} \,\,d\theta = \frac{\Gamma(z+y+a)}{\Gamma(y+a)z!} p^{y+a}(1-p)^z\ ,\]
where \(p = (N+b)/(2N+b)\ .\) This Negative Binomial predictive distribution for \(z\) is shown in Figure 2.

[Figure 2: Predictive distribution for the number of infections in the next six months: a Negative Binomial\((y+a,\frac{b+N}{b+2N})\) distribution with \(a =\) 10, \(b =\) 1, \(y =\) 20, \(N =\) 4. The mean is 24 and the standard deviation is 6.6, and the probability that there are more than 20 infections is about 70%. Essentially, more infections are predicted for the second six months, because external evidence suggests the observations were lucky in the first half of the year.]
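The algebra can be checked numerically: scipy's nbinom distribution uses exactly this parameterization, counting \(z\) 'failures' with success probability \(p\ .\) A sketch with the article's values:

    # Posterior predictive for the next six months:
    # Negative Binomial(y + a, p) with p = (N + b) / (2N + b), as derived above.
    from scipy import stats

    a, b, y, N = 10.0, 1.0, 20, 4.0
    p = (N + b) / (2 * N + b)      # 5/9
    pred = stats.nbinom(y + a, p)  # predictive distribution for z

    print("mean:", pred.mean())            # 24.0
    print("sd:  ", pred.std())             # about 6.6
    print("P(z > 20):", 1 - pred.cdf(20))  # roughly 0.70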
Making Bayesian Decisions

For inference, a full report of the posterior distribution is the correct and final conclusion of a statistical analysis. However, this may be impractical, particularly when the posterior is high-dimensional. Instead, posterior summaries are commonly reported, for example the posterior mean and variance, or particular tail areas. If the analysis is performed with the goal of making a specific decision, measures of utility, or loss functions, can be used to derive the posterior summary that is the 'best' decision, given the data.

In decision theory, the loss function describes how bad a particular decision would be, given a true state of nature. Given a particular posterior, the Bayes rule is the decision which minimizes the expected loss with respect to that posterior. If a rule is admissible (meaning that no other rule does at least as well for every state of nature and strictly better for some), it can be shown to be a Bayes rule for some proper prior and utility function.

Many intuitively-reasonable summaries of posteriors can also be motivated as Bayes rules. The posterior mean for some parameter \(\theta\) is the Bayes rule when the loss function is the square of the distance from \(\theta\) to the decision. As noted, for example, by Schervish (1995), quantile-based credible intervals can be justified as a Bayes rule for a bivariate decision problem, and Highest Posterior Density intervals can be justified as a Bayes rule for a set-valued decision problem.

As a specific example, suppose we had to provide a point prediction for the number of MRSA cases in the next 6 months. For every case that we over-estimate, we will lose 10 units of wasted resources, but for every case that we under-estimate we will lose 50 units through having to make emergency provision. Our selected estimate is that \(t\) which will minimise the expected total cost, given by
\[ \sum_{z=0}^{t-1} 10(t-z)p(z|y) + \sum_{z=t+1}^\infty 50(z-t)p(z|y)\ .\]

The optimal choice of \(t\) can be calculated to be 30, considerably more than the expected value 24, reflecting our fear of under-estimation.
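This calculation can be reproduced by direct search over the expected loss, using the Negative Binomial predictive from above (a sketch assuming scipy; the truncation point of the infinite sum is our choice):

    # Find the point prediction t minimising expected loss: 10 units per
    # over-estimated case, 50 units per under-estimated case.
    from scipy import stats

    a, b, y, N = 10.0, 1.0, 20, 4.0
    pred = stats.nbinom(y + a, (N + b) / (2 * N + b))

    def expected_loss(t, zmax=300):  # truncate the sum; tail mass beyond zmax is negligible
        return sum((10 * (t - z) if z <= t else 50 * (z - t)) * pred.pmf(z)
                   for z in range(zmax + 1))

    print(min(range(60), key=expected_loss))  # 30: well above the predictive mean of 24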
Computation for Bayesian statistics

Bayesian analysis requires evaluating expectations of functions of random quantities as a basis for inference, where these quantities may have posterior distributions which are multivariate or of complex form, or often both. This meant that for many years Bayesian statistics was essentially restricted to conjugate analysis, where the mathematical forms of the prior and likelihood are jointly chosen to ensure that the posterior may be evaluated with ease. Numerical integration methods based on analytic approximations or quadrature were developed in the 1970s and 1980s with some success, but a revolutionary change occurred in the early 1990s with the adoption of indirect simulation methods, notably Markov chain Monte Carlo (MCMC).

The Monte Carlo method

Any posterior distribution \(p(\theta|y)\) may be approximated by taking a very large random sample of realizations of \(\theta\) from \(p(\theta|y)\ ;\) properties of \(p(\theta|y)\) may then be approximated by the corresponding summaries of the realizations. For example, the posterior mean and variance of \(\theta\) may be approximated by the mean and variance of a large number of realizations from \(p(\theta|y)\ .\) Similarly, quantiles of the realizations estimate quantiles of the posterior, and the mode of a smoothed histogram of the realizations may be used to estimate the posterior mode.

Samples from the posterior can be generated in several ways, without exact knowledge of \(p(\theta|y)\ .\) Direct methods include rejection sampling, which generates independent proposals for \(\theta\) and accepts them at a rate whereby those retained are distributed according to the desired posterior. Importance sampling can also be used to numerically evaluate relevant integrals; by appropriately weighting independent samples from a user-chosen distribution on \(\theta\ ,\) properties of the posterior \(p(\theta|y)\) can be estimated.

Markov Chain Monte Carlo (MCMC)

Realizations from the posterior used in Monte Carlo methods need not be independent, or generated directly. If the conditional distribution of each parameter is known (conditional on all other parameters), one simple way to generate a possibly-dependent sample is via Gibbs sampling. This algorithm generates one parameter at a time; as it sequentially updates each parameter, the entire parameter space is explored. It is appropriate to start from multiple starting points in order to check convergence, and in the long run the 'chains' of realizations produced will reflect the posterior of interest.

More general versions of the same argument include the Metropolis-Hastings algorithm; developing practical algorithms to approximate posterior distributions for complex problems remains an active area of research.
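As a toy illustration of MCMC (our sketch, not code from the article), a random-walk Metropolis sampler can target the Gamma\((y+a, N+b)\) posterior from the earlier example; because the exact answer is known here, the chain's output can be checked against it.

    # Random-walk Metropolis sampling of the Gamma(y + a, N + b) posterior.
    import math, random

    a, b, y, N = 10.0, 1.0, 20, 4.0

    def log_post(theta):  # log posterior density, up to an additive constant
        if theta <= 0:
            return -math.inf
        return (y + a - 1) * math.log(theta) - (N + b) * theta

    random.seed(1)
    theta, samples = 5.0, []
    for _ in range(50_000):
        prop = theta + random.gauss(0, 1.0)  # symmetric random-walk proposal
        if math.log(random.random()) < log_post(prop) - log_post(theta):
            theta = prop                     # accept; otherwise keep the current value
        samples.append(theta)

    kept = samples[5_000:]                   # discard burn-in
    print(sum(kept) / len(kept))             # close to the exact posterior mean (y+a)/(N+b) = 6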
Applications of Bayesian statistical methods

Explicitly Bayesian statistical methods tend to be used in three main situations. The first is where one has no alternative but to include quantitative prior judgements, due to lack of data on some aspect of a model, or because the inadequacies of some evidence have to be acknowledged through making assumptions about the biases involved. These situations can occur when a policy decision must be made on the basis of a combination of imperfect evidence from multiple sources, an example being the encouragement of Bayesian methods by the Food and Drug Administration (FDA) division responsible for medical devices.

The second situation is with moderate-size problems with multiple sources of evidence, where hierarchical models can be constructed on the assumption of shared prior distributions whose parameters can be estimated from the data. Common application areas include meta-analysis, disease mapping, multi-centre studies, and so on. With weakly-informative prior distributions the conclusions may often be numerically similar to those of classical techniques, even if the interpretations may be different.

The third area concerns problems where a huge joint probability model is constructed, relating possibly thousands of observations and parameters, and the only feasible way of making inferences about the unknown quantities is through taking a Bayesian approach: examples include image processing, spam filtering, signal analysis, and gene expression data. Classical model-fitting fails, and MCMC or other approximate methods become essential.

There is also extensive use of Bayesian ideas of parameter uncertainty but without explicit use of Bayes' theorem. If a deterministic prediction model has been constructed, but some of the parameter inputs are uncertain, then a joint prior distribution can be placed on those parameters and the resulting uncertainty propagated through the model, often using Monte Carlo methods, to produce a predictive probability distribution. This technique is used widely in risk analysis, health economic modelling and climate projections, and is sometimes known as probabilistic sensitivity analysis.

Another setting where the 'updating' inherent in the Bayesian approach is suitable is machine learning; simple examples can be found in modern software for spam filtering, suggesting which books or movies a user might enjoy given his or her past preferences, or ranking schemes for millions of on-line gamers. Formal inference may only be approximately carried out, but the Bayesian perspective allows a flexible and adaptive response to each additional item of information.

Open Areas in Bayesian Statistics

The philosophical rationale for using Bayesian methods was largely established and settled by the pioneering work of De Finetti, Savage, Jaynes and Lindley. However, widespread concerns remain over how to apply these methods in practice, where sensitivity to assumptions can detract from the epistemological appeal of the Bayesian position.

Hypothesis testing and model choice

Jeffreys (1939) developed a procedure for using data \(y\) to test between alternative scientific hypotheses \(H_0\) and \(H_1\ ,\) by computing the Bayes factor \(p(y|H_0)/p(y|H_1)\ .\) He suggested thresholds for strength of evidence for or against the hypotheses. The Bayes factor can be combined with the prior odds \(p(H_0)/p(H_1)\) to give posterior probabilities of each hypothesis, which can be used to weight predictions in Bayesian Model Averaging (BMA). Although BMA can be an effective pragmatic device for prediction, the use of posterior model probabilities for scientific hypothesis-testing is controversial even within the Bayesian community, for both philosophical and practical reasons: first, it may not make sense to talk of probabilities of hypotheses that we know are not strictly 'true', and second, the calculation of the Bayes factor can be extremely sensitive to apparently innocuous prior assumptions about parameters within each hypothesis. For example, the ordinate of a widely dispersed uniform prior distribution would be irrelevant for estimation within a single model, but becomes crucial when comparing models; the sketch below illustrates this sensitivity.
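This prior sensitivity is easy to exhibit in the MRSA example. In the sketch below (our construction, assuming scipy), a point null \(H_0: \theta = 5\) is compared with alternatives \(H_1: \theta \sim\) Gamma\((a,b)\ ,\) all with prior mean 10 but increasing dispersion; the marginal likelihood under \(H_1\) is the Negative Binomial form derived earlier.

    # Sensitivity of the Bayes factor to prior dispersion (y = 20, N = 4).
    from scipy import stats

    y, N = 20, 4.0

    def marginal_h1(a, b):
        # Integrating Poisson(y; theta*N) against a Gamma(a, b) prior gives a
        # Negative Binomial(a, b/(N+b)) probability for y.
        return stats.nbinom(a, b / (N + b)).pmf(y)

    p_h0 = stats.poisson(5.0 * N).pmf(y)             # H0: theta = 5 exactly
    for a, b in [(10, 1.0), (1, 0.1), (0.1, 0.01)]:  # prior mean 10, growing spread
        print(f"Gamma({a},{b}) prior: BF(H0:H1) = {p_h0 / marginal_h1(a, b):.1f}")
    # the Bayes factor changes several-fold across these priors, even though all
    # share the same prior mean: exactly the sensitivity described above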
(<a href="/w/index.php?title=Frequentist_statistics&amp;action=edit&amp;redlink=1" class="new" title="Frequentist statistics (page does not exist)">Frequentist statistics</a> uses the properties of statistical procedures over repeated applications to make inference based on the data at hand) </p><p>Bayesian statistical analysis can be complex to carry out, and explicitly includes both qualitative and quantitative judgement. This suggests the need for agreed standards for analysis and reporting, but these have not yet been developed. In particular, audiences should ideally fully understand the contribution of the prior distribution to the conclusions, the reasonableness of the prior assumptions, the robustness to alternative models and priors, and the adequacy of the computational methods. </p> <h3> <span class="mw-headline" id="Model_criticism"> Model criticism </span></h3> <p>In the archetypal Bayesian paradigm there is no need for testing whether a single model adequately fits the data, since we should be always comparing two competing models using hypothesis-testing methods. However there has been recent growth in techniques for testing absolute adequacy, generally involving the simulation of replicate data and checking whether specific characteristics of the observed data match those of the replicates. Procedures for model criticism in complex hierarchical models are still being developed. It is also reasonable to check there is not strong conflict between different data sources or between prior and data, and general measures of conflict in complex models is also a subject of current research. </p> <h2> <span class="mw-headline" id="Connections_and_comparisons_with_other_schools_of_statistical_inference"> Connections and comparisons with other schools of statistical inference </span></h2> <p>At a simple level, 'classical' likelihood-based inference closely resembles Bayesian inference using a flat prior, making the posterior and likelihood proportional. However, this underestimates the deep philosophical differences between Bayesian and <a href="/w/index.php?title=Frequentist_inference&amp;action=edit&amp;redlink=1" class="new" title="Frequentist inference (page does not exist)">frequentist inference</a>; Bayesian make statements about the relative evidence for parameter values given a dataset, while frequentists compare the relative chance of datasets given a parameter value. </p><p>The incompatibility of these two views has long been a source of contention between different schools of statisticians; there is little agreement over which is 'right', 'most appropriate' or even 'most useful'. Nevertheless, in many cases, estimates, intervals, and other decisions will be extremely similar for Bayesian and frequentist analyses. <a href="/w/index.php?title=Bernstein_von_Mises_Theorems&amp;action=edit&amp;redlink=1" class="new" title="Bernstein von Mises Theorems (page does not exist)">Bernstein von Mises Theorems</a> give general results proving approximate large-sample agreement between Bayesian and frequentist methods, for large classes of standard parametric and semi-parametric models. A notable exception is in hypothesis testing, where default Bayesian and frequentist methods can give strongly discordant conclusions. 
Connections and comparisons with other schools of statistical inference

At a simple level, 'classical' likelihood-based inference closely resembles Bayesian inference using a flat prior, making the posterior and likelihood proportional. However, this underestimates the deep philosophical differences between Bayesian and frequentist inference; Bayesians make statements about the relative evidence for parameter values given a dataset, while frequentists compare the relative chance of datasets given a parameter value.

The incompatibility of these two views has long been a source of contention between different schools of statisticians; there is little agreement over which is 'right', 'most appropriate' or even 'most useful'. Nevertheless, in many cases, estimates, intervals, and other decisions will be extremely similar for Bayesian and frequentist analyses. Bernstein-von Mises theorems give general results proving approximate large-sample agreement between Bayesian and frequentist methods, for large classes of standard parametric and semi-parametric models. A notable exception is in hypothesis testing, where default Bayesian and frequentist methods can give strongly discordant conclusions. Also, establishing Bayesian interpretations of non-model-based frequentist analyses (such as Generalized Estimating Equations) remains an open area.

Some qualities sought in non-Bayesian inference (such as adherence to the likelihood principle and exploitation of sufficiency) are natural consequences of following a Bayesian approach. Many Bayesian procedures can also, quite straightforwardly, be calibrated to have desired frequentist properties, such as intervals with 95% coverage. This can be useful when justifying Bayesian methods to external bodies such as regulatory agencies, and we might expect an increased use of 'hybrid' techniques in which a Bayesian interpretation is given to the inferences, but the long-run behaviour of the procedure is also taken into account.

References

- Bayes, T. (1763). An Essay towards solving a Problem in the Doctrine of Chances. Philosophical Transactions of the Royal Society of London.
- de Finetti, B. (1937). La Prévision: Ses Lois Logiques, Ses Sources Subjectives. Annales de l'Institut Henri Poincaré, 7: 1-68. Translated as "Foresight: Its Logical Laws, Its Subjective Sources", in Kyburg, H. E. and Smokler, H. E., eds. (1964), Studies in Subjective Probability, Wiley, New York, 91-158.
- Jaynes, E. T. (2003). Probability Theory: The Logic of Science. Cambridge University Press, Cambridge, UK.
- Jeffreys, H. (1939). Theory of Probability. Clarendon Press, Oxford.
- Lindley, D. V. (1953). Statistical Inference. Journal of the Royal Statistical Society, Series B, 16: 30-76.
- Schervish, M. J. (1995). Theory of Statistics. Springer-Verlag, New York.

Further reading

- Bernardo, J. M. and Smith, A. F. M. (1994). Bayesian Theory. Wiley.
- Berger, J. O. (1993). Statistical Decision Theory and Bayesian Analysis. Springer-Verlag.
- Carlin, B. P. and Louis, T. A. (2008). Bayesian Methods for Data Analysis (Third Edition). Chapman and Hall/CRC.
- Gelman, A., Carlin, J. B., Stern, H. S. and Rubin, D. B. (2003). Bayesian Data Analysis (Second Edition). Chapman and Hall/CRC.
- Gelman, A. and Hill, J. (2007). Data Analysis Using Regression and Multilevel/Hierarchical Models. Cambridge University Press.
- Lindley, D. V. (1991). Making Decisions (Second Edition). Wiley.
- Robert, C. P. (2007). The Bayesian Choice: From Decision-Theoretic Foundations to Computational Implementation (Second Edition). Springer-Verlag.
