href="#Aca">Academic Tools</a></li> <li><a href="https://leibniz.stanford.edu/friends/preview/folkpsych-simulation/">Friends PDF Preview <i class="icon-external-link"></i></a></li> <li><a href="https://plato.stanford.edu/cgi-bin/encyclopedia/archinfo.cgi?entry=folkpsych-simulation">Author and Citation Info <i class="icon-external-link"></i></a> </li> <li><a href="#pagetopright" class="back-to-top">Back to Top <i class="icon-angle-up icon2x"></i></a></li> </ul> </div> </div> </div> </div> </div> <!-- End article sidebar --> <!-- NOTE: Article content must have two wrapper divs: id="article" and id="article-content" --> <div id="article"> <div id="article-content"> <!-- BEGIN ARTICLE HTML --> <div id="aueditable"><!--DO NOT MODIFY THIS LINE AND ABOVE--> <h1>Folk Psychology as Mental Simulation</h1><div id="pubinfo"><em>First published Mon Dec 8, 1997; substantive revision Tue Mar 28, 2017</em></div> <div id="preamble"> <p> The capacity for &ldquo;mindreading&rdquo; is understood in philosophy of mind and cognitive science as the capacity to represent, reason about, and respond to others&rsquo; mental states. Essentially the same capacity is also known as &ldquo;folk psychology&rdquo;, &ldquo;Theory of Mind&rdquo;, and &ldquo;mentalizing&rdquo;. An example of everyday mindreading: you notice that Tom&rsquo;s <em>fright</em> <em>embarrassed</em> Mary and <em>surprised</em> Bill, who had <em>believed</em> that Tom <em>wanted</em> to <em>try</em> everything. Mindreading is of crucial importance for our social life: our ability to predict, explain, and/or coordinate with others&rsquo; actions on countless occasions crucially relies on representing their mental states. For instance, by attributing to Steve the desire for a banana and the belief that there are no more bananas at home but there are some left at the local grocery store, you can: (i) <em>explain</em> why Steve has just left home; (ii) <em>predict</em> where Steve is heading; and (iii) <em>coordinate</em> your behavior with his (meet him at the store, or prepare a surpise party while he is gone). Without mindreading, (i)&ndash;(iii) do not come easily&mdash;if they come at all. That much is fairly uncontroversial. What is controversial is how to explain mindreading. That is, how do people arrive at representing others&rsquo; mental states? This is the main question to which the Simulation (or, mental simulation) Theory (ST) of mindreading offers an answer.</p> <p> Common sense has it that, in many circumstances, we arrive at representing others&rsquo; mental states by putting ourselves in their shoes, or taking their perspective. For example, I can try to figure out my chess opponent&rsquo;s next decision by imagining what I would decide if I were in her place. (Although we may also speak of this as a kind of <em>empathy</em>, that term must be understood here without any implication of sympathy or benevolence.)</p> <p> ST takes this commonsensical idea seriously and develops it into a fully-fledged theory. At the core of the theory, we find the thesis that <em>mental simulation</em> plays a central role in mindreading: we typically arrive at representing others&rsquo; mental states by simulating their mental states in our own mind. So, to figure out my chess opponent&rsquo;s next decision, I mentally switch roles with her in the game. 
In doing this, I simulate her relevant <em>beliefs</em> and <em>goals</em>, and then feed these simulated mental states into my decision-making mechanism and let the mechanism produce a simulated <em>decision</em>. This decision is projected on or attributed to the opponent. In other words, the basic idea of ST is that if the resources our own brain uses to guide our own behavior can be modified to work as representations of other people&rsquo;s mental states, then we have no need to store general information about what makes people tick: we just do the ticking for them. Accordingly, ST challenges the Theory-Theory of mindreading (TT), the view that a tacit psychological theory underlies the ability to represent and reason about others&rsquo; mental states. While TT maintains that mindreading is an information-rich and theory-driven process, ST sees it as informationally poor and process driven (Goldman 1989).</p> <p> This entry is organized as follows. In section 1 (The Origins and Varieties of ST), we briefly reconstruct ST&rsquo;s history and elaborate further on ST&rsquo;s main theoretical aims. We then go on to explain the very idea of mental simulation (section 2: What is Meant by &ldquo;Mental Simulation&rdquo;?) In section 3 (Two Types of Simulation Processes), we consider the cognitive architecture underlying mental simulation and introduce the distinction between high-level and low-level simulation processes. In section 4 (The Role of Mental Simulation in Mindreading), we discuss what role mental simulation is supposed to play in mindreading, according to ST. This discussion carries over to section 5 (Simulation Theory and Theory-Theory), where we contrast the accounts of mindreading given by ST and TT. Finally, section 6 (Simulation Theory: Pros and Cons) examines some of the main arguments in favour of and against ST as theory of mindreading.</p> </div> <div id="toc"> <!--Entry Contents--> <ul> <li><a href="#OrigVariST">1. The Origins and Varieties of ST</a></li> <li><a href="#WhatMeanMentSimu">2. What is Meant by &ldquo;Mental Simulation&rdquo;?</a> <ul> <li><a href="#MentSimuRese">2.1 Mental Simulation as Resemblance</a></li> <li><a href="#MentSimuReus">2.2 Mental Simulation as Reuse</a></li> <li><a href="#RelaStatProc">2.3 Relations, States, and Processes</a></li> <li><a href="#FinaWorr">2.4 Final Worries</a></li> </ul></li> <li><a href="#TwoTypeSimuProc">3. Two Types of Simulation Processes</a> <ul> <li><a href="#HighLeveSimuProc">3.1 High-Level Simulation Processes</a></li> <li><a href="#LowLeveSimuProc">3.2 Low-Level Simulation Processes</a></li> </ul></li> <li><a href="#RoleMentSimuMind">4. The Role of Mental Simulation in Mindreading</a> <ul> <li><a href="#CentMentSimuMind">4.1 The Centrality of Mental Simulation in Mindreading</a></li> <li><a href="#ConsCaus">4.2 Constitution or Causation?</a></li> <li><a href="#MindWithJudg">4.3 Mindreading without Judgement </a></li> <li><a href="#MindIntr">4.4 Mindreading and Introspection</a></li> <li><a href="#Summ">4.5 Summary</a></li> </ul></li> <li><a href="#SimuTheoTheoTheo">5. Simulation Theory and Theory-Theory</a> <ul> <li><a href="#TheoTheo">5.1 The Theory-Theory</a></li> <li><a href="#CollCoop">5.2 Collapse or Cooperation?</a></li> </ul></li> <li><a href="#SimuTheoProsCons">6. 
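This "off-line" use of one's own decision-making can be pictured schematically. The following minimal Python sketch is ours and purely illustrative (every name and the toy decision rule are invented); it shows only the shape of the idea: one and the same decision routine serves genuine inputs, which guide my behavior, and pretend inputs, which yield a decision I attribute rather than act on.

```python
from dataclasses import dataclass

@dataclass
class State:
    kind: str       # "belief", "desire", or "decision"
    content: str
    pretend: bool   # True for simulated ("as if") states

def decide(belief: State, desire: State) -> State:
    """Toy decision-making mechanism: the same routine runs whether its
    inputs are genuine (use) or pretend (reuse, i.e. run off-line)."""
    plan = f"act so that {desire.content}, given that {belief.content}"
    # The output inherits pretend status from its inputs: pretend inputs
    # yield a pretend decision, to be attributed rather than executed.
    return State("decision", plan, belief.pretend or desire.pretend)

# My own case: genuine inputs produce a decision that guides my play.
mine = decide(State("belief", "the bishop is trapped", False),
              State("desire", "I win the game", False))

# Mindreading my opponent: pretend inputs, taken from her perspective.
hers = decide(State("belief", "my queen is exposed", True),
              State("desire", "she wins the game", True))

print(mine.pretend, hers.pretend)  # False True
```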
This entry is organized as follows. In section 1 (The Origins and Varieties of ST), we briefly reconstruct ST's history and elaborate further on ST's main theoretical aims. We then go on to explain the very idea of mental simulation (section 2: What is Meant by "Mental Simulation"?). In section 3 (Two Types of Simulation Processes), we consider the cognitive architecture underlying mental simulation and introduce the distinction between high-level and low-level simulation processes. In section 4 (The Role of Mental Simulation in Mindreading), we discuss what role mental simulation is supposed to play in mindreading, according to ST. This discussion carries over to section 5 (Simulation Theory and Theory-Theory), where we contrast the accounts of mindreading given by ST and TT. Finally, section 6 (Simulation Theory: Pros and Cons) examines some of the main arguments in favour of and against ST as a theory of mindreading.

Entry Contents

1. The Origins and Varieties of ST
2. What is Meant by "Mental Simulation"?
   2.1 Mental Simulation as Resemblance
   2.2 Mental Simulation as Reuse
   2.3 Relations, States, and Processes
   2.4 Final Worries
3. Two Types of Simulation Processes
   3.1 High-Level Simulation Processes
   3.2 Low-Level Simulation Processes
4. The Role of Mental Simulation in Mindreading
   4.1 The Centrality of Mental Simulation in Mindreading
   4.2 Constitution or Causation?
   4.3 Mindreading without Judgement
   4.4 Mindreading and Introspection
   4.5 Summary
5. Simulation Theory and Theory-Theory
   5.1 The Theory-Theory
   5.2 Collapse or Cooperation?
6. Simulation Theory: Pros and Cons
   6.1 The Mirror Neurons Controversy
   6.2 Self and Others
   6.3 Developmental Findings
7. Conclusion
Bibliography
Academic Tools
Other Internet Resources
Related Entries

1. The Origins and Varieties of ST

The idea that we often arrive at representing other people's mental states by mentally simulating those states in ourselves has a distinguished history in philosophy and the human sciences. Robert Gordon (1995) traces it back to David Hume (1739) and Adam Smith's (1759) notion of sympathy; Jane Heal (2003) and Gordon (2000) find simulationist themes in the Verstehen approach to the philosophy of history (e.g., Dilthey 1894); Alvin Goldman (2006) considers Theodor Lipps's (1903) account of empathy (Einfühlung) a precursor of the notion of mental simulation.

In its modern guise, ST was established in 1986, with the publication of Robert Gordon's "Folk Psychology as Simulation" and Jane Heal's "Replication and Functionalism". These two articles criticized the Theory-Theory and introduced ST as a better account of mindreading. In his article, Gordon discussed psychological findings concerning the development of the capacity to represent others' false beliefs. This attracted the interest of developmental psychologists, especially Paul Harris (1989, 1992), who presented empirical support for ST, and Alison Gopnik (Gopnik & Wellman 1992) and Joseph Perner (Perner & Howes 1992), who argued against it—Perner has since come to defend a hybrid version of ST (Perner & Kühberger 2005).

Alvin Goldman was an early and influential defender of ST (1989) and has done much to give the theory its prominence. His work with the neuroscientist Vittorio Gallese (Gallese & Goldman 1998) was the first to posit an important connection between ST and the newly discovered mirror neurons. Goldman's 2006 book Simulating Minds is the clearest and most comprehensive account to date of the relevant philosophical and empirical issues. Among other philosophical proponents of ST, Gregory Currie and Susan Hurley have been influential.

Since the late 1980s, ST has been one of the central players in the philosophical, psychological, and neuroscientific discussions of mindreading. It has however been argued that the fortunes of ST have had a notable negative consequence: the expression "mental simulation" has come to be used broadly and in a variety of ways, making "Simulation Theory" a blanket term lumping together many distinct approaches to mindreading. Stephen Stich and Shaun Nichols (1997) already urged dropping it in favor of a finer-grained terminology. There is some merit to this. ST is in fact better conceived of as a family of theories rather than a single theory. All the members of the family agree on the thesis that mental simulation, rather than a body of knowledge about other minds, plays a central role in mindreading. However, different members of the family can differ from one another in significant respects.
One fundamental area of disagreement among Simulation Theorists concerns the very nature of ST—what kind of theory ST is supposed to be—and what philosophers can contribute to it. Some Simulation Theorists take the question "How do people arrive at representing others' mental states?" as a straightforward empirical question about the cognitive processes and mechanisms underlying mindreading (Goldman 2006; Hurley 2008). According to them, ST is thus a theory in cognitive science, to which philosophers can contribute exactly as theoretical physicists contribute to physics:

    theorists specialize in creating and tweaking theoretical structures that comport with experimental data, whereas experimentalists have the primary job of generating the data. (Goldman 2006: 22)

Other philosophical defenders of ST, however, do not conceive of themselves as theoretical cognitive scientists at all. For example, Heal (1998) writes that:

    it is commonly taken that the inquiry into … the extent of simulation in psychological understanding is empirical, and that scientific investigation is the way to tell whether ST … is correct. But this perception is confused. It is an a priori truth … that simulation must be given a substantial role in our personal-level account of psychological understanding. (Heal 1998: 477–478)

Adjudicating this meta-philosophical dispute goes well beyond the aim of this entry. To be as inclusive as we can, we shall adopt a "balanced diet" approach: we shall discuss the extent to which ST is supported by empirical findings from psychology and neuroscience, and, at the same time, we shall dwell on "purely philosophical" problems concerning ST. We leave to the reader the task of evaluating which aspects should be put at the centre of the inquiry.

Importantly, even those who agree on the general nature of ST might disagree on other crucial issues. We will focus on what are typically taken to be the three most important bones of contention among Simulation Theorists: What is meant by "mental simulation"? (section 2). What types of simulation processes are there? (section 3). What is the role of mental simulation in mindreading? (section 4). After having considered what keeps Simulation Theorists apart, we shall move on to discuss what holds them together, i.e., the opposition to the Theory-Theory of mindreading (section 5 and section 6). This should give the reader a sense of the "unity amidst diversity" that characterizes ST.

2. What is Meant by "Mental Simulation"?

In common parlance, we talk of putting ourselves in others' shoes, or empathizing with other people. This talk is typically understood as adopting someone else's point of view, or perspective, in our imagination. For example, it is quite natural to interpret the request "Try to show some empathy for John!" as asking you to use your imaginative capacity to consider the world from John's perspective. But what is it for someone to imaginatively adopt someone else's perspective? To a first approximation, according to Simulation Theorists, it consists of mentally simulating, or re-creating, someone else's mental states. Currie and Ravenscroft (2002) make this point quite nicely:
    Imagination enables us to project ourselves into another situation and to see, or think about, the world from another perspective. These situations and perspectives … might be those of another actual person, [or] the perspective we would have on things if we believed something we actually don't believe, [or] that of a fictional character. … Imagination recreates the mental states of others. (Currie & Ravenscroft 2002: 1, emphasis added)

Thus, according to ST, empathizing with John's sadness consists of mentally simulating his sadness, and adopting Mary's political point of view consists of mentally simulating her political beliefs. This is the intuitive and general sense of mental simulation that Simulation Theorists have in mind.

Needless to say, this intuitive characterization of "mental simulation" is loose. What exactly does it mean to say that a mental state is a mental simulation of another mental state? Clearly, we need a precise answer to this question if the notion of mental simulation is to be the fundamental building block of a theory. Simulation Theorists, however, differ over how to answer it. The central divide concerns whether "mental simulation" should be defined in terms of resemblance (Heal 1986, 2003; Goldman 2006, 2008a) or in terms of reuse (Hurley 2004, 2008; Gallese & Sinigaglia 2011). We consider these two proposals in turn.

2.1 Mental Simulation as Resemblance

The simplest interpretation of "mental simulation" in terms of resemblance goes like this:

(RES-1) Token state M* is a mental simulation of token state M if and only if:
  1. Both M and M* are mental states
  2. M* resembles M in some significant respects

Two clarifications are in order. First, we will elaborate in due course on the "significant respects" in which a mental state has to resemble another mental state (see, in particular, section 3). For the moment, it will suffice to mention some relevant dimensions of resemblance: similar functional role; similar content; similar phenomenology; similar neural basis (an important discussion of this topic is Fisher 2006). Second, RES-1 defines "mental simulation" as a dyadic relation between mental states (the relation being a mental simulation of). However, the expression "mental simulation" is also often used to pick out a monadic property of mental states—the property being a simulated mental state (as will become clear soon, "simulated mental state" does not refer here to the state which is simulated, but to the state that does the simulating). For example, it is common to find in the literature sentences like "M* is a mental simulation".
To avoid ambiguities, we shall adopt the following terminological conventions:

- We shall use the expression "mental simulation of" to express the relation being a mental simulation of.
- We shall use the expression "simulated mental state" to express the property being a simulated mental state.
- We shall use the expression "mental simulation" in a way that is deliberately ambiguous between "mental simulation of" and "simulated mental state".

It follows from this that, strictly speaking, RES-1 is a definition of "mental simulation of". Throughout this entry, we shall characterize "simulated mental state" in terms of "mental simulation of": we shall say that if M* is a mental simulation of M, then M* is a simulated mental state.[1]

With these clarifications in place, we will consider the strengths and weaknesses of RES-1. Suppose that Lisa is seeing a yellow banana. At the present moment, there is no yellow banana in my own surroundings; thus, I cannot have that (type of) visual experience. Still, I can visualize what Lisa is seeing. Intuitively, my visual imagery of a yellow banana is a mental simulation of Lisa's visual experience. RES-1 captures this, given that both my visual imagery and Lisa's visual experience are mental states and the former resembles the latter.

RES-1, however, faces an obvious problem (Goldman 2006). The resemblance relation is symmetric: for any x and y, if x resembles y, then y resembles x. Accordingly, it follows from RES-1 that Lisa's visual experience is a mental simulation of my visual imagery. But this is clearly wrong. There is no sense in which one person's perceptual experience can be a mental simulation of another person's mental imagery (see Ramsey 2010 for other difficulties with RES-1).

In order to solve this problem, Goldman (2006) proposes the following resemblance-based definition of "mental simulation of":

(RES-2) Token state M* is a mental simulation of token state M if and only if:
  1. Both M and M* are mental states
  2. M* resembles M in some significant respects
  3. In resembling M, M* fulfils at least one of its functions

Under the plausible assumption that one of the functions of visual imagery is to resemble visual experiences, RES-2 correctly predicts that my visual imagery of a yellow banana counts as a mental simulation of Lisa's visual experience. At the same time, since visual experiences do not have the function of resembling visual images, RES-2 does not run into the trouble of categorizing the former as a mental simulation of the latter.
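The contrast between RES-1 and RES-2 can be made vivid with a toy model. The Python sketch below is ours and purely illustrative (the dataclass, the content-matching stand-in for "resemblance in significant respects", and the function label are all invented); it shows how the symmetry of resemblance sinks RES-1, and how RES-2's function clause restores the needed asymmetry.

```python
from dataclasses import dataclass, field

@dataclass
class MentalState:
    kind: str                      # e.g. "visual experience", "visual image"
    content: str                   # e.g. "yellow banana"
    functions: set = field(default_factory=set)

def resembles(a: MentalState, b: MentalState) -> bool:
    # Toy stand-in for resemblance in significant respects:
    # sameness of content. Note that it is symmetric.
    return a.content == b.content

def res1(m_star: MentalState, m: MentalState) -> bool:
    return resembles(m_star, m)

def res2(m_star: MentalState, m: MentalState) -> bool:
    # RES-2 adds: in resembling m, m_star fulfils one of its functions.
    return resembles(m_star, m) and "resemble experiences" in m_star.functions

lisa = MentalState("visual experience", "yellow banana")
mine = MentalState("visual image", "yellow banana", {"resemble experiences"})

print(res1(mine, lisa), res1(lisa, mine))  # True True  <- symmetry problem
print(res2(mine, lisa), res2(lisa, mine))  # True False <- RES-2's fix
```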
2.2 Mental Simulation as Reuse

Clearly, RES-2 is a better definition of "mental simulation of" than RES-1. Hurley (2008), however, argued that it won't do either, since it fails to distinguish ST from its main competitor, i.e., the Theory-Theory (TT), according to which mindreading depends on a body of information about mental states and processes (section 5). The crux of Hurley's argument is this. Suppose that a token visual image V* resembles a token visual experience V and, in doing so, fulfils one of its functions. In this case, RES-2 is satisfied. But now suppose further that visualization works like a computer simulation: it generates its outputs on the basis of a body of information about vision. On this assumption, RES-2 still categorizes V* as a mental simulation of V, even though V* has been generated by exactly the kind of process described by TT: a theory-driven and information-rich process.

According to Hurley (who follows here a suggestion by Currie & Ravenscroft 2002), the solution to this difficulty lies in the realization that "the fundamental … concept of simulation is reuse, not resemblance" (Hurley 2008: 758, emphasis added). Hurley's reuse-based definition of "mental simulation of" can be articulated as follows:

(REU) Token state M* is a mental simulation of token state M if and only if:
  1. Both M and M* are mental states
  2. M is generated by token cognitive process P
  3. M* is generated by token cognitive process P*
  4. P is implemented by the use of a token cognitive mechanism of type C
  5. P* is implemented by the reuse of a token cognitive mechanism of type C

To have a full understanding of REU, we need to answer three questions: (a) What is a cognitive process? (b) What is a cognitive mechanism? (c) What is the difference between using and reusing a certain cognitive mechanism? Let's do it!

It is a commonplace that explanation in cognitive science is structured into different levels. Given our aims, we can illustrate this idea through the classical tri-level hypothesis formulated by David Marr (1982). Suppose that one wants to explain a certain cognitive capacity, say, vision (or mindreading, or moral judgment). The first level of explanation, the most abstract one, consists in describing what the cognitive capacity does—what task it performs, what problem it solves, what function it computes. For example, the task performed by vision is roughly "to derive properties of the world from images of it" (Marr 1982: 23). The second level of analysis specifies how the task is accomplished: what algorithm our mind uses to compute the function. Importantly, this level of analysis abstracts from the particular physical structures that implement the algorithm in our head. It is only at the third level of analysis that the details of the physical implementation of the algorithm in our brain are spelled out.

With these distinctions at hand, we can answer questions (a) and (b). A cognitive process is a cognitive capacity considered as an information-processing activity and taken in abstraction from its physical implementation. Thus, cognitive processes are individuated in terms of what function they perform and/or in terms of what algorithms compute these functions (fair enough, the "and/or" is a very big deal, but it is something we can leave aside here). This means that the same (type of) cognitive process can be multiply realized in different physical structures. For example, parsing (roughly, the cognitive process that assigns a grammatical structure to a string of signs) can be implemented both by a human brain and by a computer. By contrast, cognitive mechanisms are particular (types of) physical structures—e.g., a certain part of the brain—implementing certain cognitive processes. More precisely, cognitive mechanisms are organized structures carrying out cognitive processes in virtue of how their constituent parts interact (Bechtel 2008; Craver 2007; Machamer et al. 2000).
We now turn to question (c), which concerns the distinction between use and reuse of a cognitive mechanism. To a first approximation, a cognitive mechanism is used when it performs its primary function, while it is reused when it is activated to perform a different, non-primary function. For example, one is using one's visual mechanism when one employs it to see, while one is reusing it when one employs it to conjure up a visual image (see Anderson 2008, 2015 for further discussion of the notion of reuse). All this is a bit sketchy, but it will do.

Let's now go back to REU. The main idea behind it is that whether a mental state is a mental simulation of another mental state depends on the cognitive processes generating these two mental states, and on the cognitive mechanisms implementing such cognitive processes. More precisely, in order for mental state M* to be a mental simulation of mental state M, it has to be the case that: (i) cognitive processes P* and P, which respectively generate M* and M, are both implemented by the same (type of) cognitive mechanism C; and (ii) P is implemented by the use of C, while P* is implemented by the reuse of C.

Now that we know what REU means, we can consider whether it fares better than RES-2 in capturing the nature of the relation of mental simulation. It would seem so. Consider this hypothetical scenario. Lisa is seeing a yellow banana, and her visual experience has been generated by cognitive process V1, which has been implemented by the use of her visual mechanism. I am visualizing a yellow banana, and my visual image has been generated by cognitive process V2, which has been implemented by the reuse of my visual mechanism. Rosanna-the-Super-Reasoner is also visualizing a yellow banana, but her visual image has been generated by an information-rich cognitive process: a process drawing upon Rosanna's detailed knowledge of vision and implemented by her incredibly powerful reasoning mechanism. REU correctly predicts that my visual image is a mental simulation of Lisa's visual experience, but not vice versa. More importantly, it also predicts that Rosanna's visual image does not count as a mental simulation of Lisa's visual experience, given that Rosanna's cognitive process was not implemented by the reuse of the visual mechanism. In this way, REU solves the problem faced by RES-2 in distinguishing ST from TT.
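Again, a toy model may help fix ideas. The Python sketch below is ours and purely illustrative (the Mechanism and Process types and all string labels are invented); it encodes REU's clauses 4-5 and reproduces the three verdicts just described, with Rosanna excluded because her visualizing runs on the wrong type of mechanism.

```python
from dataclasses import dataclass

@dataclass
class Mechanism:
    kind: str              # mechanism type C, e.g. "visual mechanism"
    primary_function: str  # what the mechanism is *used* for

@dataclass
class Process:
    mechanism: Mechanism
    function: str          # what this token process does

    @property
    def is_reuse(self) -> bool:
        # reuse = the mechanism runs for a non-primary function
        return self.function != self.mechanism.primary_function

def reu(p_star: Process, p: Process) -> bool:
    """REU, clauses 2-5: same mechanism *type*, used in p, reused in p_star.
    (Clause 1, that the outputs are mental states, is left implicit.)"""
    return (p.mechanism.kind == p_star.mechanism.kind
            and not p.is_reuse and p_star.is_reuse)

visual = Mechanism("visual mechanism", "seeing")
reasoning = Mechanism("reasoning mechanism", "inferring")

lisa_seeing = Process(visual, "seeing")                  # use of C
my_visualizing = Process(visual, "visualizing")          # reuse of C
rosanna_visualizing = Process(reasoning, "visualizing")  # theory-driven

print(reu(my_visualizing, lisa_seeing))       # True: mental simulation
print(reu(lisa_seeing, my_visualizing))       # False: not vice versa
print(reu(rosanna_visualizing, lisa_seeing))  # False: wrong mechanism type
```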
Should we then conclude that "mental simulation of" has to be defined in terms of reuse, rather than in terms of resemblance? Goldman (2008a) is still not convinced. Suppose that while Lisa is seeing a yellow banana, I am reusing my visual mechanism to visualize the Golden Gate Bridge. Now, even though Lisa's visual experience and my visual image have been respectively generated by the use and the reuse of the visual mechanism, it would be bizarre to say that my mental state is a mental simulation of Lisa's. Why? Because my mental state doesn't resemble Lisa's (she is seeing a yellow banana; I am visualizing the Golden Gate Bridge!). Thus—Goldman concludes—resemblance should be taken as the central feature of mental simulation.

2.3 Relations, States, and Processes

In order to overcome the difficulties faced by defining "mental simulation of" in terms of either resemblance or reuse alone, philosophers have built on the insights of both RES and REU and have proposed definitions that combine resemblance and reuse elements (Currie & Ravenscroft 2002; in recent years, Goldman himself seems to have favoured a mixed account; see Goldman 2012a). Here is one plausible definition:

(RES+REU) Token state M* is a mental simulation of token state M if and only if:
  1. Both M and M* are mental states
  2. M* resembles M in some significant respects
  3. M is generated by token cognitive process P
  4. M* is generated by token cognitive process P*
  5. P is implemented by the use of a token cognitive mechanism of type C
  6. P* is implemented by the reuse of a token cognitive mechanism of type C

RES+REU has at least three important virtues. The first is that it solves all the aforementioned problems for RES and REU—we leave to the reader the exercise of showing that this is indeed the case.

The second is that it fits nicely with an idea that has loomed large in the simulationist literature: the idea that simulated mental states are "pretend" ("as if", "quasi-") states—imperfect copies of, surrogates for, the "genuine" states normally produced by a certain cognitive mechanism, obtained by taking this cognitive mechanism "off-line". Consider the following case. Frank is in front of Central Café (and believes that he is there). He desires to drink a beer and believes that he can buy one at Central Café. When he feeds these mental states into his decision-making mechanism, the mechanism implements a decision-making process, which outputs the decision to enter the café. In this case, Frank's decision-making mechanism was "on-line"—i.e., he used it; he employed it for its primary function. My situation is different. I don't believe I am in front of Central Café, nor do I desire to drink a beer right now. Still, I can imagine believing and desiring so. When I feed these imagined states into my decision-making mechanism, I am not employing it for its primary function. Rather, I am taking it off-line (I am reusing it). As a result, the cognitive process implemented by my mechanism will output a merely imagined decision to enter the café. Now, it seems fair to say that my imagined decision resembles Frank's decision (more on this in section 3). If you combine this with how these two mental states have been generated, the result is that my imagined decision is a mental simulation of Frank's decision, and thus it is a simulated mental state. It is also clear why Frank's decision is genuine, while my simulated mental state is just a pretend decision: all else being equal, Frank's decision to enter Central Café will cause him to enter the café; on the contrary, no such behaviour will result from my simulated decision. I have not really decided so. Mine was just a quasi-decision—an imperfect copy of, a surrogate for, Frank's genuine decision.
And here is RES+REU's third virtue. So far, we have said that "mental simulation" can pick out either a dyadic relation between mental states or a monadic property of mental states. In fact, its ambiguity runs deeper than this, since philosophers and cognitive scientists also use "mental simulation" to refer to a monadic property of cognitive processes, namely, the property being a (mental) simulation process (or: "process of mental simulation", "simulational process", "simulative process", etc.). As a first stab, a (mental) simulation process is a cognitive process generating simulated mental states. RES+REU has the resources to capture this usage of "mental simulation" too. Indeed, RES+REU implicitly contains the following definition of "simulation process":

(PROC) Token process P* is a (mental) simulation process if and only if:
  1. P* generates token state M*
  2. M* resembles another token state, M, in some significant respects
  3. Both M and M* are mental states
  4. M is generated by token process P
  5. Both P and P* are cognitive processes
  6. P is implemented by the use of a token cognitive mechanism of type C
  7. P* is implemented by the reuse of a token cognitive mechanism of type C

Go back to the case in which Lisa was having a visual experience of a yellow banana, while I was having a visual image of a yellow banana. Our two mental states resembled one another, but different cognitive processes generated them: seeing in Lisa's case, and visualizing in my case. Moreover, Lisa's seeing was implemented by the use of the visual mechanism, while my visualizing was implemented by its reuse. According to PROC, the latter cognitive process, but not the former, was thus a simulation process.
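Putting the resemblance and reuse clauses together, and deriving PROC from them, can likewise be modeled in a few lines. As before, the Python below is our own illustrative toy, not anyone's proposed architecture: content-matching again stands in for "resemblance in significant respects", and all names are invented.

```python
from dataclasses import dataclass

@dataclass
class Mechanism:
    kind: str
    primary_function: str

@dataclass
class Process:
    mechanism: Mechanism
    function: str

@dataclass
class MentalState:
    content: str
    source: Process   # the token process that generated this state

def resembles(a, b):  # toy stand-in for RES+REU clause 2
    return a.content == b.content

def is_use(p):   return p.function == p.mechanism.primary_function
def is_reuse(p): return p.function != p.mechanism.primary_function

def res_reu(m_star, m):
    """RES+REU: resemblance, plus same mechanism type, used vs. reused."""
    return (resembles(m_star, m)
            and m.source.mechanism.kind == m_star.source.mechanism.kind
            and is_use(m.source) and is_reuse(m_star.source))

def is_simulation_process(p_star, states):
    """PROC, derived: p_star generated some state that is a mental
    simulation (per RES+REU) of a state generated by another process."""
    return any(res_reu(s, t) for s in states for t in states
               if s.source is p_star and t.source is not p_star)

visual = Mechanism("visual mechanism", "seeing")
seeing = Process(visual, "seeing")
visualizing = Process(visual, "visualizing")
lisa = MentalState("yellow banana", seeing)
mine = MentalState("yellow banana", visualizing)

print(res_reu(mine, lisa))                               # True
print(is_simulation_process(visualizing, [lisa, mine]))  # True
print(is_simulation_process(seeing, [lisa, mine]))       # False
```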
To sum up, RES+REU captures many of the crucial features that Simulation Theorists ascribe to mental simulation. For this reason, we shall adopt it as our working definition of "mental simulation of"—and, consequently, we shall adopt PROC as our definition of "simulation process".[2] We can put this into a diagram.

Figure 1. [Diagram: a hexagon labelled C (a cognitive mechanism); an arrow labelled "use" leads to a process P, which generates a state M; an arrow labelled "re-use" leads to a process P*, which generates a state M*; M and M* are connected by a dashed double-headed arrow labelled "resemblance".]

The hexagon at the bottom depicts a cognitive mechanism C (it could be, say, the visual mechanism). When C is used (arrow on the left), it implements cognitive process P (say, seeing); when it is re-used (arrow on the right), it implements cognitive process P* (say, visualizing). P generates mental state M (say, a visual experience of a red tomato), while P* generates mental state M* (say, a visual image of a red tomato). These two mental states (M and M*) resemble one another. Given this: M* is a mental simulation of M; M* is a simulated mental state; and P* is a simulation process.[3]

2.4 Final Worries

In this section, we shall consider three worries raised against adopting RES+REU as a definition of "mental simulation of". If you have already had enough of RES+REU, please feel free to move straight to section 3.

Heal (1994) pointed out a problem with committing ST to a particular account of the cognitive mechanisms that underlie it. Suppose that the human mind contains two distinct decision-making mechanisms: Mec1, which takes beliefs and desires as input and generates decisions as output; and Mec2, which works by following exactly the same logical principles as Mec1, but takes imagined beliefs and imagined desires as input and generates imagined decisions as output. Consider again Frank's decision to enter Central Café and my imagined decision to do so. According to the two-mechanisms hypothesis, Frank desired to drink a beer and believed that he could buy one at Central Café, and fed these mental states into Mec1, which generated the decision to enter the café. As for me, I fed the imagined desire to drink a beer and the imagined belief that I could buy one at Central Café into a distinct (type of) mechanism, i.e., Mec2, which generated the imagined decision to enter Central Café. Here is the question: does my imagined decision to enter Central Café count as a mental simulation of Frank's decision to do so? If your answer is "Yes, it does", then RES+REU is in trouble, since my imagined decision was not generated by reusing the same (type of) cognitive mechanism that Frank used to generate his decision; his decision was generated by Mec1, my imagined decision by Mec2. Thus, Heal concludes, a definition of "mental simulation of" should not contain any commitment about cognitive mechanisms—it should not make any implementation claim—but should be given at a more abstract level of description.
In the face of this difficulty, a defender of RES+REU can say the following. First, she might reject the intuition that, in the two-mechanisms scenario, my imagined decision counts as a mental simulation of Frank's decision. At a minimum, she might say that this scenario does not elicit any robust intuition in one direction or the other: it is simply not clear whether these two mental states stand in the relation being a mental simulation of. Second, she might downplay the role of intuitions in the construction of a definition for "mental simulation of" and cognate notions. In particular, if she conceives of ST as an empirical theory in cognitive science, she will be happy to discount the evidential value of intuitions if countervailing theoretical considerations are available. This, e.g., is the position of Currie and Ravenscroft (2002), who write that

    there are two reasons … why the Simulation Theorist should prefer [a one mechanism hypothesis]: … first, the postulation of two mechanisms is less economical than the postulation of one; second, … we have very good reasons to think that imagination-based decision making does not operate in isolation from the subject's real beliefs and desires. … If imagination and belief operate under a system of inferential apartheid—as the two-mechanisms view has it—how could this happen? (Currie & Ravenscroft 2002: 67–68)

A second worry has to do with the fact that RES+REU appears to be too liberal. Take this case. Yesterday, Angelina had the visual experience of a red apple. On the night of June 15, 1815, Napoleon conjured up the visual image of a red apple. Angelina used her visual mechanism to see, while Napoleon reused his to imagine. If we add to this that Napoleon's mental state resembled Angelina's, RES+REU predicts that Napoleon's (token) visual image was a mental simulation of Angelina's (token) visual experience. This might strike one as utterly bizarre. In fact, not only did Napoleon not intend to simulate Angelina's experience: he could not even have intended to do so. After all, Angelina was born roughly 150 years after Napoleon's death. By the same token, it is also impossible that Napoleon's visual image was caused by Angelina's visual experience. As a matter of fact, the visual image Napoleon had on the night of June 15, 1815 is entirely disconnected from the visual experience that Angelina had yesterday. How, then, could the former be a mental simulation of the latter? If you think about it, the problem is even worse than this. RES+REU has it that Napoleon's visual image of a red apple is a mental simulation of all the visual experiences of a red apple that have obtained in the past, that are currently obtaining, and that will obtain in the future. Isn't that absurd?
<a href="#ex-res-reu">RES+REU</a> has it that Napoleon&rsquo;s visual image of a red apple is a mental simulation of <em>all</em> the visual experiences of a red apple that have obtained in the past, that are currently obtaining, and that will obtain in the future. Isn&rsquo;t that absurd?</p> <p> Again, a defender of <a href="#ex-res-reu">RES+REU</a> can give a two-fold answer. First, she can develop an argument that this is not absurd at all. Intuitively, the following principle seems to be true:</p> <div class="indent" id="ex-type"> <p> (TYPE): the mental state type <em>visual image of a red apple</em> is a mental simulation of the mental state type <em>visual experience of a red apple</em>.</p> </div> <p> If TYPE is correct, then the following principle has to be true as well:</p> <div class="indent" id="ex-token"> <p> (TOKEN): Any token mental state of the type <em>visual image of a red apple</em> is a mental simulation of every token mental state of the type <em>visual experience of a red apple</em>.</p> </div> <p> But TOKEN entails that Napoleon&rsquo;s (token) visual image of a red apple is a mental simulation of Angelina&rsquo;s (token) visual experience of a red apple, which is exactly what <a href="#ex-res-reu">RES+REU</a> predicts. Thus, <a href="#ex-res-reu">RES+REU</a>&rsquo;s prediction, rather than being absurd, independently follows from quite intuitive assumptions. Moreover, even though TOKEN and <a href="#ex-res-reu">RES+REU</a> make the same prediction about the Napoleon-Angelina case, TOKEN is not entailed by <a href="#ex-res-reu">RES+REU</a>, since the latter contains a <em>restriction</em> on how visual images have to be generated. Thus, if one finds TOKEN intuitively acceptable, it is hard to see how one can find <a href="#ex-res-reu">RES+REU</a> to be <em>too liberal</em>.</p> <p> The second component of the answer echoes one of the answers given to Heal: for a Simulation Theorist who conceives of ST as a theory in cognitive science, intuitions have a limited value in assessing a definition of &ldquo;mental simulation of&rdquo;. In fact, the main aim of this definition is not that of capturing folk intuitions, but rather that of offering a clear enough picture of the relation of mental simulation on the basis of which an adequate theory of mindreading can be built. So, if the proposed definition fails, say, to help distinguishing ST from TT, or is of limited use in theory-building, or is contradicted by certain important results from cognitive science, then one has a good reason to abandon it. On the contrary, it should not be a cause for concern if <a href="#ex-res-reu">RES+REU</a> does not match the folk concept MENTAL SIMULATION OF. The notion &ldquo;mental simulation of&rdquo; is a term of art&mdash;like, say, the notions of I-Language or of Curved Space. These notions <em>do</em> poorly match the folk concepts of language and space, but linguists and physicists do not take this to be a problem. The same applies to the notion of mental simulation.</p> <p> And here is the third and final worry. <a href="#ex-res-reu">RES+REU</a> is supposed to be a definition of &ldquo;mental simulation of&rdquo; on the basis of which a theory of mindreading can be built. However, neither <a href="#ex-res-reu">RES+REU</a> nor <a href="#ex-proc">PROC</a> make any reference to the idea of representing others&rsquo; mental states. Thus, how could these definitions help us to construct a Simulation Theory <em>of mindreading</em>? 
The second component of the answer echoes one of the answers given to Heal: for a Simulation Theorist who conceives of ST as a theory in cognitive science, intuitions have limited value in assessing a definition of "mental simulation of". In fact, the main aim of this definition is not to capture folk intuitions, but rather to offer a clear enough picture of the relation of mental simulation on the basis of which an adequate theory of mindreading can be built. So, if the proposed definition fails, say, to help distinguish ST from TT, or is of limited use in theory-building, or is contradicted by certain important results from cognitive science, then one has a good reason to abandon it. By contrast, it should not be a cause for concern if RES+REU does not match the folk concept MENTAL SIMULATION OF. The notion "mental simulation of" is a term of art—like, say, the notions of I-Language or of Curved Space. These notions do poorly match the folk concepts of language and space, but linguists and physicists do not take this to be a problem. The same applies to the notion of mental simulation.

And here is the third and final worry. RES+REU is supposed to be a definition of "mental simulation of" on the basis of which a theory of mindreading can be built. However, neither RES+REU nor PROC makes any reference to the idea of representing others' mental states. How, then, could these definitions help us to construct a Simulation Theory of mindreading? The answer is simple: they will help us exactly as a clear definition of "computation", which has nothing to do with how the mind works, helped to develop the Computational Theory of Mind (see the entry on the computational theory of mind).

Here is another way to make the point. ST is made up of two distinct claims: the first is that mental simulation is psychologically real, i.e., that there are mental states and processes satisfying RES+REU and PROC. The second claim is that mental simulation plays a central role in mindreading. Clearly, the second claim cannot be true if the first is false. However, the second claim can be false even if the first claim is true: mental simulation could be psychologically real, yet play no role in mindreading at all. Hence, Simulation Theorists have to do three things. First, they have to establish that mental simulation is psychologically real. We consider this issue in section 3. Second, they have to articulate ST as a theory of mindreading. That is, they have to spell out in some detail the crucial role that mental simulation is supposed to play in representing others' mental states, and contrast the resulting theory with other accounts of mindreading. We dwell on this in sections 4 and 5. Finally, Simulation Theorists have to provide evidence in support of their theory of mindreading—that is, they have to give us good reasons to believe that mental simulation does play a crucial role in representing others' mental states. We discuss this issue in section 6.

3. Two Types of Simulation Processes

Now that we have definitions of "mental simulation of" and cognate notions, it is time to consider which mental states and processes satisfy them, if any. Are there really simulated mental states? That is, are there mental states generated by the reuse of cognitive mechanisms? And do these mental states resemble the mental states generated by the use of such mechanisms? For example, is it truly the case that visual images are mental simulations of visual experiences? What about decisions, emotions, beliefs, desires, and bodily sensations? Can our minds generate simulated counterparts of all these types of mental states? In this section, we consider how Simulation Theorists have tackled these problems. We will do so by focusing on the following question: are there really simulation processes (as defined by PROC)? If the answer to this question is positive, it follows that there are mental states standing in the relation of mental simulation (as defined by RES+REU), and thus simulated mental states.

Following Goldman (2006), it has become customary among Simulation Theorists to argue for the existence of two types of simulation processes: high-level simulation processes and low-level simulation processes (see, however, de Vignemont 2009). By exploring this distinction, we begin to articulate the cognitive architecture that, according to ST, underlies mental simulation.
3.1 High-Level Simulation Processes

High-level simulation processes are cognitive processes with the following features: (a) they are typically conscious, under voluntary control, and stimulus-independent; (b) they satisfy PROC, that is, they are implemented by the reuse of a certain cognitive mechanism, C, and their output states resemble the output states generated by the use of C.[4] Here are some cognitive processes that, according to Simulation Theorists, qualify as high-level simulation processes:

- visualizing: the cognitive process generating visual images (Currie 1995; Currie & Ravenscroft 2002; Goldman 2006);
- motor imagination: the cognitive process generating imagined bodily movements and actions (Currie & Ravenscroft 1997, 2002; Goldman 2006);
- imagining deciding: the cognitive process generating decision-like imaginings (Currie & Ravenscroft 2002);
- imagining believing: the cognitive process generating belief-like imaginings (Currie & Ravenscroft 2002);
- imagining desiring: the cognitive process generating desire-like imaginings (Currie 2002).

In what follows, we shall consider a couple of them in some detail.

Visualizing first. It is not particularly hard to see why visualizing satisfies condition (a). Typically: one can decide to visualize (or stop visualizing) something; the process is not driven by perceptual stimuli; and at least some parts of the visualization process are conscious. There might be cases in which visualizing is not under voluntary control, is stimulus-driven and, maybe, even entirely unconscious. This, however, is not a problem, since we know that there are clear cases satisfying (a).

Unsurprisingly, the difficult task for Simulation Theorists is to establish that visualizing has feature (b), that is: it is implemented by the reuse of the visual mechanism; and its outputs (that is, visual images) resemble genuine visual experiences. Simulation Theorists maintain that they have strong empirical evidence supporting the claim that visualizing satisfies PROC. Here is a sample (this and further evidence is extensively discussed in Currie 1995, Currie & Ravenscroft 2002, and Goldman 2006):

(i) visualizing recruits some of the brain areas involved in vision (Kosslyn et al. 1999);
(ii) left-neglect patients have the same deficit in both seeing and visualizing—i.e., they do not have perceptual experience of the left half of the visual space and they also fail to imagine the left half of the imagined space (Bisiach & Luzzatti 1978);
(iii) ocular movements occurring during visualizing approximate those happening during seeing (Spivey et al. 2000);
(iv) some patients systematically mistake visual images for perceptual states (Goldenberg et al. 1995);
(v) visual perception and visualizing exhibit similar patterns of information-processing (facilitations, constraints, illusions) (Decety & Michel 1989; Kosslyn et al. 1999).
1999).</li> </ol> <p> On this basis, Simulation Theorists conclude that visualizing is indeed implemented by the <em>reuse</em> of the visual mechanism (evidence i and ii) and that its outputs, i.e., visual images, do <em>resemble</em> visual experiences (evidence iii, iv, and v). Thus, visualizing is a process that qualifies as high-level simulation, and visual images are simulated mental states.</p> <p> Visual images are mental simulations of perceptual states. Are there high-level simulation processes whose outputs instead are mental simulations of propositional attitudes? (If you think that visual experiences are propositional attitudes, you can rephrase the question as follows: are there high-level simulation processes whose outputs are mental simulations of non-sensory states?) Three candidate processes have received a fair amount of attention in the simulationist literature: imagining desiring, imagining deciding, and imagining believing. The claims made by Simulation Theorists about these cognitive processes and their output states have generated an intense debate (Doggett &amp; Egan 2007; Funkhouser &amp; Spaulding 2009; Kieran &amp; Lopes 2003; Nichols 2006a, 2006b; Nichols &amp; Stich 2003; Velleman 2000). We do not have space to review it here (two good entry points are the introduction to Nichols 2006a and the entry on <a href="../imagination/">imagination</a>). Rather, we shall confine ourselves to briefly illustrating the simulationist case in favour of the thesis that imagining believing is a high-level simulation process.</p> <p> I don&rsquo;t believe that Rome is in France, but I can imagine believing it. Imagining believing typically is a conscious, stimulus-independent process, under voluntary control. Thus, imagining believing satisfies condition (a). In order for it to count as an instance of a high-level <em>simulation</em> process, it also needs to have feature (b), that is: (b.i) its outputs (i.e., belief-like imaginings) have to <em>resemble</em> genuine beliefs in some significant respects; (b.ii) it has to be implemented by the <em>reuse</em> of the cognitive mechanism (whose use implements the cognitive process) that generates genuine beliefs&mdash;let us call it &ldquo;the belief-forming mechanism&rdquo;. Does imagining believing satisfy (b)? Currie and Ravenscroft (2002) argue in favour of (b.i). Beliefs are individuated in terms of their content and functional role. Belief-like imaginings&mdash;Currie and Ravenscroft say&mdash;have the same content as, and a similar functional role to, their genuine counterparts. For example, the belief that Rome is in France and the belief-like imagining that Rome is in France have exactly the same propositional content: <em>that Rome is in France</em>. Moreover, belief-like imaginings mirror the inferential role of genuine beliefs. If one believes both that Rome is in France and that French is the language spoken in France, one can infer the belief that French is the language spoken in Rome. Analogously, from the belief-like imagining that Rome is in France and the genuine belief that French is the language spoken in France, one can infer the belief-like imagining that French is the language spoken in Rome. So far, so good (but see Nichols 2006b).</p> <p> What about (b.ii)? Direct evidence bearing on it is scarce. However, Simulation Theorists can give an argument along the following lines.
First, we need an explanation of why belief-like imaginings are, well, belief-like&mdash;as we have said above, it seems that they have the same type of content as, and a functional role similar to, genuine beliefs. A possible explanation for this is that both types of mental states are generated by (cognitive processes implemented by) the same cognitive mechanism. Second, it goes without saying that our mind contains a mechanism for generating beliefs (the belief-forming mechanism), and that there must be some mechanism or other in charge of generating belief-like imaginings. It is also well known that cognitive mechanisms are evolutionarily costly to build and maintain. Thus, evolution might have adopted the parsimonious strategy of redeploying a pre-existing mechanism (the belief-forming mechanism) for a non-primary function, i.e., generating belief-like imaginings&mdash;in general, this hypothesis is also supported by the idea that neural reuse is one of the fundamental organizational principles of the brain (Anderson 2008). If one puts these two strands of reasoning together, one gets a <em>prima facie</em> case for the claim that imagining believing is implemented by the reuse of the belief-forming mechanism&mdash;that is, a <em>prima facie</em> case for the conclusion that imagining believing satisfies (b.ii). Since imagining believing appears also to satisfy (b.i) and (a), lacking evidence to the contrary, Simulation Theorists are justified in considering it to be a high-level simulation process.</p> <p> Let&rsquo;s take stock. We have examined a few suggested instances of high-level simulation processes. If Simulation Theorists are correct, they exhibit the following commonalities: they satisfy <a href="#ex-proc">PROC</a> (this is why they are <em>simulation</em> processes); they are typically conscious, under voluntary control, and stimulus-independent (this is why they are <em>high-level</em>). Do they have some other important features in common? Yes, they do&mdash;Simulation Theorists say. They all are <em>under the control</em> of a single cognitive mechanism: imagination (more precisely, Currie &amp; Ravenscroft (2002) talk of Re-Creative Imagination, while Goldman (2006, 2009) uses the expression &ldquo;Enactment Imagination&rdquo;). The following passage will give you the basic gist of the proposal:</p> <blockquote> <p> What is distinctive to high-level simulation is the psychological mechanism &hellip; that produces it, the mechanism of imagination. This psychological system is capable of producing a wide variety of simulational events: simulated seeings (i.e., visual imagery), &hellip; simulated motor actions (motor imagery), simulated beliefs, &hellip; and so forth. &hellip; In producing simulational outputs, imagination does not operate all by itself. &hellip; For example, it recruits parts of the visual system to produce visual imagery &hellip;. Nonetheless, imagination &ldquo;takes the lead&rdquo; in directing or controlling the other systems it enlists for its project. (Goldman 2009: 484&ndash;85)</p> </blockquote> <p> Here is another way to make the point. We already know that, according to ST, visualizing is implemented by the reuse of the visual mechanism. In the above passage, Goldman adds that the reuse of the visual mechanism is initiated, guided, and controlled by imagination. The same applies, <em>mutatis mutandis</em>, to all cases of high-level simulation processes.
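</p> <p> The proposed architecture can be pictured schematically. The following sketch (in Python) is merely our illustration, not Goldman&rsquo;s own formalism, and every name in it is invented:</p> <pre>
# A toy sketch of "Enactment Imagination" driving a high-level
# simulation process (our illustration; all names are invented).

class VisualMechanism:
    def run(self, input_):
        # Normal use: driven by a perceptual stimulus.
        return {"kind": "visual experience", "content": input_}

class Imagination:
    """Initiates, guides, and controls high-level simulation."""
    def simulate(self, mechanism, pretend_input):
        # Reuse: the recruited mechanism is run "off-line", driven by
        # imagination rather than by a perceptual stimulus.
        output = mechanism.run(pretend_input)
        output["simulated"] = True  # resembles, but is not, the genuine state
        return output

image = Imagination().simulate(VisualMechanism(), "a red triangle")
# {'kind': 'visual experience', 'content': 'a red triangle', 'simulated': True}
</pre> <p>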
For example, in imagining hearing, imagination &ldquo;gets in control&rdquo; of the auditory mechanism, takes it off-line, and (re)uses it to generate simulated auditory experiences. Goldman (2012b, Goldman &amp; Jordan 2013) supports this claim by making reference to neuroscientific data indicating that the same core brain network, the so-called &ldquo;default network&rdquo;, subserves all the following self-projections: prospection (projecting oneself into one&rsquo;s future); episodic memory (projecting oneself into one&rsquo;s past); perspective taking (projecting oneself into other minds); and navigation (projecting oneself into other places) (see Buckner &amp; Carroll 2007 for a review). These different self-projections presumably involve different high-level simulation processes. However, they all have something in common: they all involve imagination-based perspectival shifts. Therefore, the fact that there is one brain network common to all these self-projections lends some support to the claim that there is one common cognitive mechanism, i.e., imagination, which initiates, guides, and controls all high-level simulation processes. </p> <p> If Goldman is right, and all high-level simulation processes are guided by imagination, we can then explain why, in our common parlance, we tend to describe high-level simulation processes and outputs in terms of imaginings, images, imagery, etc. More importantly, we can also explain why high-level simulation processes are conscious, under voluntary control, and stimulus-independent. These are, after all, typical properties of imaginative processes. However, there are simulation processes that typically are neither conscious, nor under voluntary control, nor stimulus independent. This indicates that they are not imagination-based. It is to this other type of simulation processes that we now turn.</p> <h3 id="LowLeveSimuProc">3.2 Low-Level Simulation Processes</h3> <p> Low-level simulation processes are cognitive processes with these features: (a*) they are typically unconscious, automatic, and stimulus-driven; (b) they satisfy <a href="#ex-proc">PROC</a>, that is, they are implemented by the <em>reuse</em> of a certain cognitive mechanism, <i>C</i>, and their output states <em>resemble</em> the output states generated by the use of <i>C</i>. What cognitive processes are, according to ST, instances of low-level simulation? The answer can be given in two words: mirroring processes. Clarifying what these two words mean, however, will take some time.</p> <p> The story begins at the end of the 1980s in Parma, Italy, where the neuroscientist Giacomo Rizzolatti and his team were investigating the properties of the neurons in the macaque monkey ventral premotor cortex. Through single-cell recording experiments, they discovered that the activity of neurons in the area F5 is correlated with goal-directed motor actions and not with particular movements (Rizzolatti et al. 1988). For example, some F5 neurons fire when the monkey grasps an object, regardless of whether the monkey uses the left or the right hand. A plausible interpretation of these results is that neurons in monkey area F5 encode <em>motor intentions</em> (i.e., those intentions causing and guiding actions like reaching, grasping, holding, etc.) and not mere <em>kinematic instructions</em> (i.e., those representations specifying the fine-grained motor details of an action). 
(In-depth philosophical analyses of the notion of motor intention can be found in: Brozzo forthcoming; Butterfill &amp; Sinigaglia 2014; Pacherie 2000). This was already an interesting result, but it was not what the Parma group became famous for. Rather, their striking discovery happened a few years later, helped by serendipity. Researchers were recording the activity of F5 neurons in a macaque monkey performing an object-retrieval task. In between trials, the monkey stood still and watched an experimenter setting up the new trial, with microelectrodes still measuring the monkey&rsquo;s brain activity. Surprisingly, some of the F5 neurons turned out to fire when the monkey <em>saw</em> the experimenter grasping and placing objects. This almost immediately led to new experiments, which revealed that a portion of F5 neurons fire not only when the monkey performs a certain goal-directed motor action (say, bringing a piece of food to the mouth), but also when it sees another agent performing the same (type of) action (di Pellegrino et al. 1992; Gallese et al. 1996; Rizzolatti et al. 1996). For this reason, these neurons were aptly called &ldquo;<em>mirror neurons</em>&rdquo;, and it was proposed that they encode motor intentions both during action execution and action observation (Rizzolatti &amp; Sinigaglia 2007, forthcoming). Later studies found mirror neurons also in the macaque monkey inferior parietal lobule (Gallese et al. 2002), which together with the ventral premotor cortex constitutes the monkey <em>cortical mirror neuron circuit</em> (Rizzolatti &amp; Craighero 2004).</p> <p> Subsequent evidence suggested that an <em>action mirror mechanism</em>&mdash;that is, a cognitive mechanism that gets activated both when an individual performs a certain goal-directed motor action and when she sees another agent performing the same action&mdash;also exists in the human brain (for reviews, see Rizzolatti &amp; Craighero 2004, and Rizzolatti &amp; Sinigaglia forthcoming). In fact, it appears that there are <em>mirror mechanisms</em> in the human brain outside the action domain as well: a mirror mechanism for disgust (Wicker et al. 2003), one for pain (Singer et al. 2004; Avenanti et al. 2005), and one for touch (Blakemore et al. 2005). Given the variety of mirror mechanisms, it is not easy to give a definition that fits them all. Goldman (2008b) has quite a good one though, and we will draw from it: a cognitive mechanism is a mirror mechanism if and only if it gets activated both when an individual undergoes a certain mental event <em>endogenously</em> and when she <em>perceives a sign</em> that another individual is undergoing the same (type of) mental event. For example, the pain mirror mechanism gets activated both when individuals experience &ldquo;a painful stimulus and &hellip; when they observe a signal indicating that [someone else] is receiving a similar pain stimulus&rdquo; (Singer et al. 2004: 1157).</p> <p> Having introduced the notions of mirror neuron and mirror mechanism, we can define the crucial notion of this section: <em>mirroring process</em>. We have seen that mirror mechanisms can get activated in two distinct modes: (i) endogenously; (ii) in the perception mode. For example, my action mirror mechanism gets endogenously activated when I grasp a mug, while it gets activated in the perception mode when I see you grasping a mug.
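</p> <p> The two activation modes can be rendered schematically. The following sketch (in Python) is our illustration only, with invented names:</p> <pre>
# A toy sketch of a mirror mechanism's two activation modes
# (our illustration; all names are invented).

class ActionMirrorMechanism:
    def activate_endogenously(self, my_action):
        # Mode (i): I perform the action myself.
        return {"motor intention": my_action, "mode": "endogenous"}

    def activate_in_perception_mode(self, observed_action):
        # Mode (ii): I perceive another agent performing the same
        # (type of) action. Same mechanism, same type of output state.
        return {"motor intention": observed_action, "mode": "perception"}

m = ActionMirrorMechanism()
m.activate_endogenously("grasping a mug")        # I grasp the mug
m.activate_in_perception_mode("grasping a mug")  # I see you grasp the mug
</pre> <p>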
Following again Goldman (2008b), let us say that a cognitive process is a mirroring process if and only if it is constituted by the activation of a mirror mechanism <em>in the perception mode</em>. For example, what goes on in my brain when I see you grasping a mug counts as a mirroring process.</p> <p> Now that we know what mirroring processes are, we can return to our initial problem&mdash;i.e., whether they are low-level simulation processes (remember that a cognitive process is a low-level simulation process if and only if: (a*) it is typically unconscious, automatic, and stimulus-driven; (b) it satisfies <a href="#ex-proc">PROC</a>). For reasons of space, we will focus on disgust mirroring only.</p> <p> Wicker et al. (2003) carried out an fMRI study in which participants first observed videos of disgusted facial expressions and subsequently underwent a disgust experience via inhaling foul odorants. It turned out that the same neural area&mdash;the left anterior insula&mdash;that was preferentially activated during the experience of disgust was also preferentially activated during the observation of the disgusted facial expressions. These results indicate the existence of a disgust mirror <em>mechanism</em>. Is disgust <em>mirroring</em> (the activation of the disgust mirror mechanism in the perception mode) a low-level simulation process? Simulation Theorists answer in the affirmative.</p> <p> Here is why disgust mirroring satisfies (a*). The process is <em>stimulus-driven</em>: it is sensitive to certain perceptual stimuli (disgusted facial expressions); it is <em>automatic</em>; and it is typically <em>unconscious</em> (even though its output, i.e., &ldquo;mirrored disgust&rdquo;, is sometimes conscious). What about condition (b)? Presumably, the primary (evolutionary) function of the disgust mechanism is to produce a disgust response to spoiled food, germs, parasites, etc. (Rozin et al. 2008). In the course of evolution, this mechanism could have been subsequently co-opted to also get activated by the perception (of a sign) that someone else is experiencing disgust, in order to facilitate social learning of food preferences (Gari&eacute;py et al. 2014). If this is correct, then disgust mirroring is implemented by the <em>reuse</em> of the disgust mechanism (by employing this mechanism for a function different from its primary one). Moreover, the output of disgust mirroring <em>resembles</em> the genuine experience of disgust in at least two significant respects: first, both mental states have the same neural basis; second, when conscious, they share a similar phenomenology. Accordingly, (b) is satisfied. By putting all this together, Simulation Theorists conclude that disgust mirroring is a low-level simulation process, and mirrored disgust is a simulated mental state (Goldman 2008b; Barlassina 2013).</p> <h2 id="RoleMentSimuMind">4. The Role of Mental Simulation in Mindreading</h2> <p> In the previous section, we examined the case for the psychological reality of mental simulation. We now turn to ST <em>as a theory of mindreading</em>. We will tackle two main issues: the extent to which mindreading is simulation-based, and how simulation-based mindreading works.</p> <h3 id="CentMentSimuMind">4.1 The Centrality of Mental Simulation in Mindreading</h3> <p> ST proposes that mental simulation plays a <em>central role</em> in mindreading, i.e., it plays a central role in the capacity to represent and reason about others&rsquo; mental states. What does &ldquo;central&rdquo; mean here?
Does it mean <em>the</em> central role, with other contributors to mindreading being merely peripheral? This is an important question, since in recent years hybrid models have been proposed according to which both mental simulation and theorizing play important roles in mindreading (see <a href="#CollCoop">section 5.2</a>). </p> <p> A possible interpretation of the claim that mental simulation plays a central role in representing others&rsquo; mental states is that mindreading events are <em>always</em> simulation-based, even if they sometimes also involve theory. Some Simulation Theorists, however, reject this interpretation, since they maintain that there are mindreading events in which mental simulation plays no role at all (Currie &amp; Ravenscroft 2002). For example, if I know that Little Jimmy is happy every time he finds a dollar, and I also know that he has just found a dollar, I do not need to undergo any simulation process to conclude that Little Jimmy is happy right now. I just need to carry out a simple logical inference.</p> <p> However, generalizations like &ldquo;Little Jimmy is happy every time he finds a dollar&rdquo; are <em>ceteris paribus</em> rules. People readily recognize exceptions: for example, we recognize situations in which Jimmy would probably not be happy even if he found a dollar, including some in which finding a dollar might actually make him unhappy. Rather than applying some additional or more complex rules that cover such situations, it is arguable that putting ourselves in Jimmy&rsquo;s situation and using &ldquo;good common sense&rdquo; alerts us to these exceptions and overrides the rule. If that is correct, then simulation acts as an overseer or governor even when people appear to be simply applying rules. </p> <p> Goldman (2006) suggests that we cash out the central role of mental simulation in representing others&rsquo; mental states as follows: mindreading is <em>often</em> simulation-based. Goldman&rsquo;s suggestion, however, turns out to be empty, since he explicitly refuses to specify what &ldquo;often&rdquo; means in this context. </p> <blockquote> <p> How often is often? Every Tuesday, Thursday, and Saturday? Precisely what claim does ST mean to make? It is unreasonable to demand a precise answer at this time. (Goldman 2006: 42; see also Goldman 2002; Jeannerod &amp; Pacherie 2004)</p> </blockquote> <p> Perhaps a better way to go is to characterize the centrality of mental simulation for mindreading not in terms of <em>frequency of use</em>, but in terms of <em>importance</em>. Currie and Ravenscroft make the very plausible suggestion that &ldquo;one way to see how important a faculty is for performing a certain task is to examine what happens when the faculty is lacking or damaged&rdquo; (Currie &amp; Ravenscroft 2002: 51). On this basis, one could say that mental simulation plays a <em>central</em> role in mindreading if and only if: if one&rsquo;s simulational capacity (i.e., the capacity to undergo simulation processes/simulated mental states) were impaired, then one&rsquo;s mindreading capacity would be <em>significantly</em> impaired. </p> <p> An elaboration of this line of thought comes from Gordon (2005)&mdash;see also Gordon (1986, 1996) and Peacocke (2005)&mdash;who argues that someone lacking the capacity for mental simulation would not be able to represent mental states as such, since she is incapable of representing anyone as having a mind in the first place.
Gordon&rsquo;s argument is essentially as follows:</p> <div class="indent"> <p> We represent something as having a mind, as having mental states and processes, only if we represent it as a subject (&ldquo;subject of experience,&rdquo; in formulations of &ldquo;the hard problem of consciousness&rdquo;), where &ldquo;a subject&rdquo; is understood as a generic &ldquo;I&rdquo;. This distinguishes it from a &ldquo;mere object&rdquo; (and also is a necessary condition for a more benevolent sort of empathy).</p> <p> To represent something as another &ldquo;I&rdquo; is to represent it as a possible target of self-projection: as something one might (with varying degrees of success) imaginatively put oneself in the place of. (Of course, one can fancifully put oneself in the place of just about anything&mdash;a suspension bridge, even; but that is not a <em>reductio ad absurdum</em>, because one can also fancifully represent just about anything as having a mind.) </p> </div> <p> It is not clear, however, what consequences Gordon&rsquo;s conceptual argument would have for mindreading, if any. Even if a capacity to self-project were needed for representing mental states as such, would lack of this capacity necessarily impair mindreading? That is, couldn&rsquo;t one explain, predict, and coordinate behavior using a theory of internal states, without conceptualizing these as states of an &ldquo;I&rdquo; or subject? As a more general point, Simulation Theorists have never provided a principled account of what would constitute a &ldquo;significant impairment&rdquo; of mindreading capacity.</p> <p> To cut a long story short, ST claims that mental simulation plays a central role in mindreading, but at the present stage its proponents do not agree on what this centrality exactly amounts to. We will come back to this issue in <a href="#SimuTheoTheoTheo">section 5</a>, when we shall discuss the respective contributions of mental simulation and theorizing in mindreading.</p> <p> We now turn to a different problem: how does mental simulation contribute to mindreading when it does? That is, how does simulation-based mindreading work? Here again, Simulation Theorists disagree about what the right answer is. In what follows, we explore some dimensions of disagreement.</p> <h3 id="ConsCaus">4.2 Constitution or Causation?</h3> <p> Some Simulation Theorists defend a <em>strong view</em> of simulation-based mindreading (Gordon 1986, 1995, 1996; Gallese et al. 2004; Gallese &amp; Sinigaglia 2011). They maintain that many simulation-based mindreading events are (entirely) <em>constituted</em> by mental simulation events (where mental simulation events are simulated mental states or simulation processes). In other words, some Simulation Theorists claim that, <em>on many occasions</em>, the fact that a subject <i>S</i> is representing someone else&rsquo;s mental states is nothing over and above the fact that <i>S</i> is undergoing a mental simulation event: the former fact reduces to the latter. For example, Lisa&rsquo;s undergoing a mirrored disgust experience as a result of observing John&rsquo;s disgusted face would count as a mindreading event: Lisa&rsquo;s simulated mental state would represent John&rsquo;s disgust (Gallese et al. 2004). Let us call this &ldquo;the <em>Constitution View</em>&rdquo;.</p> <p> We shall elaborate on the details of the Constitution View in <a href="#MindWithJudg">section 4.3</a>.
Before doing that, we consider an argument that has been directed against it over and over again, and which is supposed to show that the Constitution View is a non-starter (Fuller 1995; Heal 1995; Goldman 2008b; Jacob 2008, 2012). Lacking a better name, we will call it &ldquo;the <em>Anti-Constitution argument</em>&rdquo;. Here it is. By definition, a mindreading event is a mental event in which a subject, <i>S</i>, represents another subject, <i>Q</i>, as having a certain mental state <i>M</i>. Now&mdash;the argument continues&mdash;<em>the only way</em> in which <i>S</i> can represent <i>Q</i> as having <i>M</i> is this: <i>S</i> has to employ the <em>concept</em> of that mental state and form the <em>judgment</em>, or the <em>belief</em>, that <i>Q</i> is in <i>M</i>. Therefore, a mindreading event is identical to an event of judging that someone else has a certain mental state (where this entails the application of mentalistic concepts). It follows from this that mental simulation events cannot be constitutive of mindreading events, since the former events are not events of judging that someone else has a certain mental state. An example should clarify the matter. Consider Lisa again, who is undergoing a mirrored <em>disgust experience</em> as a result of observing John&rsquo;s disgusted face. Clearly, undergoing such a simulated disgust experience is a different mental event from <em>judging</em> that John is experiencing disgust. Therefore, Lisa&rsquo;s mental simulation does not constitute a mindreading event.</p> <p> In <a href="#MindWithJudg">section 4.3</a>, we will discuss how the defenders of the Constitution View have responded to this argument. Suppose for the moment that the Anti-Constitution argument is sound. What alternative pictures of simulation-based mindreading are available? Those Simulation Theorists who reject the Constitution View tend to endorse the <em>Causation View</em>, according to which mental simulation events never constitute mindreading events, but only <em>causally contribute</em> to them. The best developed version of this view is Goldman&rsquo;s (2006) <em>Three-Stage Model</em> (again, this is our label, not his), whose basic structure is as follows:</p> <div class="indent"> <p id="ex-stage1"> STAGE 1. <em>Mental simulation</em>: Subject <i>S</i> undergoes a simulation process, which outputs a token simulated mental state <em>m*</em>.</p> <p id="ex-stage2"> STAGE 2. <em>Introspection</em>: <i>S</i> introspects <em>m*</em> and categorizes/conceptualizes it <em>as</em> (a state of type) <i>M</i>.</p> <p id="ex-stage3"> STAGE 3. <em>Judgment</em>: <i>S</i> attributes (a state of type) <i>M</i> to another subject, <i>Q</i>, through the judgment <em><i>Q</i> is in <i>M</i></em>.</p> <p> (The causal relations among these stages are such that: STAGE 1 causes STAGE 2, and STAGE 2 in turn causes STAGE 3. See Spaulding 2012 for a discussion of the notion of causation in this context.)</p> </div> <p> Here is our trite example. On the basis of observing John&rsquo;s disgusted facial expression, Lisa comes to <em>judge</em> that John is having a disgust experience. How did she arrive at the formation of this judgment? Goldman&rsquo;s answer is as follows. The observation of John&rsquo;s disgusted facial expression triggered a disgust mirroring process in Lisa, resulting in Lisa&rsquo;s undergoing a mirrored disgust experience (STAGE 1). 
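</p> <p> Before following Lisa through the remaining stages, it may help to render the whole pipeline schematically. The sketch below (in Python) is ours, not Goldman&rsquo;s, and all names in it are invented:</p> <pre>
# A toy rendering of the Three-Stage Model (our illustration).

def stage1_simulate(observed_sign):
    # STAGE 1: a simulation process (here, disgust mirroring) outputs
    # a token simulated mental state m*.
    return {"type": "disgust", "simulated": True}      # m*

def stage2_introspect(m_star):
    # STAGE 2: the simulator introspects m* and categorizes it as a
    # state of type M.
    return m_star["type"]                              # M

def stage3_judge(target, M):
    # STAGE 3: only this judgment is the mindreading event proper;
    # stages 1 and 2 merely cause it.
    return target + " is in a state of " + M

m_star = stage1_simulate("John's disgusted face")
M = stage2_introspect(m_star)
stage3_judge("John", M)    # "John is in a state of disgust"
</pre> <p>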
This caused Lisa to introspect her simulated disgust experience and to categorize it as a disgust experience (STAGE 2) (the technical notion of introspection used by Goldman will be discussed in <a href="#MindIntr">section 4.4</a>). This, in turn, brought about the formation of the judgment <em>John is having a disgust experience</em> (STAGE 3). Given that, according to Goldman, mindreading events are identical to events of judging that someone else has a certain mental state, it is only this last stage of Lisa&rsquo;s cognitive process that constitutes a mindreading event. On the other hand, the previous two stages were merely causal contributors to it. But mental simulation entirely took place at STAGE 1. This is why the Three-Stage Model is a version of the Causation View: according to the model, mental simulation events <em>causally contribute</em> to, but do not constitute, mindreading events.</p> <h3 id="MindWithJudg">4.3 Mindreading without Judgement </h3> <p> The main strategy adopted by the advocates of the Constitution View in responding to the Anti-Constitution argument consists in impugning the identification of mindreading events with events of <em>judging</em> that someone else has a certain mental state. A prominent version of this position is Gordon&rsquo;s (1995, 1996) <em>Radical Simulationism</em>, according to which representing someone else&rsquo;s mental states does not require the formation of <em>judgments</em> involving the application of <em>mentalistic concepts</em>. Rather, Gordon proposes that the main bulk of mindreading events are <em>non-conceptual representations</em> of others&rsquo; mental states, where these non-conceptual representations are constituted by mental simulation events. If this is true, many mindreading events are <em>constituted</em> by mental simulation events, and thus the Constitution View is correct.</p> <p> The following case should help to get Radical Simulationism across. Suppose that I want to represent the mental state that an individual&mdash;call him &ldquo;Mr Tees&rdquo;&mdash;is in right now. According to Gordon, there is a false assumption behind the idea that, in order to do so, I need to form a judgment with the content <em>Mr Tees is in <i>M</i></em> (where &ldquo;<i>M</i>&rdquo; is a placeholder for a mentalistic concept). The false assumption is that the only thing that I can do is to simulate <em>myself</em> in Mr Tees&rsquo;s situation. As Gordon points out, it is also possible for me to simulate <em>Mr Tees</em> in his situation. And if I do so, my very simulation of Mr Tees <em>constitutes</em> a representation of his mental state, without the need of forming any judgment. This is how Gordon makes his point:</p> <blockquote> <p> To simulate Mr Tees in his situation requires an egocentric shift, a recentering of my egocentric map on Mr Tees. He becomes in my imagination the referent of the first person pronoun &ldquo;I&rdquo;. &hellip; Such recentering is the prelude to transforming myself in imagination into Mr Tees as much as actors become the characters they play. &hellip; But once a personal <em>transformation</em> has been accomplished, &hellip; I am already representing <em>him</em> as being in a certain state of mind. (Gordon 1995: 55&ndash;56)</p> </blockquote> <p> It is important to stress the dramatic difference between Gordon&rsquo;s Radical Simulationism and Goldman&rsquo;s Three-Stage Model. 
According to the latter, mental simulation events <em>causally contribute</em> to representing other people&rsquo;s mental states, but the mindreading event proper is <em>always</em> constituted by a judgment (or a belief). Moreover, Goldman maintains that the ability to form such judgments requires both the capacity to <em>introspect</em> one&rsquo;s own mental states (more on this in <a href="#MindIntr">section 4.4</a>) and possession of <em>mentalistic concepts</em>. None of this is true of Radical Simulationism. Rather, Gordon proposes that, in the large majority of cases, it is the very mental simulation event itself that <em>constitutes</em> a representation of someone else&rsquo;s mental states. Furthermore, since such mental simulation events require neither the capacity for introspection nor the possession of mentalistic concepts, Radical Simulationism entails the surprising conclusion that these two features play at best a very minor role in mindreading. A testable corollary is that social interaction often relies on an understanding of others that does not require the explicit application of mental state concepts.</p> <h3 id="MindIntr">4.4 Mindreading and Introspection</h3> <p> From what we have said so far, one would expect Gordon to agree with Goldman on at least one point. Clearly, Gordon has to admit that there are <em>some cases</em> of mindreading in which a subject attributes a mental state to someone else through a judgment involving the application of mentalistic concepts. Surely, Gordon cannot deny that there are occasions in which we think things like <em>Mary believes that John is late</em> or <em>Pat desires to visit Lisbon</em>. Being a Simulation Theorist, Gordon will also presumably be eager to maintain that many such mindreading events are based on mental simulation events. But if Gordon admits that much, should he not also concede that Goldman&rsquo;s Three-Stage Model is the right account of <em>at least</em> those simulation-based mindreading events? Surprising as it may be, Gordon still disagrees.</p> <p> Gordon (1995) accepts that there are occasions in which a subject arrives at a <em>judgment</em> about someone else&rsquo;s mental state on the basis of some mental simulation event. He might also concede to Goldman that such a judgment involves mentalistic concepts (but see Gordon&rsquo;s 1995 distinction between <em>comprehending</em> and <em>uncomprehending</em> ascriptions). <em>Contra</em> Goldman, however, Gordon argues that <em>introspection</em> plays no role at all in the generation of these judgments. Focusing on a specific example will help us to clarify this further disagreement between Goldman and Gordon.</p> <p> Suppose that I know that Tom believes that (1) and (2):</p> <ol> <li><em>Fido is a dog</em></li> <li><em>All dogs enjoy watching TV</em></li> </ol> <p> On this basis, I attribute to Tom the further belief that (3):</p> <ol start="3"> <li><em>Fido enjoys watching TV</em></li> </ol> <p> Goldman&rsquo;s Three-Stage Model explains this mindreading act in the following way. FIRST STAGE: I imagine believing what Tom believes (i.e., I imagine believing that (1) and (2)); I then feed those belief-like imaginings into my reasoning mechanism (in the off-line mode); as a result, my reasoning mechanism outputs the imagined belief that (3). The SECOND STAGE of the process consists in introspecting this simulated belief and categorizing it <em>as</em> a belief.
Crucially, in Goldman&rsquo;s model, &ldquo;introspection&rdquo; does not merely refer to the capacity to self-ascribe mental states. Rather, it picks out a <em>distinctive cognitive method</em> for self-ascription, a method which is typically described as <em>non-inferential</em> and <em>quasi-perceptual</em> (see the section Inner sense accounts in the entry on <a href="../self-knowledge/">self-knowledge</a>). In particular, Goldman (2006) characterizes introspection as a transduction process that takes the neural properties of a mental state token as input and outputs a categorization of the type of state. In the case that we are considering, my introspective mechanism takes the neural properties of my token simulated belief as input and categorizes it <em>as</em> a belief as output. After all this, the THIRD STAGE occurs: I project the categorized belief onto Tom, through the judgment <em>Tom believes that Fido enjoys watching TV</em>. (You might wonder where the <em>content</em> of Tom&rsquo;s belief comes from. Goldman (2006) has a story about that too, but we will leave this aside.)</p> <p> What about Gordon? How does he explain, in a simulationist fashion but without resorting to introspection, the passage from knowing that Tom believes that (1) and (2) to judging that Tom believes that (3)? According to Gordon, the first step in the process is, of course, imagining <em>being Tom</em>&mdash;thus believing, <em>in the context of the simulation</em>, that (1) and (2). This results (again in the context of the simulation) in the formation of the belief that (3). But how do I now go about discovering that *I*, Tom, believe that (3)? How can one perform such a self-ascription if not via introspection? A suggestion given by Gareth Evans will show us how&mdash;Gordon thinks.</p> <p> Evans (1982) famously argued that we answer the question &ldquo;Do I believe that <i>p</i>?&rdquo; by answering another question, namely &ldquo;Is it the case that <i>p</i>?&rdquo; In other words, according to Evans, we ascribe beliefs to ourselves not by introspecting, or by &ldquo;looking inside&rdquo;, but by looking &ldquo;outside&rdquo; and trying to ascertain how the world is. If, e.g., I want to know whether I believe that Manchester is bigger than Sheffield, I just ask myself &ldquo;Is Manchester bigger than Sheffield?&rdquo; If I answer in the affirmative, then I believe that Manchester is bigger than Sheffield. If I answer in the negative, then I believe that Manchester is <em>not</em> bigger than Sheffield. If I do not know what to answer, then I do <em>not</em> have any belief with regard to this subject matter.</p> <p> Gordon (1986, 1995) maintains that this self-ascription strategy&mdash;which he labels &ldquo;the <em>ascent routine</em>&rdquo; (Gordon 2007)&mdash;is also the strategy that we employ, in the context of a simulation, to determine the mental states of the simulated agent:</p> <blockquote> <p> In a simulation of <i>O</i>, I settle the question of whether <i>O</i> believes that <i>p</i> by simply asking &hellip; whether it is the case that <i>p</i>. That is, I simply concern myself with the world&mdash;<i>O</i>&rsquo;s world, the world from <i>O</i>&rsquo;s perspective. &hellip; Reporting <i>O</i>&rsquo;s beliefs <em>is</em> just reporting what is there. (Gordon 1995: 60)</p> </blockquote> <p> So, this is how, in Gordon&rsquo;s story, I come to judge that Tom has the belief that Fido enjoys watching TV.
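</p> <p> The routine can be rendered schematically before we spell it out in prose. The sketch below (in Python) is our illustration only, with invented names, and with the inference from (1) and (2) to (3) hard-coded for simplicity:</p> <pre>
# A toy sketch of the ascent routine run within a simulation of Tom
# (our illustration; all names are invented).

def is_it_the_case_that(p, perspective):
    # Settle "Is it the case that p?" from the simulated agent's
    # perspective, i.e., against the beliefs adopted in the simulation.
    if p in perspective:
        return True
    if p == "Fido enjoys watching TV":                 # hard-coded inference
        return ("Fido is a dog" in perspective and
                "All dogs enjoy watching TV" in perspective)
    return False

def ascent_routine(target, p, perspective):
    # Answer "Does the target believe that p?" by answering "Is it the
    # case that p?" within the simulation. No introspection is involved.
    if is_it_the_case_that(p, perspective):
        return target + " believes that " + p
    return None

toms_perspective = {"Fido is a dog", "All dogs enjoy watching TV"}
ascent_routine("Tom", "Fido enjoys watching TV", toms_perspective)
# "Tom believes that Fido enjoys watching TV"
</pre> <p>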
In the context of the simulation, *I* asked *myself* (where both &ldquo;*I*&rdquo; and &ldquo;*myself*&rdquo; in fact refer to Tom) whether *I* believe that Fido enjoys watching TV. And *I* answered this question by answering another question, namely, whether it is the case that Fido enjoys watching TV. Given that, from *my* perspective, Fido enjoys watching TV (after all, from *my* perspective, Fido is a dog and all dogs enjoy watching TV), *I* <em>expressed</em> my belief by saying: &ldquo;Yes, *I*, Tom, believe that Fido enjoys watching TV&rdquo;. As you can see, in such a story, introspection does not do anything. (We will come back to the role of introspection in mindreading in <a href="#SelfOthe">section 6.2</a>.)</p> <h3 id="Summ">4.5 Summary</h3> <p> In sections 2, 3, and 4 we dwelt upon the &ldquo;internal&rdquo; disagreements among Simulation Theorists. It goes without saying that such disagreements are both wide and deep. In fact, different Simulation Theorists give different answers to such fundamental questions as: &ldquo;What is mental simulation?&rdquo;, &ldquo;How does mental simulation contribute to mindreading?&rdquo;, &ldquo;What is the role of introspection in mindreading?&rdquo; In light of such differences of opinion in the simulationist camp, one might conclude that, after all, Stich and Nichols (1997) were right when saying that there is no such thing as the Simulation <em>Theory</em>. However, if one considers what is shared among Simulation Theorists, one will realize that there is unity amidst this diversity. A good way to reveal the commonalities among different versions of ST is by contrasting ST with its arch-enemy, i.e., the Theory-Theory of mindreading. This is what we do in the next section.</p> <h2 id="SimuTheoTheoTheo">5. Simulation Theory and Theory-Theory</h2> <p> ST is only one of several accounts of mindreading on the market. A rough-and-ready list of the alternatives should at least include: the Intentional Stance Theory (Dennett 1987; Gergely &amp; Csibra 2003; Gergely et al. 1995); Interactionism (Gallagher 2001; Gallagher &amp; Hutto 2008; De Jaegher et al. 2010); and the Theory-Theory (Gopnik &amp; Wellman 1992; Gopnik &amp; Meltzoff 1997; Leslie 1994; Scholl &amp; Leslie 1999). In this entry, we will discuss the Theory-Theory (TT) only, given that the TT-ST controversy has constituted the focal point of the debate on mindreading during the last 30 years or so.</p> <h3 id="TheoTheo">5.1 The Theory-Theory</h3> <p> As suggested by its name, the Theory-Theory proposes that mindreading is grounded in the possession of a Theory of Mind (&ldquo;a folk psychology&rdquo;)&mdash;i.e., it is based on the tacit knowledge of the following body of information: a number of &ldquo;folk&rdquo; laws or principles connecting mental states with sensory stimuli, behavioural responses, and other mental states.
Here are a couple of putative examples:</p> <div class="indent"> <p> <em>Law of sight</em>: If <i>S</i> is in front of object <i>O</i>, <i>S</i> directs her eye-gaze to <i>O</i>, <i>S</i>&rsquo;s visual system is properly functioning, and the environmental conditions are optimal, then <em>ceteris paribus</em> <i>S</i> will see <i>O</i>.</p> <p> <em>Law of the practical syllogism</em>: If <i>S</i> desires a certain outcome <i>G</i> and <i>S</i> believes that by performing a certain action <i>A</i> she will obtain <i>G</i>, then <em>ceteris paribus</em> <i>S</i> will decide to perform <i>A</i>.</p> </div> <p> The main divide among Theory-Theorists concerns how the Theory of Mind is acquired&mdash;i.e., it concerns where this body of knowledge comes from. According to the Child-Scientist Theory-Theory (Gopnik &amp; Wellman 1992; Gopnik &amp; Meltzoff 1997), a child constructs a Theory of Mind exactly as a scientist constructs a scientific theory: she collects evidence, formulates explanatory hypotheses, and revises these hypotheses in the light of further evidence. In other words, &ldquo;folk&rdquo; laws and principles are obtained through hypothesis testing and revision&mdash;a process that, according to proponents of this view, is guided by a general-purpose, Bayesian learning mechanism (Gopnik &amp; Wellman 2012). By contrast, the Nativist Theory-Theory (Carruthers 2013; Scholl &amp; Leslie 1999) argues that a significant part of the Theory of Mind is innate, rather than learned. More precisely, Nativists typically consider the core of the Theory of Mind as resulting from the maturation of a cognitive module specifically dedicated to representing mental states.</p> <p> These disagreements notwithstanding, the main tenet of TT is clear enough: attributions of mental states to other people are guided by the possession of a Theory of Mind. For example, if I know that you desire to buy a copy of <em>The New York Times</em> and I know that you believe that if you go to <em>News &amp; Booze</em> you can buy a copy, then I can use the <em>Law of the Practical Syllogism</em> to infer that you will decide to go to <em>News &amp; Booze</em>.</p> <p> TT has been so popular among philosophers and cognitive scientists that the explanation it proposes has ended up being the name of the very phenomenon to be explained: on many occasions, scholars use the expression &ldquo;Theory of Mind&rdquo; as a synonym of &ldquo;mindreading&rdquo;. Simulation Theorists, however, have never been particularly impressed by this. According to them, there is <em>no need</em> to invoke the tacit knowledge of a Theory of Mind to account for mindreading, since a more <em>parsimonious</em> explanation is available: we <em>reuse</em> our own cognitive mechanisms to mentally simulate others&rsquo; mental states. For example, why do I need to know the <em>Law of the Practical Syllogism</em>, if I can employ my own decision-making mechanism (which I have anyway) to simulate your decision? It is <em>uneconomical</em>&mdash;Simulation Theorists say&mdash;to resort to an information-rich strategy, if an information-poor strategy will do equally well.</p> <p> The difference between TT and ST can be further illustrated through a nice example given by Stich and Nichols (1992). Suppose that you want to predict the behavior of an airplane in certain atmospheric conditions. You can collect the specifications of the airplane and infer, on the basis of aerodynamic <em>theory</em>, how the airplane will behave.
Alternatively, you can build a model of the airplane and run a <em>simulation</em>. The former scenario approximates the way in which TT describes our capacity to represent others&rsquo; mental states, while the latter approximates ST. Two points need to be stressed, though. First, while knowledge of aerodynamic theory is explicit, TT says that our knowledge of the Theory of Mind is typically <em>implicit</em> (or tacit). That is, someone who knows aerodynamic theory is <em>aware</em> of the theory&rsquo;s laws and principles and is able to report them correctly, while the laws and principles constituting one&rsquo;s Theory of Mind typically lie outside awareness and reportability. Second, when we run a simulation of someone else&rsquo;s mental states, we do not need to build a model: we <em>are</em> the model&mdash;that is, we use <em>our own mind</em> as a model of others&rsquo; minds.</p> <p> Simulation Theorists maintain that the <em>default state</em> for the &ldquo;model&rdquo; is one in which the simulator simply makes no adjustments when simulating another individual. That is, ST has it that we are automatically disposed to attribute to a target mental states no different from our own current states. This would often serve adequately in social interaction between people who are cooperating or competing in what is for practical purposes the same situation. We tend to depart from this default when we perceive relevant differences between others&rsquo; situations and our own. In such cases, we might find ourselves adjusting for situational differences by putting ourselves imaginatively in what we consider the other&rsquo;s situation to be.</p> <p> We might also make adjustments for individual differences. Suppose an acquaintance will soon be choosing between candidate <em>a</em> and candidate <em>b</em> in an upcoming election. To us, projecting ourselves imaginatively into that voting situation, the choice is glaringly obvious: candidate <em>a</em>, by any reasonable criteria. But then we may wonder whether this imaginative projection into the voting situation adequately represents <em>our acquaintance</em> in that situation. We might recall things the person has said, or peculiarities of dress style, diet, or entertainment, that might seem relevant. Internalizing such behavior ourselves, trying to &ldquo;get behind&rdquo; it as an actor might get behind a scripted role, we might then put, as it were, a different person into the voting situation, one who might choose candidate <em>b</em>.</p> <p> Such a transformation would require quarantining some of our own mental states, preferences, and dispositions, inhibiting them so that they do not contaminate our off-line decision-making in the role of the other. Such inhibition of one&rsquo;s own mental states would be cognitively demanding, and hence liable to fail or to remain incomplete. For that reason, ST predicts that mindreading will be subject to <em>egocentric errors</em>&mdash;that is, it predicts that we will often attribute to a target the mental state that we would have if we were in the target&rsquo;s situation, rather than the state the target is actually in (Goldman 2006). In <a href="#SelfOthe">section 6.2</a>, we shall discuss whether this prediction is borne out by the data.</p> <h3 id="CollCoop">5.2 Collapse or Cooperation?</h3> <p> On the face of it, ST and TT could not be more different from one another. Some philosophers, however, have argued that, on closer inspection, ST collapses into TT, thus revealing itself as a form of TT in disguise.
The collapse argument was originally formulated by Daniel Dennett (1987):</p> <blockquote> <p> If I make believe I am a suspension bridge and wonder what I will do when the wind blows, what &ldquo;comes to my mind&rdquo; in my make-believe state depends on&hellip; my <em>knowledge</em> of physics&hellip; Why should my making believe I have your beliefs be any different? In both cases, knowledge of the imitated object is needed to drive the&hellip; &ldquo;simulation&rdquo;, and the knowledge must be&hellip; something like a <em>theory</em>. (Dennett 1987: 100&ndash;101, emphasis added)</p> </blockquote> <p> Dennett&rsquo;s point is clear. If I imagine being, say, a bridge, what I imagine will depend on my theory of bridges. Suppose that I have a folk theory of bridges that contains the following principle: &ldquo;A bridge cannot sustain a weight superior to its own weight&rdquo;. In this case, if I imagine an elephant weighing three tons walking over a bridge weighing two tons, I will imagine the bridge collapsing. Since my &ldquo;bridge-simulation&rdquo; is entirely <em>theory-driven</em>, &ldquo;simulation&rdquo; is a misnomer. The same carries over to &ldquo;simulating other people&rsquo;s mental states&rdquo;, Dennett says. If I try to imagine your mental states, what I imagine will depend entirely on my Theory of Mind. Therefore, the label &ldquo;mental simulation&rdquo; is misleading.</p> <p> Heal (1986) and Goldman (1989) promptly replied to Dennett. Fair enough: if a system <i>S</i> tries to simulate the state of a radically different system <i>Q</i> (e.g., if a human being tries to simulate the state of a bridge), then <i>S</i>&rsquo;s simulation must be guided by a theory. However, if a system <i>S</i> tries to simulate the state of a <em>relevantly similar</em> system <i>S</i>*, then <i>S</i>&rsquo;s simulation can be entirely <em>process-driven</em>: to simulate the state which <i>S</i>* is in, <i>S</i> simply has to run in itself a process similar to the one <i>S</i>* underwent. Given that, for all intents and purposes, human beings are relevantly similar to each other, a human being can mentally simulate what follows from having another human being&rsquo;s mental states without resorting to a body of theoretical knowledge about the mind&rsquo;s inner workings. She will just need to reuse her own cognitive mechanisms to implement a simulation process.</p> <p> This reply invited the following response (Jackson 1999). If the possibility of process-driven simulation is grounded in the similarity between the simulator and the simulated, then I have to assume that you are relevantly similar to me when I mentally simulate your mental states. This particular assumption, in turn, will be derived from a <em>general principle</em>&mdash;something like &ldquo;Human beings are psychologically similar&rdquo;. Therefore, mental simulation is grounded in the possession of a theory. The threat of collapse is back!
One reply to Jackson&rsquo;s arguments is as follows (for other replies see Goldman 2006): the fact that process-driven simulation is <em>grounded</em> in the similarity among human beings does not entail that, in order to run a simulation, a simulator must know (or believe, or assume) that such similarity obtains; no more, indeed, than the fact that the solubility of salt is <em>grounded</em> in the molecular structure of salt entails that a pinch of salt needs to know chemistry to dissolve in water.</p> <p> Granting that ST and TT are distinct theories, we can now ask a different question: are the theories better off individually or should they join forces somehow? Let us be more explicit. Can ST on its own offer an adequate account of mindreading (or at least of the great majority of its episodes)? And what about TT? A good number of theorists now believe that neither ST nor TT alone will do. Rather, many would agree that these two theories need to cooperate, if they want to reach a satisfactory explanation of mindreading. Some authors have put forward TT-ST hybrid models, i.e., models in which the tacit knowledge of a Theory of Mind is the central aspect of mindreading, but it is in many cases supplemented by simulation processes (Botterill &amp; Carruthers 1999; Nichols &amp; Stich 2003). Other authors have instead defended ST-TT hybrid models, namely, accounts of mindreading where the pride of place is given to mental simulation, but where the possession of a Theory of Mind plays some non-negligible role nonetheless (Currie &amp; Ravenscroft 2002; Goldman 2006; Heal 2003). Since this entry is dedicated to ST, we will briefly touch upon one instance of the latter variety of hybrid account.</p> <p> Heal (2003) suggested that the domain of ST is restricted to those mental processes involving <em>rational transitions</em> among <em>contentful</em> mental states. To wit, Heal maintains that mental simulation is the cognitive routine that we employ to represent other people&rsquo;s <em>rational processes</em>, i.e., those cognitive processes which are sensitive to the semantic content of the mental states involved. On the other hand, </p> <blockquote> <p> when starting point and/or outcome are [states] without content, and/or the connection is not [rationally] intelligible, there is no reason &hellip; to suppose that the process &hellip; can be simulated. (Heal 2003: 77)</p> </blockquote> <p> An example will clarify the matter. Suppose that I know that you desire to eat sushi, and that you believe that you can order sushi by calling <em>Yama Sushi</em>. To reach the conclusion that you will decide to call <em>Yama Sushi</em>, I only need to imagine desiring and believing what you desire and believe, and to run a simulated decision-making process in myself. No further knowledge is required to predict your decision: simulation alone will do the job. Consider, on the other hand, the situation in which I know that you took a certain drug and I want to figure out what your mental states will be. In this case&mdash;Heal says&mdash;my prediction cannot be based on mental simulation. Rather, I need to resort to a body of information about the likely psychological effects of that drug, i.e., I have to resort to a Theory of Mind (fair enough, I can also take the drug myself, but this will not count as mental simulation). This, according to Heal, generalizes to all cases in which a mental state is the input or the output of a <em>mere causal</em> process. 
In those cases, mental simulation is ineffective and should be replaced by theorizing. Still, those cases do not constitute the central part of mindreading. In fact, many philosophers and cognitive scientists would agree that the crucial component of human mindreading is the ability to reason about others&rsquo; <em>propositional attitudes</em>. And this is exactly the ability that, according to Heal, should be explained in terms of mental simulation. This is why Heal&rsquo;s proposal counts as an ST-TT hybrid, rather than the other way around.</p> <h2 id="SimuTheoProsCons">6. Simulation Theory: Pros and Cons</h2> <p> ST has sparked a lively debate, which has been going on since the end of the 1980s. This debate has dealt with a great number of theoretical and empirical issues. On the theoretical side, we have seen philosophical discussions of the relation between ST and functionalism (Gordon 1986; Goldman 1989; Heal 2003; Stich &amp; Ravenscroft 1992), and of the role of tacit knowledge in cognitive explanations (Davies 1987; Heal 1994; Davies &amp; Stone 2001), just to name a few. Examples of empirical debates are: how to account for mindreading deficits in Autism Spectrum Disorders (Baron-Cohen 2000; Currie &amp; Ravenscroft 2002), and how to explain the evolution of mindreading (Carruthers 2009; Lurz 2011). It goes without saying that discussing all these bones of contention would require an entire book (most probably, a <em>series</em> of books). In the last section of this entry, we confine ourselves to briefly introducing the reader to a small sample of the main open issues concerning ST.</p> <h3 id="MirrNeurCont">6.1 The Mirror Neurons Controversy</h3> <p> We wrote that ST proposes that mirroring processes (i.e., activations of mirror mechanisms <em>in the perception mode</em>): (A) are (low-level) simulation processes, and (B) contribute (either constitutively or causally) to mindreading (Gallese et al. 2004; Gallese &amp; Goldman 1998; Goldman 2006, 2008b; Hurley 2005). Both (A) and (B) have been vehemently contested by ST&rsquo;s opponents.</p> <p> Beginning with (A), it has been argued that mirroring processes do not qualify as simulation processes, because they fail to satisfy the definition of &ldquo;simulation process&rdquo; (Gallagher 2007; Herschbach 2012; Jacob 2008; Spaulding 2012) and/or because they are better characterized in different terms, e.g., as enactive perceptual processes (Gallagher 2007) or as elements in an information-rich process (Spaulding 2012). As for (B), the main worry runs as follows. Granting that mirroring processes are simulation processes, what evidence do we have for the claim that they contribute to mindreading? This, in particular, has been asked with respect to the role of mirroring processes in &ldquo;action understanding&rdquo; (i.e., the interpretation of an agent&rsquo;s behavior in terms of the agent&rsquo;s intentions, goals, etc.). After all, the neuroscientific evidence just indicates that action mirroring <em>correlates</em> with episodes of action understanding, but correlation is not causation, let alone constitution. In fact, there are no studies examining whether disruption of the monkey mirror neuron circuit results in action understanding deficits, and the evidence on human action understanding following damage to the action mirror mechanism is inconclusive at best (Hickok 2009).
In this regard, some authors have suggested that the most plausible hypothesis is instead that action mirroring follows (rather than causes or constitutes) the understanding of others&rsquo; mental states (Csibra 2007; Jacob 2008). For example, Jacob (2008) proposes that the job of mirroring processes in the action domain is just that of computing a representation of the observed agent&rsquo;s next <em>movement</em>, on the basis of a <em>previous representation</em> of the agent&rsquo;s intention. Similar deflationary accounts of the action mirror mechanism have been given by Brass et al. (2007), Hickok (2014), and Vannuscorps and Caramazza (2015)&mdash;these accounts typically take the STS (superior temporal sulcus, a brain region lacking mirror neurons) to be the critical neural area for action understanding.</p> <p> There are various ways to respond to these criticisms. A strong response argues that they are based on a misunderstanding of the relevant empirical findings, as well as on a mischaracterization of the role that ST attributes to the action mirror mechanism in action understanding (Rizzolatti &amp; Sinigaglia 2010, 2014). A weaker response holds that the focus on action understanding is a bit of a red herring, given that the most robust evidence in support of the central role played by mirroring processes in mindreading comes from the emotion domain (Goldman 2008b). We will consider the weaker response here.</p> <p> Goldman and Sripada (2005) discuss a series of paired deficits in emotion production and face-based emotion mindreading. These deficits&mdash;they maintain&mdash;are best explained by the hypothesis that one attributes emotions to someone else through simulating these emotions in oneself: when the ability to undergo the emotion breaks down, the mindreading capacity breaks down as well. Barlassina (2013) elaborates on this idea by considering Huntington&rsquo;s Disease (HD), a neurodegenerative disorder resulting in, among other things, damage to the disgust mirror mechanism. As predicted by ST, the difficulties individuals with HD have in experiencing disgust co-occur with an impairment in attributing disgust to someone else on the basis of observing her facial expression&mdash;despite perceptual abilities and knowledge about disgust being preserved in this clinical population. Individuals suffering from HD, however, exhibit an intact capacity for disgust mindreading on the basis of non-facial visual stimuli. For this reason, Barlassina concludes by putting forward an ST-TT hybrid model of disgust mindreading on the basis of visual stimuli.</p> <h3 id="SelfOthe">6.2 Self and Others</h3> <p> ST&rsquo;s central claim is that we reuse <em>our own</em> cognitive mechanisms to arrive at a representation of <em>other</em> people&rsquo;s mental states. This claim raises a number of issues concerning how ST conceptualizes the self-other relation. We will discuss a couple of them.</p> <p> Gallagher (2007: 355) writes that </p> <blockquote> <p> given the large diversity of motives, beliefs, desires, and behaviours in the world, it is not clear how a simulation process &hellip; can give me a reliable sense of what is going on in the other person&rsquo;s mind. </p> </blockquote> <p> There are two ways of interpreting Gallagher&rsquo;s worry. First, it can be read as saying that if mindreading is based on mental simulation, then it is hard to see how mental state attributions could be <em>epistemically justified</em>. 
This criticism, however, misses the mark entirely, since ST is not concerned with whether mental state attributions count as knowledge, but only with how, <em>as a matter of fact</em>, we go about forming such attributions. A second way to understand Gallagher&rsquo;s remarks is this: <em>as a matter of fact</em>, we are pretty successful in understanding other minds; however, given the differences among individual minds, this pattern of successes cannot be explained in terms of mental simulation.</p> <p> ST has a two-tier answer to the second reading of Gallagher&rsquo;s challenge. First, human beings are very similar with regard to cognitive processes such as perception, theoretical reasoning, practical reasoning, etc. For example, there is a very high probability that if both you and I look at the same scene, we will have the same visual experience. This explains why, in the large majority of cases, I can reuse my visual mechanism to successfully simulate your visual experiences. Second, even though we are quite good at recognizing others&rsquo; mental states, we are nonetheless prone to <em>egocentric errors</em>, i.e., we tend to attribute to a target the mental state that <em>we</em> would undergo if we were in the target&rsquo;s situation, rather than the actual mental state the target is in (Goldman 2006). A standard example is the <em>curse of knowledge</em> bias, where we take for granted that other people know what we know (Birch &amp; Bloom 2007). ST has a straightforward explanation of such egocentric errors (Gordon 1995; Goldman 2006): if we arrive at attributing mental states via mental simulation, the accuracy of the attribution will depend on our capacity to &ldquo;quarantine&rdquo; our genuine mental states when they do not match the target&rsquo;s, and to replace them with more appropriate simulated mental states. This &ldquo;adjustment&rdquo; process, however, is a demanding one, because our genuine mental states exert a powerful influence on it. Thus, Gallagher is right when he says that, on some occasions, &ldquo;if I project the results of my own simulation onto the other, I understand only myself in that other&rsquo;s situation, but I don&rsquo;t understand the other&rdquo; (Gallagher 2007: 355). However, given how widespread egocentric errors are, this counts as a point in favour of ST, rather than as an argument against it (but see de Vignemont &amp; Mercier 2016, and Saxe 2005).</p> <p> Carruthers (1996, 2009, 2011) raises a different problem for ST: no version of ST can adequately account for self-attributions of mental states. Recall that, according to Goldman (2006), simulation-based mindreading is a three-stage process in which we first mentally simulate a target&rsquo;s mental state, we then <em>introspect</em> and categorize the simulated mental state, and we finally attribute the categorized state to the target. Since Goldman&rsquo;s model has it that attributions of mental states to others asymmetrically depend on the ability to introspect one&rsquo;s own mental states, it predicts that: (A) introspection is (ontogenetically and phylogenetically) prior to the ability to represent others&rsquo; mental states; (B) there are cases in which introspection works just fine, but where the ability to represent others&rsquo; mental states is impaired (presumably, because the mechanism responsible for projecting one&rsquo;s mental states to the target is damaged). Carruthers (2009) argues that neither (A) nor (B) is borne out by the data: the former, because there are no creatures that have introspective capacities but at the same time lack the ability to represent others&rsquo; mental states; the latter, because there are no dissociation cases in which an intact capacity for introspection is paired with an impairment in the ability to represent others&rsquo; mental states.</p>
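<p> Goldman&rsquo;s three-stage routine, together with the &ldquo;quarantine&rdquo; step whose failure ST invokes to explain egocentric errors such as the curse of knowledge, can be laid out as a toy program. The sketch below is purely illustrative: the function names, the dictionary representation of mental states, and the crude classification step are placeholders, not hypotheses about cognitive mechanisms.</p> <pre>
# Goldman's (2006) three stages, rendered as a toy pipeline, with a
# quarantine step whose failure yields an egocentric error.

def classify(states):
    """Stage 2 helper: introspect and categorize (crudely)."""
    return "knows the answer" if states.get("knows_answer") else "does not know"

def mindread(my_states, target_situation, quarantine_succeeds=True):
    # Stage 1: construct pretend states fitting the target's situation.
    if quarantine_succeeds:
        # My own non-matching states play no role here.
        pretend_states = dict(target_situation)
    else:
        # Quarantine failure: my genuine states leak into the simulation.
        pretend_states = {**target_situation, **my_states}
    # Stage 2: introspectively categorize the simulated state.
    # Stage 3: attribute the categorized state to the target.
    return classify(pretend_states)

me = {"knows_answer": True}         # I know the answer
target = {"knows_answer": False}    # the target does not

print(mindread(me, target))                              # does not know
print(mindread(me, target, quarantine_succeeds=False))   # knows the answer
# The second call reproduces a curse-of-knowledge-style egocentric error.
</pre>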
<p> How might a Simulation Theorist respond to this objection? As we said in <a href="#RoleMentSimuMind">section 4</a>, Gordon&rsquo;s (1986, 1995, 1996) <em>Radical Simulationism</em> does not assign any role to introspection in mindreading. Rather, Gordon proposes that self-ascriptions are guided by ascent routines through which we answer the question &ldquo;Do I believe that <i>p</i>?&rdquo; by answering the lower-order question &ldquo;Is it the case that <i>p</i>?&rdquo; Carruthers (1996, 2011) thinks that this won&rsquo;t do either. Here is one of the many problems that Carruthers raises for this suggestion&mdash;we can call it &ldquo;<em>The Scope Problem</em>&rdquo;: </p> <blockquote> <p> this suggestion appears to have only a limited range of application. For even if it works for the case of belief, it is very hard to see how one might extend it to account for our knowledge of our own goals, decisions, or intentions&mdash;let alone our knowledge of our own attitudes of wondering, supposing, fearing, and so on. (Carruthers 2011: 81)</p> </blockquote> <p> Carruthers&rsquo; objections are important and deserve to be taken seriously. To discuss them, however, we would need to introduce a lot of further empirical evidence and many complex philosophical ideas about self-knowledge. This is not a task that we can take up here (the interested reader is encouraged to read, in addition to Gordon (2007) and Goldman (2009), the SEP entries on <a href="../self-knowledge/">self-knowledge</a> and on <a href="../introspection/">introspection</a>). The take-home message should be clear enough nonetheless: anybody who puts forward an account of mindreading should remember that such an account has to cohere with a plausible story about the cognitive mechanisms underlying self-attribution.</p> <h3 id="DeveFind">6.3 Developmental Findings</h3> <p> The development of mindreading capacities in children has been one of the central areas of empirical investigation. In particular, developmental psychologists have put a lot of effort into detailing how the ability to attribute false beliefs to others develops. Until 2005, the central experimental paradigm to test this ability was the <em>verbal false belief task</em> (Wimmer &amp; Perner 1983). Here is a classic version of it. A subject is introduced to two dolls, Sally and Anne, and three objects: Sally&rsquo;s ball, a basket, and a box. Sally puts her ball in the basket and leaves the scene. While Sally is away, Anne takes the ball out of the basket and puts it into the box. Sally then returns. The subject is asked where she thinks Sally will look for the ball. The correct answer, of course, is that Sally will look inside the basket. To give this answer, the subject has to attribute to Sally the <em>false belief</em> that the ball is in the basket. A number of experiments have found that while four-year-old children pass this task, three-year-old children fail it (for a review, see Wellman et al. 2001).</p>
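<p> The logic of the task can be compressed into a few lines of toy code, which make vivid what passing it requires: answering from a representation of Sally&rsquo;s <em>belief</em> rather than from the actual state of the world. The sketch is illustrative only.</p> <pre>
# The verbal false belief task as a toy state-tracker.

world = {"ball": "basket"}          # Sally hides the ball...
sally_belief = {"ball": "basket"}   # ...and sees where it is.

# Sally leaves; Anne moves the ball. The world changes,
# but Sally's belief does not.
world["ball"] = "box"

def predict_search(belief):
    """Pass the task: answer from the agent's belief."""
    return belief["ball"]

def egocentric_answer(world):
    """Fail the task (as three-year-olds do): answer from the world."""
    return world["ball"]

print(predict_search(sally_belief))  # basket (correct)
print(egocentric_answer(world))      # box (the typical error)
</pre>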
<p> For a long time, the mainstream interpretation of these findings was that children acquire the ability to attribute false beliefs only around their fourth birthday (but see Clements &amp; Perner 1994 and Bloom &amp; German 2000).</p> <p> In 2005, this developmental timeline was called into question. Kristine Onishi and Ren&eacute;e Baillargeon (2005) published the results of a <em>non-verbal</em> version of the false belief task, which they administered to 15-month-old infants. The experiment involves three steps. First, the infants see a toy between two boxes, one yellow and one green, and then an actor hiding the toy inside the green box. Next, the infants see the toy sliding out of the green box and hiding inside the yellow box. In the true belief condition (TB), the actor notices that the toy changes location, while in the false belief condition (FB) she does not. Finally, half of the infants see the actor reaching into the green box, while the other half sees the actor reaching into the yellow box. According to the <em>violation-of-expectation paradigm</em>, infants reliably look for a longer time at unexpected events. Therefore, if the infants expected the actor to search for the toy <em>on the basis of</em> the actor&rsquo;s belief about its location, then when the actor had a <em>true belief</em> that the toy was hidden in one box, the infants should look longer when the actor reached into the other box instead. Likewise, when the actor <em>falsely believed</em> that the toy was hidden in one box, the infants should look longer when the actor reached into the other box, i.e., into the toy&rsquo;s actual location. Strikingly, these predictions were confirmed in both the (TB) and (FB) conditions. On this basis, Onishi and Baillargeon (2005) concluded that children of 15 months possess the capacity to represent others&rsquo; false beliefs.</p> <p> This and subsequent versions of non-verbal false belief tasks attracted a huge amount of interest (at the current stage of research, there is evidence that sensitivity to others&rsquo; false beliefs is present in infants as young as 7 months&mdash;for a review, see Baillargeon et al. 2016). Above all, the following two questions have been widely discussed: why do children pass the non-verbal false belief task at such an early age, but do not pass the verbal version before the age of 4? Does passing the non-verbal false belief task really indicate the capacity to represent others&rsquo; false beliefs? (Perner &amp; Ruffman 2005; Apperly &amp; Butterfill 2009; Baillargeon et al. 2010; Carruthers 2013; Helming et al. 2014).</p> <p> Goldman and Jordan (2013) maintain that ST has a good answer to both questions. To begin with, they argue that it is implausible to attribute to infants such sophisticated meta-representational abilities as the ability to represent others&rsquo; false beliefs. Thus, Goldman and Jordan favour a deflationary view, according to which infants are <em>sensitive</em> to others&rsquo; false beliefs, but do not represent them <em>as such</em>. In particular, they propose that rather than believing that another subject <em><i>S</i> (falsely) believes that <i>p</i></em>, infants simply imagine how the world is from <i>S</i>&rsquo;s perspective&mdash;that is, they simply imagine that <i>p</i> is the case. This&mdash;Goldman and Jordan say&mdash;is a more primitive psychological competence than mindreading, since it does not involve forming a judgment about others&rsquo; mental states.</p>
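<p> The predictive logic of the violation-of-expectation design, and the point at issue between the rich and the deflationary readings, can be sketched in a few lines. The code below is only an illustration; in particular, whether the actor&rsquo;s &ldquo;take&rdquo; on the world is a belief represented <em>as such</em> or merely a world-state the infant imagines from the actor&rsquo;s perspective is exactly what the two readings dispute.</p> <pre>
# Violation-of-expectation logic in the Onishi-Baillargeon task.

def expected_reach(actors_take):
    """Where should the actor be expected to reach? Wherever she takes
    the toy to be, whether that 'take' is a belief attributed as such
    (rich reading) or a world-state the infant merely imagines from her
    perspective (the deflationary reading)."""
    return actors_take["toy"]

def looking_time(expected, observed):
    """Infants reliably look longer at unexpected events."""
    return "long" if observed != expected else "short"

# TB condition: the actor saw the toy move to the yellow box.
tb_take = {"toy": "yellow box"}
print(looking_time(expected_reach(tb_take), observed="green box"))   # long

# FB condition: the actor missed the move; for her, the toy is still
# in the green box (on the deflationary gloss, the infant just
# imagines: toy in green box).
fb_take = {"toy": "green box"}
print(looking_time(expected_reach(fb_take), observed="yellow box"))  # long
</pre>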
<p> This brings us to Goldman and Jordan&rsquo;s answer to the question &ldquo;why do children pass the verbal false belief task only at four?&rdquo; Passing this task requires fully-fledged mindreading abilities and executive functions such as inhibitory control. It takes quite a lot of time&mdash;around 3 to 4 years&mdash;before these functions and abilities come online.</p> <h2 id="Conc">7. Conclusion</h2> <p> Since the late 1980s, ST has received a great amount of attention from philosophers, psychologists, and neuroscientists. This is not surprising. Mindreading is a central human cognitive capacity, and ST challenges some basic assumptions about the cognitive processes and neural mechanisms underlying human social behavior. Moreover, ST touches upon a number of major philosophical problems, such as the relation between self-knowledge and knowledge of other minds, and the nature of mental concepts, including the concept of mind itself. In this entry, we have considered some of the fundamental empirical and philosophical issues surrounding ST. Many of them remain open. In particular, while the consensus view is now that both mental simulation and theorizing play important roles in mindreading, the currently available evidence falls short of establishing what their respective roles are. In other words, it is likely that we shall end up adopting a hybrid model of mindreading that combines ST and TT, but, at the present stage, it is very difficult to predict what this hybrid model will look like. Hopefully, the joint work of philosophers and cognitive scientists will help to settle the matter.</p> </div> <div id="bibliography"> <h2 id="Bib">Bibliography</h2> <ul class="hanging"> <li>Anderson, Michael L., 2008, &ldquo;Neural Reuse: A Fundamental Organizational Principle of the Brain&rdquo;, <em>Behavioral and Brain Sciences</em>, 20(4): 239&ndash;313. doi:10.1017/S0140525X10000853</li> <li>&ndash;&ndash;&ndash;, 2015, <em>After Phrenology: Neural Reuse and the Interactive Brain</em>, Cambridge, MA: MIT Press.</li> <li>Apperly, Ian A. and Stephen A. Butterfill, 2009, &ldquo;Do Humans Have Two Systems to Track Beliefs and Belief-Like States?&rdquo;, <em>Psychological Review</em>, 116(4): 953&ndash;70. doi:10.1037/a0016923</li> <li>Avenanti, Alessio, Domenica Bueti, Gaspare Galati, &amp; Salvatore M. Aglioti, 2005, &ldquo;Transcranial Magnetic Stimulation Highlights the Sensorimotor Side of Empathy for Pain&rdquo;, <em>Nature Neuroscience</em>, 8(7): 955&ndash;960. doi:10.1038/nn1481</li> <li>Baillargeon, Ren&eacute;e, Rose M. Scott, and Zijing He, 2010, &ldquo;False-Belief Understanding in Infants&rdquo;, <em>Trends in Cognitive Sciences</em>, 14(3): 110&ndash;118. doi:10.1016/j.tics.2009.12.006</li> <li>Baillargeon, Ren&eacute;e, Rose M. Scott, and Lin Bian, 2016, &ldquo;Psychological Reasoning in Infancy&rdquo;, <em>Annual Review of Psychology</em>, 67: 159&ndash;186. doi:10.1146/annurev-psych-010213-115033</li> <li>Barlassina, Luca, 2013, &ldquo;Simulation is not Enough: A Hybrid Model of Disgust Attribution on the Basis of Visual Stimuli&rdquo;, <em>Philosophical Psychology</em>, 26(3): 401&ndash;419. doi:10.1080/09515089.2012.659167</li> <li>Baron-Cohen, Simon, 2000, &ldquo;Theory of Mind and Autism: A Fifteen Year Review&rdquo;, in Simon Baron-Cohen, Helen Tager-Flusberg, and Donald J. Cohen (eds.), <em>Understanding Other Minds: Perspectives from Developmental Cognitive Neuroscience</em> (2nd edition), New York: Oxford University Press, pp.
3&ndash;20.</li> <li>Bechtel, William, 2008, <em>Mental Mechanisms: Philosophical Perspectives on Cognitive Neuroscience</em>, New York: Taylor and Francis.</li> <li>Birch, Susan A. and Paul Bloom, 2007, &ldquo;The Curse of Knowledge in Reasoning About False Beliefs&rdquo;, <em>Psychological Science</em>, 18(5): 382&ndash;386. doi:10.1111/j.1467-9280.2007.01909.x</li> <li>Bisiach, Edoardo and Claudio Luzzatti, 1978, &ldquo;Unilateral Neglect of Representational Space&rdquo;, <em>Cortex</em>, 14(1): 129&ndash;133. doi:10.1016/S0010-9452(78)80016-1</li> <li>Blakemore, S.-J., D. Bristow, G. Bird, C. Frith, and J. Ward, 2005, &ldquo;Somatosensory Activations During the Observation of Touch and a Case of Vision-Touch Synaesthesia&rdquo;, <em>Brain</em>, 128(7): 1571&ndash;1583. doi:10.1093/brain/awh500</li> <li>Bloom, Paul and Tim P. German, 2000, &ldquo;Two Reasons to Abandon the False Belief Task as a Test of Theory of Mind&rdquo;, <em>Cognition</em>, 77(1): B25&ndash;31. doi:10.1016/S0010-0277(00)00096-2</li> <li>Botterill, George and Peter Carruthers, 1999, <em>The Philosophy of Psychology</em>, Cambridge: Cambridge University Press.</li> <li>Brass, Marcel, Ruth M. Schmitt, Stephanie Spengler, and Gy&ouml;rgy Gergely, 2007, &ldquo;Investigating Action Understanding: Inferential Processes versus Action Simulation&rdquo;, <em>Current Biology</em>, 17(24): 2117&ndash;2121. doi:10.1016/j.cub.2007.11.057</li> <li>Brozzo, Chiara, forthcoming, &ldquo;Motor Intentions: How Intentions and Motor Representations Come Together&rdquo;, <em>Mind &amp; Language</em>.</li> <li>Buckner, Randy L. and Daniel C. Carroll, 2007, &ldquo;Self-Projection and the Brain&rdquo;, <em>Trends in Cognitive Sciences</em>, 11(2): 49&ndash;57. doi:10.1016/j.tics.2006.11.004</li> <li>Butterfill, Stephen A. and Corrado Sinigaglia, 2014, &ldquo;Intention and Motor Representation in Purposive Action&rdquo;, <em>Philosophy and Phenomenological Research</em>, 88(1): 119&ndash;145. doi:10.1111/j.1933-1592.2012.00604.x</li> <li>Carruthers, Peter, 1996, &ldquo;Simulation and Self-Knowledge: A Defense of Theory-Theory&rdquo;, in Carruthers and Smith 1996: 22&ndash;38. doi:10.1017/CBO9780511597985.004</li> <li>&ndash;&ndash;&ndash;, 2009, &ldquo;How We Know Our Own Minds: The Relationship between Mindreading and Metacognition&rdquo;, <em>Behavioral and Brain Sciences</em>, 32(2): 121&ndash;138. doi:10.1017/S0140525X09000545</li> <li>&ndash;&ndash;&ndash;, 2011, <em>The Opacity of Mind: An Integrative Theory of Self-Knowledge</em>, Oxford: Oxford University Press. doi:10.1093/acprof:oso/9780199596195.001.0001</li> <li>&ndash;&ndash;&ndash;, 2013, &ldquo;Mindreading in Infancy&rdquo;, <em>Mind and Language</em>, 28(2): 141&ndash;172. doi:10.1111/mila.12014</li> <li>Carruthers, Peter and Peter K. Smith (eds.), 1996, <em>Theories of Theories of Mind</em>, Cambridge: Cambridge University Press. doi:10.1017/CBO9780511597985</li> <li>Clements, Wendy A. and Josef Perner, 1994, &ldquo;Implicit Understanding of Belief&rdquo;, <em>Cognitive Development</em>, 9(4): 377&ndash;395. doi:10.1016/0885-2014(94)90012-4</li> <li>Craver, Carl F., 2007, <em>Explaining the Brain: Mechanisms and the Mosaic Unity of Neuroscience</em>, Oxford: Oxford University Press. doi:10.1093/acprof:oso/9780199299317.001.0001</li> <li>Csibra, Gergely, 2007, &ldquo;Action Mirroring and Action Understanding: An Alternative Account&rdquo;, in Patrick Haggard, Yves Rossetti, and Mitsuo Kawato (eds.), <em>Sensorimotor Foundations of Higher Cognition.
Attention and Performance XXII</em>, Oxford: Oxford University Press, pp. 453&ndash;459. doi:10.1093/acprof:oso/9780199231447.003.0020</li> <li>Currie, Gregory, 1995, &ldquo;Visual Imagery as the Simulation of Vision&rdquo;, <em>Mind and Language</em>, 10(1&ndash;2): 25&ndash;44. doi:10.1111/j.1468-0017.1995.tb00004.x</li> <li>&ndash;&ndash;&ndash;, 2002, &ldquo;Desire in Imagination&rdquo;, in Tamar Szabo Gendler and John Hawthorne (eds.), <em>Conceivability and Possibility</em>, Oxford: Oxford University Press, pp. 201&ndash;221.</li> <li>Currie, Gregory and Ian Ravenscroft, 1997, &ldquo;Mental Simulation and Motor Imagery&rdquo;, <em>Philosophy of Science</em>, 64(1): 161&ndash;80. doi:10.1086/392541</li> <li>&ndash;&ndash;&ndash;, 2002, <em>Recreative Minds: Imagination in Philosophy and Psychology</em>, Oxford: Oxford University Press. doi:10.1093/acprof:oso/9780198238089.001.0001</li> <li>Davies, Martin, 1987, &ldquo;Tacit Knowledge and Semantic Theory: Can a Five per Cent Difference Matter?&rdquo; <em>Mind</em>, 96(384): 441&ndash;462. doi:10.1093/mind/XCVI.384.441</li> <li>Davies, Martin and Tony Stone (eds.), 1995a, <em>Folk Psychology: The Theory of Mind Debate</em>, Oxford: Blackwell Publishers.</li> <li>&ndash;&ndash;&ndash; (eds.), 1995b, <em>Mental Simulation: Evaluations and Applications&mdash;Readings in Mind and Language</em>, Oxford: Blackwell Publishers.</li> <li>&ndash;&ndash;&ndash;, 2001, &ldquo;Mental Simulation, Tacit Theory, and the Threat of Collapse&rdquo;, <em>Philosophical Topics</em>, 29(1/2): 127&ndash;173. doi:10.5840/philtopics2001291/212</li> <li>Decety, Jean and Fran&ccedil;ois Michel, 1989, &ldquo;Comparative Analysis of Actual and Mental Movement Times in Two Graphic Tasks&rdquo;, <em>Brain and Cognition</em>, 11(1): 87&ndash;97. doi:10.1016/0278-2626(89)90007-9</li> <li>De Jaegher, Hanne, Ezequiel Di Paolo, and Shaun Gallagher, 2010, &ldquo;Can Social Interaction Constitute Social Cognition?&rdquo; <em>Trends in Cognitive Sciences</em>, 14(10): 441&ndash;447. doi:10.1016/j.tics.2010.06.009</li> <li>Dennett, Daniel C., 1987, <em>The Intentional Stance</em>, Cambridge, MA: MIT Press.</li> <li>de Vignemont, Fr&eacute;d&eacute;rique, 2009, &ldquo;Drawing the Boundary Between Low-Level and High-Level Mindreading&rdquo;, <em>Philosophical Studies</em>, 144(3): 457&ndash;466. doi:10.1007/s11098-009-9354-1</li> <li>de Vignemont, Fr&eacute;d&eacute;rique and Hugo Mercier, 2016, &ldquo;Under Influence: Is Altercentric Bias Compatible with Simulation Theory?&rdquo; in Brian P. McLaughlin and Hilary Kornblith (eds.), <em>Goldman and his Critics</em>, Oxford: Blackwell. doi:10.1002/9781118609378.ch13</li> <li>Dilthey, Wilhelm, [1894] 1977, <em>Descriptive Psychology and Historical Understanding</em>, Richard M. Zaner and Kenneth L. Heiges (trans.), with an introduction by Rudolf A. Makkreel, The Hague: Martinus Nijhoff. doi:10.1007/978-94-009-9658-8</li> <li>di Pellegrino, G., L. Fadiga, L. Fogassi, V. Gallese, and G. Rizzolatti, 1992, &ldquo;Understanding Motor Events: A Neuropsychological Study&rdquo;, <em>Experimental Brain Research</em>, 91(1): 176&ndash;180. doi:10.1007/BF00230027</li> <li>Doggett, Tyler and Andy Egan, 2007, &ldquo;Wanting Things You Don&rsquo;t Want: The Case for an Imaginative Analogue of Desire&rdquo;, <em>Philosophers' Imprint</em>, 7(9).
[<a href="http://hdl.handle.net/2027/spo.3521354.0007.009" target="other">Doggett and Egan 2007 available online</a>]</li> <li>Evans, Gareth, 1982, <em>The Varieties of Reference</em>, Oxford: Oxford University Press.</li> <li>Fisher, Justin C., 2006, &ldquo;Does Simulation Theory Really Involve Simulation?&rdquo; <em>Philosophical Psychology</em>, 19(4): 417&ndash;432. doi:10.1080/09515080600726377</li> <li>Fuller, Gary, 1995, &ldquo;Simulation and Psychological Concepts&rdquo;, in Davies and Stone 1995b: chapter 1, pp. 19&ndash;32.</li> <li>Funkhouser, Eric and Shannon Spaulding, 2009, &ldquo;Imagination and Other Scripts&rdquo;, <em>Philosophical Studies</em>, 143(3): 291&ndash;314. doi:10.1007/s11098-009-9348-z</li> <li>Gallagher, Shaun, 2001, &ldquo;The Practice of Mind: Theory, Simulation, or Primary Interaction?&rdquo; <em>Journal of Consciousness Studies</em>, 8(5&ndash;7): 83&ndash;108.</li> <li>&ndash;&ndash;&ndash;, 2007, &ldquo;Simulation Trouble&rdquo;, <em>Social Neuroscience</em>, 2(3&ndash;4): 353&ndash;365. doi:10.1080/17470910601183549</li> <li>Gallagher, Shaun and Daniel D. Hutto, 2008, &ldquo;Understanding Others Through Primary Interaction and Narrative Practice&rdquo;, in Jordan Zlatev, Timothy P. Racine, Chris Sinha, &amp; Esa Itkonen (eds.), <em>The Shared Mind: Perspectives on Intersubjectivity</em>, Amsterdam: John Benjamins, pp. 17&ndash;38. doi:10.1075/celcr.12.04gal</li> <li>Gallese, Vittorio, 2001, &ldquo;The &lsquo;Shared Manifold&rsquo; Hypothesis: From Mirror Neurons to Empathy&rdquo;, <em>Journal of Consciousness Studies</em>, 8(5&ndash;7): 33&ndash;50.</li> <li>&ndash;&ndash;&ndash;, 2007, &ldquo;Before and Below &lsquo;Theory of Mind&rsquo;: Embodied Simulation and the Neural Correlates of Social Cognition&rdquo;, <em>Philosophical Transactions of the Royal Society B</em>, 362: 659&ndash;669. doi:10.1098/rstb.2006.2002</li> <li>Gallese, Vittorio and Alvin Goldman, 1998, &ldquo;Mirror Neurons and the Simulation Theory of Mind-reading&rdquo;, <em>Trends in Cognitive Sciences</em>, 2(12): 493&ndash;501. doi:10.1016/S1364-6613(98)01262-5</li> <li>Gallese, Vittorio and Corrado Sinigaglia, 2011, &ldquo;What is so Special about Embodied Simulation?&rdquo; <em>Trends in Cognitive Sciences</em>, 15(11): 512&ndash;519. doi:10.1016/j.tics.2011.09.003</li> <li>Gallese, Vittorio, Luciano Fadiga, Leonardo Fogassi, and Giacomo Rizzolatti, 1996, &ldquo;Action Recognition in the Premotor Cortex&rdquo;, <em>Brain</em>, 119(2): 593&ndash;609. doi:10.1093/brain/119.2.593</li> <li>Gallese, Vittorio, Leonardo Fogassi, Luciano Fadiga, and Giacomo Rizzolatti, 2002, &ldquo;Action Representation and the Inferior Parietal Lobule&rdquo;, in Wolfgang Prinz and Bernhard Hommel (eds.), <em>Common Mechanisms in Perception and Action</em> (Attention and Performance XIX), Oxford: Oxford University Press, pp. 247&ndash;266.</li> <li>Gallese, Vittorio, Christian Keysers, and Giacomo Rizzolatti, 2004, &ldquo;A Unifying View of the Basis of Social Cognition&rdquo;, <em>Trends in Cognitive Sciences</em>, 8(9): 396&ndash;403. doi:10.1016/j.tics.2004.07.002</li> <li>Gari&eacute;py, Jean-Fran&ccedil;ois, Karli K. Watson, Emily Du, Diana L. Xie, Joshua Erb, Dianna Amasino, and Michael L.
Platt, 2014, &ldquo;Social Learning in Humans and Other Animals&rdquo;, <em>Frontiers in Neuroscience</em>, 31 March 2014, doi:10.3389/fnins.2014.00058.</li> <li>Gergely, Gy&ouml;rgy and Gergely Csibra, 2003, &ldquo;Teleological Reasoning in Infancy: The Na&iuml;ve Theory of Rational Action&rdquo;, <em>Trends in Cognitive Sciences</em>, 7(7): 287&ndash;292. doi:10.1016/S1364-6613(03)00128-1</li> <li>Gergely, Gy&ouml;rgy, Zolt&aacute;n N&aacute;dasdy, Gergely Csibra, and Szilvia B&iacute;r&oacute;, 1995, &ldquo;Taking the Intentional Stance at 12 Months of Age&rdquo;, <em>Cognition</em>, 56(2): 165&ndash;93. doi:10.1016/0010-0277(95)00661-H</li> <li>Goldenberg, Georg, Wolf M&uuml;llbacher, and Andreas Nowak, 1995, &ldquo;Imagery without Perception: A Case Study of Anosognosia for Cortical Blindness&rdquo;, <em>Neuropsychologia</em>, 33(11): 1373&ndash;1382. doi:10.1016/0028-3932(95)00070-J</li> <li>Goldman, Alvin I., 1989, &ldquo;Interpretation Psychologized&rdquo;, <em>Mind and Language</em>, 4(3): 161&ndash;185; reprinted in Davies and Stone 1995a, pp. 74&ndash;99. doi:10.1111/j.1468-0017.1989.tb00249.x</li> <li>&ndash;&ndash;&ndash;, 2002, &ldquo;Simulation Theory and Mental Concepts&rdquo;, in J&eacute;r&ocirc;me Dokic &amp; Jo&euml;lle Proust (eds.), <em>Simulation and Knowledge of Action</em>, Amsterdam; Philadelphia: John Benjamins, 35&ndash;71.</li> <li>&ndash;&ndash;&ndash;, 2006, <em>Simulating Minds: The Philosophy, Psychology, and Neuroscience of Mindreading</em>, Oxford: Oxford University Press. doi:10.1093/0195138929.001.0001</li> <li>&ndash;&ndash;&ndash;, 2008a, &ldquo;Hurley on Simulation&rdquo;, <em>Philosophy and Phenomenological Research</em>, 77(3): 775&ndash;788. doi:10.1111/j.1933-1592.2008.00221.x</li> <li>&ndash;&ndash;&ndash;, 2008b, &ldquo;Mirroring, Mindreading, and Simulation&rdquo;, in Jaime A. Pineda (ed.), <em>Mirror Neuron Systems: The Role of Mirroring Processes in Social Cognition</em>, New York: Humana Press, pp. 311&ndash;330. doi:10.1007/978-1-59745-479-7_14</li> <li>&ndash;&ndash;&ndash;, 2009, &ldquo;<em>Pr&eacute;cis</em> of <em>Simulating Minds: The Philosophy, Psychology, and Neuroscience of Mindreading</em>&rdquo; and &ldquo;Replies to Perner and Brandl, Saxe, Vignemont, and Carruthers&rdquo;, <em>Philosophical Studies</em>, 144(3): 431&ndash;434, 477&ndash;491. doi:10.1007/s11098-009-9355-0 and doi:10.1007/s11098-009-9358-x</li> <li>&ndash;&ndash;&ndash;, 2012a, &ldquo;A Moderate Approach to Embodied Cognitive Science&rdquo;, <em>Review of Philosophy and Psychology</em>, 3(1): 71&ndash;88. doi:10.1007/s13164-012-0089-0</li> <li>&ndash;&ndash;&ndash;, 2012b, &ldquo;Theory of Mind&rdquo;, in Eric Margolis, Richard Samuels, and Stephen P. Stich (eds.), <em>The Oxford Handbook of Philosophy of Cognitive Science</em>, Oxford: Oxford University Press, 402&ndash;424. doi:10.1093/oxfordhb/9780195309799.013.0017</li> <li>Goldman, Alvin I. and Lucy C. Jordan, 2013, &ldquo;Mindreading by Simulation: The Roles of Imagination and Mirroring&rdquo;, in Simon Baron-Cohen, Michael Lombardo, and Helen Tager-Flusberg (eds.), <em>Understanding Other Minds: Perspectives From Developmental Social Neuroscience</em>, Oxford: Oxford University Press, 448&ndash;466. doi:10.1093/acprof:oso/9780199692972.003.0025</li> <li>Goldman, Alvin I. and Chandra Sekhar Sripada, 2005, &ldquo;Simulationist Models of Face-Based Emotion Recognition&rdquo;, <em>Cognition</em>, 94(3): 193&ndash;213. doi:10.1016/j.cognition.2004.01.005</li> <li>Gopnik, Alison and Andrew N.
Meltzoff, 1997, <em>Words, Thoughts, and Theories</em>, Cambridge, MA: Bradford Books/MIT Press.</li> <li>Gopnik, Alison and Henry M. Wellman, 1992, &ldquo;Why the Child&rsquo;s Theory of Mind Really Is a Theory&rdquo;, <em>Mind and Language</em>, 7(1&ndash;2): 145&ndash;71; reprinted in Davies and Stone 1995a, pp. 232&ndash;258. doi:10.1111/j.1468-0017.1992.tb00202.x</li> <li>&ndash;&ndash;&ndash;, 2012, &ldquo;Reconstructing Constructivism: Causal Models, Bayesian Learning Mechanisms, and the Theory-Theory&rdquo;, <em>Psychological Bulletin</em>, 138(6): 1085&ndash;1108. doi:10.1037/a0028044</li> <li>Gordon, Robert M., 1986, &ldquo;Folk Psychology as Simulation&rdquo;, <em>Mind and Language</em>, 1(2): 158&ndash;171; reprinted in Davies and Stone 1995a, pp. 60&ndash;73. doi:10.1111/j.1468-0017.1986.tb00324.x</li> <li>&ndash;&ndash;&ndash;, 1995, &ldquo;Simulation Without Introspection or Inference From Me to You&rdquo;, in Davies &amp; Stone 1995b: 53&ndash;67.</li> <li>&ndash;&ndash;&ndash;, 1996, &ldquo;&lsquo;Radical&rsquo; Simulationism&rdquo;, in Carruthers &amp; Smith 1996: 11&ndash;21. doi:10.1017/CBO9780511597985.003</li> <li>&ndash;&ndash;&ndash;, 2000, &ldquo;Sellars&rsquo;s Rylean Revisited&rdquo;, <em>Protosoziologie</em>, 14: 102&ndash;114.</li> <li>&ndash;&ndash;&ndash;, 2005, &ldquo;Intentional Agents Like Myself&rdquo;, in <em>Perspectives on Imitation: From Mirror Neurons to Memes</em>, S. Hurley &amp; N. Chater (eds.), Cambridge, MA: MIT Press.</li> <li>&ndash;&ndash;&ndash;, 2007, &ldquo;Ascent Routines for Propositional Attitudes&rdquo;, <em>Synthese</em>, 159(2): 151&ndash;165. doi:10.1007/s11229-007-9202-9</li> <li>Harris, Paul L., 1989, <em>Children and Emotion</em>, Oxford: Blackwell Publishers.</li> <li>&ndash;&ndash;&ndash;, 1992, &ldquo;From Simulation to Folk Psychology: The Case for Development&rdquo;, <em>Mind and Language</em>, 7(1&ndash;2): 120&ndash;144; reprinted in Davies and Stone 1995a, pp. 207&ndash;231. doi:10.1111/j.1468-0017.1992.tb00201.x</li> <li>Heal, Jane, 1986, &ldquo;Replication and Functionalism&rdquo;, in <em>Language, Mind, and Logic</em>, J. Butterfield (ed.), Cambridge: Cambridge University Press; reprinted in Davies and Stone 1995a, pp. 45&ndash;59.</li> <li>&ndash;&ndash;&ndash;, 1994, &ldquo;Simulation vs Theory-Theory: What is at Issue?&rdquo; in Christopher Peacocke (ed.), <em>Objectivity, Simulation, and the Unity of Consciousness: Current Issues in the Philosophy of Mind</em> (Proceedings of the British Academy, 83), Oxford: Oxford University Press, pp. 129&ndash;144. [<a href="http://www.britac.ac.uk/pubs/proc/volumes/pba83.html" target="other">Heal 1994 available online</a>]</li> <li>&ndash;&ndash;&ndash;, 1995, &ldquo;How to Think About Thinking&rdquo;, in Davies and Stone 1995b: chapter 2, pp. 33&ndash;52.</li> <li>&ndash;&ndash;&ndash;, 1998, &ldquo;Co-Cognition and Off-Line Simulation: Two Ways of Understanding the Simulation Approach&rdquo;, <em>Mind and Language</em>, 13(4): 477&ndash;498. doi:10.1111/1468-0017.00088</li> <li>&ndash;&ndash;&ndash;, 2003, <em>Mind, Reason and Imagination</em>, Cambridge: Cambridge University Press.</li> <li>Helming, Katharina A., Brent Strickland, and Pierre Jacob, 2014, &ldquo;Making Sense of Early False-Belief Understanding&rdquo;, <em>Trends in Cognitive Sciences</em>, 18(4): 167&ndash;170. doi:10.1016/j.tics.2014.01.005</li> <li>Herschbach, Mitchell, 2012, &ldquo;Mirroring Versus Simulation: On the Representational Function of Simulation&rdquo;, <em>Synthese</em>, 189(3): 483&ndash;513.
doi:10.1007/s11229-011-9969-6</li> <li>Hickok, Gregory, 2009, &ldquo;Eight Problems for the Mirror Neuron Theory of Action Understanding in Monkeys and Humans&rdquo;, <em>Journal of Cognitive Neuroscience</em>, 21(7): 1229&ndash;1243. doi:10.1162/jocn.2009.21189</li> <li>&ndash;&ndash;&ndash;, 2014, <em>The Myth of Mirror Neurons: The Real Neuroscience of Communication and Cognition</em>, New York: Norton.</li> <li>Hume, David, 1739, <em>A Treatise of Human Nature</em>, edited by L.A. Selby-Bigge, 2<sup>nd</sup> edition, revised by P.H. Nidditch, Oxford: Clarendon Press, 1975.</li> <li>Hurley, Susan, 2005, &ldquo;The Shared Circuits Hypothesis: A Unified Functional Architecture for Control, Imitation, and Simulation&rdquo;, in <em>Perspectives on Imitation: From Neuroscience to Social Science, Volume 1: Mechanisms of Imitation and Imitation in Animals</em>, Susan Hurley &amp; Nick Chater (eds.), Cambridge, MA: MIT Press, pp. 177&ndash;193.</li> <li>&ndash;&ndash;&ndash;, 2008, &ldquo;Understanding Simulation&rdquo;, <em>Philosophy and Phenomenological Research</em>, 77(3): 755&ndash;774. doi:10.1111/j.1933-1592.2008.00220.x</li> <li>Jackson, Frank, 1999, &ldquo;All That Can Be at Issue in the Theory-Theory Simulation Debate&rdquo;, <em>Philosophical Papers</em>, 28(2): 77&ndash;95. doi:10.1080/05568649909506593</li> <li>Jacob, Pierre, 2008, &ldquo;What do Mirror Neurons Contribute to Human Social Cognition?&rdquo;, <em>Mind and Language</em>, 23(2): 190&ndash;223. doi:10.1111/j.1468-0017.2007.00337.x</li> <li>&ndash;&ndash;&ndash;, 2012, &ldquo;Sharing and Ascribing Goals&rdquo;, <em>Mind and Language</em>, 27(2): 200&ndash;227. doi:10.1111/j.1468-0017.2012.01441.x</li> <li>Jeannerod, Marc and Elisabeth Pacherie, 2004, &ldquo;Agency, Simulation and Self-Identification&rdquo;, <em>Mind and Language</em>, 19(2): 113&ndash;146. doi:10.1111/j.1468-0017.2004.00251.x</li> <li>Kieran, Matthew and Dominic McIver Lopes (eds.), 2003, <em>Imagination, Philosophy, and the Arts</em>, London: Routledge.</li> <li>Kosslyn, S.M., A. Pascual-Leone, O. Felician, S. Camposano, J.P. Keenan, W.L. Thompson, G. Ganis, K.E. Sukel, and N.M. Alpert, 1999, &ldquo;The Role of Area 17 in Visual Imagery: Convergent Evidence from PET and rTMS&rdquo;, <em>Science</em>, 284(5411): 167&ndash;170. doi:10.1126/science.284.5411.167</li> <li>Leslie, Alan M., 1994, &ldquo;Pretending and Believing: Issues in the Theory of ToMM&rdquo;, <em>Cognition</em>, 50(1&ndash;3): 211&ndash;238. doi:10.1016/0010-0277(94)90029-9</li> <li>Lipps, Theodor, 1903, &ldquo;Einf&uuml;hlung, Innere Nachahmung und Organempfindung&rdquo;, <em>Archiv f&uuml;r gesamte Psychologie</em>, 1: 465&ndash;519. Translated as &ldquo;Empathy, Inner Imitation and Sense-Feelings&rdquo;, in <em>A Modern Book of Esthetics</em>, New York: Holt, Rinehart and Winston, 1979, pp. 374&ndash;382.</li> <li>Lurz, Robert W., 2011, <em>Mindreading Animals</em>, Cambridge, MA: MIT Press. doi:10.7551/mitpress/9780262016056.001.0001</li> <li>Machamer, Peter, Lindley Darden, and Carl F. Craver, 2000, &ldquo;Thinking about Mechanisms&rdquo;, <em>Philosophy of Science</em>, 67(1): 1&ndash;25. doi:10.1086/392759</li> <li>Marr, D., 1982, <em>Vision</em>, San Francisco: Freeman Press.</li> <li>Nichols, Shaun (ed.), 2006a, <em>The Architecture of the Imagination: New Essays on Pretense, Possibility, and Fiction</em>, Oxford: Oxford University Press.
doi:10.1093/acprof:oso/9780199275731.001.0001</li> <li>&ndash;&ndash;&ndash;, 2006b, &ldquo;Just the Imagination: Why Imagining Doesn&rsquo;t Behave Like Believing&rdquo;, <em>Mind &amp; Language</em>, 21(4): 459&ndash;474. doi:10.1111/j.1468-0017.2006.00286.x</li> <li>Nichols, Shaun and Stephen P. Stich, 2003, <em>Mindreading: An Integrated Account of Pretence, Self-Awareness, and Understanding of Other Minds</em>, Oxford: Oxford University Press. doi:10.1093/0198236107.001.0001</li> <li>Onishi, Kristine H. and Ren&eacute;e Baillargeon, 2005, &ldquo;Do 15-Month-Old Infants Understand False Beliefs?&rdquo; <em>Science</em>, 308(5719): 255&ndash;258. doi:10.1126/science.1107621</li> <li>Pacherie, Elisabeth, 2000, &ldquo;The Content of Intentions&rdquo;, <em>Mind and Language</em>, 15(4): 400&ndash;432. doi:10.1111/1468-0017.00142</li> <li>Peacocke, C., 2005, &ldquo;Another I: Representing Conscious States, Perception, and Others&rdquo;, in J. L. Berm&uacute;dez (ed.), <em>Thought, Reference, and Experience: Themes From the Philosophy of Gareth Evans</em>, Oxford: Clarendon Press.</li> <li>Perner, Josef and Deborah Howes, 1992, &ldquo;&lsquo;He Thinks he Knows&rsquo; and more Developmental Evidence Against the Simulation (Role-Taking) Theory&rdquo;, <em>Mind and Language</em>, 7(1&ndash;2): 72&ndash;86; reprinted in Davies and Stone 1995a, pp. 159&ndash;173. doi:10.1111/j.1468-0017.1992.tb00197.x</li> <li>Perner, Josef and Anton K&uuml;hberger, 2005, &ldquo;Mental Simulation: Royal Road to Other Minds?&rdquo;, in Bertram F. Malle and Sara D. Hodges (eds.), <em>Other Minds: How Humans Bridge the Divide Between Self and Others</em>, New York: Guilford Press, pp. 174&ndash;187.</li> <li>Perner, Josef and Ted Ruffman, 2005, &ldquo;Infants&rsquo; Insight into the Mind: How Deep?&rdquo; <em>Science</em>, 308(5719): 214&ndash;216. doi:10.1126/science.1111656</li> <li>Ramsey, William M., 2010, &ldquo;How Not to Build a Hybrid: Simulation vs. Fact-finding&rdquo;, <em>Philosophical Psychology</em>, 23(6): 775&ndash;795. doi:10.1080/09515089.2010.529047</li> <li>Rizzolatti, Giacomo and Laila Craighero, 2004, &ldquo;The Mirror-Neuron System&rdquo;, <em>Annual Review of Neuroscience</em>, 27: 169&ndash;92. doi:10.1146/annurev.neuro.27.070203.144230</li> <li>Rizzolatti, Giacomo &amp; Corrado Sinigaglia, 2007, &ldquo;Mirror neurons and motor intentionality&rdquo;, <em>Functional Neurology</em>, 22(4): 205&ndash;210.</li> <li>&ndash;&ndash;&ndash;, 2010, &ldquo;The Functional Role of the Parieto-Frontal Mirror Circuit: Interpretations and Misinterpretations&rdquo;, <em>Nature Reviews Neuroscience</em>, 11: 264&ndash;274. doi:10.1038/nrn2805</li> <li>&ndash;&ndash;&ndash;, 2014, &ldquo;Review: A Curious Book on Mirror Neurons and Their Myth&rdquo;, <em>The American Journal of Psychology</em>, 128(4): 527&ndash;533. doi:10.5406/amerjpsyc.128.4.0527</li> <li>&ndash;&ndash;&ndash;, 2016, &ldquo;The Mirror Mechanism: a Basic Principle of Brain Function&rdquo;, <em>Nature Reviews Neuroscience</em>, 17: 757&ndash;765. doi:10.1038/nrn.2016.135</li> <li>Rizzolatti, G., R. Camarda, L. Fogassi, M. Gentilucci, G. Luppino, and M. Matelli, 1988, &ldquo;Functional Organization of Inferior Area 6 in the Macaque Monkey&rdquo;, <em>Experimental Brain Research</em>, 71(1): 491&ndash;507. doi:10.1007/BF00248742</li> <li>Rizzolatti, Giacomo, Luciano Fadiga, Vittorio Gallese, and Leonardo Fogassi, 1996, &ldquo;Premotor Cortex and the Recognition of Motor Actions&rdquo;, <em>Cognitive Brain Research</em>, 3(2): 131&ndash;141.
doi:10.1016/0926-6410(95)00038-0</li> <li>Rozin, Paul, Jonathan Haidt, and Clark R. McCauley, 2008, &ldquo;Disgust&rdquo;, in Michael Lewis, Jeannette M. Haviland-Jones &amp; Lisa Feldman Barrett (eds.), <em>Handbook of Emotions</em> (3rd edition), New York: Guilford Press, pp. 757&ndash;776.</li> <li>Saxe, Rebecca, 2005, &ldquo;Against Simulation: The Argument from Error&rdquo;, <em>Trends in Cognitive Sciences</em>, 9(4): 174&ndash;179. doi:10.1016/j.tics.2005.01.012</li> <li>Scholl, Brian J. and Alan M. Leslie, 1999, &ldquo;Modularity, Development and Theory of Mind&rdquo;, <em>Mind and Language</em>, 14(1): 131&ndash;153. doi:10.1111/1468-0017.00106</li> <li>Singer, Tania, Ben Seymour, John O&rsquo;Doherty, Holger Kaube, Raymond J. Dolan, and Chris D. Frith, 2004, &ldquo;Empathy for Pain Involves the Affective but not Sensory Components of Pain&rdquo;, <em>Science</em>, 303(5661): 1157&ndash;1162. doi:10.1126/science.1093535</li> <li>Smith, Adam, 1759, <em>The Theory of Moral Sentiments</em>, D.D. Raphael and A.L. Macfie (eds.), Oxford: Oxford University Press, 1976.</li> <li>Spaulding, Shannon, 2012, &ldquo;Mirror Neurons are not Evidence for the Simulation Theory&rdquo;, <em>Synthese</em>, 189(3): 515&ndash;534. doi:10.1007/s11229-012-0086-y</li> <li>Spivey, Michael J., Daniel C. Richardson, Melinda J. Tyler, and Ezekiel E. Young, 2000, &ldquo;Eye Movements During Comprehension of Spoken Scene Descriptions&rdquo;, in <em>Proceedings of the 22<sup>nd</sup> Annual Conference of the Cognitive Science Society</em>, Mahwah, NJ: Erlbaum, pp. 487&ndash;492.</li> <li>Stich, Stephen and Shaun Nichols, 1992, &ldquo;Folk Psychology: Simulation or Tacit Theory?&rdquo;, <em>Mind and Language</em>, 7(1&ndash;2): 35&ndash;71; reprinted in Davies and Stone 1995a, pp. 123&ndash;158. doi:10.1111/j.1468-0017.1992.tb00196.x</li> <li>&ndash;&ndash;&ndash;, 1997, &ldquo;Cognitive Penetrability, Rationality, and Restricted Simulation&rdquo;, <em>Mind and Language</em>, 12(3&ndash;4): 297&ndash;326. doi:10.1111/j.1468-0017.1997.tb00076.x</li> <li>Stich, Stephen and Ian Ravenscroft, 1992, &ldquo;What <em>is</em> Folk Psychology?&rdquo; <em>Cognition</em>, 50(1&ndash;3): 447&ndash;68. doi:10.1016/0010-0277(94)90040-X</li> <li>Velleman, J. David, 2000, &ldquo;The Aim of Belief&rdquo;, in <em>The Possibility of Practical Reason</em>, Oxford: Oxford University Press, pp. 244&ndash;282.</li> <li>Vannuscorps, Gilles and Alfonso Caramazza, 2015, &ldquo;Typical Action Perception and Interpretation without Motor Simulation&rdquo;, <em>Proceedings of the National Academy of Sciences</em>, 113(1): 1&ndash;6. doi:10.1073/pnas.1516978112</li> <li>Wellman, Henry M., David Cross, and Julanne Watson, 2001, &ldquo;Meta-Analysis of Theory-of-Mind Development: The Truth about False Belief&rdquo;, <em>Child Development</em>, 72(3): 655&ndash;684. doi:10.1111/1467-8624.00304</li> <li>Wicker, Bruno, Christian Keysers, Jane Plailly, Jean-Pierre Royet, Vittorio Gallese, and Giacomo Rizzolatti, 2003, &ldquo;Both of us Disgusted in <em>My</em> Insula: The Common Neural Basis of Seeing and Feeling Disgust&rdquo;, <em>Neuron</em>, 40(3): 655&ndash;664. doi:10.1016/S0896-6273(03)00679-2</li> <li>Wimmer, Heinz and Josef Perner, 1983, &ldquo;Beliefs About Beliefs: Representation and Constraining Function of Wrong Beliefs in Young Children&rsquo;s Understanding of Deception&rdquo;, <em>Cognition</em>, 13(1): 103&ndash;128.
doi:10.1016/0010-0277(83)90004-5</li> </ul> </div> <div id="academic-tools"> <h2 id="Aca">Academic Tools</h2> <blockquote> <table class="vert-top"> <tr> <td> <img src="../../symbols/sepman-icon.jpg" alt="sep man icon" /> </td> <td><a href="https://plato.stanford.edu/cgi-bin/encyclopedia/archinfo.cgi?entry=folkpsych-simulation" target="other">How to cite this entry</a>.</td> </tr> <tr> <td> <img src="../../symbols/sepman-icon.jpg" alt="sep man icon" /> </td> <td><a href="https://leibniz.stanford.edu/friends/preview/folkpsych-simulation/" target="other">Preview the PDF version of this entry</a> at the <a href="https://leibniz.stanford.edu/friends/" target="other">Friends of the SEP Society</a>.</td> </tr> <tr> <td> <img src="../../symbols/inpho.png" alt="inpho icon" /> </td> <td><a href="https://www.inphoproject.org/entity?sep=folkpsych-simulation&amp;redirect=True" target="other">Look up topics and thinkers related to this entry</a> at the Internet Philosophy Ontology Project (InPhO).</td> </tr> <tr> <td> <img src="../../symbols/pp.gif" alt="phil papers icon" /> </td> <td><a href="http://philpapers.org/sep/folkpsych-simulation/" target="other">Enhanced bibliography for this entry</a> at <a href="http://philpapers.org/" target="other">PhilPapers</a>, with links to its database.</td> </tr> </table> </blockquote> </div> <div id="other-internet-resources"> <h2 id="Oth">Other Internet Resources</h2> <p> [Please contact the authors with suggestions.]</p> </div> <div id="related-entries"> <h2 id="Rel">Related Entries</h2> <p> <a href="../folkpsych-theory/">folk psychology: as a theory</a> | <a href="../imagination/">imagination</a> | <a href="../introspection/">introspection</a> | <a href="../materialism-eliminative/">materialism: eliminative</a> | <a href="../computational-mind/">mind: computational theory of</a> | <a href="../self-knowledge/">self-knowledge</a> </p> </div> <div id="acknowledgments"> <h3>Acknowledgments</h3> <p> The authors would like to thank Tom Cochrane, Jeremy Dunham, Steve Laurence, and an anonymous referee for comments on earlier drafts of this entry.</p> </div> </div><!-- #aueditable --><!--DO NOT MODIFY THIS LINE AND BELOW--> <!-- END ARTICLE HTML --> </div> <!-- End article-content --> <div id="article-copyright"> <p> <a href="../../info.html#c">Copyright &copy; 2017</a> by <br /> Luca Barlassina &lt;<a href="m&#97;ilto:l&#37;2ebarlassina&#37;40sheffield&#37;2eac&#37;2euk"><em>l<abbr title=" dot ">&#46;</abbr>barlassina<abbr title=" at ">&#64;</abbr>sheffield<abbr title=" dot ">&#46;</abbr>ac<abbr title=" dot ">&#46;</abbr>uk</em></a>&gt;<br /> <a href="https://www.umsl.edu/~philo/directory/emeritus/gordon.html" target="other">Robert M. Gordon</a> </p> </div> </div> <!-- End article --> <!-- NOTE: article banner is outside of the id="article" div. 
--> </div> <!-- End content --> </div> <!-- End container --> </body> </html>
