href="#Aca">Academic Tools</a></li> <li><a href="https://leibniz.stanford.edu/friends/preview/logic-classical/">Friends PDF Preview <i class="icon-external-link"></i></a></li> <li><a href="https://plato.stanford.edu/cgi-bin/encyclopedia/archinfo.cgi?entry=logic-classical">Author and Citation Info <i class="icon-external-link"></i></a> </li> <li><a href="#pagetopright" class="back-to-top">Back to Top <i class="icon-angle-up icon2x"></i></a></li> </ul> </div> </div> </div> </div> </div> <!-- End article sidebar --> <!-- NOTE: Article content must have two wrapper divs: id="article" and id="article-content" --> <div id="article"> <div id="article-content"> <!-- BEGIN ARTICLE HTML --> <div id="aueditable"><!--DO NOT MODIFY THIS LINE AND ABOVE--> <h1>Classical Logic</h1><div id="pubinfo"><em>First published Sat Sep 16, 2000; substantive revision Wed Jun 29, 2022</em></div> <div id="preamble"> <p> Typically, a <em>logic</em> consists of a formal or informal language together with a deductive system and/or a model-theoretic semantics. The language has components that correspond to a part of a natural language like English or Greek. The deductive system is to capture, codify, or simply record <em>arguments</em> that are <em>valid</em> for the given language, and the semantics is to capture, codify, or record the meanings, or truth-conditions for at least part of the language.</p> <p> The following sections provide the basics of a typical logic, sometimes called “classical elementary logic” or “classical first-order logic”. Section 2 develops a formal language, with a rigorous syntax and grammar. The formal language is a recursively defined collection of strings on a fixed alphabet. As such, it has no meaning, or perhaps better, the meaning of its formulas is given by the deductive system and the semantics. Some of the symbols have counterparts in ordinary language. We define an <em>argument</em> to be a non-empty collection of sentences in the formal language, one of which is designated to be the conclusion. The other sentences (if any) in an argument are its premises. Section 3 sets up a deductive system for the language, in the spirit of natural deduction. An argument is <em>derivable</em> if there is a deduction from some or all of its premises to its conclusion. Section 4 provides a model-theoretic semantics. An argument is <em>valid</em> if there is no interpretation (in the semantics) in which its premises are all true and its conclusion false. This reflects the longstanding view that a valid argument is truth-preserving.</p> <p> In Section 5, we turn to relationships between the deductive system and the semantics, and in particular, the relationship between derivability and validity. We show that an argument is derivable only if it is valid. This pleasant feature, called <em>soundness</em>, entails that no deduction takes one from true premises to a false conclusion. Thus, deductions preserve truth. Then we establish a converse, called <em>completeness</em>, that an argument is valid only if it is derivable. This shows that the deductive system is rich enough to provide a deduction for every valid argument. So there are enough deductions: all and only valid arguments are derivable. 
We briefly indicate other features of the logic, some of which are corollaries to soundness and completeness.</p> <p> The final section, Section 6, is devoted to a brief examination of the philosophical position that classical logic is “the one right logic”.</p> </div> <div id="toc"> <!--Entry Contents--> <ul> <li><a href="#Intr">1. Introduction </a></li> <li><a href="#Lang">2. Language</a> <ul> <li><a href="#BuilBloc">2.1 Building blocks</a></li> <li><a href="#AtomForm">2.2 Atomic formulas </a></li> <li><a href="#CompForm">2.3 Compound formulas</a></li> <li><a href="#FeatSynt">2.4 Features of the syntax </a></li> </ul></li> <li><a href="#Dedu">3. Deduction</a></li> <li><a href="#Sema">4. Semantics</a></li> <li><a href="#MetaTheo">5. Meta-theory</a></li> <li><a href="#OneRighLogi">6. The One Right Logic? </a> <ul> <li><a href="#RivaClasFirsOrdeLogi">6.1 Rivals to classical, first-order logic</a></li> <li><a href="#SublClasFirsOrdeLogi">6.2 Sublogics of classical, first-order logic</a></li> </ul></li> <li><a href="#Bib">Bibliography</a> <ul> <li><a href="#FurtRead">Further Reading</a></li> </ul></li> <li><a href="#Aca">Academic Tools</a></li> <li><a href="#Oth">Other Internet Resources</a></li> <li><a href="#Rel">Related Entries</a></li> </ul> <!--Entry Contents--> <hr /> </div> <div id="main-text"> <h2 id="Intr">1. Introduction</h2> <p> Today, logic is a branch of mathematics and a branch of philosophy. In most large universities, both departments offer courses in logic, and there is usually a lot of overlap between them. Formal languages, deductive systems, and model-theoretic semantics are mathematical objects and, as such, the logician is interested in their mathematical properties and relations. Soundness, completeness, and most of the other results reported below are typical examples. Philosophically, logic is at least closely related to the study of <em>correct reasoning</em>. Reasoning is an epistemic, mental activity. So logic is at least closely allied with epistemology. Logic is also a central branch of computer science, due, in part, to interesting computational relations in logical systems, and, in part, to the close connection between formal deductive argumentation and reasoning (see the entries on <a href="../recursive-functions/">recursive functions</a>, <a href="../computability/">computability and complexity</a>, and <a href="../computer-science/">philosophy of computer science</a>).</p> <p> This raises questions concerning the philosophical relevance of the various mathematical aspects of logic. How do deducibility and validity, as properties of formal languages – sets of strings on a fixed alphabet – relate to correct reasoning? What do the mathematical results reported below have to do with the original philosophical issues concerning valid reasoning? This is an instance of the philosophical problem of explaining how mathematics applies to non-mathematical reality.</p> <p> Typically, ordinary deductive reasoning takes place in a natural language, or perhaps a natural language augmented with some mathematical symbols. So our question begins with the relationship between a natural language and a formal language. Without attempting to be comprehensive, it may help to sketch several options on this matter.</p> <p> One view is that the formal languages accurately exhibit actual features of certain fragments of a natural language.
Some philosophers claim that declarative sentences of natural language have underlying <em>logical forms</em> and that these forms are displayed by formulas of a formal language. Other writers hold that (successful) declarative sentences express <em>propositions</em>; and formulas of formal languages somehow display the forms of these propositions. On views like this, the components of a logic provide the underlying deep structure of correct reasoning. A chunk of reasoning in natural language is correct if the forms underlying the sentences constitute a valid or deducible argument. See, for example, Montague [1974], Davidson [1984], Lycan [1984] (and the entry on <a href="../logical-form/">logical form</a>).</p> <p> Another view, held at least in part by Gottlob Frege and Gottfried Wilhelm Leibniz, is that because natural languages are fraught with vagueness and ambiguity, they should be <em>replaced</em> by formal languages. A similar view, held by W. V. O. Quine (e.g., [1960], [1986]), is that a natural language should be <em>regimented</em>, cleaned up for serious scientific and metaphysical work. One desideratum of the enterprise is that the logical structures in the regimented language should be transparent. It should be easy to “read off” the logical properties of each sentence. A regimented language is similar to a formal language regarding, for example, the explicitly presented rigor of its syntax and its truth conditions.</p> <p> On a view like this, deducibility and validity represent <em>idealizations</em> of correct reasoning in natural language. A chunk of reasoning is correct to the extent that it corresponds to, or can be regimented by, a valid or deducible argument in a formal language.</p> <p> When mathematicians and many philosophers engage in deductive reasoning, they occasionally invoke formulas in a formal language to help disambiguate, or otherwise clarify what they mean. In other words, sometimes formulas in a formal language are <em>used</em> in ordinary reasoning. This suggests that one might think of a formal language as an <em>addendum</em> to a natural language. Then our present question concerns the relationship between this addendum and the original language. What do deducibility and validity, as sharply defined on the addendum, tell us about correct deductive reasoning in general?</p> <p> Another view is that a formal language is a <em>mathematical model</em> of a natural language in roughly the same sense as, say, a collection of point masses is a model of a system of physical objects, and the Bohr construction is a model of an atom. In other words, a formal language displays certain features of natural languages, or idealizations thereof, while ignoring or simplifying other features. The purpose of mathematical models is to shed light on what they are models of, without claiming that the model is accurate in all respects or that the model should replace what it is a model of. On a view like this, deducibility and validity represent mathematical models of (perhaps different aspects of) correct reasoning in natural languages. Correct chunks of deductive reasoning correspond, more or less, to valid or deducible arguments; incorrect chunks of reasoning roughly correspond to invalid or non-deducible arguments. See, for example, Corcoran [1973], Shapiro [1998], and Cook [2002].</p> <p> There is no need to adjudicate this matter here. Perhaps the truth lies in a combination of the above options, or maybe some other option is the correct, or most illuminating one.
We raise the matter only to lend some philosophical perspective to the formal treatment that follows.</p> <h2 id="Lang">2. Language</h2> <p> Here we develop the basics of a formal language, or to be precise, a class of formal languages. Again, a formal language is a recursively defined set of strings on a fixed alphabet. Some aspects of the formal languages correspond to, or have counterparts in, natural languages like English. Technically, this “counterpart relation” is not part of the formal development, but we will mention it from time to time, to motivate some of the features and results.</p> <h3 id="BuilBloc">2.1 Building blocks</h3> <p> We begin with analogues of <em>singular terms</em>, linguistic items whose function is to denote a person or object. We call these <em>terms</em>. We assume a stock of <em>individual constants</em>. These are lower-case letters, near the beginning of the Roman alphabet, with or without numerical subscripts:</p> \[ a, a_1, b_{23}, c, d_{22}, \text{etc}. \] <p> We envisage a potential infinity of individual constants. In the present system each constant is a single character, and so individual constants do not have an internal syntax. Thus we have an infinite alphabet. This could be avoided by taking a constant like \(d_{22}\), for example, to consist of three characters, a lowercase “\(d\)” followed by a pair of subscript “2”s.</p> <p> We also assume a stock of <em>individual variables</em>. These are lower-case letters, near the end of the alphabet, with or without numerical subscripts:</p> \[ w, x, y_{12}, z, z_4, \text{etc}. \] <p> In ordinary mathematical reasoning, there are two functions that terms need to fulfill. We need to be able to denote specific, but unspecified (or arbitrary) objects, and sometimes we need to express generality. In our system, we use some constants in the role of unspecified reference and variables to express generality. Both uses are recapitulated in the formal treatment below. Some logicians employ different symbols for unspecified objects (sometimes called “individual parameters”) and variables used to express generality. </p> <p> Constants and variables are the only terms in our formal language, so all of our terms are simple, corresponding to proper names and some uses of pronouns. We call a term closed if it is not a variable. In general, we use \(v\) to represent variables, and \(t\) to represent a closed term, an individual constant. Some authors also introduce <em>function letters</em>, which allow complex terms corresponding to: “\(7+4\)” and “the wife of Bill Clinton”, or complex terms containing variables, like “the father of \(x\)” and “\(x/y\)”. Logic books aimed at mathematicians are likely to contain function letters, probably due to the centrality of functions in mathematical discourse. Books aimed at a more general audience (or at philosophy students) may leave out function letters, since doing so simplifies the syntax and theory. We follow the latter route here. This is an instance of a general tradeoff between presenting a system with greater expressive resources, at the cost of making its formal treatment more complex. </p> <p> For each natural number \(n\), we introduce a stock of \(n\)-place <em>predicate letters</em>. These are upper-case letters at the beginning or middle of the alphabet. A superscript indicates the number of places, and there may or may not be a subscript. For example, </p> \[ A^3, B^{3}_2, P^3, \text{etc}. \] <p> are three-place predicate letters.
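</p> <p> As a concrete, unofficial illustration, the lexical categories can be checked mechanically. The following Python sketch is ours, not part of the formal development; it treats a subscripted constant such as \(d_{22}\) as a multi-character string, as suggested above, and the exact letter ranges are our own choices:</p> <pre>
import re

# A sketch of the lexicon: the three categories are disjoint by design.
CONSTANT = re.compile(r'^[a-d](\d+)?$')            # a, a1, b23, c, d22, ...
VARIABLE = re.compile(r'^[w-z](\d+)?$')            # w, x, y12, z, z4, ...
PREDICATE = re.compile(r'^[A-S](\^\d+)?(_\d+)?$')  # A^3, B^3_2, P^3, ...

def category(symbol: str) -> str:
    """Classify a lexical item into one of the disjoint categories."""
    if CONSTANT.match(symbol):
        return 'individual constant'
    if VARIABLE.match(symbol):
        return 'individual variable'
    if PREDICATE.match(symbol):
        return 'predicate letter'
    return 'logical symbol or ill-formed'

assert category('b23') == 'individual constant'
assert category('y12') == 'individual variable'
assert category('B^3_2') == 'predicate letter'
</pre> <p>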
We often omit the superscript, when no confusion will result. We also add a special two-place predicate symbol “\(=\)” for identity.</p> <p> Zero-place predicate letters are sometimes called “sentence letters”. They correspond to free-standing sentences whose internal structure does not matter. One-place predicate letters, called “monadic predicate letters”, correspond to linguistic items denoting properties, like “being a man”, “being red”, or “being a prime number”. Two-place predicate letters, called “binary predicate letters”, correspond to linguistic items denoting binary relations, like “is a parent of” or “is greater than”. Three-place predicate letters correspond to three-place relations, like “lies on a straight line between”. And so on. </p> <p> The <em>non-logical terminology</em> of the language consists of its individual constants and predicate letters. The symbol “\(=\)”, for identity, is not a non-logical symbol. In taking identity to be logical, we provide explicit treatment for it in the deductive system and in the model-theoretic semantics. Most authors do the same, but there is some controversy over the issue (Quine [1986, Chapter 5]). If \(K\) is a set of constants and predicate letters, then we give the fundamentals of a language \(\LKe\) built on this set of non-logical terminology. It may be called the <em>first-order language with identity</em> on \(K\). A similar language that lacks the symbol for identity (or which takes identity to be non-logical) may be called \(\mathcal{L}1K\), the <em>first-order language without identity</em> on \(K\).</p> <h3 id="AtomForm">2.2 Atomic formulas</h3> <p> If \(V\) is an \(n\)-place predicate letter in \(K\), and \(t_1, \ldots,t_n\) are terms of \(K\), then \(Vt_1 \ldots t_n\) is an <em>atomic formula</em> of \(\LKe\). Notice that the terms \(t_1, \ldots,t_n\) need not be distinct. Examples of atomic formulas include:</p> \[ P^4 xaab, C^1 x, C^1 a, D^0, A^3 abc. \] <p> The last one is an analogue of a statement that a certain relation \((A)\) holds between three objects \((a, b, c)\). If \(t_1\) and \(t_2\) are terms, then \(t_1 =t_2\) is also an atomic formula of \(\LKe\). It corresponds to an assertion that \(t_1\) is identical to \(t_2\).</p> <p> If an atomic formula has no variables, then it is called an <em>atomic sentence</em>. If it does have variables, it is called <em>open</em>. In the above list of examples, the first and second are open; the rest are sentences. </p> <h3 id="CompForm">2.3 Compound formulas</h3> <p> We now introduce the final items of the lexicon:</p> \[ \neg, \amp, \vee, \rightarrow, \forall, \exists, (, ) \] <p> We give a recursive definition of a <em>formula</em> of \(\LKe\):</p> <ol> <li>All atomic formulas of \(\LKe\) are formulas of \(\LKe\).</li> <li>If \(\theta\) is a formula of \(\LKe\), then so is \(\neg \theta\).</li> </ol> <p> A formula corresponding to \(\neg \theta\) thus says that it is not the case that \(\theta\). The symbol “\(\neg\)” is called “negation”, and is a unary connective.</p> <ol start="3"> <li>If \(\theta\) and \(\psi\) are formulas of \(\LKe\), then so is \((\theta \amp \psi)\).</li> </ol> <p> The ampersand “\(\amp\)” corresponds to the English “and” (when “and” is used to connect sentences). So \((\theta \amp \psi)\) can be read “\(\theta\) and \(\psi\)”. The formula \((\theta \amp \psi)\) is called the “conjunction” of \(\theta\) and \(\psi\).</p> <ol start="4"> <li>If \(\theta\) and \(\psi\) are formulas of \(\LKe\), then so is \((\theta \vee \psi)\). 
</li> </ol> <p> The symbol “\(\vee\)” corresponds to “either … or … or both”, so \((\theta \vee \psi)\) can be read “\(\theta\) or \(\psi\)”. The formula \((\theta \vee \psi)\) is called the “disjunction” of \(\theta\) and \(\psi\).</p> <ol start="5"> <li> If \(\theta\) and \(\psi\) are formulas of \(\LKe\), then so is \((\theta \rightarrow \psi)\).</li> </ol> <p> The arrow “\(\rightarrow\)” roughly corresponds to “if … then … ”, so \((\theta \rightarrow \psi)\) can be read “if \(\theta\) then \(\psi\)” or “\(\theta\) only if \(\psi\)”.</p> <p> The symbols “\(\amp\)”, “\(\vee\)”, and “\(\rightarrow\)” are called “binary connectives”, since they serve to “connect” two formulas into one. Some authors introduce \((\theta \leftrightarrow \psi)\) as an abbreviation of \(((\theta \rightarrow \psi) \amp(\psi \rightarrow \theta))\). The symbol “\(\leftrightarrow\)” is an analogue of the locution “if and only if”.</p> <ol start="6"> <li>If \(\theta\) is a formula of \(\LKe\) and \(v\) is a variable, then \(\forall v \theta\) is a formula of \(\LKe\).</li> </ol> <p> The symbol “\(\forall\)” is called a <em>universal quantifier</em>, and is an analogue of “for all”; so \(\forall v\theta\) can be read “for all \(v, \theta\)”.</p> <ol start="7"> <li>If \(\theta\) is a formula of \(\LKe\) and \(v\) is a variable, then \(\exists v \theta\) is a formula of \(\LKe\).</li> </ol> <p> The symbol “\(\exists\)” is called an <em>existential quantifier</em>, and is an analogue of “there exists” or “there is”; so \(\exists v \theta\) can be read “there is a \(v\) such that \(\theta\)”.</p> <ol start="8"> <li>That’s all folks. That is, all formulas are constructed in accordance with rules (1)–(7).</li> </ol> <p> Clause (8) allows us to do inductions on the complexity of formulas. If a certain property holds of the atomic formulas and is closed under the operations presented in clauses (2)–(7), then the property holds of all formulas. Here is a simple example:</p> <div class="indent"> <p> <strong>Theorem 1</strong>. Every formula of \(\LKe\) has the same number of left and right parentheses. Moreover, each left parenthesis corresponds to a unique right parenthesis, which occurs to the right of the left parenthesis. Similarly, each right parenthesis corresponds to a unique left parenthesis, which occurs to the left of the given right parenthesis. If a parenthesis occurs between a matched pair of parentheses, then its mate also occurs within that matched pair. In other words, parentheses that occur within a matched pair are themselves matched.</p> <p> <strong>Proof</strong>: By clause (8), every formula is built up from the atomic formulas using clauses (2)–(7). The atomic formulas have no parentheses. Parentheses are introduced only in clauses (3)–(5), and each time they are introduced as a matched set. So at any stage in the construction of a formula, the parentheses are paired off.</p> </div> <p> We next define the notion of an occurrence of a variable being <em>free</em> or <em>bound</em> in a formula. A variable that immediately follows a quantifier (as in “\(\forall x\)” and “\(\exists y\)”) is neither free nor bound. We do not even think of those as occurrences of the variable. All variables that occur in an atomic formula are free. If a variable occurs free (or bound) in \(\theta\) or in \(\psi\), then that same occurrence is free (or bound) in \(\neg \theta, (\theta \amp \psi), (\theta \vee \psi)\), and \((\theta \rightarrow \psi)\). 
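</p> <p> It may help to see the recursive definitions rendered concretely. The following Python sketch (ours, not part of the formal development; all names are illustrative) represents formulas as a recursive datatype, in the manner of clauses (1)–(8), and computes the set of variables having at least one free occurrence:</p> <pre>
from dataclasses import dataclass
from typing import Tuple, Union

@dataclass(frozen=True)
class Atom:                  # clause (1): a predicate letter applied to terms
    pred: str
    terms: Tuple[str, ...]

@dataclass(frozen=True)
class Neg:                   # clause (2): negation
    sub: 'Formula'

@dataclass(frozen=True)
class Bin:                   # clauses (3)-(5): one of &, v, ->
    conn: str
    left: 'Formula'
    right: 'Formula'

@dataclass(frozen=True)
class Quant:                 # clauses (6)-(7): 'forall' or 'exists'
    kind: str
    var: str
    sub: 'Formula'

Formula = Union[Atom, Neg, Bin, Quant]

def free_variables(phi: Formula) -> set:
    """Variables with at least one free occurrence in phi."""
    if isinstance(phi, Atom):
        # every variable occurring in an atomic formula is free
        return {t for t in phi.terms if t[0] in 'wxyz'}
    if isinstance(phi, Neg):
        return free_variables(phi.sub)
    if isinstance(phi, Bin):
        # the connectives do not change the status of occurrences
        return free_variables(phi.left) | free_variables(phi.right)
    # a quantifier binds the free occurrences of its own variable
    return free_variables(phi.sub) - {phi.var}

# (forall x (Axy v Bx) & Bx): 'y' is free, as is the final occurrence of 'x'
phi = Bin('&',
          Quant('forall', 'x',
                Bin('v', Atom('A', ('x', 'y')), Atom('B', ('x',)))),
          Atom('B', ('x',)))
assert free_variables(phi) == {'x', 'y'}
</pre> <p>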
That is, the (unary and binary) connectives do not change the status of variables that occur in them. All occurrences of the variable \(v\) in \(\theta\) are bound in \(\forall v \theta\) and \(\exists v \theta\). Any <em>free</em> occurrences of \(v\) in \(\theta\) are bound by the initial quantifier. All other variables that occur in \(\theta\) are free or bound in \(\forall v \theta\) and \(\exists v \theta\), as they are in \(\theta\).</p> <p> For example, in the formula \((\forall x(Axy \vee Bx) \amp Bx)\), the occurrences of “\(x\)” in \(Axy\) and in the first \(Bx\) are bound by the quantifier. The occurrence of “\(y\)” and last occurrence of “\(x\)” are free. In \(\forall x(Ax \rightarrow \exists x Bx)\), the “\(x\)” in \(Ax\) is bound by the initial universal quantifier, while the other occurrence of \(x\) is bound by the existential quantifier. The above syntax allows this “double-binding”. Although it does not create any ambiguities (see below), we will avoid such formulas, as a matter of taste and clarity.</p> <p> The syntax also allows so-called vacuous binding, as in \(\forall x Bc\). These, too, will be avoided in what follows. Some treatments of logic rule out vacuous binding and double binding as a matter of syntax. That simplifies some of the treatments below, and complicates others.</p> <p> Free variables correspond to place-holders, while bound variables are used to express generality. If a formula has no free variables, then it is called a <em>sentence</em>. If a formula has free variables, it is called <em>open</em>. </p> <h3 id="FeatSynt">2.4 Features of the syntax</h3> <p> Before turning to the deductive system and semantics, we mention a few features of the language, as developed so far. This helps draw the contrast between formal languages and natural languages like English.</p> <p> We assume at the outset that all of the categories are disjoint. For example, no connective is also a quantifier or a variable, and the non-logical terms are not also parentheses or connectives. Also, the items within each category are distinct. For example, the sign for disjunction does not do double-duty as the negation symbol, and perhaps more significantly, no two-place predicate is also a one-place predicate.</p> <p> One difference between natural languages like English and formal languages like \(\LKe\) is that the latter are not supposed to have any ambiguities. The policy that the different categories of symbols do not overlap, and that no symbol does double-duty, avoids the kind of ambiguity, sometimes called “equivocation”, that occurs when a single word has two meanings: “I’ll meet you at the bank.” But there are other kinds of ambiguity. Consider the English sentence:</p> <p class="indent"> John is married, and Mary is single, or Joe is crazy. </p> <p> It can mean that John is married and either Mary is single or Joe is crazy, or else it can mean that either both John is married and Mary is single, or else Joe is crazy. An ambiguity like this, due to different ways to parse the same sentence, is sometimes called an “amphiboly”. If our formal language did not have the parentheses in it, it would have amphibolies. For example, there would be a “formula” \(A \amp B \vee C\). Is this supposed to be \(((A \amp B) \vee C)\), or is it \((A \amp(B \vee C))\)? The parentheses resolve what would be an amphiboly.</p> <p> Can we be sure that there are no other amphibolies in our language?
That is, can we be sure that each formula of \(\LKe\) can be put together in only one way? Our next task is to answer this question.</p> <p> Let us temporarily use the term “unary marker” for the negation symbol \((\neg)\) or a quantifier followed by a variable (e.g., \(\forall x, \exists z)\).</p> <div class="indent"> <p> <strong>Lemma 2</strong>. Each formula consists of a string of zero or more unary markers followed by either an atomic formula or a formula produced using a binary connective, via one of clauses (3)–(5).</p> <p> <strong>Proof</strong>: We proceed by induction on the complexity of the formula or, in other words, on the number of formation rules that are applied. The Lemma clearly holds for atomic formulas. Let \(n\) be a natural number, and suppose that the Lemma holds for any formula constructed from \(n\) or fewer instances of clauses (2)–(7). Let \(\theta\) be a formula constructed from \(n+1\) instances. The Lemma holds if the last clause used to construct \(\theta\) was either (3), (4), or (5). If the last clause used to construct \(\theta\) was (2), then \(\theta\) is \(\neg \psi\). Since \(\psi\) was constructed with \(n\) instances of the rule, the Lemma holds for \(\psi\) (by the induction hypothesis), and so it holds for \(\theta\). Similar reasoning shows the Lemma to hold for \(\theta\) if the last clause was (6) or (7). By clause (8), this exhausts the cases, and so the Lemma holds for \(\theta\), by induction.</p> <p> <strong>Lemma 3</strong>. If a formula \(\theta\) contains a left parenthesis, then it ends with a right parenthesis, which matches the leftmost left parenthesis in \(\theta\).</p> <p> <strong>Proof</strong>: Here we also proceed by induction on the number of instances of (2)–(7) used to construct the formula. Clearly, the Lemma holds for atomic formulas, since they have no parentheses. Suppose, then, that the Lemma holds for formulas constructed with \(n\) or fewer instances of (2)–(7), and let \(\theta\) be constructed with \(n+1\) instances. If the last clause applied was (3)–(5), then the Lemma holds since \(\theta\) itself begins with a left parenthesis and ends with the matching right parenthesis. If the last clause applied was (2), then \(\theta\) is \(\neg \psi\), and the induction hypothesis applies to \(\psi\). Similarly, if the last clause applied was (6) or (7), then \(\theta\) consists of a quantifier, a variable, and a formula to which we can apply the induction hypothesis. It follows that the Lemma holds for \(\theta\).</p> <p> <strong>Lemma 4</strong>. Each formula contains at least one atomic formula.</p> </div> <p> The proof proceeds by induction on the number of instances of (2)–(7) used to construct the formula, and we leave it as an exercise. </p> <div class="indent"> <p> <strong>Theorem 5</strong>. Let \(\alpha, \beta\) be nonempty sequences of characters on our alphabet, such that \(\alpha \beta\) (i.e., \(\alpha\) followed by \(\beta\)) is a formula. Then \(\alpha\) is <em>not</em> a formula.</p> <p> <strong>Proof</strong>: By Theorem 1 and Lemma 3, if \(\alpha\) contains a left parenthesis, then the right parenthesis that matches the leftmost left parenthesis in \(\alpha \beta\) comes at the end of \(\alpha \beta\), and so the matching right parenthesis is in \(\beta\). So, \(\alpha\) has more left parentheses than right parentheses. By Theorem 1, \(\alpha\) is not a formula. So now suppose that \(\alpha\) does not contain any left parentheses.
By Lemma 2, \(\alpha \beta\) consists of a string of zero or more unary markers followed by either an atomic formula or a formula produced using a binary connective, via one of clauses (3)–(5). If the latter formula was produced via one of clauses (3)–(5), then it begins with a left parenthesis. Since \(\alpha\) does not contain any parentheses, it must be a string of unary markers. But then \(\alpha\) does not contain any atomic formulas, and so by Lemma 4, \(\alpha\) is not a formula. The only case left is where \(\alpha \beta\) consists of a string of unary markers followed by an atomic formula, either in the form \(t_1 =t_2\) or \(Pt_1 \ldots t_n\). Again, if \(\alpha\) just consisted of unary markers, it would not be a formula, and so \(\alpha\) must consist of the unary markers that start \(\alpha \beta\), followed by either \(t_1\) by itself, \(t_1 =\) by itself, or the predicate letter \(P\), and perhaps some (but not all) of the terms \(t_1, \ldots,t_n\). In the first two cases, \(\alpha\) does not contain an atomic formula, by the policy that the categories do not overlap. Since \(P\) is an \(n\)-place predicate letter, by the policy that the predicate letters are distinct, \(P\) is not an \(m\)-place predicate letter for any \(m \ne n\). So the part of \(\alpha\) that consists of \(P\) followed by the terms is not an atomic formula. In all of these cases, then, \(\alpha\) does not contain an atomic formula. By Lemma 4, \(\alpha\) is not a formula.</p> </div> <p> We are finally in position to show that there is no amphiboly in our language.</p> <div class="indent"> <p> <strong>Theorem 6</strong>. Let \(\theta\) be any formula of \(\LKe\). If \(\theta\) is not atomic, then there is one and only one among (2)–(7) that was the last clause applied to construct \(\theta\). That is, \(\theta\) could not be produced by two different clauses. Moreover, no formula produced by clauses (2)–(7) is atomic.</p> <p> <strong>Proof</strong>: By Clause (8), either \(\theta\) is atomic or it was produced by one of clauses (2)–(7). Thus, the first symbol in \(\theta\) must be either a predicate letter, a term, a unary marker, or a left parenthesis. If the first symbol in \(\theta\) is a predicate letter or term, then \(\theta\) is atomic. In this case, \(\theta\) was not produced by any of (2)–(7), since all such formulas begin with something other than a predicate letter or term. If the first symbol in \(\theta\) is a negation sign “\(\neg\)”, then \(\theta\) was produced by clause (2), and not by any other clause (since the other clauses produce formulas that begin with either a quantifier or a left parenthesis). Similarly, if \(\theta\) begins with a universal quantifier, then it was produced by clause (6), and not by any other clause, and if \(\theta\) begins with an existential quantifier, then it was produced by clause (7), and not by any other clause. The only case left is where \(\theta\) begins with a left parenthesis. In this case, it must have been produced by one of (3)–(5), and not by any other clause. We only need to rule out the possibility that \(\theta\) was produced by more than one of (3)–(5). To take an example, suppose that \(\theta\) was produced by (3) and (4). Then \(\theta\) is \((\psi_1 \amp \psi_2)\) and \(\theta\) is also \((\psi_3 \vee \psi_4)\), where \(\psi_1, \psi_2, \psi_3\), and \(\psi_4\) are themselves formulas. That is, \((\psi_1 \amp \psi_2)\) is the very same formula as \((\psi_3 \vee \psi_4)\).
By Theorem 5, \(\psi_1\) cannot be a proper part of \(\psi_3\), nor can \(\psi_3\) be a proper part of \(\psi_1\). So \(\psi_1\) must be the same formula as \(\psi_3\). But then “\(\amp\)” must be the same symbol as “\(\vee\)”, and this contradicts the policy that all of the symbols are different. So \(\theta\) was not produced by both Clause (3) and Clause (4). Similar reasoning takes care of the other combinations.</p> </div> <p> This result is sometimes called “unique readability”. It shows that each formula is produced from the atomic formulas via the various clauses in exactly one way. If \(\theta\) was produced by clause (2), then its <em>main connective</em> is the initial “\(\neg\)”. If \(\theta\) was produced by clauses (3), (4), or (5), then its <em>main connective</em> is the introduced “\(\amp\)”, “\(\vee\)”, or “\(\rightarrow\)”, respectively. If \(\theta\) was produced by clauses (6) or (7), then its <em>main connective</em> is the initial quantifier. We apologize for the tedious details. We included them to indicate the level of precision and rigor for the syntax.</p> <h2 id="Dedu">3. Deduction</h2> <p> We now introduce a <em>deductive system</em>, \(D\), for our languages. As above, we define an <em>argument</em> to be a non-empty collection of sentences in the formal language, one of which is designated to be the <em>conclusion</em>. If there are any other sentences in the argument, they are its <em>premises</em>.<sup>[<a href="notes.html#note-1" id="ref-1">1</a>]</sup> By convention, we use “\(\Gamma\)”, “\(\Gamma'\)”, “\(\Gamma_1\)”, etc., to range over sets of sentences, and we use the letters “\(\phi\)”, “\(\psi\)”, “\(\theta\)”, uppercase or lowercase, with or without subscripts, to range over single sentences. We write “\(\Gamma, \Gamma'\)” for the union of \(\Gamma\) and \(\Gamma'\), and “\(\Gamma, \phi\)” for the union of \(\Gamma\) with \(\{\phi\}\).</p> <p> We write an argument in the form \(\langle \Gamma, \phi \rangle\), where \(\Gamma\) is a set of sentences, the premises, and \(\phi\) is a single sentence, the conclusion. Remember that \(\Gamma\) may be empty. We write \(\Gamma \vdash \phi\) to indicate that \(\phi\) is deducible from \(\Gamma\), or, in other words, that the argument \(\langle \Gamma, \phi \rangle\) is deducible in \(D\). We may write \(\Gamma \vdash_D \phi\) to emphasize the deductive system \(D\). We write \(\vdash \phi\) or \(\vdash_D \phi\) to indicate that \(\phi\) can be deduced (in \(D\)) from the empty set of premises.</p> <p> The rules in \(D\) are chosen to match logical relations concerning the English analogues of the logical terminology in the language. Again, we define the deducibility relation by recursion. We start with a rule of assumptions: </p> <dl class="sentag tag3em"> <dt>(As)</dt> <dd>If \(\phi\) is a member of \(\Gamma\), then \(\Gamma \vdash \phi\).</dd> </dl> <p> We thus have that \(\{\phi \}\vdash \phi\); each premise follows from itself. We next present two clauses for each connective and quantifier. The clauses indicate how to “introduce” and “eliminate” sentences in which each symbol is the main connective.</p> <p> First, recall that “\(\amp\)” is an analogue of the English connective “and”. Intuitively, one can deduce a sentence in the form \((\theta \amp \psi)\) if one has deduced \(\theta\) and one has deduced \(\psi\).
Conversely, one can deduce \(\theta\) from \((\theta \amp \psi)\) and one can deduce \(\psi\) from \((\theta \amp \psi)\):</p> <dl class="sentag tag3em"> <dt>\((\amp \mathrm{I})\)</dt> <dd>If \(\Gamma_1 \vdash \theta\) and \(\Gamma_2 \vdash \psi\), then \(\Gamma_1, \Gamma_2 \vdash(\theta \amp \psi)\). </dd> <dt>\((\amp \mathrm{E})\)</dt> <dd>If \(\Gamma \vdash(\theta \amp \psi)\) then \(\Gamma \vdash \theta\); and if \(\Gamma \vdash(\theta \amp \psi)\) then \(\Gamma \vdash \psi\).</dd> </dl> <p> The name “&I” stands for “&-introduction”; “&E” stands for “&-elimination”.</p> <p> Since the symbol “\(\vee\)” corresponds to the English “or”, \((\theta \vee \psi)\) should be deducible from \(\theta\), and \((\theta \vee \psi)\) should also be deducible from \(\psi\): </p> <dl class="sentag tag3em"> <dt>\((\vee \mathrm{I})\) </dt> <dd>If \(\Gamma \vdash \theta\) then \(\Gamma \vdash(\theta \vee \psi)\); if \(\Gamma \vdash \psi\) then \(\Gamma \vdash(\theta \vee \psi)\).</dd> </dl> <p> The elimination rule is a bit more complicated. Suppose that “\(\theta\) or \(\psi\)” is true. Suppose also that \(\phi\) follows from \(\theta\) and that \(\phi\) follows from \(\psi\). One can reason that if \(\theta\) is true, then \(\phi\) is true. If instead \(\psi\) is true, we still have that \(\phi\) is true. So either way, \(\phi\) must be true.</p> <dl class="sentag tag3em"> <dt>\((\vee \mathrm{E})\) </dt> <dd>If \(\Gamma_1 \vdash(\theta \vee \psi)\), \(\Gamma_2, \theta \vdash \phi\), and \(\Gamma_3, \psi \vdash \phi\), then \(\Gamma_1, \Gamma_2, \Gamma_3 \vdash \phi\). </dd> </dl> <p> For the next clauses, recall that the symbol, “\(\rightarrow\)”, is an analogue of the English “if … then … ” construction. If one knows, or assumes \((\theta \rightarrow \psi)\) and also knows, or assumes \(\theta\), then one can conclude \(\psi\). Conversely, if one deduces \(\psi\) from an assumption \(\theta\), then one can conclude that \((\theta \rightarrow \psi)\).</p> <dl class="sentag tag3em"> <dt>\(({\rightarrow}\mathrm{I})\) </dt> <dd>If \(\Gamma, \theta \vdash \psi\), then \(\Gamma \vdash(\theta \rightarrow \psi)\). </dd> <dt>\(({\rightarrow}\mathrm{E})\) </dt> <dd>If \(\Gamma_1 \vdash(\theta \rightarrow \psi)\) and \(\Gamma_2 \vdash \theta\), then \(\Gamma_1, \Gamma_2 \vdash \psi\).</dd> </dl> <p> This elimination rule is sometimes called “modus ponens”. In some logic texts, the introduction rule is proved as a “deduction theorem”.</p> <p> Our next clauses are for the negation sign, “\(\neg\)”. The underlying idea is that a sentence \(\psi\) is inconsistent with its negation \(\neg \psi\). They cannot both be true. We call a pair of sentences \(\psi, \neg \psi\) <em>contradictory opposites</em>. If one can deduce such a pair from an assumption \(\theta\), then one can conclude that \(\theta\) is false, or, in other words, one can conclude \(\neg \theta\).</p> <dl class="sentag tag3em"> <dt>\((\neg \mathrm{I})\)</dt> <dd>If \(\Gamma_1, \theta \vdash \psi\) and \(\Gamma_2, \theta \vdash \neg \psi\), then \(\Gamma_1, \Gamma_2 \vdash \neg \theta\). </dd> </dl> <p> By (As), we have that \(\{A,\neg A\}\vdash A\) and \(\{A, \neg A\}\vdash \neg A\). So by \(\neg\)I we have that \(\{A\}\vdash \neg \neg A\). However, we do not have the converse yet. Intuitively, \(\neg \neg \theta\) corresponds to “it is not the case that it is not the case that \(\theta\)”.
One might think that this last is equivalent to \(\theta\), and we have a rule to that effect: </p> <dl class="sentag tag3em"> <dt>(DNE)</dt> <dd>If \(\Gamma \vdash \neg \neg \theta\), then \(\Gamma \vdash \theta\).</dd> </dl> <p> The name DNE stands for “double-negation elimination”. There is some controversy over this inference. It is rejected by philosophers and mathematicians who do not hold that each meaningful sentence is either true or not true. <em>Intuitionistic logic</em> does not sanction the inference in question (see, for example, Dummett [2000], or the entry on <a href="../logic-intuitionistic/">intuitionistic logic</a>, or <a href="../intuitionistic-logic-development/">history of intuitionistic logic</a>), but, again, classical logic does.</p> <p> To illustrate the parts of the deductive system \(D\) presented thus far, we show that \(\vdash(A \vee \neg A)\):</p> <ol type="i"> <li>\(\{\neg(A \vee \neg A), A\}\vdash \neg(A \vee \neg A)\), by (As).</li> <li>\(\{\neg(A \vee \neg A), A\}\vdash A\), by (As).</li> <li>\(\{\neg(A \vee \neg A), A\}\vdash(A \vee \neg A)\), by \((\vee\)I), from (ii).</li> <li>\(\{\neg(A \vee \neg A)\}\vdash \neg A\), by \((\neg\)I), from (i) and (iii).</li> <li>\(\{\neg(A \vee \neg A), \neg A\}\vdash \neg(A \vee \neg A)\), by (As).</li> <li>\(\{\neg(A \vee \neg A), \neg A\}\vdash \neg A\), by (As).</li> <li>\(\{\neg(A \vee \neg A), \neg A\}\vdash(A \vee \neg A)\), by \((\vee\)I), from (vi).</li> <li>\(\{\neg(A \vee \neg A)\}\vdash \neg \neg A\), by \((\neg\)I), from (v) and (vii).</li> <li>\(\vdash \neg \neg(A \vee \neg A)\), by \((\neg\)I), from (iv) and (viii).</li> <li>\(\vdash(A \vee \neg A)\), by (DNE), from (ix).</li> </ol> <p> The principle \((\theta \vee \neg \theta)\) is sometimes called the <em>law of excluded middle</em>. It is not valid in intuitionistic logic.</p> <p> Let \(\theta, \neg \theta\) be a pair of contradictory opposites, and let \(\psi\) be any sentence at all. By (As) we have \(\{\theta, \neg \theta, \neg \psi \}\vdash \theta\) and \(\{\theta, \neg \theta, \neg \psi \}\vdash \neg \theta\). So by \((\neg\)I), \(\{\theta, \neg \theta \}\vdash \neg \neg \psi\). So, by (DNE) we have \(\{\theta, \neg \theta \}\vdash \psi\). That is, anything at all follows from a pair of contradictory opposites. Some logicians introduce a rule to codify a similar inference:</p> <p class="indent"> If \(\Gamma_1 \vdash \theta\) and \(\Gamma_2 \vdash \neg \theta\), then for any sentence \(\psi\), \(\Gamma_1, \Gamma_2 \vdash \psi\) </p> <p> The inference is sometimes called <em>ex falso quodlibet</em> or, more colorfully, <em>explosion</em>. Some call it “\(\neg\)-elimination”, but perhaps this stretches the notion of “elimination” a bit. We do not officially include <em>ex falso quodlibet</em> as a separate rule in \(D\), but as will be shown below (Theorem 10), each instance of it is derivable in our system \(D\).</p> <p> Some logicians object to <em>ex falso quodlibet</em>, on the ground that the sentence \(\psi\) may be <em>irrelevant</em> to any of the premises in \(\Gamma\). Suppose, for example, that one starts with some premises \(\Gamma\) about human nature and facts about certain people, and then deduces both the sentence “Clinton had extra-marital sexual relations” and “Clinton did not have extra-marital sexual relations”. One can perhaps conclude that there is something wrong with the premises \(\Gamma\). But should we be allowed to then deduce <em>anything at all</em> from \(\Gamma\)?
Should we be allowed to deduce “The economy is sound”?</p> <p> A small minority of logicians, called <em>dialetheists</em>, hold that some contradictions are actually true. For them, <em>ex falso quodlibet</em> is not truth-preserving.</p> <p> Deductive systems that demur from <em>ex falso quodlibet</em> are called <em>paraconsistent</em>. Most relevant logics are paraconsistent. See the entries on <a href="../logic-relevance/">relevance logic</a>, <a href="../logic-paraconsistent/">paraconsistent logic</a>, and <a href="../dialetheism/">dialetheism</a>. Or see Anderson and Belnap [1975], Anderson, Belnap, and Dunn [1992], and Tennant [1997] for fuller overviews of relevant logic; and Priest [2006a,b], for dialetheism. Deep philosophical issues concerning the nature of <a href="../logical-consequence/">logical consequence</a> are involved. Far be it from an article in a philosophy encyclopedia to avoid philosophical issues, but space considerations preclude a fuller treatment of this issue here. Suffice it to note that the inference <em>ex falso quodlibet</em> is sanctioned in systems of <em>classical logic</em>, the subject of this article. It is essential to establishing the balance between the deductive system and the semantics (see §5 below).</p> <p> The next pieces of \(D\) are the clauses for the quantifiers. Let \(\theta\) be a formula, \(v\) a variable, and \(t\) a term (i.e., a variable or a constant). Then define \(\theta(v|t)\) to be the result of substituting \(t\) for each <em>free</em> occurrence of \(v\) in \(\theta\). So, if \(\theta\) is \((Qx \amp \exists x Pxy)\), then \(\theta(x|c)\) is \((Qc \amp \exists x Pxy)\). The last occurrence of \(x\) is not free.</p> <p> A sentence in the form \(\forall v \theta\) is an analogue of the English “for every \(v, \theta\) holds”. So one should be able to infer \(\theta(v|t)\) from \(\forall v \theta\) for any closed term \(t\). Recall that the only closed terms in our system are constants.</p> <dl class="sentag tag3em"> <dt>\((\forall \mathrm{E})\)</dt> <dd>If \(\Gamma \vdash \forall v \theta\), then \(\Gamma \vdash \theta(v|t)\), for any closed term \(t\).</dd> </dl> <p> The idea here is that if \(\forall v \theta\) is true, then \(\theta\) should hold of \(t\), no matter what \(t\) is.</p> <p> The introduction clause for the universal quantifier is a bit more complicated. Suppose that a sentence \(\theta\) contains a closed term \(t\), and that \(\theta\) has been deduced from a set of premises \(\Gamma\). If the closed term \(t\) does not occur in any member of \(\Gamma\), then \(\theta\) will hold no matter which object \(t\) may denote. That is, \(\forall v \theta\) follows. </p> <dl class="sentag tag3em"> <dt>\((\forall \mathrm{I})\) </dt> <dd>For any closed term \(t\), if \(\Gamma\vdash\theta (v|t)\), then \(\Gamma\vdash\forall v\theta\) provided that \(t\) is not in \(\Gamma\) or \(\theta\). </dd> </dl> <p> This rule \((\forall \mathrm{I})\) corresponds to a common inference in mathematics. Suppose that a mathematician says “let \(n\) be a natural number” and goes on to show that \(n\) has a certain property \(P\), without assuming anything about \(n\) (except that it is a natural number). She then reminds the reader that \(n\) is “arbitrary”, and concludes that \(P\) holds for <em>all</em> natural numbers. The condition that the term \(t\) not occur in any premise is what guarantees that it is indeed “arbitrary”. It could be any object, and so anything we conclude about it holds for all objects.
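</p> <p> To illustrate the quantifier rules presented so far, here is a derivation, in the style of the one above, showing that \(\forall xQx\) is deducible from \(\{\forall x(Px \rightarrow Qx), \forall xPx\}\) (the particular letters \(P\), \(Q\), and \(a\) are arbitrary choices):</p> <ol type="i"> <li>\(\{\forall x(Px \rightarrow Qx), \forall xPx\}\vdash \forall x(Px \rightarrow Qx)\), by (As).</li> <li>\(\{\forall x(Px \rightarrow Qx), \forall xPx\}\vdash \forall xPx\), by (As).</li> <li>\(\{\forall x(Px \rightarrow Qx), \forall xPx\}\vdash(Pa \rightarrow Qa)\), by \((\forall\)E), from (i).</li> <li>\(\{\forall x(Px \rightarrow Qx), \forall xPx\}\vdash Pa\), by \((\forall\)E), from (ii).</li> <li>\(\{\forall x(Px \rightarrow Qx), \forall xPx\}\vdash Qa\), by \(({\rightarrow}\)E), from (iii) and (iv).</li> <li>\(\{\forall x(Px \rightarrow Qx), \forall xPx\}\vdash \forall xQx\), by \((\forall\)I), from (v), noting that \(a\) occurs neither in the premises nor in \(Qx\).</li> </ol>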
<p> The existential quantifier is an analogue of the English expression “there exists”, or perhaps just “there is”. If we have established (or assumed) that a given object \(t\) has a given property, then it follows that there is something that has that property. </p> <dl class="sentag tag3em"> <dt>\((\exists \mathrm{I})\)</dt> <dd>For any closed term \(t\), if \(\Gamma\vdash\theta (v|t)\) then \(\Gamma\vdash\exists v\theta\). </dd> </dl> <p> The elimination rule for \(\exists\) is not quite as simple:</p> <dl class="sentag tag3em"> <dt>\((\exists \mathrm{E})\)</dt> <dd>For any closed term \(t\), if \(\Gamma_1\vdash\exists v\theta\) and \(\Gamma_2, \theta(v|t)\vdash\phi\), then \(\Gamma_1 ,\Gamma_2\vdash\phi\), provided that \(t\) does not occur in \(\phi\), \(\Gamma_2\) or \(\theta\).</dd> </dl> <p> This elimination rule also corresponds to a common inference. Suppose that a mathematician assumes or somehow concludes that there is a natural number with a given property \(P\). She then says “let \(n\) be such a natural number, so that \(Pn\)”, and goes on to establish a sentence \(\phi\), which does not mention the number \(n\). If the derivation of \(\phi\) does not invoke anything about \(n\) (other than the assumption that it has the given property \(P\)), then \(n\) could have been any number that has the property \(P\). That is, \(n\) is an <em>arbitrary</em> number with property \(P\). It does not matter which number \(n\) is. Since \(\phi\) does not mention \(n\), it follows from the assertion that something has property \(P\). The provisions added to \((\exists\)E) are to guarantee that \(t\) is “arbitrary”.</p> <p> The final items are the rules for the identity sign “=”. The introduction rule is about as simple as can be:</p> <dl class="sentag tag3em"> <dt>\(({=}\mathrm{I})\)</dt> <dd>\(\Gamma \vdash t=t\), where \(t\) is any closed term. </dd> </dl> <p> This “inference” corresponds to the truism that everything is identical to itself. The elimination rule corresponds to a principle that if \(a\) is identical to \(b\), then anything true of \(a\) is also true of \(b\). </p> <dl class="sentag tag3em"> <dt>\(({=}\mathrm{E})\)</dt> <dd>For any closed terms \(t_1\) and \(t_2\), if \(\Gamma_1 \vdash t_1 =t_2\) and \(\Gamma_2 \vdash \theta\), then \(\Gamma_1, \Gamma_2 \vdash \theta'\), where \(\theta'\) is obtained from \(\theta\) by replacing one or more occurrences of \(t_1\) with \(t_2\). </dd> </dl> <p> The rule \(({=}\mathrm{E})\) indicates a certain restriction in the expressive resources of our language. Suppose, for example, that Harry is identical to Donald (since his mischievous parents gave him two names). According to most people’s intuitions, it would not follow from this and “Dick knows that Harry is wicked” that “Dick knows that Donald is wicked”, for the reason that Dick might not know that Harry is identical to Donald. Contexts like this, in which identicals cannot safely be substituted for each other, are called “opaque”. We assume that our language \(\LKe\) has no opaque contexts.</p> <p> One final clause completes the description of the deductive system \(D\):</p> <dl class="sentag tag3em"> <dt>(*)</dt> <dd>That’s all folks. \(\Gamma \vdash \theta\) only if \(\theta\) follows from members of \(\Gamma\) by the above rules.</dd> </dl> <p> Again, this clause allows proofs by induction on the rules used to establish an argument.
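</p> <p> (Before putting this induction principle to work, here is one more sample derivation, this time illustrating \((\exists\)I) and \((\exists\)E); again the particular letters are arbitrary. We show that \(\exists xQx\) is deducible from \(\{\exists xPx, \forall x(Px \rightarrow Qx)\}\).)</p> <ol type="i"> <li>\(\{\exists xPx\}\vdash \exists xPx\), by (As).</li> <li>\(\{\forall x(Px \rightarrow Qx), Pa\}\vdash \forall x(Px \rightarrow Qx)\), by (As).</li> <li>\(\{\forall x(Px \rightarrow Qx), Pa\}\vdash Pa\), by (As).</li> <li>\(\{\forall x(Px \rightarrow Qx), Pa\}\vdash(Pa \rightarrow Qa)\), by \((\forall\)E), from (ii).</li> <li>\(\{\forall x(Px \rightarrow Qx), Pa\}\vdash Qa\), by \(({\rightarrow}\)E), from (iv) and (iii).</li> <li>\(\{\forall x(Px \rightarrow Qx), Pa\}\vdash \exists xQx\), by \((\exists\)I), from (v).</li> <li>\(\{\exists xPx, \forall x(Px \rightarrow Qx)\}\vdash \exists xQx\), by \((\exists\)E), from (i) and (vi), noting that \(a\) occurs in none of \(\exists xQx\), \(\forall x(Px \rightarrow Qx)\), and \(Px\).</li> </ol> <p>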
If a property of arguments holds of all instances of (As) and \(({=}\mathrm{I})\), and if the other rules preserve the property, then every argument that is deducible in \(D\) enjoys the property in question.</p> <p> Before moving on to the model theory for \(\LKe\), we pause to note a few features of the deductive system. To illustrate the level of rigor, we begin with a lemma: if a sentence does not contain a particular closed term, then replacing that term throughout the premises does not affect whether the sentence can be deduced. We allow ourselves the liberty here of extending some previous notation: for any terms \(t\) and \(t'\), and any formula \(\theta\), we say that \(\theta(t|t')\) is the result of replacing all free occurrences of \(t\) in \(\theta\) with \(t'\).</p> <div class="indent"> <p> <strong>Lemma 7.</strong> If \(\Gamma_1\) and \(\Gamma_2\) differ only in that wherever \(\Gamma_1\) contains \(\theta\), \(\Gamma_2\) contains \(\theta(t|t')\), then for any sentence \(\phi\) not containing \(t\) or \(t'\), if \(\Gamma_1\vdash\phi\) then \(\Gamma_2\vdash\phi\). </p> <p> <strong>Proof:</strong> The proof proceeds by induction on the number of steps in the proof of \(\phi\). Crucial to this proof is the fact that \(\theta=\theta(t|t')\) whenever \(\theta\) does not contain \(t\) or \(t'\). When the number of steps in the proof of \(\phi\) is one, this means that the last (and only) rule applied is (As) or (=I). Then, since \(\phi\) does not contain \(t\) or \(t'\), if \(\Gamma_1\vdash\phi\) we simply apply the same rule ((As) or (=I)) to \(\Gamma_2\) to get \(\Gamma_2\vdash\phi\). Assume that there are \(n>1\) steps in the proof of \(\phi\), and that Lemma 7 holds for any proof with fewer than \(n\) steps. Suppose that the \(n^{th}\) rule applied to \(\Gamma_1\) was (\(\amp I\)). Then \(\phi\) is \(\psi\amp\chi\), and \(\Gamma_1\vdash\psi\amp\chi\). But then we know that previous steps in the proof include \(\Gamma_1\vdash\psi\) and \(\Gamma_1\vdash\chi\), and by induction, we have \(\Gamma_2\vdash\psi\) and \(\Gamma_2\vdash\chi\), since neither \(\psi\) nor \(\chi\) contain \(t\) or \(t'\). So, we simply apply (\(\amp I\)) to \(\Gamma_2\) to get \(\Gamma_2\vdash\psi\amp\chi\) as required. Suppose now that the last step applied in the proof of \(\Gamma_1\vdash\phi\) was (\(\amp E\)). Then, at a previous step in the proof of \(\phi\), we know \(\Gamma_1\vdash\phi\amp\psi\) for some sentence \(\psi\). If \(\psi\) does not contain \(t\), then we simply apply (\(\amp E\)) to \(\Gamma_2\) to obtain the desired result. The only complication is if \(\psi\) contains \(t\). Then we would have that \(\Gamma_2\vdash (\phi\amp\psi)(t|t')\). But, since \((\phi\amp\psi)(t|t')\) is \(\phi(t|t')\amp\psi(t|t')\), and \(\phi(t|t')\) is just \(\phi\), we can just apply (\(\amp E\)) to get \(\Gamma_2\vdash\phi\) as required. The cases for the other rules are similar.</p> <p> <strong>Theorem 8. The rule of Weakening.</strong> If \(\Gamma_1 \vdash \phi\) and \(\Gamma_1 \subseteq \Gamma_2\), then \(\Gamma_2 \vdash \phi\).</p> <p> <strong>Proof:</strong> Again, we proceed by induction on the number of rules that were used to arrive at \(\Gamma_1 \vdash \phi\). Suppose that \(n\gt 0\) is a natural number, and that the theorem holds for any argument that was derived using fewer than \(n\) rules. Suppose that \(\Gamma_1 \vdash \phi\) using exactly \(n\) rules. If \(n=1\), then the rule is either (As) or \((=\)I). In these cases, \(\Gamma_2 \vdash \phi\) by the same rule.
If the last rule applied was (&I), then \(\phi\) has the form \((\theta \amp \psi)\), and we have \(\Gamma_3 \vdash \theta\) and \(\Gamma_4 \vdash \psi\), with \(\Gamma_1 = \Gamma_3, \Gamma_4\). We apply the induction hypothesis to the deductions of \(\theta\) and \(\psi\), to get \(\Gamma_2 \vdash \theta\) and \(\Gamma_2 \vdash \psi\), and then apply (&I) to the result to get \(\Gamma_2 \vdash \phi\). Most of the other cases are exactly like this. Slight complications arise only in the rules \((\forall\)I) and \((\exists\)E), because there we have to pay attention to the conditions for the rules.</p> <p> Suppose that the last rule applied to get \(\Gamma_1 \vdash \phi\) is \((\forall\)I). So \(\phi\) is a sentence of the form \(\forall v\theta\), and we have \(\Gamma_1 \vdash \theta (v|t)\) and \(t\) does not occur in any member of \(\Gamma_1\) or in \(\theta\). The problem is that \(t\) may occur in a member of \(\Gamma_2\), and so we cannot just invoke the induction hypothesis and apply \((\forall\)I) to the result. So, let \(t'\) be a term not occurring in \(\theta\) or in any sentence in \(\Gamma_2\). Let \(\Gamma'\) be the result of substituting \(t'\) for all \(t\) in \(\Gamma_2\). Then, since \(t\) does not occur in \(\Gamma_1\), \(\Gamma_1\subseteq\Gamma'\). So, the induction hypothesis gives us \(\Gamma'\vdash\theta (v|t)\), and we know that \(\Gamma'\) does not contain \(t\), so we can apply (\(\forall I\)) to get \(\Gamma'\vdash\forall v\theta\). But \(\forall v\theta\) does not contain \(t\) or \(t'\), so \(\Gamma_2\vdash\forall v\theta\) by Lemma 7.</p> <p> Suppose that the last rule applied was \((\exists\)E), so that we have \(\Gamma_3 \vdash \exists v\theta\) and \(\Gamma_4, \theta (v|t) \vdash \phi\), with \(\Gamma_1\) being \(\Gamma_3, \Gamma_4\), and \(t\) not in \(\phi\), \(\Gamma_4\) or \(\theta\). If \(t\) does not occur in \(\Gamma_2\), we apply the induction hypothesis to get \(\Gamma_2 \vdash \exists v\theta\), and then \((\exists\)E) to end up with \(\Gamma_2 \vdash \phi\). If \(t\) does occur in \(\Gamma_2\), then we follow a procedure similar to that for \((\forall\)I), using Lemma 7. </p> </div> <p> Theorem 8 allows us to add premises at will. It follows that \(\Gamma \vdash \phi\) if and only if there is a subset \(\Gamma'\subseteq \Gamma\) such that \(\Gamma'\vdash \phi\). Weakening fails in some systems of relevance logic, and in other substructural logics such as linear logic (see the entries on <a href="../logic-relevance/">relevance logic</a>, <a href="../logic-substructural/">substructural logics</a>, and <a href="../logic-linear/">linear logic</a>).</p> <p> By clause (*), all derivations are established in a finite number of steps. So we have</p> <div class="indent"> <p> <strong>Theorem 9</strong>. \(\Gamma \vdash \phi\) if and only if there is a finite \(\Gamma'\subseteq \Gamma\) such that \(\Gamma'\vdash \phi\).</p> <p> <strong>Theorem 10</strong>. The rule of <em>ex falso quodlibet</em> is a “derived rule” of \(D\): if \(\Gamma_1 \vdash \theta\) and \(\Gamma_2 \vdash \neg \theta\), then \(\Gamma_1,\Gamma_2 \vdash \psi\), for any sentence \(\psi\).</p> <p> <strong>Proof:</strong> Suppose that \(\Gamma_1 \vdash \theta\) and \(\Gamma_2 \vdash \neg \theta\). Then by Theorem 8, \(\Gamma_1,\neg \psi \vdash \theta\), and \(\Gamma_2,\neg \psi \vdash \neg \theta\). So by \((\neg\)I), \(\Gamma_1, \Gamma_2 \vdash \neg \neg \psi\). By (DNE), \(\Gamma_1, \Gamma_2 \vdash \psi\). </p> <p> <strong>Theorem 11. The rule of Cut</strong>.
If \(\Gamma_1 \vdash \psi\) and \(\Gamma_2, \psi \vdash \theta\), then \(\Gamma_1, \Gamma_2 \vdash \theta\).</p> <p> <strong>Proof:</strong> Suppose \(\Gamma_1 \vdash \psi\) and \(\Gamma_2, \psi \vdash \theta\). We proceed by induction on the number of rules used to establish \(\Gamma_2, \psi \vdash \theta\). Suppose that \(n\) is a natural number, and that the theorem holds for any argument that was derived using fewer than \(n\) rules. Suppose that \(\Gamma_2, \psi \vdash \theta\) was derived using exactly \(n\) rules. If the last rule used was \((=\)I), then \(\Gamma_1, \Gamma_2 \vdash \theta\) is also an instance of \((=\)I). If \(\Gamma_2, \psi \vdash \theta\) is an instance of (As), then either \(\theta\) is \(\psi\), or \(\theta\) is a member of \(\Gamma_2\). In the former case, we have \(\Gamma_1 \vdash \theta\) by supposition, and get \(\Gamma_1, \Gamma_2 \vdash \theta\) by Weakening (Theorem 8). In the latter case, \(\Gamma_1, \Gamma_2 \vdash \theta\) is itself an instance of (As). Suppose that \(\Gamma_2, \psi \vdash \theta\) was obtained using (&E). Then we have \(\Gamma_2, \psi \vdash(\theta \amp \phi)\). The induction hypothesis gives us \(\Gamma_1, \Gamma_2 \vdash(\theta \amp \phi)\), and (&E) produces \(\Gamma_1, \Gamma_2 \vdash \theta\). The remaining cases are similar. </p> </div> <p> Theorem 11 allows us to chain together inferences. This fits the practice of establishing theorems and lemmas and then using those theorems and lemmas later, at will. The cut principle is, some think, essential to reasoning. In some logical systems, the cut principle is a deep theorem; in others it is invalid. The system here was designed, in part, to make the proof of Theorem 11 straightforward.</p> <p> If \(\Gamma \vdash_D \theta\), then we say that the sentence \(\theta\) is a <em>deductive consequence</em> of the set of sentences \(\Gamma\), and that the argument \(\langle \Gamma,\theta \rangle\) is <em>deductively valid</em>. A sentence \(\theta\) is a <em>logical theorem</em>, or a <em>deductive logical truth</em>, if \(\vdash_D \theta\). That is, \(\theta\) is a logical theorem if it is a deductive consequence of the empty set. A set \(\Gamma\) of sentences is <em>consistent</em> if there is no sentence \(\theta\) such that \(\Gamma \vdash_D \theta\) and \(\Gamma \vdash_D \neg \theta\). That is, a set is consistent if it does not entail a pair of contradictory opposites.</p> <div class="indent"> <p> <strong>Theorem 12</strong>. A set \(\Gamma\) is consistent if and only if there is a sentence \(\theta\) such that it is not the case that \(\Gamma \vdash \theta\).</p> <p> <strong>Proof:</strong> Suppose that \(\Gamma\) is consistent and let \(\theta\) be any sentence. Then either it is not the case that \(\Gamma \vdash \theta\) or it is not the case that \(\Gamma \vdash \neg \theta\). For the converse, suppose that \(\Gamma\) is inconsistent and let \(\psi\) be any sentence. We have that there is a sentence \(\theta\) such that both \(\Gamma \vdash \theta\) and \(\Gamma \vdash \neg \theta\). By <em>ex falso quodlibet</em> (Theorem 10), \(\Gamma \vdash \psi\). </p> </div> <p> Define a set \(\Gamma\) of sentences of the language \(\LKe\) to be <em>maximally consistent</em> if \(\Gamma\) is consistent and for every sentence \(\theta\) of \(\LKe\), if \(\theta\) is not in \(\Gamma\), then \(\Gamma,\theta\) is inconsistent. In other words, \(\Gamma\) is maximally consistent if \(\Gamma\) is consistent, and adding any sentence in the language not already in \(\Gamma\) renders it inconsistent.
Notice that if \(\Gamma\) is maximally consistent then \(\Gamma \vdash \theta\) if and only if \(\theta\) is in \(\Gamma\).</p> <div class="indent"> <p> <strong>Theorem 13. The Lindenbaum Lemma.</strong> Let \(\Gamma\) be any consistent set of sentences of \(\LKe .\) Then there is a set \(\Gamma'\) of sentences of \(\LKe\) such that \(\Gamma \subseteq \Gamma'\) and \(\Gamma'\) is maximally consistent.</p> <p> <strong>Proof:</strong> Although this theorem holds in general, we assume here that the set \(K\) of non-logical terminology is either finite or denumerably infinite (i.e., the size of the natural numbers, usually called \(\aleph_0)\). It follows that there is an enumeration \(\theta_0, \theta_1,\ldots\) of the sentences of \(\LKe\), such that every sentence of \(\LKe\) eventually occurs in the list. Define a sequence of sets of sentences, by recursion, as follows: \(\Gamma_0\) is \(\Gamma\); for each natural number \(n\), if \(\Gamma_n, \theta_n\) is consistent, then let \(\Gamma_{n+1} = \Gamma_n, \theta_n\). Otherwise, let \(\Gamma_{n+1} = \Gamma_n\). Let \(\Gamma'\) be the union of all of the sets \(\Gamma_n\). Intuitively, the idea is to go through the sentences of \(\LKe\), throwing each one into \(\Gamma'\) if doing so produces a consistent set. Notice that each \(\Gamma_n\) is consistent. Suppose that \(\Gamma'\) is inconsistent. Then there is a sentence \(\theta\) such that \(\Gamma'\vdash \theta\) and \(\Gamma'\vdash \neg \theta\). By Theorem 9 and Weakening (Theorem 8), there is a finite subset \(\Gamma''\) of \(\Gamma'\) such that \(\Gamma''\vdash \theta\) and \(\Gamma''\vdash \neg \theta\). Because \(\Gamma''\) is finite, there is a natural number \(n\) such that every member of \(\Gamma''\) is in \(\Gamma_n\). So, by Weakening again, \(\Gamma_n \vdash \theta\) and \(\Gamma_n \vdash \neg \theta\). So \(\Gamma_n\) is inconsistent, which contradicts the construction. So \(\Gamma'\) is consistent. Now suppose that a sentence \(\theta\) is not in \(\Gamma'\). We have to show that \(\Gamma', \theta\) is inconsistent. The sentence \(\theta\) must occur in the aforementioned list of sentences; say that \(\theta\) is \(\theta_m\). Since \(\theta_m\) is not in \(\Gamma'\), it is not in \(\Gamma_{m+1}\). This happens only if \(\Gamma_m, \theta_m\) is inconsistent. So a pair of contradictory opposites can be deduced from \(\Gamma_m,\theta_m\). By Weakening, a pair of contradictory opposites can be deduced from \(\Gamma', \theta_m\). So \(\Gamma', \theta_m\) is inconsistent. Thus, \(\Gamma'\) is maximally consistent.</p> </div> <p> Notice that this proof uses a principle corresponding to the law of excluded middle. In the construction of \(\Gamma'\), we assumed that, at each stage, either \(\Gamma_n\) is consistent or it is not. Intuitionists, who demur from excluded middle, do not accept the Lindenbaum lemma.</p>
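<p> The construction in the proof of Theorem 13 is a simple greedy recursion, and it can be displayed in a few lines of Python. The sketch below is only an illustration, under strong assumptions: it runs through a finite initial segment of the enumeration, and it takes the consistency test as a parameter, since consistency for full first-order logic is not decidable and no such general test can be programmed. The names are illustrative, not anything official:</p> <pre>
# A sketch of the Lindenbaum construction (all names illustrative).
# `gamma` is a consistent set of sentences; `enumeration` is a finite
# initial segment theta_0, theta_1, ... of an enumeration of sentences;
# `consistent` is a consistency test supplied by the caller, since no
# such test is computable for full first-order logic.

def lindenbaum(gamma, enumeration, consistent):
    """Throw each theta_n into the set whenever the result stays consistent."""
    current = set(gamma)
    for theta in enumeration:
        if consistent(current | {theta}):
            current = current | {theta}   # Gamma_{n+1} is Gamma_n, theta_n
    return current                        # the union of all the stages
</pre> <p> The proof’s reliance on excluded middle surfaces here as the assumption that the consistency test always returns a verdict, one way or the other.</p>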
<h2 id="Sema">4. Semantics</h2> <p> Let \(K\) be a set of non-logical terminology. An <em>interpretation</em> for the language \(\LKe\) is a structure \(M = \langle d,I\rangle\), where \(d\) is a non-empty set, called the <em>domain-of-discourse</em>, or simply the <em>domain</em>, of the interpretation, and \(I\) is an <em>interpretation function</em>. Informally, the domain is what we interpret the language \(\LKe\) to be about. It is what the variables range over. The interpretation function assigns appropriate extensions to the non-logical terms. In particular,</p> <p class="indent"> If \(c\) is a constant in \(K\), then \(I(c)\) is a member of the domain \(d\). </p> <p> Thus we assume that every constant denotes something. Systems where this is not assumed are called <em>free logics</em> (see the entry on <a href="../logic-free/">free logic</a>). Continuing,</p> <div class="indent"> <p> If \(P^0\) is a zero-place predicate letter in \(K\), then \(I(P)\) is a truth value, either truth or falsehood.</p> <p> If \(Q^1\) is a one-place predicate letter in \(K\), then \(I(Q)\) is a subset of \(d\). Intuitively, \(I(Q)\) is the set of members of the domain that the predicate \(Q\) holds of. For example, \(I(Q)\) might be the set of red members of the domain.</p> <p> If \(R^2\) is a two-place predicate letter in \(K\), then \(I(R)\) is a set of ordered pairs of members of \(d\). Intuitively, \(I(R)\) is the set of pairs of members of the domain that the relation \(R\) holds between. For example, \(I(R)\) might be the set of pairs \(\langle a,b\rangle\) of members of the domain such that \(a\) loves \(b\).</p> <p> In general, if <em>S\(^n\)</em> is an \(n\)-place predicate letter in \(K\), then \(I(S)\) is a set of ordered \(n\)-tuples of members of \(d\).</p> </div> <p> Define \(s\) to be a <em>variable-assignment</em>, or simply an <em>assignment</em>, on an interpretation \(M\), if \(s\) is a function from the variables to the domain \(d\) of \(M\). The role of variable-assignments is to assign denotations to the <em>free</em> variables of open formulas. (In a sense, the quantifiers determine the “meaning” of the bound variables.) </p> <p> Let \(t\) be a term of \(\LKe\). We define the <em>denotation</em> of \(t\) in \(M\) under \(s\), in terms of the interpretation function and variable-assignment:</p> <p class="indent"> If \(t\) is a constant, then \(D_{M,s}(t)\) is \(I(t)\), and if \(t\) is a variable, then \(D_{M,s}(t)\) is \(s(t)\). </p> <p> That is, the interpretation \(M\) assigns denotations to the constants, while the variable-assignment assigns denotations to the (free) variables. If the language contained function symbols, the denotation function would be defined by recursion.</p> <p> We now define a relation of <em>satisfaction</em> between interpretations, variable-assignments, and formulas of \(\LKe\). If \(\phi\) is a formula of \(\LKe, M\) is an interpretation for \(\LKe\), and \(s\) is a variable-assignment on \(M\), then we write \(M,s\vDash \phi\) for \(M\) <em>satisfies</em> \(\phi\) <em>under the assignment</em> \(s\). The idea is that \(M,s\vDash \phi\) is an analogue of “\(\phi\) comes out true when interpreted as in \(M\) via \(s\)”.</p> <p> We proceed by recursion on the complexity of the formulas of \(\LKe\).</p> <p class="indent"> If \(t_1\) and \(t_2\) are terms, then \(M,s\vDash t_1 =t_2\) if and only if \(D_{M,s}(t_1)\) is the same as \(D_{M,s}(t_2)\). </p> <p> This is about as straightforward as it gets. An identity \(t_1 =t_2\) comes out true if and only if the terms \(t_1\) and \(t_2\) denote the same thing.</p> <p class="indent"> If \(P^0\) is a zero-place predicate letter in \(K\), then \(M,s\vDash P\) if and only if \(I(P)\) is truth. </p> <p class="indent"> If <em>S\(^n\)</em> is an \(n\)-place predicate letter in \(K\) and \(t_1, \ldots,t_n\) are terms, then \(M,s\vDash St_1 \ldots t_n\) if and only if the \(n\)-tuple \(\langle D_{M,s}(t_1), \ldots,D_{M,s}(t_n)\rangle\) is in \(I(S)\).</p> <p> This takes care of the atomic formulas.
We now proceed to the compound formulas of the language, more or less following the meanings of the English counterparts of the logical terminology.</p> <div class="indent"> <p> \(M,s\vDash \neg \theta\) if and only if it is not the case that \(M,s\vDash \theta\).</p> <p> \(M,s\vDash(\theta \amp \psi)\) if and only if both \(M,s\vDash \theta\) and \(M,s\vDash \psi\).</p> <p> \(M,s\vDash(\theta \vee \psi)\) if and only if either \(M,s\vDash \theta\) or \(M,s\vDash \psi\).</p> <p> \(M,s\vDash(\theta \rightarrow \psi)\) if and only if either it is not the case that \(M,s\vDash \theta\), or \(M,s\vDash \psi\).</p> <p> \(M,s\vDash \forall v\theta\) if and only if \(M,s'\vDash \theta\), for every assignment \(s'\) that agrees with \(s\) except possibly at the variable \(v\).</p> </div> <p> The idea here is that \(\forall v\theta\) comes out true if and only if \(\theta\) comes out true no matter what is assigned to the variable \(v\). The final clause is similar.</p> <p class="indent"> \(M,s\vDash \exists v\theta\) if and only if \(M,s'\vDash \theta\), for some assignment \(s'\) that agrees with \(s\) except possibly at the variable \(v\). </p> <p> So \(\exists v\theta\) comes out true if there is an assignment to \(v\) that makes \(\theta\) true.</p> <p> Theorem 6, unique readability, assures us that this definition is coherent. At each stage in breaking down a formula, there is exactly one clause to be applied, and so we never get contradictory verdicts concerning satisfaction.</p> <p> As indicated, the role of variable-assignments is to give denotations to the free variables. We now show that variable-assignments play no other role.</p> <div class="indent"> <p> <strong>Theorem 14.</strong> For any formula \(\theta\), if \(s_1\) and \(s_2\) agree on the free variables in \(\theta\), then \(M,s_1 \vDash \theta\) if and only if \(M,s_2 \vDash \theta\).</p> <p> <strong>Proof:</strong> We proceed by induction on the complexity of the formula \(\theta\). The theorem clearly holds if \(\theta\) is atomic, since in those cases only the values of the variable-assignments at the variables in \(\theta\) figure in the definition. Assume, then, that the theorem holds for all formulas less complex than \(\theta\). And suppose that \(s_1\) and \(s_2\) agree on the free variables of \(\theta\). Assume, first, that \(\theta\) is a negation, \(\neg \psi\). Then, by the induction hypothesis, \(M,s_1 \vDash \psi\) if and only if \(M,s_2 \vDash \psi\). So, by the clause for negation, \(M,s_1 \vDash \neg \psi\) if and only if \(M,s_2 \vDash \neg \psi\). The cases where the main connective in \(\theta\) is binary are also straightforward. Suppose that \(\theta\) is \(\exists v\psi\), and that \(M,s_1 \vDash \exists v\psi\). Then there is an assignment \(s_1'\) that agrees with \(s_1\) except possibly at \(v\) such that \(M,s_1'\vDash \psi\). Let \(s_2'\) be the assignment that agrees with \(s_1'\) on the variables that are free in \(\psi\) and agrees with \(s_2\) on the others. Then, by the induction hypothesis, \(M,s_2'\vDash \psi\). Notice that \(s_2'\) agrees with \(s_2\) on every variable except possibly \(v\). So \(M,s_2 \vDash \exists v\psi\). The converse is the same, and the case where \(\theta\) begins with a universal quantifier is similar.</p> </div> <p> By Theorem 14, if \(\theta\) is a sentence, and \(s_1\) and \(s_2\) are any two variable-assignments, then \(M,s_1 \vDash \theta\) if and only if \(M,s_2 \vDash \theta\).
So we can just write \(M\vDash \theta\) if \(M,s\vDash \theta\) for some, or all, variable-assignments \(s\). Thus we define </p> <p class="indent"> \(M\vDash \theta\) where \(\theta\) is a sentence just in case \(M,s\vDash\theta\) for all variable assignments \(s\). </p> <p> In this case, we call \(M\) a <em>model</em> of \(\theta\).</p> <p> Suppose that \(K'\) and \(K\) are two sets of non-logical terms with \(K'\subseteq K\). If \(M = \langle d,I\rangle\) is an interpretation of \(\LKe\), then we define the <em>restriction</em> of \(M\) to \(\mathcal{L}1K'{=}\) to be the interpretation \(M'=\langle d,I'\rangle\) such that \(I'\) is the restriction of \(I\) to \(K'\). That is, \(M\) and \(M'\) have the same domain and agree on the non-logical terminology in \(K'\). A straightforward induction establishes the following:</p> <div class="indent"> <p> <strong>Theorem 15</strong>. If \(M'\) is the restriction of \(M\) to \(\mathcal{L}1K'{=}\), then for every sentence \(\theta\) of \(\mathcal{L}1K'{=}\), \(M\vDash\theta\) if and only if \(M'\vDash \theta\).</p> <p> <strong>Theorem 16.</strong> If two interpretations \(M_1\) and \(M_2\) have the same domain and agree on all of the non-logical terminology of a sentence \(\theta\), then \(M_1\vDash\theta\) if and only if \(M_2\vDash \theta\).</p> </div> <p> In short, the satisfaction of a sentence \(\theta\) only depends on the domain of discourse and the interpretation of the non-logical terminology in \(\theta\).</p> <p> We say that an argument \(\langle \Gamma,\theta \rangle\) is <em>semantically valid</em>, or just <em>valid</em>, written \(\Gamma \vDash \theta\), if for every interpretation \(M\) of the language, if \(M\vDash\psi\), for every member \(\psi\) of \(\Gamma\), then \(M\vDash\theta\). If \(\Gamma \vDash \theta\), we also say that \(\theta\) is a <em>logical consequence</em>, or <em>semantic consequence</em>, or <em>model-theoretic consequence</em> of \(\Gamma\). The definition corresponds to the informal idea that an argument is valid if it is not possible for its premises to all be true and its conclusion false. Our definition of logical consequence also sanctions the common thesis that a valid argument is truth-preserving – to the extent that satisfaction represents truth. Officially, an argument in \(\LKe\) is valid if its conclusion comes out true under every interpretation of the language in which the premises are true. Validity is the model-theoretic counterpart to deducibility.</p> <p> A sentence \(\theta\) is <em>logically true</em>, or <em>valid</em>, if \(M\vDash \theta\), for every interpretation \(M\). A sentence is logically true if and only if it is a consequence of the empty set. If \(\theta\) is logically true, then for any set \(\Gamma\) of sentences, \(\Gamma \vDash \theta\). Logical truth is the model-theoretic counterpart of theoremhood.</p> <p> A sentence \(\theta\) is <em>satisfiable</em> if there is an interpretation \(M\) such that \(M\vDash \theta\). That is, \(\theta\) is satisfiable if there is an interpretation that satisfies it. A set \(\Gamma\) of sentences is satisfiable if there is an interpretation \(M\) such that \(M\vDash\theta\), for every sentence \(\theta\) in \(\Gamma\). If \(\Gamma\) is a set of sentences and if \(M\vDash \theta\) for each sentence \(\theta\) in \(\Gamma\), then we say that \(M\) is a <em>model of</em> \(\Gamma\). So a set of sentences is satisfiable if it has a model.
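</p> <p> Over a given finite interpretation, the above clauses are effective, and it may help to see them implemented. In the following Python sketch, formulas are represented as nested tuples; the representation and all of the names are illustrative assumptions, not anything official. Note also that validity and satisfiability proper quantify over <em>all</em> interpretations, including infinite ones, so no such program decides them; the sketch merely animates the definitions for a single finite interpretation:</p> <pre>
# A sketch of satisfaction over a finite interpretation (illustrative names).
# Formulas are nested tuples, e.g. ('forall', 'v', ('pred', 'Q', 'v')).

def denote(term, interp, assign):
    """D_{M,s}(t): constants are interpreted by I, variables by s."""
    if term in interp['I']:
        return interp['I'][term]          # t is a constant
    return assign[term]                   # t is a variable

def satisfies(interp, assign, phi):
    """M,s satisfies phi, by recursion on the complexity of phi."""
    op = phi[0]
    if op == '=':
        return denote(phi[1], interp, assign) == denote(phi[2], interp, assign)
    if op == 'pred':                      # ('pred', S, t_1, ..., t_n)
        args = tuple(denote(t, interp, assign) for t in phi[2:])
        return args in interp['I'][phi[1]]
    if op == 'not':
        return not satisfies(interp, assign, phi[1])
    if op == 'and':
        return satisfies(interp, assign, phi[1]) and satisfies(interp, assign, phi[2])
    if op == 'or':
        return satisfies(interp, assign, phi[1]) or satisfies(interp, assign, phi[2])
    if op == 'implies':
        return (not satisfies(interp, assign, phi[1])) or satisfies(interp, assign, phi[2])
    if op in ('forall', 'exists'):        # vary the assignment at one variable
        variants = (dict(assign, **{phi[1]: a}) for a in interp['domain'])
        results = (satisfies(interp, s2, phi[2]) for s2 in variants)
        return all(results) if op == 'forall' else any(results)
    raise ValueError('unknown operator')

def is_model_of(interp, sentences):
    """M is a model of Gamma: M satisfies every member of Gamma."""
    return all(satisfies(interp, {}, theta) for theta in sentences)

# Example: domain {1, 2}; Q holds of 1 only; the constant c denotes 1.
M = {'domain': {1, 2}, 'I': {'c': 1, 'Q': {(1,)}}}
assert is_model_of(M, [('pred', 'Q', 'c'),
                       ('exists', 'v', ('pred', 'Q', 'v'))])
assert not satisfies(M, {}, ('forall', 'v', ('pred', 'Q', 'v')))
</pre> <p> The final lines check that the displayed two-element interpretation is a model of \(Qc\) and of \(\exists vQv\), but not of \(\forall vQv\). <p>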
Satisfiability is the model-theoretic counterpart to consistency.</p> <p> Notice that \(\Gamma \vDash \theta\) if and only if the set \(\Gamma,\neg \theta\) is not satisfiable. It follows that if a set \(\Gamma\) is not satisfiable, then if \(\theta\) is any sentence, \(\Gamma \vDash \theta\). This is a model-theoretic counterpart to <em>ex falso quodlibet</em> (see Theorem 10). We have the following, as an analogue to Theorem 12:</p> <div class="indent"> <p> <strong>Theorem 17</strong>. Let \(\Gamma\) be a set of sentences. The following are equivalent: (a) \(\Gamma\) is satisfiable; (b) there is no sentence \(\theta\) such that both \(\Gamma \vDash \theta\) and \(\Gamma \vDash \neg \theta\); (c) there is some sentence \(\psi\) such that it is not the case that \(\Gamma \vDash \psi\).</p> <p> <strong>Proof:</strong> (a)\(\Rightarrow\)(b): Suppose that \(\Gamma\) is satisfiable and let \(\theta\) be any sentence. There is an interpretation \(M\) such that \(M\vDash \psi\) for every member \(\psi\) of \(\Gamma\). By the clause for negations, we cannot have both \(M\vDash \theta\) and \(M\vDash \neg \theta\). So either \(\langle \Gamma,\theta \rangle\) is not valid or else \(\langle \Gamma,\neg \theta \rangle\) is not valid. (b)\(\Rightarrow\)(c): This is immediate. (c)\(\Rightarrow\)(a): Suppose that it is not the case that \(\Gamma \vDash \psi\). Then there is an interpretation \(M\) such that \(M\vDash \theta\), for every sentence \(\theta\) in \(\Gamma\) and it is not the case that \(M\vDash \psi\). A fortiori, \(M\) satisfies every member of \(\Gamma\), and so \(\Gamma\) is satisfiable.</p> </div> <h2 id="MetaTheo">5. Meta-theory</h2> <p> We now present some results that relate the deductive notions to their model-theoretic counterparts. The first one is probably the most straightforward. We motivated both the various rules of the deductive system \(D\) and the various clauses in the definition of satisfaction in terms of the meaning of the English counterparts to the logical terminology (more or less, with the same simplifications in both cases). So one would expect that an argument is deducible, or deductively valid, only if it is semantically valid.</p> <div class="indent"> <p> <strong>Theorem 18. Soundness.</strong> For any sentence \(\theta\) and set \(\Gamma\) of sentences, if \(\Gamma \vdash_D \theta\), then \(\Gamma \vDash \theta\).</p> <p> <strong>Proof:</strong> We proceed by induction on the number of clauses used to establish \(\Gamma \vdash \theta\). So let \(n\) be a natural number, and assume that the theorem holds for any argument established as deductively valid with fewer than \(n\) steps. And suppose that \(\Gamma \vdash \theta\) was established using exactly \(n\) steps. If the last rule applied was \((=\)I) then \(\theta\) is a sentence in the form \(t=t\), and so \(\theta\) is logically true. A fortiori, \(\Gamma \vDash \theta\). If the last rule applied was (As), then \(\theta\) is a member of \(\Gamma\), and so of course any interpretation that satisfies every member of \(\Gamma\) also satisfies \(\theta\). Suppose the last rule applied is (&I). So \(\theta\) has the form \((\phi \amp \psi)\), and we have \(\Gamma_1 \vdash \phi\) and \(\Gamma_2 \vdash \psi\), with \(\Gamma = \Gamma_1, \Gamma_2\). The induction hypothesis gives us \(\Gamma_1 \vDash \phi\) and \(\Gamma_2 \vDash \psi\). Suppose that \(M\) satisfies every member of \(\Gamma\). Then \(M\) satisfies every member of \(\Gamma_1\), and so \(M\) satisfies \(\phi\). 
Similarly, \(M\) satisfies every member of \(\Gamma_2\), and so \(M\) satisfies \(\psi\). Thus, by the clause for “\(\amp\)” in the definition of satisfaction, \(M\) satisfies \(\theta\). So \(\Gamma \vDash \theta\). </p> <p> Suppose the last clause applied was \((\exists\mathrm{E})\). So we have \(\Gamma_1 \vdash \exists v\phi\) and \(\Gamma_2, \phi(v|t) \vdash \theta\), where \(\Gamma = \Gamma_1, \Gamma_2\), and \(t\) does not occur in \(\phi , \theta \), or in any member of \(\Gamma_2\).</p> <p> We need to show that \(\Gamma\vDash\theta\). By the induction hypothesis, we have that \(\Gamma_1\vDash\exists v\phi\) and \(\Gamma_2, \phi(v|t)\vDash\theta\). Let \(M\) be an interpretation such that \(M\) makes every member of \(\Gamma\) true. So, \(M\) makes every member of \(\Gamma_1\) and \(\Gamma_2\) true. Then \(M,s\vDash\exists v\phi\) for all variable assignments \(s\), so there is an \(s'\) such that \(M,s'\vDash\phi\). Let \(M'\) differ from \(M\) only in that \(I_{M'}(t)=s'(v)\). Then, \(M',s'\vDash\phi(v|t)\) and \(M',s'\vDash\Gamma_2\) since \(t\) does not occur in \(\phi\) or \(\Gamma_2\). So, \(M',s'\vDash\theta\). Since \(t\) does not occur in \(\theta\) and \(M'\) differs from \(M\) only with respect to \(I_{M'}(t)\), \(M,s'\vDash\theta\). Since \(\theta\) is a sentence, \(s'\) doesn't matter, so \(M\vDash\theta\) as desired. Notice the role of the restrictions on \((\exists\)E) here. The other cases are equally straightforward. </p> <p> <strong>Corollary 19.</strong> Let \(\Gamma\) be a set of sentences. If \(\Gamma\) is satisfiable, then \(\Gamma\) is consistent. </p> <p> <strong>Proof:</strong> Suppose that \(\Gamma\) is satisfiable. So let \(M\) be an interpretation such that \(M\) satisfies every member of \(\Gamma\). Assume that \(\Gamma\) is inconsistent. Then there is a sentence \(\theta\) such that \(\Gamma \vdash \theta\) and \(\Gamma \vdash \neg \theta\). By soundness (Theorem 18), \(\Gamma \vDash \theta\) and \(\Gamma \vDash \neg \theta\). So we have that \(M\vDash \theta\) and \(M\vDash \neg \theta\). But this is impossible, given the clause for negation in the definition of satisfaction.</p> </div> <p> Even though the deductive system \(D\) and the model-theoretic semantics were developed with the meanings of the logical terminology in mind, one should not automatically expect the converse to soundness (or Corollary 19) to hold. For all we know so far, we may not have included enough rules of inference to deduce every valid argument. The converses to soundness and Corollary 19 are among the most important and influential results in mathematical logic. We begin with the latter.</p> <div class="indent"> <p> <strong>Theorem 20. Completeness. Gödel [1930].</strong> Let \(\Gamma\) be a set of sentences. If \(\Gamma\) is consistent, then \(\Gamma\) is satisfiable.</p> <p> <strong>Proof:</strong> The proof of completeness is rather complex. We only sketch it here. Let \(\Gamma\) be a consistent set of sentences of \(\LKe\). Again, we assume for simplicity that the set \(K\) of non-logical terminology is either finite or countably infinite (although the theorem holds even if \(K\) is uncountable). The task at hand is to find an interpretation \(M\) such that \(M\) satisfies every member of \(\Gamma\). Consider the language obtained from \(\LKe\) by adding a denumerably infinite stock of new individual constants \(c_0, c_1,\ldots\) We stipulate that the constants, \(c_0, c_1,\ldots\), are all different from each other and none of them occur in \(K\). One interesting feature of this construction, due to Leon Henkin, is that we build an interpretation of the language from the language itself, using some of the constants as members of the domain of discourse. Let \(\theta_0 (x), \theta_1 (x),\ldots\) be an enumeration of the formulas of the expanded language with at most one free variable, so that each formula with at most one free variable occurs in the list eventually. Define a sequence \(\Gamma_0, \Gamma_1,\ldots\) of sets of sentences (of the expanded language) by recursion as follows: \(\Gamma_0 = \Gamma\); and \(\Gamma_{n+1} = \Gamma_n,(\exists x\theta_n \rightarrow \theta_{n}(x|c_i))\), where \(c_i\) is the first constant in the above list that does not occur in \(\theta_n\) or in any member of \(\Gamma_n\). The underlying idea here is that if \(\exists x\theta_n\) is true, then \(c_i\) is to be one such \(x\). Let \(\Gamma'\) be the union of the sets \(\Gamma_n\). </p>
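<p> The recursion here is entirely syntactic, and for a finite initial segment of the enumeration it can be carried out mechanically. Here is a Python sketch, using the tuple representation of formulas from the sketch in section 4; the function names, and the helper \(\textit{occurring}\) (assumed to return the set of constants occurring in a formula), are illustrative assumptions:</p> <pre>
# A sketch of the Henkin step (illustrative names; tuple representation
# as in the sketch in section 4).  `occurring(phi)` is assumed to return
# the set of constants occurring in the formula phi.

from itertools import count

def fresh_constant(used):
    """The first constant 'c0', 'c1', ... that is not in `used`."""
    for i in count():
        c = 'c' + str(i)
        if c not in used:
            return c

def substitute(phi, v, t):
    """phi(v|t): replace occurrences of the variable v by the term t
    (a sketch: it assumes no quantifier in phi rebinds v)."""
    if isinstance(phi, str):
        return t if phi == v else phi
    return tuple(substitute(part, v, t) for part in phi)

def henkin_extension(gamma, formulas, occurring):
    """Add (exists x)theta_n -> theta_n(x|c_i), with c_i fresh, for each
    theta_n in a finite initial segment of the enumeration."""
    gamma = set(gamma)
    for theta in formulas:
        used = set(occurring(theta)).union(*(occurring(phi) for phi in gamma))
        c = fresh_constant(used)
        witness_axiom = ('implies',
                         ('exists', 'x', theta),
                         substitute(theta, 'x', c))
        gamma.add(witness_axiom)          # Gamma_{n+1}
    return gamma
</pre> <p> Each pass consumes one formula \(\theta_n\) from the enumeration and one fresh constant \(c_i\); the union of all the stages is the set \(\Gamma'\) that the proof now goes on to show consistent.</p>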
<p> We sketch a proof that \(\Gamma'\) is consistent. Suppose that \(\Gamma'\) is inconsistent. By Theorem 9, there is a finite subset of \(\Gamma'\) that is inconsistent, and so one of the sets \(\Gamma_m\) is inconsistent. By hypothesis, \(\Gamma_0 = \Gamma\) is consistent. Let \(n\) be the smallest number such that \(\Gamma_n\) is consistent, but \(\Gamma_{n+1} = \Gamma_n,(\exists x\theta_n \rightarrow \theta_{n}(x|c_i))\) is inconsistent. By \((\neg\)I), we have that</p> \[\tag{1} \Gamma_n \vdash \neg(\exists x\theta_n \rightarrow \theta_n(x|c_i)). \] <p> By <em>ex falso quodlibet</em> (Theorem 10), \(\Gamma_n, \neg \exists x\theta_n, \exists x\theta_n \vdash \theta_n (x|c_i)\). So by \((\rightarrow\)I), \(\Gamma_n, \neg \exists x\theta_n \vdash(\exists x\theta_n \rightarrow \theta_n (x|c_i))\). From this and (1), we have \(\Gamma_n \vdash \neg \neg \exists x\theta_n\), by \((\neg\)I), and by (DNE) we have</p> \[\tag{2} \Gamma_n \vdash \exists x\theta_n . \] <p> By (As), \(\Gamma_n, \theta_n (x|c_i), \exists x\theta_n \vdash \theta_n (x|c_i)\). So by \((\rightarrow\)I), \(\Gamma_n, \theta_n (x|c_i)\vdash(\exists x\theta_{n} \rightarrow \theta_{n}(x|c_i))\). From this and (1), we have \(\Gamma_n \vdash \neg \theta_n (x|c_i)\), by \((\neg\)I). Let \(t\) be a term that does not occur in \(\theta_n\) or in any member of \(\Gamma_n\). By uniform substitution of \(t\) for \(c_i\), we can turn the derivation of \(\Gamma_n \vdash \neg \theta_n (x|c_i)\) into \(\Gamma_n \vdash \neg \theta_n (x|t)\). By \((\forall\)I), we have</p> \[\tag{3} \Gamma_n \vdash \forall v\neg \theta_n (x|v). \] <p> By (As) we have \(\{\forall v\neg \theta_n (x|v),\theta_n\}\vdash \theta_n\) and by \((\forall\)E) we have \(\{\forall v\neg \theta_n (x|v), \theta_n\}\vdash \neg \theta_n\). So \(\{\forall v\neg \theta_n (x|v), \theta_n\}\) is inconsistent. Let \(\phi\) be any sentence of the language. By <em>ex falso quodlibet</em> (Theorem 10), we have that \(\{\forall v\neg \theta_n (x|v),\theta_n\}\vdash \phi\) and \(\{\forall v\neg \theta_n (x|v), \theta_n\}\vdash \neg \phi\). So with (2), we have that \(\Gamma_n, \forall v\neg \theta_n (x|v)\vdash \phi\) and \(\Gamma_n, \forall v\neg \theta_n (x|v)\vdash \neg \phi\), by \((\exists\)E). By Cut (Theorem 11), \(\Gamma_n \vdash \phi\) and \(\Gamma_n \vdash \neg \phi\). So \(\Gamma_n\) is inconsistent, contradicting the assumption. So \(\Gamma'\) is consistent.</p> <p> Applying the Lindenbaum Lemma (Theorem 13), let \(\Gamma''\) be a maximally consistent set of sentences (of the expanded language) that contains \(\Gamma'\).
So, of course, \(\Gamma''\) contains \(\Gamma\). We can now define an interpretation \(M\) such that \(M\) satisfies every member of \(\Gamma''\).</p> <p> If we did not have a sign for identity in the language, we would let the domain of \(M\) be the collection of new constants \(\{c_0, c_1, \ldots \}\). But as it is, there may be a sentence in the form \(c_{i}=c_{j}\), with \(i\ne j\), in \(\Gamma''\). If so, we cannot have both \(c_i\) and \(c_j\) in the domain of the interpretation (as they are distinct constants). So we define the domain \(d\) of \(M\) to be the set \(\{c_i \mid\) there is no \(j\lt i\) such that \(c_{i}=c_{j}\) is in \(\Gamma''\}\). In other words, a constant \(c_i\) is in the domain of \(M\) if \(\Gamma''\) does not declare it to be identical to an earlier constant in the list. Notice that for each new constant \(c_i\), there is exactly one \(j\le i\) such that \(c_j\) is in \(d\) and the sentence \(c_{i}=c_{j}\) is in \(\Gamma''\).</p> <p> We now define the interpretation function \(I\). Let \(a\) be any constant in the expanded language. By \((=\)I) and \((\exists\)I), \(\Gamma''\vdash \exists x x=a\), and so \(\exists x x=a \in \Gamma''\). By the construction of \(\Gamma'\), there is a sentence in the form \((\exists x x=a \rightarrow c_i =a)\) in \(\Gamma''\). We have that \(c_i =a\) is in \(\Gamma''\). As above, there is exactly one \(c_j\) in \(d\) such that \(c_{i}=c_{j}\) is in \(\Gamma''\). Let \(I(a)=c_j\). Notice that if \(c_i\) is a constant in the domain \(d\), then \(I(c_i)=c_i\). That is, each \(c_i\) in \(d\) denotes itself.</p> <p> Let \(P\) be a zero-place predicate letter in \(K\). Then \(I(P)\) is truth if \(P\) is in \(\Gamma''\) and \(I(P)\) is falsehood otherwise. Let \(Q\) be a one-place predicate letter in \(K\). Then \(I(Q)\) is the set of constants \(\{c_i \mid c_i\) is in \(d\) and the sentence \(Qc_i\) is in \(\Gamma''\}\). Let \(R\) be a binary predicate letter in \(K\). Then \(I(R)\) is the set of pairs of constants \(\{\langle c_i,c_j\rangle \mid c_i\) is in \(d, c_j\) is in \(d\), and the sentence \(Rc_{i}c_{j}\) is in \(\Gamma''\}\). Three-place predicates, etc., are interpreted similarly. In effect, \(I\) interprets the non-logical terminology just as \(\Gamma''\) says they are to be interpreted.</p> <p> The final item in this proof is a lemma that for every sentence \(\theta\) in the expanded language, \(M\vDash \theta\) if and only if \(\theta\) is in \(\Gamma''\). This proceeds by induction on the complexity of \(\theta\). The case where \(\theta\) is atomic follows from the definitions of \(M\) (i.e., the domain \(d\) and the interpretation function \(I\)). The other cases follow from the various clauses in the definition of satisfaction.</p> <p> Since \(\Gamma \subseteq \Gamma''\), we have that \(M\) satisfies every member of \(\Gamma\). By Theorem 15, the restriction of \(M\) to the original language \(\LKe\) also satisfies every member of \(\Gamma\). Thus \(\Gamma\) is satisfiable. </p> </div> <p> A converse to Soundness (Theorem 18) is a straightforward corollary:</p> <div class="indent"> <p> <strong>Theorem 21.</strong> For any sentence \(\theta\) and set \(\Gamma\) of sentences, if \(\Gamma \vDash \theta\), then \(\Gamma \vdash_D \theta\).</p> <p> <strong>Proof:</strong> Suppose that \(\Gamma \vDash \theta\). Then there is no interpretation \(M\) such that \(M\) satisfies every member of \(\Gamma\) but does not satisfy \(\theta\). So the set \(\Gamma,\neg \theta\) is not satisfiable.
By Completeness (Theorem 20), \(\Gamma,\neg \theta\) is inconsistent. So there is a sentence \(\phi\) such that \(\Gamma,\neg \theta \vdash \phi\) and \(\Gamma,\neg \theta \vdash \neg \phi\). By \((\neg\)I), \(\Gamma \vdash \neg \neg \theta\), and by (DNE) \(\Gamma \vdash \theta\).</p> </div> <p> Our next item is a corollary of Theorem 9, Soundness (Theorem 18), and Completeness:</p> <div class="indent"> <p> <strong>Corollary 22. Compactness.</strong> A set \(\Gamma\) of sentences is satisfiable if and only if every finite subset of \(\Gamma\) is satisfiable.</p> <p> <strong>Proof:</strong> If \(M\) satisfies every member of \(\Gamma\), then \(M\) satisfies every member of each finite subset of \(\Gamma\). For the converse, suppose that \(\Gamma\) is not satisfiable. Then we show that some finite subset of \(\Gamma\) is not satisfiable. By Completeness (Theorem 20), \(\Gamma\) is inconsistent. By Theorem 9 (and Weakening), there is a finite subset \(\Gamma'\subseteq \Gamma\) such that \(\Gamma'\) is inconsistent. By Corollary 19, \(\Gamma'\) is not satisfiable.</p> </div> <p> Soundness and completeness together entail that an argument is deducible if and only if it is valid, and a set of sentences is consistent if and only if it is satisfiable. So we can go back and forth between model-theoretic and proof-theoretic notions, transferring properties of one to the other. Compactness holds in the model theory because all derivations use only a finite number of premises.</p> <p> Recall that in the proof of Completeness (Theorem 20), we made the simplifying assumption that the set \(K\) of non-logical constants is either finite or denumerably infinite. The interpretation we produced was itself either finite or denumerably infinite. Thus, we have the following:</p> <p class="indent"> <strong>Corollary 23. Löwenheim-Skolem Theorem.</strong> Let \(\Gamma\) be a satisfiable set of sentences of the language \(\LKe\). If \(\Gamma\) is either finite or denumerably infinite, then \(\Gamma\) has a model whose domain is either finite or denumerably infinite. </p> <p> In general, let \(\Gamma\) be a satisfiable set of sentences of \(\LKe\), and let \(\kappa\) be the larger of the size of \(\Gamma\) and denumerably infinite. Then \(\Gamma\) has a model whose domain is at most of size \(\kappa\).</p> <p> There is a stronger version of Corollary 23. Let \(M_1 =\langle d_1,I_1\rangle\) and \(M_2 =\langle d_2,I_2\rangle\) be interpretations of the language \(\LKe\). Define \(M_1\) to be a <em>submodel</em> of \(M_2\) if \(d_1 \subseteq d_2, I_1 (c) = I_2 (c)\) for each constant \(c\), and \(I_1\) is the restriction of \(I_2\) to \(d_1\). For example, if \(R\) is a binary relation letter in \(K\), then for all \(a,b\) in \(d_1\), the pair \(\langle a,b\rangle\) is in \(I_1 (R)\) if and only if \(\langle a,b\rangle\) is in \(I_2 (R)\). If we had included function letters among the non-logical terminology, we would also require that \(d_1\) be closed under their interpretations in \(M_2\). Notice that if \(M_1\) is a submodel of \(M_2\), then any variable-assignment on \(M_1\) is also a variable-assignment on \(M_2\).</p> <p> Say that two interpretations \(M_1 =\langle d_1,I_1\rangle, M_2 =\langle d_2,I_2\rangle\) are <em>equivalent</em> if one of them is a submodel of the other, and for any formula \(\theta\) of the language and any variable-assignment \(s\) on the submodel, \(M_1,s\vDash \theta\) if and only if \(M_2,s\vDash \theta\).
Notice that if two interpretations are equivalent, then they satisfy the same sentences.</p> <div class="indent"> <p> <strong>Theorem 25. Downward Löwenheim-Skolem Theorem.</strong> Let \(M = \langle d,I\rangle\) be an interpretation of the language \(\LKe\). Let \(d_1\) be any subset of \(d\), and let \(\kappa\) be the maximum of the size of \(K\), the size of \(d_1\), and denumerably infinite. Then there is a submodel \(M' = \langle d',I'\rangle\) of \(M\) such that (1) \(d'\) is not larger than \(\kappa\), and (2) \(M\) and \(M'\) are equivalent. In particular, if the set \(K\) of non-logical terminology is either finite or denumerably infinite, then any interpretation has an equivalent submodel whose domain is either finite or denumerably infinite.</p> <p> <strong>Proof:</strong> Like completeness, this proof is complex, and we rest content with a sketch. The downward Löwenheim-Skolem theorem invokes the axiom of choice, and indeed, is equivalent to the axiom of choice (see the entry on <a href="../axiom-choice/">the axiom of choice</a>). So let \(C\) be a choice function on the powerset of \(d\), so that for each non-empty subset \(e\subseteq d, C(e)\) is a member of \(e\). We stipulate that if \(e\) is the empty set, then \(C(e)\) is \(C(d)\).</p> <p> Let \(s\) be a variable-assignment on \(M\), let \(\theta\) be a formula of \(\LKe\), and let \(v\) be a variable. Define the \(v\)-<em>witness of</em> \(\theta\) <em>over s</em>, written \(w_v (\theta,s)\), as follows: Let \(q\) be the set of all elements \(c\in d\) such that there is a variable-assignment \(s'\) on \(M\) that agrees with \(s\) on every variable except possibly \(v\), such that \(M,s'\vDash \theta\), and \(s'(v)=c\). Then \(w_v (\theta,s) = C(q)\). Notice that if \(M,s\vDash \exists v\theta\), then \(q\) is the set of elements of the domain that can go for \(v\) in \(\theta\). Indeed, \(M,s\vDash \exists v\theta\) if and only if \(q\) is non-empty. So if \(M,s\vDash \exists v\theta\), then \(w_v (\theta,s)\) (i.e., \(C(q))\) is a chosen element of the domain that can go for \(v\) in \(\theta\). In a sense, it is a “witness” that verifies \(M,s\vDash \exists v\theta\).</p> <p> If \(e\) is a non-empty subset of the domain \(d\), then define a variable-assignment \(s\) to be an \(e\)-<em>assignment</em> if for all variables \(u, s(u)\) is in \(e\). That is, \(s\) is an \(e\)-assignment if \(s\) assigns an element of \(e\) to each variable. Define \(sk(e)\), the <em>Skolem-hull</em> of \(e\), to be the set:</p> \[\begin{align*} e \cup \{w_v (\theta,s)|& \theta \text{ is a formula in } \LKe, \\ & v \text{ is a variable, and } \\ & s \text{ is an } e\text{-assignment} \}. \end{align*}\] <p> That is, the Skolem-hull of \(e\) is the set \(e\) together with every \(v\)-witness of every formula over every \(e\)-assignment. Roughly, the idea is to start with \(e\) and then throw in enough elements to make each existentially quantified formula true. We cannot rest content with the Skolem-hull, however. Once we throw the “witnesses” into the domain, we need to deal with \(sk(e)\)-assignments. In effect, we need a set which is its own Skolem-hull, and also contains the given subset \(d_1\).</p>
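<p> Over a finite interpretation, and for a finite stock of formulas, such a closure can be computed by brute force. The Python sketch below is again only an illustration: it piggybacks on a satisfaction test (such as the \(\textit{satisfies}\) sketch in section 4), represents the relevant formulas as triples of a formula, a variable, and the variables occurring free in the formula, and uses a deterministic stand-in for the choice function \(C\):</p> <pre>
# A sketch of the Skolem-hull over a finite interpretation.  `sat` is a
# satisfaction test (such as the `satisfies` sketch in section 4);
# `formulas` is a finite list of triples (theta, v, free_vars); min()
# serves as a deterministic stand-in for the choice function C, with
# C applied to the empty set giving C(d), as stipulated above.

from itertools import product

def witness(M, sat, theta, v, s):
    """w_v(theta, s): a chosen element that can go for v in theta."""
    q = [a for a in sorted(M['domain'])
         if sat(M, dict(s, **{v: a}), theta)]
    return min(q) if q else min(M['domain'])

def e_assignments(variables, e):
    """All assignments of members of e to the given (finitely many) variables."""
    variables = list(variables)
    for values in product(sorted(e), repeat=len(variables)):
        yield dict(zip(variables, values))

def skolem_hull(M, sat, formulas, e):
    """sk(e): e together with every relevant witness over every e-assignment."""
    hull = set(e)
    for theta, v, free_vars in formulas:
        for s in e_assignments(free_vars, e):
            hull.add(witness(M, sat, theta, v, s))
    return hull

def hull_closure(M, sat, formulas, e0):
    """Iterate e_{n+1} = sk(e_n) until the set is its own Skolem-hull."""
    e = set(e0)
    while True:
        nxt = skolem_hull(M, sat, formulas, e)
        if nxt == e:
            return e
        e = nxt
</pre> <p> In the proof proper, of course, the hull operation runs through all formulas of \(\LKe\) and all \(e\)-assignments, and the choice function is supplied by the axiom of choice rather than by an ordering of the domain.</p>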
<p> We define a sequence of non-empty sets \(e_0, e_1,\ldots\) as follows: if the given subset \(d_1\) of \(d\) is empty and there are no constants in \(K\), then let \(e_0\) be \(\{C(d)\}\), the singleton of the choice function applied to the entire domain; otherwise let \(e_0\) be the union of \(d_1\) and the denotations under \(I\) of the constants in \(K\). For each natural number \(n\), \(e_{n+1}\) is \(sk(e_n)\). Finally, let \(d'\) be the union of the sets \(e_n\), and let \(I'\) be the restriction of \(I\) to \(d'\). Our interpretation is \(M' = \langle d',I'\rangle\).</p> <p> Clearly, \(d_1\) is a subset of \(d'\), and so \(M'\) is a submodel of \(M\). Let \(\kappa\) be the maximum of the size of \(K\), the size of \(d_1\), and denumerably infinite. A calculation reveals that the size of \(d'\) is at most \(\kappa\), based on the fact that there are at most \(\kappa\)-many formulas, and thus, at most \(\kappa\)-many witnesses at each stage. Notice, incidentally, that this calculation relies on the fact that a denumerable union of sets of size at most \(\kappa\) is itself of size at most \(\kappa\). This also relies on the axiom of choice.</p> <p> The final item is to show that \(M'\) is equivalent to \(M\): For every formula \(\theta\) and every variable-assignment \(s\) on \(M'\),</p> \[ M,s\vDash \theta \text{ if and only if } M',s\vDash \theta. \] <p> The proof proceeds by induction on the complexity of \(\theta\). Unfortunately, space constraints require that we leave this step as an exercise.</p> </div> <p> Another corollary to Compactness (Corollary 22) is the opposite of the Löwenheim-Skolem theorem:</p> <div class="indent"> <p> <strong>Theorem 26. Upward Löwenheim-Skolem Theorem.</strong> Let \(\Gamma\) be any set of sentences of \(\LKe,\) such that for each natural number \(n\), there is an interpretation \(M_n = \langle d_n,I_n\rangle\), such that \(d_n\) has at least \(n\) elements, and \(M_n\) satisfies every member of \(\Gamma\). In other words, \(\Gamma\) is satisfiable and there is no finite upper bound to the size of the interpretations that satisfy every member of \(\Gamma\). Then for any infinite cardinal \(\kappa\), there is an interpretation \(M=\langle d,I\rangle\), such that the size of \(d\) is <em>at least</em> \(\kappa\) and \(M\) satisfies every member of \(\Gamma\).</p> <p> <strong>Proof:</strong> Add a collection of new constants \(\{c_{\alpha} | \alpha \lt \kappa \}\), of size \(\kappa\), to the language, so that if \(c\) is a constant in \(K\), then \(c_{\alpha}\) is different from \(c\), and if \(\alpha \lt \beta \lt \kappa\), then \(c_{\alpha}\) is a different constant from \(c_{\beta}\). Consider the set of sentences \(\Gamma'\) consisting of \(\Gamma\) together with the set \(\{\neg c_{\alpha}=c_{\beta} | \alpha \ne \beta \}\). That is, \(\Gamma'\) consists of \(\Gamma\) together with statements to the effect that any two different new constants denote different objects. Let \(\Gamma''\) be any finite subset of \(\Gamma'\), and let \(m\) be the number of new constants that occur in \(\Gamma''\). Then expand the interpretation \(M_m\) to an interpretation \(M_m'\) of the new language, by interpreting each of the new constants in \(\Gamma''\) as a different member of the domain \(d_m\). By hypothesis, there are enough members of \(d_m\) to do this. One can interpret the other new constants at will. So \(M_m\) is a restriction of \(M_m'\).
By hypothesis (and Theorem 15), \(M'_m\) satisfies every member of \(\Gamma\). Also \(M'_m\) satisfies the members of \(\{\neg c_{\alpha}=c_{\beta} | \alpha \ne \beta \}\) that are in \(\Gamma''\). So \(M'_m\) satisfies every member of \(\Gamma''\). By compactness, there is an interpretation \(M = \langle d,I\rangle\) such that \(M\) satisfies every member of \(\Gamma'\). Since \(\Gamma'\) contains every member of \(\{\neg c_{\alpha}=c_{\beta} | \alpha \ne \beta \}\), the domain \(d\) of \(M\) must be of size at least \(\kappa\), since each of the new constants must have a different denotation. By Theorem 15, the restriction of \(M\) to the original language \(\LKe\) satisfies every member of \(\Gamma\).</p> </div> <p> Combined, the proofs of the downward and upward Löwenheim-Skolem theorems show that for any satisfiable set \(\Gamma\) of sentences, if there is no finite upper bound on the size of the models of \(\Gamma\), then for any infinite cardinal \(\kappa\), there is a model of \(\Gamma\) whose domain has size <em>exactly</em> \(\kappa\). Moreover, if \(M\) is any interpretation whose domain is infinite, then for any infinite cardinal \(\kappa\), there is an interpretation \(M'\) whose domain has size exactly \(\kappa\) such that \(M\) and \(M'\) are equivalent.</p> <p> These results indicate a weakness in the expressive resources of first-order languages like \(\LKe\). No satisfiable set of sentences can guarantee that its models are all denumerably infinite, nor can any satisfiable set of sentences guarantee that its models are all uncountable. So in a sense, first-order languages cannot express the notion of “denumerably infinite”, at least not in the model theory. (See the entry on <a href="../logic-higher-order/">second-order and higher-order logic</a>.)</p> <p> Let \(A\) be any set of sentences in a first-order language \(\LKe\), where \(K\) includes terminology for arithmetic, and assume that every member of \(A\) is true of the natural numbers. We can even let \(A\) be the set of all sentences in \(\LKe\) that are true of the natural numbers. Then \(A\) has uncountable models, indeed models of any infinite cardinality. Such interpretations are among those that are sometimes called <em>unintended</em>, or <em>non-standard</em>, models of arithmetic. Let \(B\) be any set of first-order sentences that are true of the real numbers, and let \(C\) be any first-order axiomatization of set theory. If \(B\) and \(C\) are satisfiable (in infinite interpretations), then each of them has denumerably infinite models. That is, any first-order, satisfiable set theory or theory of the real numbers has (unintended) models the size of the natural numbers. This is despite the fact that a sentence (seemingly) stating that the universe is uncountable is provable in most set theories. This situation, known as the <em>Skolem paradox</em>, has generated much discussion, but we must refer the reader elsewhere for a sample of it (see the entry on <a href="../paradox-skolem/">Skolem’s paradox</a> and Shapiro 1996).</p> <h2 id="OneRighLogi">6. The One Right Logic?</h2> <p> Surely, logic has something to do with correct reasoning, or at least correct deductive reasoning. The details of the connection are subtle, and controversial – see Harman [1984] for an influential study. It is common to say that someone has reasoned poorly if they have not reasoned logically, or that a given (deductive) argument is bad, and must be retracted, if it is shown to be invalid.
</p> <p> Some philosophers and logicians have maintained that there is a single logical system that is uniquely correct, in its role of characterizing validity. Among those, some, perhaps most, favor classical, first-order logic as uniquely correct, as the One True Logic. See, for example, Quine [1986], Resnik [1996], Rumfitt [2015], Williamson [2017], and a host of others. </p> <p> That classical, first-order logic should be given this role is perhaps not surprising. Its rules are more or less intuitive, and it is simple for how strong it is. As we have seen in section 5, classical, first-order logic has interesting and important meta-theoretic properties, such as soundness and completeness, that have led to many important mathematical and logical studies.</p> <p> However, as noted, the main meta-theoretic properties of classical, first-order logic lead to expressive <em>limitations</em> of the formal languages and model-theoretic semantics. Key notions, such as finitude, countability, minimal closure, and natural number, cannot be expressed. </p> <p> Barwise [1985, 5] once remarked:</p> <blockquote> As logicians, we do our subject a disservice by convincing others that logic is first-order and then convincing them that almost none of the concepts of modern mathematics can really be captured in first-order logic. </blockquote> <p> And Wang [1974, 154]:</p> <blockquote> When we are interested in set theory or classical analysis, the Löwenheim-Skolem theorem is usually taken as a sort of defect... of the first-order logic... [W]hat is established [by these theorems] is not that first-order logic is the only possible logic but rather that it is the only possible logic when we in a sense deny reality to the concept of [the] uncountable... </blockquote> <p> Other criticisms of classical, first-order logic have also been lodged. There are issues with its ability to deal with certain paradoxes (see, for example, the entry on <a href="../russell-paradox/">Russell’s paradox</a>), its apparent overgeneration of beliefs (see the entry on <a href="../logic-normative/">the normative status of logic</a>), and some argue that it sanctions arguments that do not match the way we ordinarily take ourselves to reason (see, for example, the entry on <a href="../logic-relevance/">relevance logic</a>).</p> <p> There are two main options available to those who are critical of classical, first-order logic as the One True Logic. One is to propose some other logic as the One True Logic. Priest [2006a] describes the methodology one might use to settle on the One True Logic.</p> <p> The other main option is to simply deny that there is a single logic that qualifies as the One True Logic. One instance of this is a kind of <em>logical nihilism</em>, the thesis that there is no correct logic. Another is <em>logical pluralism</em>, the thesis that a variety of different logics all qualify as correct, or best, or even as the true logic, at least in various contexts.</p> <p> Of course, this is not the place to pursue this matter in detail. See Beall and Restall [2006] and Shapiro [2014] for examples of pluralism, and the entry on <a href="../logical-pluralism/">logical pluralism</a> for an overview of the terrain for both logical pluralism and logical nihilism.</p> <p> We close with brief sketches of some of the main alternatives to classical, first-order logic, providing references to other work and entries in this Encyclopedia.
See also the second half of Shapiro and Kouri Kissel [2022].</p> <h3 id="RivaClasFirsOrdeLogi">6.1 Rivals to classical, first-order logic</h3> <h4>Approximations</h4> <p> In recent years, some work has been done to “approximate” classical logic. The idea is to stay as close to classical logic as possible, in order to preserve some of its benefits, while removing some of its limitations – for example, by hewing more closely to intuitive inference, or by handling phenomena like vagueness and the paradoxes. </p> <p> For example, Barrio, Pailos and Szmuc [2020] show that we can approximate classical logic in something called the ST-hierarchy (ST for strict-tolerant, from Cobreros, Egré, Ripley and van Rooij [2012a,b]). This allows them to avoid certain classical problems at each level of the hierarchy, like some of the paradoxes, while at the same time maintaining many of the benefits of the strength of classical logic when considering the full hierarchy.</p> <p> Dave Ripley [2013] provides a multi-sequent calculus version of “classical logic” that he argues solves some of the paradoxes. Notably, he claims it solves at least the Sorites and Liar paradoxes (see the entries on the <a href="../sorites-paradox/">sorites paradox</a> and the <a href="../liar-paradox/">liar paradox</a>). The system conservatively extends classical logic. Ripley claims that this is what makes it classical. However, the system is not transitive, and does not have a Cut rule.</p> <p> There are, of course, some questions about whether these new logics are <em>really</em> classical, but it is informative work nonetheless.</p> <h4>Expansions</h4> <p> One way to extend classical, first-order logic is to add additional operators to the underlying formal language. <em>Modal logic</em> adds operators which designate necessity and possibility. So, we can say that a proposition is possibly true, or necessarily true, rather than just true.</p> <p> W. V. O. Quine [1953] once argued that it is not coherent for quantifiers to bind variables inside modal operators, but opinion on this matter has since changed considerably (see, for example, Barcan [1990]). There is now a thriving industry of developing modal logics to capture various kinds of modality and temporal operators. See the entry on <a href="../logic-modal/">modal logic</a>. </p> <p> All of the formal languages sketched above have only one sort of variable. These are sometimes called <em>first-order</em> variables. Each interpretation of the language has a domain, which is the range of these first-order variables. It is what the language is about, according to the given interpretation. <em>Second-order</em> variables range over properties, sets, classes, relations, or functions of the items in that domain. <em>Third-order</em> variables range over properties, classes, relations of whatever is in the range of the second-order variables. And it goes on from there.</p> <p> A formal language is called <em>second-order</em> if it has second-order variables and first-order variables, and no others; <em>third-order</em> if it has third-order, second-order, and first-order variables and no others; etc. A formal language is <em>higher-order</em> if it is at least second-order.</p> <p> As noted, it is not an exaggeration to say that classical, first-order logic is the paradigm of contemporary logical theory.
Most textbooks do not mention higher-order languages at all, and most of the rest give them scant treatment.</p> <p> A number of different deductive systems and model-theoretic semantics have been proposed for second- and higher-order languages. For the semantics, the main additional feature of the model-theory is to specify a range for the higher-order variables.</p> <p> In <em>Henkin semantics</em>, each interpretation specifies a specific range of the higher-order variables. For monadic second-order variables, each interpretation specifies a non-empty subset of the powerset of the domain; for two-place second-order variables, a non-empty set of ordered pairs of members of the domain, etc. The system has all of the meta-theoretic properties of first-order logic noted above: there is a deductive system that is sound and complete for Henkin semantics; the logic is compact; and the downward and upward Löwenheim-Skolem theorems all hold.</p> <p> In so-called <em>standard semantics</em>, sometimes called <em>full semantics</em>, monadic second-order variables range over the entire powerset of the domain; two-place second-order variables range over the entire class of ordered pairs of members of the domain, etc. It can be shown that second-order languages, with standard semantics, can characterize many mathematical notions and structures, up to isomorphism. Examples include the notions of finitude, countability, well-foundedness, minimal closure, and structures like the natural numbers, the real numbers, and the complex numbers. As a result, none of the limitative theorems of classical, first-order logic hold: there is no effective deductive system that is both sound and complete, the logic is not compact, and both Löwenheim-Skolem theorems fail. Some, such as Quine [1986], argue that second-order logic, with standard semantics, is not really logic, but is a form of mathematics, set theory in particular. For more on this, see Shapiro [1991] and the entry on <a href="../logic-higher-order/">higher-order logic</a>, along with the many references cited there.</p> <p> One might also consider generalized quantifiers as an expansion of classical first-order logic (see the entry on <a href="../generalized-quantifiers/">generalized quantifiers</a>). These quantifiers allow for an expansion beyond the classical “all” and “some”, and can accommodate quantifiers like “most”, “less than half”, “usually”, etc. They are useful from both a logical and a linguistic perspective. For example, Kennedy and Väänänen [2021] use generalized quantifiers to argue that “uncountable” is a logical notion.</p> <h3 id="SublClasFirsOrdeLogi">6.2 Sublogics of classical, first-order logic</h3> <p> Some philosophers and logicians argue that classical, first-order logic is too strong: it declares that some argument-forms are valid which are not. Here we sketch two kinds of proposals.</p> <h4>Intuitionistic logic</h4> <p> Advocates of <em>intuitionistic logic</em> reject the validity of the (so-called) Law of Excluded Middle:</p> \[ \Phi \vee \neg \Phi, \] <p> and other inferences related to this, such as Double Negation Elimination (DNE):</p> \[ {\rm If}\ \Gamma \vdash \neg\neg\Phi \ {\rm then}\ \Gamma \vdash \Phi \] <p> Roughly speaking, there are two main motivations for these restrictions. The traditional intuitionists L. E. J. Brouwer (e.g., [1964a], [1964b]) and Arend Heyting (e.g., [1956]) held that the essence of mathematics is idealized mental construction. Consider, for example, the proposition that for every natural number \(n\), there is a prime number \(m \gt n\) such that \(m \lt n!+2\). For Brouwer, this proposition invokes a <em>procedure</em> that, given any natural number \(n\), produces a prime number \(m\) that is greater than \(n\) but less than \(n!+2\). The proposition expresses the existence of such a procedure.</p>
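<p> Such a procedure can even be exhibited. The Python sketch below (only an illustration; Brouwer, of course, had idealized mental construction in mind, not computer programs) rests on the classical observation that every prime factor of \(n!+1\) is greater than \(n\), since each prime \(p \le n\) divides \(n!\), and is at most \(n!+1 \lt n!+2\):</p> <pre>
# A sketch of one procedure answering to Brouwer's reading (illustrative):
# every prime factor of n! + 1 exceeds n, since each prime up to n
# divides n!; and it is at most n! + 1, hence less than n! + 2.

from math import factorial, isqrt

def prime_between(n):
    """Produce a prime m greater than n and less than n! + 2."""
    N = factorial(n) + 1
    for p in range(2, isqrt(N) + 1):
        if N % p == 0:
            return p          # the smallest prime factor of N
    return N                  # no proper factor: N itself is prime

for n in range(1, 8):
    m = prime_between(n)
    assert m > n and factorial(n) + 2 > m
</pre> <p> On the intuitionistic reading, it is the possession of some such procedure, and not the bare truth of a classical existence claim, that the proposition reports.</p>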
<p> One might also consider generalized quantifiers as an expansion of classical, first-order logic (see the entry on <a href="../generalized-quantifiers/">generalized quantifiers</a>). These allow for an expansion beyond the classical “all” and “some”, and can accommodate quantifiers like “most”, “less than half”, “usually”, etc. They are useful from both a logical and a linguistic perspective. For example, Kennedy and Väänänen [2021] use generalized quantifiers to argue that “uncountable” is a logical notion.</p> <h3 id="SublClasFirsOrdeLogi">6.2 Sublogics of classical, first-order logic</h3> <p> Some philosophers and logicians argue that classical, first-order logic is too strong: it declares valid some argument-forms that are not. Here we sketch two kinds of proposals.</p> <h4>Intuitionistic logic</h4> <p> Advocates of <em>intuitionistic logic</em> reject the validity of the (so-called) Law of Excluded Middle:</p> \[ \Phi \vee \neg \Phi, \] <p> and other inferences related to this, such as Double Negation Elimination (DNE):</p> \[ {\rm If}\ \Gamma \vdash \neg\neg\Phi \ {\rm then}\ \Gamma \vdash \Phi \] <p> Roughly speaking, there are two main motivations for these restrictions. The traditional intuitionists L. E. J. Brouwer (e.g., [1964a], [1964b]) and Arend Heyting (e.g., [1956]) held that the essence of mathematics is idealized mental construction. Consider, for example, the proposition that for every natural number \(n\), there is a prime number \(m \gt n\) such that \(m \lt n!+2\). For Brouwer, this proposition invokes a <em>procedure</em> that, given any natural number \(n\), produces a prime number \(m\) that is greater than \(n\) but less than \(n!+2\). The proposition expresses the existence of such a procedure. Given this orientation, we have no reason to hold that, for any mathematical proposition \(\Phi\), we can carry out either the procedure associated with \(\Phi\) or the procedure associated with \(\neg \Phi\); so we have no reason to affirm every instance of excluded middle.</p> <p> Michael Dummett (e.g., [1978]) argues, from general considerations concerning how language functions as a vehicle of communication, that intuitionistic logic is uniquely correct, the One True Logic, and not just for mathematics.</p> <p> For an overview of intuitionistic logic, and its philosophical motivation, see the entry on <a href="../logic-intuitionistic/">intuitionistic logic</a>.</p> <h4>Relevance and paraconsistency</h4> <p> This time the target inference, to be declared invalid, is the one called <em>ex falso quodlibet</em> above, abbreviated (EFQ): \[ {\rm If} \ \Gamma_1 \vdash \Theta \ {\rm and} \ \Gamma_2 \vdash \neg\Theta \ {\rm then} \ \Gamma_1, \Gamma_2 \vdash \Psi \] We can focus attention on one kind of instance of this: \[ \Phi, \neg\Phi \vdash \Psi, \] sometimes colorfully called “explosion”. It says that anything at all follows from a contradiction.</p> <p> Logics that regard (EFQ) as invalid are called <em>paraconsistent</em>. Broadly speaking, there are two camps of logicians advocating paraconsistent systems, either as candidates for the One True Logic or as instances of pluralism. One camp consists of logicians who insist that in a valid argument, the premises must be <em>relevant</em> to the conclusion. Typically, relevance logicians also demur from certain classical logical truths called <em>paradoxes of material implication</em>, such as \((\Phi \rightarrow (\Psi \rightarrow \Phi))\) and \((\Phi \rightarrow (\Psi \rightarrow \Psi))\). </p> <p> For more, see the entry on <a href="../logic-relevance/">relevance logic</a>, or Kerr [2019]. Classic works include Anderson and Belnap [1975], Anderson, Belnap, and Dunn [1992], and Read [1988]. Neil Tennant’s [2017] core logic is both relevant and intuitionistic.</p>
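<p> It may help to see how explosion can be derived in classical systems. A standard reconstruction (often called the Lewis argument; it is a common one from the literature, not given earlier in this entry) runs: from \(\Phi\), infer \(\Phi \vee \Psi\), by disjunction introduction; from \(\Phi \vee \Psi\) and \(\neg\Phi\), infer \(\Psi\), by disjunctive syllogism:</p> \[ \Phi \vdash \Phi \vee \Psi \qquad \Phi \vee \Psi, \neg\Phi \vdash \Psi \] <p> Chaining the two steps yields \(\Phi, \neg\Phi \vdash \Psi\). Relevance logicians typically block this derivation by rejecting disjunctive syllogism.</p>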
<p> The other main camp of logicians who prefer a paraconsistent logic (or paraconsistent logics) consists of advocates of <em>dialetheism</em>, the view that some contradictions, some sentences of the form \[ (\Phi \wedge \neg \Phi), \] are true. One supposed example is when \(\Phi\) is a statement of a semantic paradox, such as the Liar. Consider, for example, a sentence \(\Phi\) that says that \(\Phi\) is not true.</p> <p> In a system in which (EFQ) holds, any true contradiction would entail every sentence of the formal language, thus rendering the language and theory trivial. So, clearly, any logic for dialetheism would have to be paraconsistent. See the entry on <a href="../dialetheism/">dialetheism</a>. The classic work here is Priest [2006a].</p> <p> Of course, the small sample presented here does not include every logical system proposed as a rival to classical, first-order logic, again either as a candidate for the One True Logic, or as a further instance of logical pluralism. See, for example, the entries on <a href="../logic-substructural/">substructural logics</a>, <a href="../logic-fuzzy/">fuzzy logic</a>, and many others.</p> </div> <div id="bibliography"> <h2 id="Bib">Bibliography</h2> <ul class="hanging"> <li>Anderson, Alan and Nuel Belnap, 1975, <em>Entailment: The logic of relevance and necessity I</em>, Princeton: Princeton University Press.</li> <li>Anderson, Alan, Nuel Belnap, and J. Michael Dunn, 1992, <em>Entailment: The logic of relevance and necessity II</em>, Princeton: Princeton University Press.</li> <li>Barcan Marcus, Ruth, 1990, “A Backwards Look at Quine’s Animadversions on Modalities”, in R. Barrett and R. Gibson (eds.), <em>Perspectives on Quine</em>, Cambridge: Blackwell.</li> <li>Barrio, Eduardo Alejandro, Federico Pailos, and Damian Szmuc, 2020, “A Hierarchy of Classical and Paraconsistent Logics”, <em>Journal of Philosophical Logic</em>, 49: 93–120. doi:10.1007/s10992-019-09513-z</li> <li>Barwise, Jon, 1985, “Model-theoretic logics: background and aims”, in <em>Model-theoretic logics</em>, Jon Barwise and Solomon Feferman (eds.), New York: Springer-Verlag, pp. 3–23.</li> <li>Beall, Jc and Greg Restall, 2006, <em>Logical Pluralism</em>, Oxford: Oxford University Press.</li> <li>Brouwer, L.E.J., 1964a, “Consciousness, Philosophy and Mathematics”, in <em>Philosophy of mathematics: selected readings</em>, P. Benacerraf and H. Putnam (eds.), Englewood Cliffs, NJ: Prentice-Hall.</li> <li>–––, 1964b, “Intuitionism and Formalism”, in <em>Philosophy of mathematics: selected readings</em>, P. Benacerraf and H. Putnam (eds.), Englewood Cliffs, NJ: Prentice-Hall, 77–89.</li> <li>Cobreros, Pablo, Paul Egré, David Ripley, and Robert van Rooij, 2012a, “Tolerance and Mixed Consequence in the S’valuationist Setting”, <em>Studia Logica</em>, 100(4): 855–877.</li> <li>–––, 2012b, “Tolerant, classical, strict”, <em>Journal of Philosophical Logic</em>, 41(2): 347–385.</li> <li>Cook, Roy, 2002, “Vagueness and mathematical precision”, <em>Mind</em>, 111: 227–247.</li> <li>Corcoran, John, 1973, “Gaps between logical theory and mathematical practice”, in <em>The methodological unity of science</em>, M. Bunge (ed.), Dordrecht: D. Reidel, 23–50.</li> <li>Davidson, Donald, 1984, <em>Inquiries into truth and interpretation</em>, Oxford: Clarendon Press.</li> <li>Dummett, Michael, 1978, “The philosophical basis of intuitionistic logic”, in <em>Truth and other enigmas</em>, Cambridge, MA: Harvard University Press, 215–247.</li> <li>–––, 2000, <em>Elements of intuitionism</em>, second edition, Oxford: Oxford University Press.</li> <li>Gödel, Kurt, 1930, “Die Vollständigkeit der Axiome des logischen Funktionenkalküls”, <em>Monatshefte für Mathematik und Physik</em>, 37: 349–360; translated as “The completeness of the axioms of the functional calculus of logic”, in van Heijenoort 1967, 582–591.</li> <li>Harman, Gilbert, 1984, “Logic and reasoning”, <em>Synthese</em>, 60: 107–127.</li> <li>Heyting, A., 1956, <em>Intuitionism</em>, Amsterdam: North-Holland Publishing.</li> <li>Kennedy, Juliette and Jouko Väänänen, 2021, “Logicality and Model Classes”, <em>Bulletin of Symbolic Logic</em>, 27(4): 385–414.</li> <li>Kerr, Alison Duncan, 2019, “A plea for KR”, <em>Synthese</em>, 198(4): 3047–3071.</li> <li>Lycan, William, 1984, <em>Logical form in natural language</em>, Cambridge, MA: The MIT Press.</li> <li>Montague, Richard, 1974, <em>Formal philosophy</em>, R. Thomason (ed.), New Haven: Yale University Press.</li>
<li>Priest, Graham, 2006a, <em>In contradiction, a study of the transconsistent</em>, second, revised edition, Oxford: Clarendon Press.</li> <li>–––, 2006b, <em>Doubt truth to be a liar</em>, Oxford: Clarendon Press.</li> <li>Quine, W. V. O., 1953, “Three grades of modal involvement”, <em>Proceedings of the XI<sup>th</sup> International Congress of Philosophy</em>, 14, Amsterdam: North-Holland Publishing Company, 65–81.</li> <li>–––, 1960, <em>Word and object</em>, Cambridge, MA: The MIT Press.</li> <li>–––, 1986, <em>Philosophy of logic</em>, second edition, Cambridge, MA: Harvard University Press.</li> <li>Read, Stephen, 1988, <em>Relevant logic</em>, Oxford: Oxford University Press.</li> <li>Resnik, Michael, 1996, “Ought there to be but one true logic”, in <em>Logic and Reality: Essays on the Legacy of Arthur Prior</em>, J. Copeland (ed.), Oxford: Oxford University Press, 489–517.</li> <li>Ripley, David, 2013, “Paradoxes and Failures of Cut”, <em>Australasian Journal of Philosophy</em>, 91(1): 139–164.</li> <li>Rumfitt, Ian, 2015, <em>The Boundary Stones of Thought: An Essay in the Philosophy of Logic</em>, Oxford: Oxford University Press.</li> <li>Shapiro, Stewart, 1991, <em>Foundations without Foundationalism</em>, Oxford: Clarendon Press.</li> <li>–––, 1996, <em>The limits of logic: Second-order logic and the Skolem paradox</em> (The International Research Library of Philosophy), Dartmouth Publishing Company. (An anthology containing many of the significant later papers on the Skolem paradox.)</li> <li>–––, 1998, “Logical consequence: models and modality”, in <em>The philosophy of mathematics today</em>, M. Schirn (ed.), Oxford: Oxford University Press, 131–156.</li> <li>–––, 2014, <em>Varieties of Logic</em>, Oxford: Oxford University Press.</li> <li>Shapiro, Stewart and Teresa Kouri Kissel, 2022, <em>Classical First-Order Logic</em> (Cambridge Elements), Cambridge: Cambridge University Press.</li> <li>Tennant, Neil, 1997, <em>The taming of the true</em>, Oxford: Clarendon Press.</li> <li>–––, 2017, <em>Core Logic</em>, Oxford: Oxford University Press.</li> <li>Van Heijenoort, Jean, 1967, <em>From Frege to Gödel</em>, Cambridge, MA: Harvard University Press. (An anthology containing many of the major historical papers on mathematical logic in the early decades of the twentieth century.)</li> <li>Wang, Hao, 1974, <em>From Mathematics to Philosophy</em>, London: Routledge and Kegan Paul.</li> <li>Williamson, Timothy, 2017, “Semantic paradoxes and abductive methodology”, in <em>Reflections on the liar</em>, Bradley Armour-Garb (ed.), Oxford: Oxford University Press, 325–346.</li> </ul> <h3 id="FurtRead">Further Reading</h3> <p> There are many fine textbooks on mathematical logic. A sample follows.</p> <ul class="hanging"> <li>Boolos, George S., John P. Burgess, and Richard C. Jeffrey, 2007, <em>Computability and logic</em>, fifth edition, Cambridge, England: Cambridge University Press. Elementary and intermediate level.</li> <li>Bergmann, Merrie, James Moor, and Jack Nelson, 2013, <em>The logic book</em>, sixth edition, New York: McGraw-Hill. Elementary and intermediate level.</li> <li>Church, Alonzo, 1956, <em>Introduction to mathematical logic</em>, Princeton: Princeton University Press. Classic textbook.</li> <li>Enderton, Herbert, 1972, <em>A mathematical introduction to logic</em>, New York: Academic Press. 
Textbook in mathematical logic, aimed at a mathematical audience.</li> <li>Forbes, Graeme, 1994, <em>Modern Logic</em>, Oxford: Oxford University Press. Elementary textbook.</li> <li>Mendelson, Elliott, 1987, <em>Introduction to mathematical logic</em>, third edition, Princeton: van Nostrand. Intermediate.</li> </ul> </div> <div id="academic-tools"> <h2 id="Aca">Academic Tools</h2> <blockquote> <table class="vert-top"> <tr> <td><img src="../../symbols/sepman-icon.jpg" alt="sep man icon" /></td> <td><a href="https://plato.stanford.edu/cgi-bin/encyclopedia/archinfo.cgi?entry=logic-classical" target="other">How to cite this entry</a>.</td> </tr> <tr> <td><img src="../../symbols/sepman-icon.jpg" alt="sep man icon" /></td> <td><a href="https://leibniz.stanford.edu/friends/preview/logic-classical/" target="other">Preview the PDF version of this entry</a> at the <a href="https://leibniz.stanford.edu/friends/" target="other">Friends of the SEP Society</a>.</td> </tr> <tr> <td><img src="../../symbols/inpho.png" alt="inpho icon" /></td> <td><a href="https://www.inphoproject.org/entity?sep=logic-classical&redirect=True" target="other">Look up topics and thinkers related to this entry</a> at the Internet Philosophy Ontology Project (InPhO).</td> </tr> <tr> <td><img src="../../symbols/pp.gif" alt="phil papers icon" /></td> <td><a href="https://philpapers.org/sep/logic-classical/" target="other">Enhanced bibliography for this entry</a> at <a href="https://philpapers.org/" target="other">PhilPapers</a>, with links to its database.</td> </tr> </table> </blockquote> </div> <div id="other-internet-resources"> <h2 id="Oth">Other Internet Resources</h2> <p> [Please contact the author with suggestions.]</p> </div> <div id="related-entries"> <h2 id="Rel">Related Entries</h2> <p> <a href="../logic-free/">logic: free</a> | <a href="../logic-infinitary/">logic: infinitary</a> | <a href="../logic-intuitionistic/">logic: intuitionistic</a> | <a href="../logic-linear/">logic: linear</a> | <a href="../logic-modal/">logic: modal</a> | <a href="../logic-paraconsistent/">logic: paraconsistent</a> | <a href="../logic-relevance/">logic: relevance</a> | <a href="../logic-higher-order/">logic: second-order and higher-order</a> | <a href="../logic-substructural/">logic: substructural</a> | <a href="../logic-temporal/">logic: temporal</a> | <a href="../logical-consequence/">logical consequence</a> | <a href="../logical-form/">logical form</a> | <a href="../logical-truth/">logical truth</a> | <a href="../model-theory/">model theory</a> | <a href="../modeltheory-fo/">model theory: first-order</a> | <a href="../paradox-skolem/">paradox: Skolem’s</a> | <a href="../proof-theory-development/">proof theory: development of</a> </p> </div> <script type="text/javascript" src="local.js"></script> <script type="text/javascript" src="../../MathJax/MathJax.js?config=TeX-MML-AM_CHTML"></script> </div><!-- #aueditable --><!--DO NOT MODIFY THIS LINE AND BELOW--> <!-- END ARTICLE HTML --> </div> <!-- End article-content --> <div id="article-copyright"> <p> <a href="../../info.html#c">Copyright © 2022</a> by <br /> <a href="https://philosophy.osu.edu/people/shapiro.4/" target="other">Stewart Shapiro</a> <<a href="mailto:shapiro%2e4%40osu%2eedu"><em>shapiro<abbr title=" dot ">.</abbr>4<abbr title=" at ">@</abbr>osu<abbr title=" dot ">.</abbr>edu</em></a>><br /> <a href="https://sites.google.com/site/teresakouri/" target="other">Teresa Kouri Kissel</a> <<a href="mailto:tkouri%40odu%2eedu"><em>tkouri<abbr title=" at ">@</abbr>odu<abbr title=" dot 
">.</abbr>edu</em></a>> </p> </div> </div> <!-- End article --> <!-- NOTE: article banner is outside of the id="article" div. --> <div id="article-banner" class="scroll-block"> <div id="article-banner-content"> <a href="../../fundraising/"> Open access to the SEP is made possible by a world-wide funding initiative.<br /> The Encyclopedia Now Needs Your Support<br /> Please Read How You Can Help Keep the Encyclopedia Free</a> </div> </div> <!-- End article-banner --> </div> <!-- End content --> <div id="footer"> <div id="footer-menu"> <div class="menu-block"> <h4><i class="icon-book"></i> Browse</h4> <ul role="menu"> <li role="menuitem"><a href="../../contents.html">Table of Contents</a></li> <li role="menuitem"><a href="../../new.html">What's New</a></li> <li role="menuitem"><a href="https://plato.stanford.edu/cgi-bin/encyclopedia/random">Random Entry</a></li> <li role="menuitem"><a href="../../published.html">Chronological</a></li> <li role="menuitem"><a href="../../archives/">Archives</a></li> </ul> </div> <div class="menu-block"> <h4><i class="icon-info-sign"></i> About</h4> <ul role="menu"> <li role="menuitem"><a href="../../info.html">Editorial Information</a></li> <li role="menuitem"><a href="../../about.html">About the SEP</a></li> <li role="menuitem"><a href="../../board.html">Editorial Board</a></li> <li role="menuitem"><a href="../../cite.html">How to Cite the SEP</a></li> <li role="menuitem"><a href="../../special-characters.html">Special Characters</a></li> <li role="menuitem"><a href="../../tools/">Advanced Tools</a></li> <li role="menuitem"><a href="../../accessibility.html">Accessibility</a></li> <li role="menuitem"><a href="../../contact.html">Contact</a></li> </ul> </div> <div class="menu-block"> <h4><i class="icon-leaf"></i> Support SEP</h4> <ul role="menu"> <li role="menuitem"><a href="../../support/">Support the SEP</a></li> <li role="menuitem"><a href="../../support/friends.html">PDFs for SEP Friends</a></li> <li role="menuitem"><a href="../../support/donate.html">Make a Donation</a></li> <li role="menuitem"><a href="../../support/sepia.html">SEPIA for Libraries</a></li> </ul> </div> </div> <!-- End footer menu --> <div id="mirrors"> <div id="mirror-info"> <h4><i class="icon-globe"></i> Mirror Sites</h4> <p>View this site from another server:</p> </div> <div class="btn-group open"> <a class="btn dropdown-toggle" data-toggle="dropdown" href="https://plato.stanford.edu/"> <span class="flag flag-usa"></span> USA (Main Site) <span class="caret"></span> <span class="mirror-source">Philosophy, Stanford University</span> </a> <ul class="dropdown-menu"> <li><a href="../../mirrors.html">Info about mirror sites</a></li> </ul> </div> </div> <!-- End mirrors --> <div id="site-credits"> <p>The Stanford Encyclopedia of Philosophy is <a href="../../info.html#c">copyright © 2024</a> by <a href="https://mally.stanford.edu/">The Metaphysics Research Lab</a>, Department of Philosophy, Stanford University</p> <p>Library of Congress Catalog Data: ISSN 1095-5054</p> </div> <!-- End site credits --> </div> <!-- End footer --> </div> <!-- End container --> <!-- NOTE: Script required for drop-down button to work (mirrors). --> <script> $('.dropdown-toggle').dropdown(); </script> </body> </html>