<!-- CINXE.COM -->
<!doctype html> <html lang="en"> <head> <link rel="preload" as="style" href="/allStyles?hash=a6a9515db1ba16193f34056f0c011fc99b1ce185f6735396cfe30bb36e20ad83&theme=%7B%22name%22%3A%22default%22%7D" /><link rel="preload" as="style" href="/allStyles?hash=1fd42b42bd8d553bd58b8c654a6043b754b1cd4f5545af624cf81556fee7bd22&theme=%7B%22name%22%3A%22dark%22%7D" /><link rel="stylesheet" type="text/css" href="https://fonts.googleapis.com/icon?family=Material+Icons"><link rel="stylesheet" type="text/css" href="https://cdn.jsdelivr.net/npm/instantsearch.css@7.0.0/themes/reset-min.css"><link rel="stylesheet" type="text/css" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500"><link rel="stylesheet" type="text/css" href="https://use.typekit.net/jvr1gjm.css"><link rel="stylesheet" type="text/css" href="https://use.typekit.net/tqv5rhd.css"><script>window.publicInstanceSettings = {"forumType":"EAForum","hasEvents":true,"title":"Effective Altruism Forum","tagline":"Effective altruism research, discussion and community updates","faviconUrl":"https://images.ctfassets.net/ohf186sfn6di/7J4cBC9SXCWMoqqCIqI0GI/affe205261bb8cff47501a0ada0f2268/ea-logo-square-1200x1200__1_.png?h=50","bingValidation":"956934807730F7902A92E4E5CF395944","forumSettings":{"tabTitle":"EA Forum","tabLongTitle":"Effective Altruism Forum","headerTitle":"Effective Altruism Forum","shortForumTitle":"EA Forum"},"siteNameWithArticle":"the EA 
Forum","taggingName":"topic","siteUrl":"https://forum.effectivealtruism.org","sentry":{"url":"https://3f843b521b2f4775bff13b82008b2f79@o241118.ingest.sentry.io/1413822","environment":"production","release":"2.1.0"},"aboutPostId":"ht2dScQTpeBXB6uMb","contactPostId":"jpqJKZm9JXgMTwSfg","introPostId":"wenu9kmeqdNfzKdFa","eaHomeSequenceId":"MWzftEpkb4Tpijjbu","eaHomeSequenceFirstPostId":"QFo7g3DFJHxYhqZue","eaHandbookPostId":"cN9Zu7dowefAfmNnH","debug":false,"testServer":false,"analytics":{"environment":"production"},"disallowCrawlers":false,"expectedDatabaseId":"de42c25fb2aa7e6db7a9298df0588d71","fmCrosspost":{"siteName":"LessWrong","baseUrl":"https://www.lesswrong.com/"},"allowTypeIIIPlayer":true,"botSite":{"redirectEnabled":true},"pg":{"idleTimeoutMs":60000},"verifyEmails":false,"hasCuratedPosts":true,"performanceMetricLogging":{"enabled":false},"twitterBot":{"enabled":false}}</script><link rel="shortcut icon" href="https://images.ctfassets.net/ohf186sfn6di/7J4cBC9SXCWMoqqCIqI0GI/affe205261bb8cff47501a0ada0f2268/ea-logo-square-1200x1200__1_.png?h=50"/><script>window.publicSettings = 
{"post":{"requestFeedbackKarmaLevel":200},"auth0":{"domain":"login.effectivealtruism.org","clientId":"XB2eN03HX6mJjOWYBAsw4o3hRPnhfo5q","connection":"Username-Password-Authentication"},"forum":{"postInterval":20,"maxPostsPerDay":10},"ipapi":{"apiKey":"IhJaJy9u2XHvm1WGLvYnhDZlS7h43iuaIPJbfUbLTIm5pL7MuR"},"type3":{"cutoffDate":"2023-05-01","karmaCutoff":100,"explicitlyAllowedPostIds":["m65R6pAAvd99BNEZL","Dtr8aHqCQSDhyueFZ","nzB7sphAgJDQGaLrG","6dsrwxHtCgYfJNptp","XCwNigouP88qhhei2","znFAeeKk566bCNMNE","bsE5t6qhGC65fEpzN","FpjQMYQmS3rWewZ83","jk7A3NMdbxp65kcJJ","omoZDu8ScNbot6kXS","hkimyETEo76hJ6NpW","pMsnCieusmYqGW26W","GsjmufaebreiaivF7","LpkXtFXdsRd4rG8Kb","KKzMMPpyv8NyYsJwG","mfAbsrd2ZahmwHq2G","qFfs5zXFGJaoXzb92","zu28unKfTHoxRWpGn","CfcvPBY9hdsenMHCr","JJuEKwRm3oDC3qce7","NFGEgEaLbtyrZ9dX3","pxALB46SEkwNbfiNS","CmGPp5p9RvTLuuzbt","QZy5gJ6JaxGtH7FQq","RQCTw3C59o4XsHvZ4","zdAst6ezi45cChRi6","oRx3LeqFdxN2JTANJ","KfqFLDkoccf8NQsQe","SatDeTkLtHiMrtDjc","i9RJjun327SnT3vW8","P52eSwfmwaN2uwrcM","euBJ4rgfhZBkmBDRT","M2gBGYWEQDnrPt6nb","XHZJ9i7QBtAJZ6byW","sqMgzYpvrdA6Dimfi","u8eif2FkHiaYiAdfH","cZCdfR2nxXQgrzESQ","8RcFQPiza2rvicNqw","2pNAPEQ8av3dQyXBX","yisrgRsi4v3uyhujw","jYT6c8ByLfDpYtwE9","4kqiHGrZh6Rj7EmEW","uLxjjdq6s94X5Yyoc","on34kaRXfQXMFvE6N","ATpxEPwCQWQAFf4XX","pseF3ZmY7uhLtdwss","wicAtfihz2JmPRgez","eyDDjYrG3i3PRGxtc","jSPGFxLmzJTYSZTK3","mCtZF5tbCYW2pRjhi","bDaQsDntmSZPgiSbd","2WS3i7eY4CdLH99eg","2iAwiBQm535ZSYSmo","EbvJRAvwtKAMBn2td","sLcQ4zdAnKZuMPp5u","6fzEkiiSjGn46aMWZ","hRJueS96CMLajeF57","apKTPEcRm6jSFaMya","HX9ZDGwwSxAab46N9","Bd7K4XCg4BGEaSetp","CkikpvdkkLLJHhLXL"]},"locale":"en","mapbox":{"apiKey":"pk.eyJ1IjoiY2VudHJlZm9yZWZmZWN0aXZlYWx0cnVpc20iLCJhIjoiY2txdWI4a3hqMDJ1cTJvcGJxdjhyNHBnbyJ9.MffE4UUmOgSecrNt60DSCw"},"petrov":{"afterTime":1635239887906,"beforeTime":1631226712000},"algolia":{"appId":"J261XPY4TF","searchKey":"a18008476db83aaca9b51b6444d80d18","indexPrefix":"test_","autoSyncIndexes":false},"botSite":{"url":"https://forum-bots.effectivealtru
ism.org","userAgentRegexes":{".*":[".*scalaj-http/.*",".*python-requests/.*",".*python-httpx/.*",".*okhttp/.*",".*axios/.*",".*PostmanRuntime/.*",".*WordPress/.*;.*",".*Go-http-client/.*",".*scrapy/.*",".*Twitterbot/.*"],"/all(p|P)osts\\?.*":[".*Mozilla/5.0.*AppleWebKit/537.36.*bingbot/.*",".*Mozilla/5.0.*YandexRenderResourcesBot/.*",".*Mozilla/5.0.*YandexBot/.*",".*Mozilla/5.0.*SemrushBot.*",".*Mozilla/5.0.*Googlebot/.*",".*Mozilla/5.0.*MegaIndex.ru/.*"]}},"datadog":{"rumSampleRate":50,"tracingSampleRate":100,"sessionReplaySampleRate":0},"logoUrl":"https://images.ctfassets.net/ohf186sfn6di/7J4cBC9SXCWMoqqCIqI0GI/affe205261bb8cff47501a0ada0f2268/ea-logo-square-1200x1200__1_.png?h=100","amaTagId":"nj9svkXCASvFayRrR","ckEditor":{"uploadUrl":"https://39669.cke-cs.com/easyimage/upload/","webSocketUrl":"39669.cke-cs.com/ws"},"debuggers":["twitter-bot"],"mailchimp":{"eaForumListId":"563e6dbcfa","forumDigestListId":"7457c7ff3e"},"reCaptcha":{"apiKey":"6LdJj6QUAAAAAG6U6e_nhCnpY06M3og1tYuKhW5U"},"siteImage":"https://res.cloudinary.com/cea/image/upload/v1582740871/EA_Forum_OG_Image.png","cloudinary":{"cloudName":"cea","uploadPresetBanner":"dg6sakas","uploadPresetDigest":"kwiphued","uploadPresetProfile":"ckffb3g5","uploadPresetGridImage":"omqmhwsk","uploadPresetSpotlight":"dg6sakas","uploadPresetEventImage":"r8g0ckcq","uploadPresetSocialPreview":"xgsjqx55"},"googleMaps":{"apiKey":"AIzaSyB0udA9kJ6zx86V_PE1MQEj7nf6iypL6uU"},"moderation":{"reasonsForInitialReview":["mapLocation","firstPost","firstComment","contactedTooManyUsers","bio","profileImage"]},"adminAccount":{"email":"forum@effectivealtruism.org","username":"AdminTeam"},"annualReview":{"end":"2022-02-01","start":"2021-12-01","reviewPhaseEnd":"2022-01-15","nominationPhaseEnd":"2021-12-14","announcementPostPath":"/posts/jB7Ten8qmDszRMTho/forum-review-the-best-of-ea-2011-2020"},"showSmallpox":false,"batchHttpLink":{"batchMax":10},"intercomAppId":"xycbzvda","showEAGBanner":false,"elicitSourceId":"4M2468qIv","isProductionDB":t
rue,"commentInterval":8,"elicitSourceURL":"https://forum.effectivealtruism.org","eventBannerLink":"/posts/iGvRmX9L7rsYTHedR/world-malaria-day-reflecting-on-past-victories-and","googleDocImport":{"enabled":true},"moderationEmail":"forum@effectivealtruism.org","openThreadTagId":"eTLv8KzwBGcDip9Wi","showEventBanner":false,"startHerePostId":"BsnGqnLzrLdmsYTGt","timeDecayFactor":0.8,"googleTagManager":{"apiKey":"GTM-5VK8D73"},"hasCookieConsent":true,"hasProminentLogo":true,"subforumTagSlugs":["software-engineering","bioethics","effective-giving","forecasting-and-estimation"],"frontpageAlgorithm":{"cacheDisabled":true,"daysAgoCutoff":21,"decayFactorSlowest":0.5},"legacyRouteAcronym":"ea","showHandbookBanner":false,"subforumCommentBonus":{"base":20,"duration":48,"exponent":0.3,"magnitude":100},"subforumIntroMessage":"\n<h2>What is a subforum?<\/h2>\n<p>Subforums are spaces for discussion, questions, and more\ndetailed posts about particular topics. Full posts in this\nspace may also appear on the Frontpage, and posts from other\nparts of the EA Forum may appear here if relevant tags are applied.\nDiscussions in this space will never appear elsewhere.<\/p>","defaultVisibilityTags":[{"tagId":"ZCihBFp5P64JCvQY6","tagName":"Community","filterMode":"Hidden"}],"eventBannerMobileImage":"Banner/world_malaria_day_mobile.png","linkpostUrlPlaceholder":"http://johnsalvatier.org/blog/2017/reality-has-a-surprising-amount-of-detail","maxDocumentsPerRequest":5000,"defaultSequenceBannerId":"Banner/yeldubyolqpl3vqqy0m6.jpg","eventBannerDesktopImage":"Banner/world_malaria_day_web.png","useExperimentalTagStyle":false,"newUserIconKarmaThreshold":50,"hideUnreviewedAuthorComments":"2023-02-08T17:00:00","commentRateLimitDownvoteRatio":0.3,"commentRateLimitKarmaThreshold":30}</script><script>window.tabId = null</script><script>window.isReturningVisitor = false</script><script async src="/js/bundle.js?hash=ba398ddf2bc0c2d19673ab3a0d9ed66aed80daeea9b7206e00380633f9dedf82"></script><title 
data-react-helmet="true">Objectives of longtermist policy making — EA Forum</title><meta data-react-helmet="true" http-equiv="Accept-CH" content="DPR, Viewport-Width, Width"/><meta data-react-helmet="true" property="og:title" content="Objectives of longtermist policy making — EA Forum"/><meta data-react-helmet="true" charSet="utf-8"/><meta data-react-helmet="true" name="description" content="Estimated reading time: 20-30 minutes • -We would like to thank the following for their excellent feedback and guidance throughout this article, in n…"/><meta data-react-helmet="true" name="viewport" content="width=device-width, initial-scale=1"/><meta data-react-helmet="true" name="twitter:image:src" content="https://lh3.googleusercontent.com/7clnMCxK9m8ZkWkReSGxs3SZLE4JBN_bfqwimeeqPGFagVamXY4sZA7byYDRDPx2-_Z0167i_SyRowKy6z3FbYC93dfteYXUJhQfbLlx8HjHSW-ggicqZWu0ZLenDX9P2-_PlRUF"/><meta data-react-helmet="true" name="twitter:description" content="Estimated reading time: 20-30 minutes • -We would like to thank the following for their excellent feedback and guidance throughout this article, in n…"/><meta data-react-helmet="true" property="og:type" content="article"/><meta data-react-helmet="true" property="og:url" content="https://forum.effectivealtruism.org/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1"/><meta data-react-helmet="true" property="og:image" content="https://lh3.googleusercontent.com/7clnMCxK9m8ZkWkReSGxs3SZLE4JBN_bfqwimeeqPGFagVamXY4sZA7byYDRDPx2-_Z0167i_SyRowKy6z3FbYC93dfteYXUJhQfbLlx8HjHSW-ggicqZWu0ZLenDX9P2-_PlRUF"/><meta data-react-helmet="true" property="og:description" content="Estimated reading time: 20-30 minutes • -We would like to thank the following for their excellent feedback and guidance throughout this article, in n…"/><meta data-react-helmet="true" http-equiv="delegate-ch" content="sec-ch-dpr https://res.cloudinary.com;"/><meta data-react-helmet="true" name="citation_title" content="Objectives of longtermist policy making"/><meta 
data-react-helmet="true" name="citation_author" content="Henrik Øberg Myhre"/><meta data-react-helmet="true" name="citation_author" content="Andreas_Massey"/><meta data-react-helmet="true" name="citation_author" content="Philip Hall Andersen"/><meta data-react-helmet="true" name="citation_author" content="Jakob"/><meta data-react-helmet="true" name="citation_author" content="Sanna Baug Warholm"/><link data-react-helmet="true" rel="stylesheet" href="https://fonts.googleapis.com/css?family=Merriweather:300,400,500,600,700&subset=all"/><link data-react-helmet="true" rel="stylesheet" href="https://fonts.googleapis.com/css?family=Inter:300,400,450,500,600,700"/><link data-react-helmet="true" rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500"/><link data-react-helmet="true" rel="canonical" href="https://forum.effectivealtruism.org/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1"/><link data-react-helmet="true" rel="alternate" type="application/rss+xml" href="https://forum.effectivealtruism.org/feed.xml"/><script data-react-helmet="true" type="application/ld+json">{"@context":"http://schema.org","@type":"DiscussionForumPosting","url":"https://forum.effectivealtruism.org/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1","text":"<p><i>Estimated reading time: 20-30 minutes<\/i><\/p><p><i>-We would like to thank the following for their excellent feedback and guidance throughout this article, in no particular order: Tyler M. 
John, Max Stauffer, Aksel Braanen Sterri, Eirik Mofoss, Samuel Hilton, Konrad Seifert, Tildy Stokes, Erik Aunvåg Matsen and Marcel Grewal Sommerfelt.<\/i><\/p><h1><strong>0.0 Introduction<\/strong><\/h1><p>This article is co-authored by five members of Effective Altruism Norway as a pilot project to test if we can contribute in a valuable way to the emerging field of longtermism and policy making.<\/p><p>In the article we summarize some of the work that is being done in the emerging field of longtermism, using a new structure to classify the different interventions (see Figure 1: Three objectives of longtermist policy making). Then, for each objective we describe related challenges and potential solutions, and give some examples of current ongoing work.<\/p><p>We hope that the new structure can help improve coordination in this emerging field, and enable improved prioritization of interventions. If this structure resonates well with established experts in the field, we are happy to write up a shorter version of this article that could serve as an introduction to longtermist policy making for non-experts. Already, at 17 pages this article is one fourth of the length of the <a href=\"https://globalprioritiesinstitute.org/wp-content/uploads/GPI-research-agenda-version-2.1.pdf\"><u>GPI research agenda<\/u><\/a>, which covers many of the same topics. <\/p><p>Finally, we have emphasized some aspects of longtermist policy making that we believe have been underemphasized in the effective altruism- and longtermism communities in the past. Examples include scenario planning, robust decision making and redteaming among others, which we have described together with forecasting in section 2.1 as essential epistemic capabilities for long-term governance. 
These tools are complementary to forecasting-based epistemic capabilities that the EA/longtermist communities already promote, and we hope that they will receive increased attention going forward.<\/p><p>We hope to produce 1-3 further articles on similar topics through 2021, and welcome any experts who have capacity to provide feedback on our work.<\/p><p>--------------------------------------------------------------------<\/p><p>In 2019 William MacAskill proposed a definition of the term <a href=\"https://forum.effectivealtruism.org/posts/qZyshHCNkjs3TvSem/longtermism\"><u>longtermism<\/u><\/a> as the<i> view that those who live at future times matter just as much, morally, as those who live today<\/i>. There are many reasons to believe that actions can have a substantial impact on the future. For instance, the economic growth seen in the past two centuries has lifted billions out of poverty. In addition to this, any long-term consequences of climate change caused by humans could decrease the life quality of several generations to come. Our generation is also one of the first who has had the technological potential to destroy civilization through e.g. nuclear weapons, and thereby eliminating all future of humanity. This means that actions we take today can improve the course of history for hundreds of generations to come.<\/p><p>Interest in the welfare of future generations precedes the MacAskill definition of longtermism from 2017. In 2005 the Future of Humanity Institute was established at Oxford university. In 2009, the <a href=\"https://www.csf.gov.sg/who-we-are/\"><u>Centre for Strategic Futures<\/u><\/a> (CSF) was established by the Singaporian Government as a futures think tank. In 2017 William MacAskill started using the word “longtermism” as a term for the cluster of views that involved concern about ensuring the long-run future goes as well as possible. 
Since then, <a href=\"https://forum.effectivealtruism.org/tag/longtermism-philosophy\"><u>many have contributed<\/u><\/a> to the development of the philosophical field. The <a href=\"https://globalprioritiesinstitute.org/\"><u>Global Priorities Institute<\/u><\/a> (GPI) in Oxford was established in 2018 with the mission to <i>conduct and promote world-class, foundational academic research on how most effectively to do good<\/i>. In 2020 GPI published a new <a href=\"https://globalprioritiesinstitute.org/research-agenda/\"><u>research agenda<\/u><\/a>, where one of its two sections was dedicated to longtermism. These are just some of several milestones in the short history of longtermism. <\/p><p>If we believe that the future is what matters most and that we can influence it through our policy making, then it follows that the long-run outcomes of enacted policies should be one of the key considerations of the policy making process. However, most political systems are not prioritising long-term planning sufficiently compared to the potential benefits just for existing generations – nevermind thinking about the moral importance of future generations. <\/p><p>There are examples of different institutions and policy makers that are putting longtermism on the agenda today, but the time frame they consider long-term differs. Time horizons of longtermist organizations that frequently interact with policy makers (e.g. <a href=\"https://www.appgfuturegenerations.com/\"><u>APPG<\/u><\/a> and <a href=\"https://www.alpenglow.org.uk/\"><u>Alpenglow<\/u><\/a>) are constrained by the norms in the current policy making process. Although academics talking about \"longtermism\" can look thousands of years ahead, actors seeking to practically influence policy organisations, including ourselves, are typically considering shorter time horizons, e.g. 20-30 years in the future. 
<\/p><p>This article will explore three categories of objectives for longtermist policy making and might serve as a guide towards shaping longtermist policy suggestions. These objectives are summarized in figure 1.<\/p><figure class=\"image image_resized\" style=\"width:624px\"><img src=\"http://res.cloudinary.com/cea/image/upload/v1667996044/mirroredImages/t4Lqh7GHBM9YyEDg8/dz3yy1a99m7ei9v9fcbb.png\"><figcaption><strong>Figure 1<\/strong>: Representation of the three objectives longtermist policies should focus on. Objective 1 and 2 serve as foundations for the more direct objective(s) above them.<\/figcaption><\/figure><p>On top of the pyramid is the objective directly benefiting future generations - i.e. ensuring that there is a future for human civilization, and that it is as positive as possible. This objective builds on the condition that policy making institutions are enabled to develop such policies, which brings us to part two of the pyramid. This part describes three essential conditions to achieve successful behaviour change interventions; capability, motivation and opportunity, reflecting the <a href=\"https://link.springer.com/article/10.1186/1748-5908-6-42\"><u>COM-B system for institutional reform<\/u><\/a> (Michie et. al. 2011). The two upper pieces of the pyramid both rest upon the fundamental part, which concerns the objective of <i>understanding longtermism<\/i>. Interventions focused on this objective have a more indirect impact mechanism.<\/p><p>A policy intervention should optimize for one or several of these objectives in order to qualify as a \"longtermist policy proposal\".<\/p><p>Note that the proposals in figure 1 are synergistic - if we improve our performance on one of the objectives, it may become easier to also improve on others. In general, objective one works as an enabler of objective two, and both objective one and two are enablers of the third objective. 
For instance, if a policy making institution is able to agree on a set of KPIs to measure the long-term quality of a society (as a partial solution to objective 1 in figure 1), then they can set up a forecasting infrastructure for these KPIs (developing capabilities to govern for the long term, as described in objective 2). With this forecasting infrastructure in place, long-term effects of proposed policies will be more visible to the electorate, creating stronger incentives for politicians to optimize for long-term outcomes (solving another part of objective 2; motivations). This will for instance make it easier to prioritize catastrophic risk mitigation (enabling investment in efforts focused on objective 3), etc.<\/p><p>Several of the ideas in each category of objectives would be familiar to experienced effective altruists due to the natural synergies of longtermism and effective altruism. However, even experienced effective altruists may not have encountered all of the topics in this article; examples of topics that the experienced reader may find interesting include:<\/p><ul><li>The three-layered model of objectives of longtermist policies in figure 1<\/li><li>The discussion of governance KPIs in section 1.1<\/li><li>Non-forecasting tools like e.g. scenario planning as essential epistemic capabilities in section 2.1, on par with forecasting<\/li><li>Structured examples of how policy making institutions can be reformed to benefit future generations in section 2.4<\/li><li>The discussion of sustainability as a way to either mitigate catastrophic risk or a way to boost inclusive progress in section 3.3<\/li><\/ul><p>While the objectives are relevant for policy makers in a broad range of governance models and in countries with different levels of democratic development, the examples in this article are primarily focused on policy making on national levels in industrialized, democratic countries. 
<\/p><h1><strong>1.0 Further our understanding of longtermism and adjacent scientific fields<\/strong><\/h1><p>In the ongoing field of exploring strategic considerations related to longtermist policy making, there is a need for agreement of the meaning of the word. The bottom piece of the pyramid in figure 1 concerns our understanding of longtermism. William MacAskill <a href=\"https://forum.effectivealtruism.org/posts/qZyshHCNkjs3TvSem/longtermism#Strong_Longtermism\"><u>proposes <\/u><\/a>three premises that make up what he calls the minimum definition of longtermism: (1) Those who live at future times matter as much, morally as those who live today, (2) society currently privileges those who live today above those who live in the future, and (3) we should take action to rectify that, and help ensure the long-run future goes well. Based on these premises, MacAskill and others have proposed political measures like <a href=\"https://philpapers.org/archive/JOHLIR.pdf\"><u>future assemblies<\/u><\/a> or a <a href=\"https://drive.google.com/file/d/1lJHBKfIROiyc8yxVaZnKEWUEYfOg06Eh/view\"><u>Ministry of the Future<\/u><\/a> (see section 2.4 for further elaboration). Organizations like the <a href=\"https://globalprioritiesinstitute.org/wp-content/uploads/gpi-research-agenda.pdf\"><u>Global Priorities Institute<\/u><\/a> (GPI) and the <a href=\"https://www.fhi.ox.ac.uk/\"><u>Future of Humanity Institute<\/u><\/a> (FHI) are currently working on establishing longtermism as a scientific field of inquiry. <\/p><h2>1.1 What does a good society look like?<\/h2><p>Two important constraints on our current ability to positively influence the future are (i) uncertainty about what a good society looks like, i.e. moral cluelessness, and (ii) how we can best create one, i.e. strategic cluelessness. Different scientific and philosophical fields have attempted to investigate the first question in different ways. 
One example of moral cluelessness is the repugnant conclusion, which assumes that by adding more people to the world, and proportionally staying above a given average in happiness, one can reach a state of minimal happiness for an infinitely large population. However, we aren't completely clueless: here are some metrics that are commonly used to describe more or less positive aspects of a society. <\/p><p>Economists frequently use KPIs (Key Performance Indicators) to try to measure different facets of a successful society. GDP and GDP growth is perhaps the most common, while metrics like Gini-coefficients, average lifespan, GHG emissions, or the Human Development Index are used to describe inequality, health, sustainability and economic development, respectively.<\/p><p>While none of these metrics cover all that matters in a society on their own, a combination of such KPIs may capture most of the aspects that we care about. The “<a href=\"https://drive.google.com/file/d/1lJHBKfIROiyc8yxVaZnKEWUEYfOg06Eh/view\"><u>Portugal we want<\/u><\/a>” project is an example of a collaborative effort to converge on a set of KPIs to use in governance for the long term. There are also other examples that similarly attempt to stake out the course for the future of the country, e.g. the “<a href=\"https://www.cynnalcymru.com/project/the-wales-we-want/\"><u>Wales we want<\/u><\/a>”-project, or the Japanese work on “<a href=\"https://www.japanpolicyforum.jp/society/pt20190109210522.html\"><u>Future Design<\/u><\/a>”. 
It lists several other partial approaches to measure broader social welfare through a set of KPIs, including informal discussions by <a href=\"http://www.stafforini.com/blog/bostrom/\"><u>Bostrom <\/u><\/a>and <a href=\"http://reflectivedisequilibrium.blogspot.com/2013/12/what-proxies-to-use-for-flow-through.html\"><u>Shulman<\/u><\/a>. <\/p><h2>1.2 How do we create a good society?<\/h2><p>When we want to plan for a good society in the future we need to make prioritizations. This can be very important for the long-run trajectory of society as some efforts to improve society are much <a href=\"https://80000hours.org/problem-profiles/global-priorities-research/\"><u>more effective than others<\/u><\/a>. <a href=\"https://80000hours.org/2013/12/a-framework-for-strategically-selecting-a-cause/\"><u>Cause prioritization<\/u><\/a> is a philosophical field involved with evaluating and comparing different cause areas in their effectiveness. Some of the organizations working on cause prioritization are <a href=\"https://80000hours.org/articles/future-generations/\"><u>80,000 Hours<\/u><\/a>, the <a href=\"https://www.openphilanthropy.org/blog/update-cause-prioritization-open-philanthropy\"><u>Open Philanthropy Project<\/u><\/a>, and The Center for Reducing Suffering. The latter <a href=\"https://centerforreducingsuffering.org/the-benefits-of-cause-neutrality/\"><u>proposes<\/u><\/a> that starting out with a cause-neutral attitude to longtermist policy making is crucial to succeed at the cause prioritization. 
To achieve this, effective institutions and organizations need to: <\/p><ol><li>Build a broad movement for longtermist policy change so that these efforts don’t get stuck in a specific cause area.<\/li><li>Explicitly work on prioritization research so that cause areas can be accurately compared, as well as induce attitude change in political and societal institutions (see the middle piece of the pyramid: <i>shape policy making institutions for future generations<\/i>).<\/li><\/ol><p>One important concept in cause prioritization is the notion of <i>crucial considerations<\/i> - which are strategic questions that can significantly change the optimal strategy when they are taken into consideration. Some of the crucial consideration of longtermist policy making includes, but is not limited to, our evaluation of the <a href=\"https://forum.effectivealtruism.org/posts/XXLf6FmWujkxna3E6/are-we-living-at-the-most-influential-time-in-history-1\"><u>hinge of history hypothesis<\/u><\/a> (HoH), as well as other considerations discussed in the Global Priorities Institute’s <a href=\"https://globalprioritiesinstitute.org/research-agenda-web-version/\"><u>new research agenda<\/u><\/a>. The HoH assumes that this century, or perhaps especially the coming decades, is the most influential period in all of human history. Therefore, our evaluation of HoH’s likelihood is one of the determinants of how we should influence policy makers and the way we distribute the resources we have available today. If we believe that the coming century is merely as influential as a typical century, then we - like <a href=\"https://forum.effectivealtruism.org/posts/Eey2kTy3bAjNwG8b5/the-emerging-school-of-patient-longtermism\"><u>patient longtermist<\/u><\/a> - will probably spend less of our philanthropic resources now, and save more to spend them later. However, if we believe that this period is the most “hingey” period of all of human history - e.g. 
because our current values could be locked in for generations to come (i.e. <i>value lock-in view<\/i>), or if we are living in a<i> time of perils <\/i>- then we should rather spend more of our philanthropic resources now to ensure the most impact. These considerations can be applied to our spending of any type of philanthropic capital - either money, political influence or other resources of value. If we don’t live at the HoH, it then seems most logical to spend the next decades focusing on building political influence, rather than spending political capital to influence specific decisions in the near future. <\/p><h1><strong>2.0 Shape policy making institutions for future generations<\/strong><\/h1><p>So far, we have considered the problem of longtermism on a general level, and we will therefore describe in this part different measures and obstacles connected to developing and motivating longtermist policy making in institutions. This section reflects the second piece of the pyramid in figure 1, and further elaborates on the COM-B system to ensure successful interventions in behavioural change. We will first consider epistemic determinants and how we can develop epistemic <i>capabilities<\/i> like forecasting and scenario planning, as well as redteaming and robust decision making. Then we will look at how we can <i>motivate<\/i> policy makers to prioritize future generations, and in the last paragraph we will consider important institutional barriers to such policy making, and how to remove them in order to create <i>opportunities<\/i> for longtermist policy making. 
This section is largely a summary of the work by John & MacAskill, so readers who've studied their work can skip it.<\/p><h2>2.1 Develop epistemic capabilities for long-term policy making<\/h2><p>Lack of knowledge about the future is likely one of the main sources of political short-termism, also known as epistemic determinants in <a href=\"https://www.researchgate.net/publication/343345291_Longtermist_Institutional_Reform\"><u>Longtermist Institutional Reform<\/u><\/a> by Tyler John and William MacAskill. These determinants lead to discounting of the value of long-term beneficial policies, making them less likely to be enacted. Some discounting is rational simply because there is a lot of uncertainty about the benefits of long-term policies. Irrational discounting is another source of short-termism which is caused by cognitive biases and attentional asymmetries between the future and nearby past. Vividness effects can make people react more strongly to vivid sources of information like news, videos and graphics compared to scientific research. People are also often over-confident in their ability to control and eliminate risks under situations of uncertainty. See <i>Thinking, fast and slow <\/i>(2011) by Daniel Kahneman for further details. Although these shortcomings are limiting politicians in their effectiveness, there has also been <a href=\"https://globalprioritiesinstitute.org/christian-tarsney-the-epistemic-challenge-to-longtermism/\"><u>cast doubt<\/u><\/a> on the possibility of predicting the future at all by philosopher Christian Tarsney.<\/p><p>Politicians work with the limitations of time and influence which can lead to attentional asymmetries, i.e. when determining the effectiveness of policies, they tend to focus too much on recent events, rather than basing it on future projections. The result of this asymmetry can be that politicians work with less accurate predictions. Furthermore, because of these reality constraints (i.e. 
time and power), politicians are forced to utilize heuristics like planning fallacy, availability bias and the law of small numbers to tackle current and future issues. However, we have also seen that the long-term can be prioritized politically with the Paris Agreement, carbon tax (e.g. in <a href=\"https://web.archive.org/web/20100615055008/http://iea.org/publications/free_new_Desc.asp?PUBS_ID=1580\"><u>Norway in 1991<\/u><\/a>), or the Danish <a href=\"https://klimaraadet.dk/en/about-danish-council-climate-change\"><u>council on climate change<\/u><\/a>. <\/p><p>To deal with these problems, politicians need effective means of forecasting with different sources - e.g. using teams of <a href=\"https://goodjudgment.com/\"><u>superforecasters<\/u><\/a> and domain experts, or market-based approaches like prediction markets, to obtain high-quality information about the future. This needs to be implemented to overcome the information barrier (knowledge about the future) and the attention barriers (making changes in future outcomes more salient) so that politicians can make informed decisions about the future. <\/p><p>To maximize the utility gained from this information, decision makers also need to invest in institutions and organizations that can develop epistemic capabilities beyond forecasting, e.g. scenario planning, robust decision making, and red teaming, among others. In <a href=\"https://www.smestrategy.net/blog/what-is-scenario-planning-and-how-to-use-it\"><u>scenario planning<\/u><\/a> exercises, policy makers define a set of scenarios that jointly describe the possible futures that are likely enough to be considered, that differ depending on factors of high uncertainty, and with significant implications for the optimal policy choice. Then, policies are evaluated for how they perform across the range of scenarios. 
Depending on the risk preferences of the policy makers, they should choose a robust policy that both has a high expected value across scenarios, and fails as gracefully as possible in the worst scenarios. Scenario planning could also be supplemented with <a href=\"https://link.springer.com/chapter/10.1007/978-3-030-05252-2_2\"><u>robust decision making<\/u><\/a> which especially emphasizes strategies that do well in worst-case scenarios. Additionally, <a href=\"https://www.synopsys.com/glossary/what-is-red-teaming.html\"><u>red teaming<\/u><\/a> can provide a solid method of stress-testing the plans we make for the future by taking an adversarial approach. <\/p><p>Several researchers within the EA movement are working on these issues, e.g. Neil Dullaghan, Michael MacKenzie, and Eva Vivalt. Dullaghan <a href=\"https://forum.effectivealtruism.org/posts/kCkd9Mia2EmbZ3A9c/deliberation-may-improve-decision-making\"><u>proposes<\/u><\/a> deliberation as a means of reaching better cooperation across party-lines and long-term thinking. He also claims that there may be a link between deliberation and long-term thinking; specifically in areas like climate change and the environment. Furthermore, MacKenzie <a href=\"https://www.oxfordhandbooks.com/view/10.1093/oxfordhb/9780198747369.001.0001/oxfordhb-9780198747369-e-7\"><u>argues<\/u><\/a> that deliberation can help us overcome our cognitive biases by for instance appealing to the idea “saving future children'' to ensure longtermist thinking. In order to gather all these findings within forecasting, Vivalt, a researcher at the Australian National University and University of Toronto, <a href=\"https://forum.effectivealtruism.org/posts/Z7RTJePkiWBH92qqo/eva-vivalt-forecasting-research-results\"><u>proposes<\/u><\/a> a platform to coordinate the research and the ability of each researcher to forecast. These are only some examples of researchers that are working to improve institutional decision making among many more. 
Still, it is one of the top recommended career paths by <a href=\"https://80000hours.org/problem-profiles/improving-institutional-decision-making/\"><u>80000 Hours<\/u><\/a>, as “Improving the quality of decision-making in important institutions could improve our ability to solve almost all other problems”.<\/p><h2>2.2 Motivate policymakers to prioritize future generations<\/h2><p>Even if there are policymakers who have the necessary capabilities to improve the welfare of future generations, there are still several factors that discourage them from doing so. These factors are referred to as motivational determinants in the <a href=\"https://philpapers.org/archive/JOHLIR.pdf\"><u>Longtermist Institutional Reform<\/u><\/a> by Tyler John and William MacAskill, on which the following three sections are heavily based.<\/p><p>People tend to have a high <a href=\"https://en.wikipedia.org/wiki/Time_preference\"><u>time preference<\/u><\/a> for the present, leading to greater discounting of the value of long-term benefits, which makes policies more short-termist. This is a problem that affects both voters and people in power, although the severity of this problem is unclear.<\/p><p>Self-interest and relational favouritism are another source of short-termism, as many people care more about themselves and their relatives than future generations. Self-beneficial policies are generally short-termist as policymakers and their relatives will only live for a short amount of time compared to the potential lifespan of humanity.<\/p><p>Cognitive biases may also affect people’s political decisions; two known biases are the identifiable victim effect and procrastination. The <a href=\"https://en.wikipedia.org/wiki/Identifiable_victim_effect\"><u>Identifiable victim effect<\/u><\/a> is the tendency to prioritize individuals that are visible over individuals that are statistical or theoretic. 
As future generations are invisible and haven’t been born yet, this naturally leads to short-termism. <\/p><p>Procrastination drives people to delay difficult problems until they become urgent and demand action. The further a long-term beneficial action is delayed, the less beneficial it is likely to be for future generations. Longtermism is especially prone to procrastination due to its extremely long timeframe.<\/p><p>Politicians are often even more short-termist than these factors would suggest, and they may frequently make extremely short-term decisions that have minimal benefits and significant costs within a few years, due to the various institutional factors discussed below. <\/p><h2>2.3 Remove institutional barriers to longtermist policy making<\/h2><p>Even policymakers that have the expertise and motivation to improve the welfare of future generations can be held back by institutional barriers that are preventing them from effectively advocating for longtermist policies. Many of these factors are due to the way today’s governmental institutions are designed; other sources include politicians’ economic dependencies and the media.<\/p><p>Most governments have short election cycles that incentivize short-term policy. Elected representatives naturally want to be re-elected, and one way to gain the favour of potential voters is to provide evidence that their previous time in office brought positive and immediate effects, which is predominantly achieved by initiating short-term policies.<\/p><p>Along with short election cycles, most performance measures mainly evaluate the short-term effects of policies, further discouraging policymakers from advocating for long-term policy.<\/p><p>Time inconsistency is also a problem in governmental institutions because subsequent policymakers can repeal previously enacted future-beneficial policies, as well as redirect investments that were originally intended for future generations. 
Most governments lack strong institutions dedicated to protecting the interests of future generations, which could help combat the problem of time inconsistency.<\/p><p>The media, which is largely focused on today’s current events, demands immediate reactions from policymakers. This pressures the policymakers to focus on short-term issues in order to build their reputation, as abstaining from doing so might lower their odds of re-election.<\/p><h2>2.4 Proposed mechanisms<\/h2><p>To deal with the problems mentioned above (lacking capabilities, disincentivized policymakers and institutional barriers), there is a dire need for institutional reform. There are many different ways to go about this, and there is still a lot of uncertainty about what might be the best solutions. What follows is a list of various longtermist policy proposals chosen with help from Tyler John. The proposals are divided into five main categories, with examples below. A more comprehensive list can be found <a href=\"https://forum.effectivealtruism.org/posts/op93xvHkJ5KvCrKaj/institutions-for-future-generations#Four_branch_Model_of_Government\"><u>here<\/u><\/a>.<\/p><p><strong>Designated stakeholders<\/strong><\/p><p>Key decision-makers or their advisors are appointed as responsible for protecting the interests of future people. Some examples of these are:<\/p><ul><li>Ministers and Executive Departments<\/li><li>Ombudsperson for Future Generations<\/li><li>Parliamentary committees<\/li><\/ul><p><strong>Information interventions<\/strong><\/p><p>Affects how information about the impact of future policies is gained or made publicly available. Some examples of these are:<\/p><ul><li>In-government Think Tank<\/li><li>Posterity Impact Assessments<\/li><li>Intergenerational Deliberation Day<\/li><\/ul><p><strong>Voting mechanisms<\/strong><\/p><p>Democratic election mechanisms and policy voting rules are redesigned to promote candidates that are expected to benefit future people. 
Some examples of these are:<\/p><ul><li>Choosing legislators via lottery<\/li><li>Demeny voting<\/li><li>Longer election cycles<\/li><\/ul><p><strong>Liability mechanisms<\/strong><\/p><p>Mechanisms that hold current decision-makers liable if their decisions lead to poor outcomes in the future, including formal rights for future people. Some examples of these are:<\/p><ul><li>Intergenerational externality taxes<\/li><li>Making court systems more future-oriented<\/li><li>Pay for Long-term performance<\/li><\/ul><p><strong>Reallocation of resources<\/strong><\/p><p>Control of current resources is deferred to future people. Some examples of these are:<\/p><ul><li>Heritage funds<\/li><li>Financial Institutions for Intergenerational Borrowing<\/li><li>Lower social discount rate<\/li><\/ul><p>For more in-depth analysis of the various proposals, see “Longtermist Institutional Design Literature Review” by Tyler John.<\/p><p>In addition to the five categories above, another way to encourage long-term policy could be to influence society to be more long-term friendly. An example of this is Roman Krznaric’s writings where he establishes terms and concepts that could enable more longtermist thinking. <\/p><h1><strong>3.0 Directly influence the future trajectory of human civilization<\/strong><\/h1><p>The top layer of the pyramid in figure 1 considers how one can influence the future of humanity in a more direct way than the objectives in layers 1 and 2 do. There are several methods to directly improve the future and positively shift the trajectory of civilization. One approach is to avoid the bad scenarios (as exemplified by the red scenarios in Figure 2), such as extinction and major catastrophes. 
Another approach is to boost the good scenarios (exemplified by the green scenarios in Figure 2) by increasing the rate of inclusive progress - either by increasing economic growth, by making progress more inclusive, or by increasing our ability to convert economic wealth into wellbeing. <\/p><figure class=\"image image_resized\" style=\"width:624px\"><img src=\"http://res.cloudinary.com/cea/image/upload/v1667996044/mirroredImages/t4Lqh7GHBM9YyEDg8/qcctw3cbjlfqdrff7mwq.png\"><figcaption><strong>Figure 2<\/strong>: Illustration of positive and negative trajectories of civilization.<\/figcaption><\/figure><h2>3.1 Mitigate catastrophic risk and build resiliency to tail events and unknown unknowns<\/h2><p>In the effective altruism movement, one commonly recognized way to positively influence the future is to make sure that it actually exists and avoid <a href=\"https://longtermrisk.org/reducing-risks-of-astronomical-suffering-a-neglected-priority/#III_Reducing_s-risks_is_both_tractable_and_neglected\"><u>scenarios of extreme suffering<\/u><\/a>, i.e. by avoiding existential risks. By developing longtermist policy and institutions, we can better prepare for the future by building resiliency to both known and unknown existential risks.<\/p><figure class=\"image image_resized\" style=\"width:624px\"><img src=\"http://res.cloudinary.com/cea/image/upload/v1667996044/mirroredImages/t4Lqh7GHBM9YyEDg8/si5ga5enygb19xnwiigi.png\"><figcaption><strong>Figure 3<\/strong>: Examples of risks based on a <a href=\"https://www.existential-risk.org/concept.html\"><u>figure<\/u><\/a> by Nick Bostrom<\/figcaption><\/figure><p>Let us start with some definitions. Bostrom explains the difference between existential risk and catastrophic risk in <a href=\"https://www.existential-risk.org/concept.html\"><u>Existential Risk Prevention as Global Priority<\/u><\/a>. 
Existential risks are both pan-generational and crushing, which means that they drastically reduce the quality of life or cause death that humanity cannot recover from. Compared to this, risks that are merely globally catastrophic do not individually threaten the survival of humanity. Assuming that existence is preferable to non-existence, existential risks are considered significantly worse than global catastrophic risks because they affect all future generations. <\/p><p>However, global catastrophes may drastically weaken critical systems and our ability to tackle a second catastrophe. This argument is presented by the Global Catastrophic Risk Institute in a paper about <a href=\"http://gcrinstitute.org/papers/003_double-catastrophe.pdf\"><u>double catastrophes<\/u><\/a> with a case study on how geoengineering may be severely affected by other catastrophes. Moreover, many of the practices that can help us avoid globally catastrophic risks are also useful to prevent existential risks. We have titled this section “mitigate catastrophic risk” to ensure that we cover as many of the risks that may significantly impact the long-term future of humanity as possible.<\/p><p>The list of already known existential risks includes both natural and anthropological risks. Today’s technological advancements have created more anthropological risks, and there are good reasons to believe that they will continue to do so. Bostrom argues in <a href=\"https://www.sciencedirect.com/science/article/pii/S0016328720300604\"><u>The Fragile World Hypothesis<\/u><\/a> that continuous technological development will increase systemic fragility, which can be a source of catastrophic or existential risk. In the Precipice, Toby Ord estimates the chances of existential catastrophe within the next 100 years at one in six. We have already been dangerously close to global catastrophe, e.g. 
when <a href=\"https://80000hours.org/2012/02/26th-of-september-petrov-day/\"><u>Stanislav Petrov<\/u><\/a> potentially singlehandedly avoided a global nuclear war in 1983 when he did not launch missiles in response to the warning system reporting a US missile launch. To prevent such close calls from happening in the future, we need to gain knowledge about both known and unknown risks and solutions to them. <\/p><p>In the Precipice, Ord proposes that reaching existential security is the first of three steps to optimize the future of human civilization. Reaching existential security includes both eliminating immediate dangers, potential future risks, and establishing long-lasting safeguards. For example, switching to renewable energy sources, electric or hydrogen-based fuel, and clean meat, are ways to safeguard against catastrophic <a href=\"https://www.mckinsey.com/business-functions/sustainability/our-insights/climate-risk-and-response-physical-hazards-and-socioeconomic-impacts\"><u>climate change<\/u><\/a>. This is one risk that 80,000 Hours include in their view of the world’s <a href=\"https://80000hours.org/problem-profiles/\"><u>most pressing problems<\/u><\/a>. 80,000 Hours’ list also includes <a href=\"https://80000hours.org/problem-profiles/positively-shaping-artificial-intelligence/\"><u>positively shaping the development of artificial intelligence<\/u><\/a>. This can be positively influenced by investing in technical research and improving governmental strategy. 
Another priority area is reaching <a href=\"https://80000hours.org/problem-profiles/nuclear-security/\"><u>nuclear security<\/u><\/a>, which includes shrinking nuclear stockpiles and improving systems and communication to avoid depending on people acting like Petrov in the case of false warnings.<i> <\/i>Another priority catastrophic risk area in the EA movement is <a href=\"https://www.openphilanthropy.org/research/cause-reports/biosecurity\"><u>biorisk and pandemic preparedness<\/u><\/a>, which is one of the focus areas of the Open Philanthropy Project. In addition to protecting against already known risks, humanity should research potential future risks and use forecasting principles to prepare for them. <\/p><p>When we have reached existential security, Ord proposes that the next steps should be <\/p><ol><li>a long reflection where we determine what kind of future we want to create and how to do so, and<\/li><li>achieving our full potential.<\/li><\/ol><p>Thus, Ord argues that existential security should take priority over other objectives described in this article, as it is more urgent.<\/p><p>There are a wide range of actions that can be taken to mitigate catastrophic and existential risks. As mentioned, these actions mainly include eliminating immediate dangers and establishing long-lasting safeguards. The lists below are partially based on the work by <a href=\"https://www.gcrpolicy.com/risk-management\"><u>Global Catastrophic Risk Policy<\/u><\/a>. <\/p><p><strong>Reduce the probability of specific risks<\/strong><\/p><p>The most direct course of action to avoid catastrophe is to reduce the probability of catastrophic or existential risks. 
Some examples of risks and how to reduce them are: <\/p><ul><li>Reducing the potential for both intentional and unintentional use of nuclear weapons through improving early warning systems, reducing the number of nuclear warheads and the number of people having access to them.<\/li><li>Strengthening preparedness against pandemics by improving early warning systems, implementing global procedures for limiting spread, and shortening vaccine development timelines. We can also prepare for pandemics by developing vaccines for diseases with high pandemic potential.<\/li><li>Mitigating climate change by curbing CO<sub>2<\/sub> emissions through technological development or policy changes. Other methods include climate engineering actions such as removing CO<sub>2<\/sub> from the atmosphere.<\/li><\/ul><p><strong>Improve risk management frameworks<\/strong><\/p><p>Another approach is to improve risk management frameworks in such a way that we are prepared and able to react better to future risks. Some examples are: <\/p><ul><li>Developing a centralized all-hazard national risk assessment process that is adaptable to risks in a variety of domains.<\/li><li>Developing a risk prioritization framework to evaluate vulnerabilities, and the impact of possible adverse outcomes.<\/li><li>Deconflicting risk ownership between government stakeholders: Set one department or agency as the primary owner for each risk, with clear responsibilities for mitigation, preparation and response.<\/li><li>Appointing a “national risk officer” responsible for overseeing the national risk assessment process and coordinating mitigation efforts.<\/li><\/ul><p><strong>Increase resilience of critical systems<\/strong><\/p><p>We can also limit the potential harm done by catastrophic risks or mitigate risks by increasing the resilience of critical systems. 
Some examples of how to increase critical system resilience are: <\/p><ul><li>Increasing emergency storage capacity of items like food, fuel and medicine at secure locations.<\/li><li>Developing more resilient crops and protecting critical infrastructure assets against disasters both natural and anthropogenic.<\/li><li>Diversifying sourcing to e.g. ensure that digital communication systems tolerate power failures.<\/li><li>Hardening assets such as crops by making them more resilient.<\/li><\/ul><h2>3.2 Build inclusive progress through long-lasting and well-functioning institutions<\/h2><p>Another approach to positively shift the trajectory of civilization is to increase the rate of progress, and make progress more inclusive. Continuous progress can improve human life quality and create a flourishing future for people of diverse backgrounds. Collison and Cowen define <a href=\"https://www.theatlantic.com/science/archive/2019/07/we-need-new-science-progress/594946/\"><u>progress<\/u><\/a> as economic, technological, scientific, cultural or organizational advancements that transform our lives and raise our living standard. This definition is broader than the typical economic definition focused on measuring GDP growth as a proxy for progress. In particular, it includes the opportunity to increase progress by increasing our ability to convert economic wealth into wellbeing. For this reason, we will use the term “economic progress” when referring to GDP growth, while “progress” alone will refer to the broader definition. 
Moreover, “wellbeing”, “welfare” and “happiness” are used interchangeably, and it is assumed that this is closer to a true measure of progress (in the broader sense) than purely economic metrics.<\/p><p><strong>There is still much we don’t know about progress<\/strong><\/p><p>There is an ongoing debate about whether there are fundamental limits to economic progress (and indeed <a href=\"https://www.researchgate.net/publication/348836201_What_is_the_Upper_Limit_of_Value\"><u>if there are upper limits of progress overall<\/u><\/a>) - if, at some point in the future, GDP growth must slow down and approach zero. If there are limits to economic progress, then increasing the rate of economic progress will only speed up the arrival of a zero-growth world of abundance. This could severely limit the potential value of increasing the rate of economic progress.<\/p><p>If there is no immediate limit to economic progress, there are good reasons to believe that it could continue indefinitely, and improve human welfare in the process. Human quality of life has generally improved significantly since the Industrial Revolution. This strong correlation between GDP growth and improved life quality has been well documented by e.g. <a href=\"https://www.gapminder.org/\"><u>Gapminder<\/u><\/a>. For example, the <a href=\"https://ourworldindata.org/a-history-of-global-living-conditions-in-5-charts\"><u>percentage of people living in extreme poverty<\/u><\/a> has decreased from about 90% in 1820 to 10% in 2015. It is also argued that a <a href=\"https://www.worksinprogress.co/issue/securing-posterity/\"><u>stagnation in growth is risky<\/u><\/a> in regards to existential risks. GDP growth is far from the only factor that influences progress. Other examples include improved economic distribution, sustainable development and effective transforming of economic growth to human welfare. 
<\/p><p>There are also ongoing discussions about how to best measure (a broader definition of) progress, if progress is slowing down or accelerating, and how existential risk is affected by the rate of economic progress. This is briefly covered in the <a href=\"https://globalprioritiesinstitute.org/wp-content/uploads/GPI-research-agenda-version-2.1.pdf\"><u>GPI research agenda<\/u><\/a>, and somewhat more extensively in sources therein.<\/p><p>To improve our understanding of how progress occurs, Collison and Cowen have proposed to develop “<a href=\"https://www.theatlantic.com/science/archive/2019/07/we-need-new-science-progress/594946/\"><u>Progress Studies<\/u><\/a>” as a field of research. According to Collison and Cowen, progress studies investigates successful institutions, people, organizations and cultures to find common factors that are linked to progress. If we succeed in finding common factors between Ancient Greece, The Industrial Revolution and Silicon Valley, we can improve progress by acting accordingly. Due to the immaturity of progress studies, we have yet to find such common factors. However, scientific reform and interventions as described above are seemingly very promising. <\/p><p><strong>General ideas for how to increase progress<\/strong><\/p><p>There are three main paths to increasing inclusive progress: increasing economic growth, making progress more inclusive, and converting economic wealth into welfare. The first path has been promoted by e.g. <a href=\"https://80000hours.org/podcast/episodes/tyler-cowen-stubborn-attachments/\"><u>Tyler Cowen, arguing<\/u><\/a> that it is among the most powerful tools to improve the future because economic growth compounds over time.<\/p><p>Making progress more inclusive by redistributing resources or social status can increase total human happiness. 
According to 80,000 Hours, <a href=\"https://80000hours.org/articles/money-and-happiness/\"><u>happiness<\/u><\/a> increases <a href=\"https://www.pnas.org/content/118/4/e2016976118\"><u>logarithmically <\/u><\/a>when one becomes wealthier, which means that it is a lot more cost-effective to increase the wealth of poor people. Therefore, redistribution of progress is also very important toward effectively and positively shifting the trajectory of humanity. <\/p><p>While there is a strong correlation between economic wealth and wellbeing, it is not all that matters. Some countries have higher levels of happiness than others, despite being poorer - for instance, self-reported <a href=\"https://ourworldindata.org/grapher/gdp-vs-happiness\"><u>happiness levels in Costa Rica are higher than in Luxembourg, while GDP is 6x lower<\/u><\/a>. It is plausible that we can find ways to make happiness cheaper, so that a similar level of economic wealth can be translated into more welfare.<\/p><p>It is hard to know the counterfactual impact of interventions focused on any of these paths. While catastrophic risk mitigation is focused on changing the outcomes of forks in the path of civilization, interventions for progress to a larger degree rely on shifting long-term trends that are hard to reason about empirically. So far, hypotheses for effective interventions have been generated through the use of some heuristics, including:<\/p><ul><li>Institutions can coordinate the efforts of individuals, and thereby multiply their total impact. For this reason, changes in institutional designs are “hingey” - a limited effort to improve an institution can have lasting effects at scale<\/li><li>Some institutional reforms matter more than others. In particular, longer-lasting institutions (examples may include the American Constitution or Ivy League schools) can maintain their influence over time, so reforming these institutions is a way to have a more durable impact. 
This is a version of “<a href=\"https://www.effectivealtruism.org/articles/a-proposed-adjustment-to-the-astronomical-waste-argument-nick-beckstead/\"><u>path-dependent trajectory changes<\/u><\/a>” advocated for by Nick Beckstead, and further discussed in e.g. Eliezer Yudkowsky’s <a href=\"https://equilibriabook.com/\"><u>Inadequate Equilibria<\/u><\/a><\/li><li>Moreover, more influential institutions (e.g. measured in budget size, number of members or technological capabilities) typically offer a larger potential for impact.<\/li><li>Finally, reforms that create positive feedback loops (e.g. by improving processes that are essential for progress, like science, innovation or decision making) accumulate over time<\/li><\/ul><p><strong>Specific proposals for how to increase inclusive progress<\/strong><\/p><p>It is commonly argued that the scientific revolution has been one of the key drivers of progress in the last centuries, but today many scholars criticize the modern academic institutions for being sub-optimal. For this reason, interventions aiming to improve academic research may be one promising category to increase the rate of progress. Some examples among many interventions aiming to improve academic research include <a href=\"https://www.replicationmarkets.com/\"><u>Replication Markets<\/u><\/a>, <a href=\"https://arxiv.org/\"><u>ArXiv<\/u><\/a>, <a href=\"https://www.semanticscholar.org/\"><u>Semantic Scholar<\/u><\/a> and <a href=\"https://ought.org/\"><u>Ought<\/u><\/a>. Replication Markets use forecasting to estimate a research claim’s chance of replication. ArXiv and Semantic Scholar are archives with scientific papers, and Ought tries to figure out which questions humans can delegate to artificial intelligence. 
Additionally, “scientific research” is one of the top cause areas of the Open Philanthropy Project.<\/p><p>All of the abovementioned interventions are improving academic progress, but there are also non-academic interventions that may increase progress. Some examples from the US Policy focus area of Open Philanthropy Project (Open Phil) include:<\/p><ul><li><a href=\"https://www.foreignaffairs.com/articles/united-states/2020-09-14/americas-exceptional-housing-crisis\"><u>Urban zoning/land use reform<\/u><\/a>, which is meant to reduce the costs of living in cities. This may increase progress because it allows people to move to areas with great economic opportunities<\/li><li><a href=\"https://www.openphilanthropy.org/focus/us-policy/macroeconomic-policy\"><u>Macroeconomic stabilization policy<\/u><\/a>, where Open Philanthropy funds advocacy initiatives focused on emphasizing the importance of alleviating suffering and lost output from unemployment during economic crises<\/li><li><a href=\"https://www.openphilanthropy.org/focus/us-policy/immigration-policy\"><u>Immigration policy reform<\/u><\/a>, which may both provide economic opportunities for people from lower-income countries and increase long-term economic growth<\/li><li><a href=\"https://forum.effectivealtruism.org/posts/8Rn2gw7escCc2Rmb7/thoughts-on-electoral-reform\"><u>Electoral reform<\/u><\/a>: e.g. campaign financing rules, election security measures, and improved voting systems (e.g. 
<a href=\"https://electionscience.org/approval-voting-101/\"><u>approval voting<\/u><\/a> or <a href=\"https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2003531\"><u>quadratic voting<\/u><\/a>), to better ensure that elected officials represent the electorate and reduce the <a href=\"https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors\"><u>risk of malevolent leaders<\/u><\/a><\/li><\/ul><h2>3.3 What about sustainability?<\/h2><p>Outside of the effective altruism movement, sustainability is one of the most common cause areas for people concerned about the welfare of future generations. Significant resources are invested in ensuring that our GHG emissions are brought down, that our depletion of natural resources and destruction of species habitats are slowed, and that state budgets are fiscally balanced across generations. Thus it may seem strange that sustainability has played such a small role in this article.<\/p><p>Our argument, borrowed from <a href=\"http://www.stafforini.com/blog/bostrom/\"><u>Bostrom <\/u><\/a>and others in the EA movement, is that unsustainabilities are bad if they exacerbate catastrophic risk, or if they slow down the rate of inclusive progress. <a href=\"https://www.mckinsey.com/business-functions/sustainability/our-insights/climate-risk-and-response-physical-hazards-and-socioeconomic-impacts\"><u>Research by the McKinsey Global Institute<\/u><\/a> shows that unmitigated climate change can be harmful in both of these ways. <a href=\"https://www.mckinsey.com/industries/public-and-social-sector/our-insights/the-social-contract-in-the-21st-century\"><u>Further research<\/u><\/a> by the McKinsey Global Institute demonstrates that the social contract is eroding across developed economies, and that economic outcomes for individuals are worsening as a consequence. 
In cases like these where the unsustainabilities are expected to create large amounts of human suffering, we should work hard to become more sustainable.<\/p><h1><strong>4.0 Summary<\/strong><\/h1><p>There are several objectives of longtermist policy making. We have presented three categories of objectives, where the objectives in the bottom layers are potential enablers of the upper objectives. All of them are relevant to the necessary prioritization of future generations, given that longtermism is plausible. <\/p><p>Each of the objectives and their sub-objectives are well covered in existing literature, but to our knowledge they have not been presented in this structure before. In this article we have summarized some of the relevant parts of the literature, in the hope of providing an accessible introduction to the field. Furthermore, we hope that some points in this article can serve as coordination points for more experienced longtermists - e.g. when referring to which parts of longtermist policy making they are attempting to improve, and why.<\/p>","mainEntityOfPage":{"@type":"WebPage","@id":"https://forum.effectivealtruism.org/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1"},"headline":"Objectives of longtermist policy making","description":"Estimated reading time: 20-30 minutes • -We would like to thank the following for their excellent feedback and guidance throughout this article, in n…","datePublished":"2021-02-10T18:26:30.881Z","about":[{"@type":"Thing","name":"Policy","url":"https://forum.effectivealtruism.org/topics/policy","description":"<p>The <strong>policy<\/strong> topic is very broad, covering any post about improving government policy (in developing and developed countries alike).<\/p><h2>Improving policy<\/h2><p>Governments are typically committed to the notion that their policies should be effective. This means that members of the effective altruist community can be in a good position to help governments reach their aims. 
Moreover, the fact that governments are very powerful, and control significant proportions of world GDP, suggests that helping policy-makers can be a high-value strategy. This strategy can be pursued either from the <i>outside<\/i>—by effective altruist organizations which advise policy-makers—or from the <i>inside<\/i>—by policy-makers who try to do the most good possible.<\/p><p>Some of the highest-impact reforms affect people who are less able to advocate for their own interests, such as <a href=\"https://forum.effectivealtruism.org/tag/criminal-justice-reform\">prisoners<\/a> or <a href=\"https://forum.effectivealtruism.org/tag/immigration-reform\">migrants<\/a>. Other policies, like <a href=\"https://forum.effectivealtruism.org/tag/macroeconomic-policy\">macroeconomic policy<\/a> and <a href=\"https://forum.effectivealtruism.org/tag/land-use-reform\">land use reform<\/a>, have effects that are somewhat diffuse and non-obvious, which makes it difficult to assemble groups to lobby for change. 
The more mainstream focus areas of <a href=\"https://forum.effectivealtruism.org/tag/global-poverty\">global poverty and health<\/a>, <a href=\"https://forum.effectivealtruism.org/tag/animal-welfare-1\">animal welfare<\/a> and <a href=\"https://forum.effectivealtruism.org/tag/existential-risk\">existential risk<\/a> could also be addressed using political advocacy.<\/p><h2>Further reading<\/h2><p>Bowerman, Niel (2014) <a href=\"https://forum.effectivealtruism.org/posts/n5CNeo9jxDsCit9dj/good-policy-ideas-that-won-t-happen-yet\">Good policy ideas that won’t happen (yet)<\/a>, <i>Effective Altruism Forum<\/i>, September 14.<br><i>A look at the viability of changing public policy on certain issues.<\/i><\/p><p>Clough, Emily (2015) <a href=\"https://bostonreview.net/world/emily-clough-effective-altruism-ngos\">Effective altruism’s political blind spot<\/a>, <i>Boston Review<\/i>, July 14.<br><i>An example of one of the main criticisms of effective altruism: that it paid insufficient attention to political advocacy in the past.<\/i><\/p><p>Farquhar, Sebastian (2016) <a href=\"https://www.youtube.com/watch?v=NB_edlOrPOU&list=PLwp9xeoX5p8P_O5rQg-SNMwQOIvOPF5U2&index=10\">Should EAs do policy?<\/a>, <i>Effective Altruism Global<\/i>, August 5.<br><i>A talk at EA Global 2016 with an overview of why policy work might be effective.<\/i><\/p><p>Global Priorities Project (2015) <a href=\"http://globalprioritiesproject.org/2015/12/new-uk-aid-strategy-prioritising-research-and-crisis-response/\">New UK aid strategy – prioritising research and crisis response<\/a>, <i>Global Priorities Project<\/i>, December 2.<br><i>An example of effective altruist policy work.<\/i><\/p><p>Karnofsky, Holden (2013) <a href=\"https://www.openphilanthropy.org/blog/track-record-policy-oriented-philanthropy\">The track record of policy-oriented philanthropy<\/a>, <i>Open Philanthropy<\/i>, November 6.<br><i>Articles on Open Philanthropy about policy and philanthropy.<\/i><\/p><p>Open Philanthropy 
(2016) <a href=\"https://www.openphilanthropy.org/focus/us-policy\">U.S. policy<\/a>, <i>Open Philanthropy<\/i>.<br><i>The Philanthropy Project's assessment of policy as a focus area.<\/i><\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/appg-on-future-generations\">APPG on Future Generations<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/democracy-defense-fund\">Democracy Defense Fund<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/improving-institutional-decision-making\">improving institutional decision-making<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/longtermist-institutional-reform\">longtermist institutional reform<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/standards-and-regulation\">standards and regul<\/a>... <\/p>"},{"@type":"Thing","name":"Existential risk","url":"https://forum.effectivealtruism.org/topics/existential-risk","description":"<p>An <strong>existential risk<\/strong> is a risk that threatens the destruction of the long-term potential of life.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefs39fj4bj7yr\"><sup><a href=\"#fns39fj4bj7yr\">[1]<\/a><\/sup><\/span> An existential risk could threaten the <a href=\"https://forum.effectivealtruism.org/topics/human-extinction\">extinction of humans<\/a> (and other sentient beings), or it could threaten some other unrecoverable <a href=\"https://forum.effectivealtruism.org/topics/civilizational-collapse\">collapse<\/a> or permanent failure to achieve a potential good state. 
<a href=\"https://forum.effectivealtruism.org/tag/natural-existential-risk\">Natural risks<\/a> such as those posed by <a href=\"https://forum.effectivealtruism.org/tag/asteroids\">asteroids<\/a> or <a href=\"https://forum.effectivealtruism.org/tag/supervolcano\">supervolcanoes<\/a> could be existential risks, as could <a href=\"https://forum.effectivealtruism.org/tag/anthropogenic-existential-risks\">anthropogenic (human-caused) risks<\/a> like accidents from <a href=\"https://forum.effectivealtruism.org/tag/global-catastrophic-biological-risk\">synthetic biology<\/a> or <a href=\"https://forum.effectivealtruism.org/topics/ai-alignment\">unaligned<\/a> <a href=\"https://forum.effectivealtruism.org/tag/ai-risk\">artificial intelligence<\/a>. <\/p><p><a href=\"https://forum.effectivealtruism.org/topics/estimation-of-existential-risk\">Estimating the probability of existential risk<\/a> from different <a href=\"https://forum.effectivealtruism.org/topics/existential-risk-factor\">factors<\/a> is difficult, but there are some estimates.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefs39fj4bj7yr\"><sup><a href=\"#fns39fj4bj7yr\">[1]<\/a><\/sup><\/span> <\/p><p>Some view reducing existential risks as a key moral priority, for a variety of reasons.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefflve249rqxn\"><sup><a href=\"#fnflve249rqxn\">[2]<\/a><\/sup><\/span> Some people simply view the current estimates of existential risk as unacceptably high. 
Other authors argue that existential risks are especially important because the <a href=\"https://forum.effectivealtruism.org/tag/longtermism\">long-run future of humanity<\/a> matters a great deal.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefehnudz7v1f\"><sup><a href=\"#fnehnudz7v1f\">[3]<\/a><\/sup><\/span> Many believe that there is <a href=\"https://forum.effectivealtruism.org/tag/temporal-discounting\">no intrinsic moral difference<\/a> between the importance of a life today and one in a hundred years. However, there may be many more people in the future than there are now. Given these assumptions, existential risks threaten not only the beings alive right now, but also the enormous number of lives yet to be lived. One objection to this argument is that people have a special responsibility to other people currently alive that they do not have to people who have not yet been born.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefql2k3envp7\"><sup><a href=\"#fnql2k3envp7\">[4]<\/a><\/sup><\/span> Another objection is that, although it would in principle be important to manage, the risks are currently so unlikely and poorly understood that existential risk reduction is less cost-effective than work on other promising areas.<\/p><p>In <a href=\"https://forum.effectivealtruism.org/tag/the-precipice\"><i>The Precipice: Existential Risk and the Future of Humanity<\/i><\/a>, <a href=\"https://forum.effectivealtruism.org/tag/toby-ord\">Toby Ord<\/a> offers several <a href=\"https://forum.effectivealtruism.org/tag/policy\">policy<\/a> and <a href=\"https://forum.effectivealtruism.org/tag/research\">research<\/a> recommendations for handling existential risks:<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref52gvr4dqg9p\"><sup><a href=\"#fn52gvr4dqg9p\">[5]<\/a><\/sup><\/span><\/p><ul><li>Explore options for new <a href=\"https://forum.effectivealtruism.org/tag/global-governance\">international institutions<\/a> aimed 
at reducing existential risk, both incremental and revolutionary.<\/li><li>Investigate possibilities for making the deliberate or reckless imposition of <a href=\"https://forum.effectivealtruism.org/tag/human-extinction\">human extinction<\/a> risk an international crime.<\/li><li>Investigate possibilities for bringing the <a href=\"https://forum.effectivealtruism.org/topics/longtermist-institutional-reform\">representation of future generations<\/a> into national and international democratic institutions.<\/li><li>Each major world power should have an appointed senior government position responsible for registering and responding to existential risks that<\/li><\/ul>... "},{"@type":"Thing","name":"Improving institutional decision-making","url":"https://forum.effectivealtruism.org/topics/improving-institutional-decision-making","description":"<p><strong>Improving institutional decision-making<\/strong> is a cause that focuses on increasing the technical quality and <a href=\"https://forum.effectivealtruism.org/topics/effective-altruism\">effective altruism<\/a> alignment of the most important decisions made by the world’s most important decision-making bodies.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefpg98poczmg\"><sup><a href=\"#fnpg98poczmg\">[1]<\/a><\/sup><\/span><\/p><h2>Improving institutions<\/h2><p>Institutions such as governments, companies, and charities control significant resources. One potentially effective way to do good, therefore, is to help institutions use these resources in more productive ways.<\/p><p>Members of the effective altruism community have employed this method extensively. For instance, they have tried to <a href=\"https://forum.effectivealtruism.org/tag/policy-change\">increase the attention policy-makers give<\/a> to <a href=\"https://forum.effectivealtruism.org/tag/existential-risk\">existential risk<\/a>. 
Similarly, an important goal of effective altruist charity recommendations is to increase the effectiveness of nonprofit organizations. <a href=\"https://forum.effectivealtruism.org/tag/influencing-for-profits\">Within the for-profit sector<\/a>, altruists have sought to shape the incentives of businesses to make them more aligned with social value, and have also tried to create social value themselves by engaging in social entrepreneurship.<\/p><p>Institutions can be improved in two different ways: from the <i>outside<\/i> and from the <i>inside<\/i>. Effective altruism organizations try to improve institutions from the outside by giving them advice or, in the case of charities, by evaluating them, whereas individual members of the effective altruism community may work within institutions to help them achieve their ends more effectively.<\/p><p>One approach to improving decisions is to set up institutional structures that are conducive to good decision-making.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefusvfm9rnves\"><sup><a href=\"#fnusvfm9rnves\">[2]<\/a><\/sup><\/span><span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref2d3u96edm2a\"><sup><a href=\"#fn2d3u96edm2a\">[3]<\/a><\/sup><\/span> This way, institutions like national governments might encourage people to make better decisions (e.g. saving for retirement) or make better decisions themselves (e.g. 
improving health policy).<\/p><h2>Evaluation<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/80-000-hours\">80,000 Hours<\/a> rates improving institutional decision-making a \"second-highest priority area\": an unusually pressing global problem ranked slightly below their four highest priority areas.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefhjq832ykn24\"><sup><a href=\"#fnhjq832ykn24\">[4]<\/a><\/sup><\/span><\/p><h2>Further reading<\/h2><p>Whittlestone, Jess (2017) <a href=\"https://80000hours.org/problem-profiles/improving-institutional-decision-making/\">Improving institutional decision-making<\/a>, <i>80,000 Hours<\/i>, September.<\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/all-party-parliamentary-group-for-future-generations\">All-Party Parliamentary Group for Future Generations<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/ballot-initiative\">ballot initiative<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/electoral-reform\">electoral reform<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/forecasting\">forecasting<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/international-relations\">international relations<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/longtermist-institutional-reform\">longtermist institutional reform<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/policy-change\">policy change<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/political-polarization\">political polarization<\/a><\/p><ol class=\"footnotes\" role=\"doc-endnotes\"><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnpg98poczmg\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnrefpg98poczmg\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Clayton, Vicky, Dilhan Perera & ibatra171 (2021) <a 
href=\"https://forum.effectivealtruism.org/posts/FqCSZT3pBvoATkR82/refining-improving-institutional-decision-making-as-a-cause\">Refining improving institutional decision-making as a cause area: results from a scoping surve<\/a><\/p><\/div><\/li><\/ol>... "},{"@type":"Thing","name":"Longtermism","url":"https://forum.effectivealtruism.org/topics/longtermism","description":"<p><strong>Longtermism<\/strong> is the view that positively influencing the <a href=\"https://forum.effectivealtruism.org/tag/long-term-future\">long-term future<\/a> is a key moral priority of our time.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref2iwddw4crwt\"><sup><a href=\"#fn2iwddw4crwt\">[1]<\/a><\/sup><\/span><span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefz6yx5b2rpim\"><sup><a href=\"#fnz6yx5b2rpim\">[2]<\/a><\/sup><\/span><\/p><p>Longtermism may be seen as following from the conjunction of three core claims:<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref6frik8lwexe\"><sup><a href=\"#fn6frik8lwexe\">[3]<\/a><\/sup><\/span> <\/p><ol><li>Future people matter morally. <\/li><li>If Earth-originating intelligence is not prematurely extinguished, the vast majority of people that will ever exist will exist in the future. <\/li><li>People alive today can predictably influence whether these people exist and how well their lives go.<\/li><\/ol><h2>Types of longtermism<\/h2><h3>Strong vs. weak longtermism<\/h3><p>Strong longtermism holds that positively influencing the long-term future is the key moral priority of our time. 
This form of longtermism was introduced by <a href=\"https://forum.effectivealtruism.org/topics/hilary-greaves\">Hilary Greaves<\/a> and <a href=\"https://forum.effectivealtruism.org/topics/william-macaskill\">Will MacAskill<\/a>,<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref4lqrm1tu6v4\"><sup><a href=\"#fn4lqrm1tu6v4\">[4]<\/a><\/sup><\/span> and has precedents in the work of <a href=\"https://forum.effectivealtruism.org/tag/nick-bostrom\">Nick Bostrom<\/a>,<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefijdcuqtcsja\"><sup><a href=\"#fnijdcuqtcsja\">[5]<\/a><\/sup><\/span><span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefrxubxlbto4l\"><sup><a href=\"#fnrxubxlbto4l\">[6]<\/a><\/sup><\/span> <a href=\"https://forum.effectivealtruism.org/topics/nick-beckstead\">Nick Beckstead<\/a>,<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefohggtozamwg\"><sup><a href=\"#fnohggtozamwg\">[7]<\/a><\/sup><\/span><span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref1j6xqn8ih7f\"><sup><a href=\"#fn1j6xqn8ih7f\">[8]<\/a><\/sup><\/span> and others. The authors do not define or discuss \"weak\" longtermism; the contrast is rather with longtermism as such, which as noted above holds that positively influencing the long-term future is a key priority, but not necessarily the top priority. <\/p><h3>Patient vs. urgent longtermism<\/h3><p>This distinction can be explained in reference to the <a href=\"https://forum.effectivealtruism.org/topics/hinge-of-history\">hinge of history hypothesis<\/a>, i.e., the hypothesis that we are currently living at a time when humanity has unusually high influence over the long-term future. Urgent longtermists find the hypothesis plausible and, accordingly, hold that it makes sense to spend altruistic resources relatively quickly. 
(Altruistic resources include not just financial assets, but other resources that can accumulate and be spent deliberately in the pursuit of altruistic goals, such as credibility, <a href=\"https://forum.effectivealtruism.org/topics/career-capital\">career capital<\/a> and <a href=\"https://forum.effectivealtruism.org/topics/altruistic-coordination\">coordination ability<\/a>.) By contrast, patient longtermists hold that the opportunities for influence are not concentrated in the near term and, in line with this, favour investing these resources so that they can be deployed at some point in the future, when the moments of significant influence arrive.<\/p><h3>Broad vs. targeted longtermism<\/h3><p>This distinction between <a href=\"https://forum.effectivealtruism.org/topics/broad-vs-narrow-interventions\">broad and targeted interventions<\/a> was originally introduced by <a href=\"https://forum.effectivealtruism.org/topics/nick-beckstead\">Nick Beckstead<\/a> in his doctoral dissertation, <i>On the Overwhelming Importance of Shaping the Far Future<\/i>.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref73581e7p8q\"><sup><a href=\"#fn73581e7p8q\">[9]<\/a><\/sup><\/span> Targeted (or narrow) longtermism attempts to positively influence the long-term future by focusing on specific, identifiable scenarios, such as the risks of <a href=\"https://forum.effectivealtruism.org/topics/ai-risk\">misaligned AI<\/a> or an <a href=\"https://forum.effectivealtruism.org/topics/biosecurity\">engineered pandemic<\/a>. By contrast, br... 
<\/p>"},{"@type":"Thing","name":"Longtermist institutional reform","url":"https://forum.effectivealtruism.org/topics/longtermist-institutional-reform","description":"<p><strong>Longtermist institutional reform<\/strong> is research on how institutions can better represent the interests of future generations in the political process.<\/p><h2>Further reading<\/h2><p>Baumann, Tobias (2020) <a href=\"https://centerforreducingsuffering.org/representing-future-generations-in-the-political-process/?utm_source=rss&utm_medium=rss&utm_campaign=representing-future-generations-in-the-political-process\">Representing future generations in the political process<\/a>, <i>Center for Reducing Suffering<\/i>, June 25.<\/p><p>González-Ricoy, Iñigo & Axel Gosseries (eds.) (2016) <a href=\"http://doi.org/10.1093/acprof:oso/9780198746959.003.0001\">Designing institutions for future generations<\/a>, in <i>Institutions for Future Generations<\/i>, Oxford: Oxford University Press, pp. 3–23.<\/p><p>Goth, Aidan & Matt Lerner (2022) <a href=\"https://www.founderspledge.com/stories/longtermist-institutional-reform\">Longtermist institutional reform<\/a>, <i>Founders Pledge<\/i>, January 12.<\/p><p>Jacobs, Alan M. (2011) <a href=\"https://en.wikipedia.org/wiki/Special:BookSources/9780521171779\"><i>Governing for the Long Term: Democracy and the Politics of Investment<\/i><\/a>, Cambridge: Cambridge University Press.<\/p><p>Jacobs, Alan M. (2016) <a href=\"http://doi.org/10.1146/annurev-polisci-110813-034103\">Policy making for the long term in advanced democracies<\/a>, <i>Annual Review of Political Science<\/i>, vol. 19, pp. 
433–454.<\/p><p>John, Tyler (2019) <a href=\"https://forum.effectivealtruism.org/posts/op93xvHkJ5KvCrKaj/institutions-for-future-generations\">Institutions for future generations<\/a>, <i>Effective Altruism Forum<\/i>, November 11.<\/p><p>John, Tyler (2021) <a href=\"https://philpapers.org/rec/JOHEFP\">Empowering future people by empowering the young?<\/a>, in <i>Ageing without Ageism: Conceptual Puzzles and Policy Proposals<\/i>, Oxford: Oxford University Press, forthcoming.<\/p><p>John, Tyler & William MacAskill (2021) <a href=\"https://en.wikipedia.org/wiki/Special:BookSources/978-0-9957281-8-9\">Longtermist institutional reform<\/a>, in Natalie Cargill & Tyler John (eds.) <i>The Long View: Essays on Policy, Philanthropy, and the Long-Term Future<\/i>, London: First, pp. 45–60.<\/p><p>Jones, Natalie, Mark O’Brien & Thomas Ryan (2018) <a href=\"http://doi.org/10.1016/j.futures.2018.01.007\">Representation of future generations in United Kingdom policy-making<\/a>, <i>Futures<\/i>, vol. 102, pp. 153–163.<\/p><p>Krznaric, Roman (2019) <a href=\"https://www.bbc.com/future/article/20190318-can-we-reinvent-democracy-for-the-long-term\">Why we need to reinvent democracy for the long-term<\/a>, <i>BBC Future<\/i>, March 18.<\/p><p>MacAskill, William (2019) <a href=\"https://forum.effectivealtruism.org/posts/b7BrGrswgANP3eRzd/age-weighted-voting\">Age-weighted voting<\/a>, <i>Effective Altruism Forum<\/i>, July 12.<\/p><p>McKinnon, Catriona (2017) <a href=\"http://doi.org/10.1080/00455091.2017.1280381\">Endangering humanity: an international crime?<\/a>, <i>Canadian Journal of Philosophy<\/i>, vol. 47, pp. 395–415.<\/p><p>Moorhouse, Fin & Luca Righetti (2021) <a href=\"https://a764aa28-8f1b-4abd-ad69-eed71af9e23a.filesusr.com/ugd/b589e0_6cc51397ac4b4d78b2f68d8f489b0847.pdf\">Institutions for the long run: taking future generations seriously in government<\/a>, <i>Cambridge Journal of Law, Politics, and Art<\/i>, vol. 1, pp. 
430–437.<\/p><p>Nesbit, Martin & Andrea Illés (2015) <a href=\"http://www.worldfuturecouncil.org/wp-content/uploads/2016/02/IEEP_WFC_2016_Establishing_an_EU_Guardian_for_Future_Generations.pdf\">Establishing an EU “Guardian for future generations”. Report and recommendations for the World Future Council<\/a>, <i>Institute for European Environmental Policy<\/i>, London.<\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/all-party-parliamentary-group-for-future-generations\">All-Party Parliamentary Group for Future Generations<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/electoral-reform\">electoral reform<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/improving-institutional-decision-making\">improving institutional decision-making<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/longtermism\">longtermism<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/policy-change\">policy change<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/research-institute-for-future-design\">Research Institute for Future Design<\/a><\/p>"},{"@type":"Thing","name":"Long-term future","url":"https://forum.effectivealtruism.org/topics/long-term-future","description":"<p>The <strong>long-term future<\/strong> focuses on possible ways in which the future of humanity may unfold over long timescales.<\/p><h2>Bostrom's typology of possible scenarios<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/nick-bostrom\">Nick Bostrom<\/a> has identified four broad possibilities for the future of humanity.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefms8bxsox85m\"><sup><a href=\"#fnms8bxsox85m\">[1]<\/a><\/sup><\/span><\/p><p>First, humans may go prematurely <a href=\"https://forum.effectivealtruism.org/tag/human-extinction\">extinct<\/a>. Since the universe will eventually become inhospitable, extinction is inevitable in the very long run. 
However, it is also plausible that people will die out far before this deadline.<\/p><p>Second, human civilization may plateau, reaching a level of technological advancement beyond which no further advancement is feasible.<\/p><p>Third, human civilization may experience recurrent <a href=\"https://forum.effectivealtruism.org/tag/civilizational-collapse\">collapse<\/a>, undergoing repeated declines or catastrophes that prevent it from moving beyond a certain level of advancement.<\/p><p>Fourth, human civilization may advance so significantly as to become nearly unrecognizable. Bostrom conceptualizes this scenario as a “posthuman” era where people have developed significantly different cognitive abilities, population sizes, body types, sensory or emotional experiences, or life expectancies.<\/p><h2>Further reading<\/h2><p>Baum, Seth D. <i>et al.<\/i> (2019) <a href=\"http://doi.org/10.1108/FS-04-2018-0037\">Long-term trajectories of human civilization<\/a>, <i>Foresight<\/i>, vol. 21, pp. 53–83.<\/p><p>Bostrom, Nick (2009) <a href=\"http://doi.org/10.1057/9780230227279_10\">The future of humanity<\/a>, in Jan Kyrre Berg Olsen, Evan Selinger & Søren Riis (eds.) <i>New Waves in Philosophy of Technology<\/i>, London: Palgrave Macmillan, pp. 
186–215.<\/p><p>Hanson, Robin (1998) <a href=\"http://mason.gmu.edu/~rhanson/longgrow.pdf\">Long-term growth as a sequence of exponential modes<\/a>, working paper, George Mason University (updated December 2000).<\/p><p>Roodman, David (2020) <a href=\"https://www.openphilanthropy.org/blog/modeling-human-trajectory\">Modeling the human trajectory<\/a>, <i>Open Philanthropy<\/i>, June 15.<\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/longtermism\">longtermism<\/a> | <a href=\"/tag/non-humans-and-the-long-term-future\">non-humans and the long-term future<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/space-colonization\">space colonization<\/a><\/p><ol class=\"footnotes\" role=\"doc-endnotes\"><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnms8bxsox85m\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnrefms8bxsox85m\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Bostrom, Nick (2009) <a href=\"http://doi.org/10.1057/9780230227279_10\">The future of humanity<\/a>, in Jan Kyrre Berg Olsen, Evan Selinger & Søren Riis (eds.) <i>New Waves in Philosophy of Technology<\/i>, London: Palgrave Macmillan, pp. 
186–215.<\/p><\/div><\/li><\/ol>"}],"author":[{"@type":"Person","name":"Henrik Øberg Myhre","url":"https://forum.effectivealtruism.org/users/henrik-oberg-myhre"},{"@type":"Person","name":"Andreas_Massey","url":"https://forum.effectivealtruism.org/users/henrik-oberg-myhre"},{"@type":"Person","name":"Philip Hall Andersen","url":"https://forum.effectivealtruism.org/users/henrik-oberg-myhre"},{"@type":"Person","name":"Jakob","url":"https://forum.effectivealtruism.org/users/henrik-oberg-myhre"},{"@type":"Person","name":"Sanna Baug Warholm","url":"https://forum.effectivealtruism.org/users/henrik-oberg-myhre"}],"comment":[{"@type":"Comment","text":"<p>I'm skeptical of this framework because in reality part 2 seems optional - we don't need to reshape the political system to be more longtermist in order to make progress. For instance, those Open Phil recommendations like land use reform can be promoted thru conventional forms of lobbying and coalition building.<\/p><p>In fact, a vibrant and policy-engaged EA community that focuses on understandable short and medium term problems can itself become a fairly effective long-run institution, thus reducing the needs in part 1.<\/p><p>Additionally, while substantively defining a good society for the future may be difficult, we also have the option of defining it procedurally. The simplest example is that we can promote things like democracy or other mechanisms which tend to produce good outcomes. Or we can increase levels of compassion and rationality so that the architects of future societies will act better. 
This is sort of what you describe in part 2, but I'd emphasize that we can make political institutions which are generically better rather than specifically making them more longtermist.<\/p><p>This is not to say that anything in this post is a bad idea, just that there are more options for meeting longtermist goals.<\/p>","datePublished":"2021-02-14T05:15:27.605Z","author":[{"@type":"Person","name":"kbog","url":"https://forum.effectivealtruism.org/users/kbog","interactionStatistic":[{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/CommentAction"},"userInteractionCount":935},{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/WriteAction"},"userInteractionCount":64}]}],"comment":[{"@type":"Comment","text":"<p>Thank you for your feedback kbog.<\/p><p>First, we certainly agree that there are other options that have a limited influence on the future, however, for this article we wanted to only cover areas with a potential for outsized impact on the future. That is the reason we have confined ourselves to so few categories. <\/p><p>Second, there may be categories of interventions that are not addressed in our framework that are as important for improving the future as the interventions we list. If so, we welcome discussion on this topic, and hope that the framework can encourage productive discussion to identify such “intervention X”’s. <\/p><p>Third, I'm a bit confused about how we would focus on “processes that produce good outcomes” without first defining what we mean with good outcomes, and how to measure them?<\/p><p>Fourth, your point on taking the “individual more in focus” by emphasizing rationality and altruism improvement is a great suggestion. Admittedly, this may indeed be a potential lever to improve the future that we haven't sufficiently covered in our post as we were mostly concerned with improving institutions. 
<\/p><p>Lastly, as for improving political institutions more broadly, see our part on progress.<\/p>","datePublished":"2021-02-16T12:38:28.508Z","author":[{"@type":"Person","name":"Andreas_Massey","url":"https://forum.effectivealtruism.org/users/andreas_massey","interactionStatistic":[{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/CommentAction"},"userInteractionCount":4},{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/WriteAction"},"userInteractionCount":0}]}],"comment":[{"@type":"Comment","text":"<p>I think it's really not clear that reforming institutions to be more longtermist has an outsized long run impact compared to many other axes of institutional reform.<\/p><p>We know what constitutes good outcomes in the short run, so if we can design institutions to produce better short run outcomes, that will be beneficial in the long run insofar as those institutions endure into the long run. Institutional changes are inherently long-run.<\/p>","datePublished":"2021-02-21T03:51:58.089Z","author":[{"@type":"Person","name":"kbog","url":"https://forum.effectivealtruism.org/users/kbog","interactionStatistic":[{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/CommentAction"},"userInteractionCount":935},{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/WriteAction"},"userInteractionCount":64}]}],"comment":[{"@type":"Comment","text":"<p>The part of the article that you are referring to is in part inspired by John and MacAskills paper “longtermist institutional reform”, where they propose reforms that are built to tackle political short-termism. The case for this relies on two assumptions:<\/p><p>1. Long term consequences have an outsized moral importance, despite the uncertainty of long-term effects.<br>2. Because of this, political decision making should be designed to optimize for longterm outcomes. 
<\/p><p>Greaves and MacAskill have written a <a href=\"https://globalprioritiesinstitute.org/hilary-greaves-william-macaskill-the-case-for-strong-longtermism/\">paper<\/a> arguing for assumption 1: \"Because of the vast number of expected people in the future, it is quite plausible that for options that are appropriately chosen from a sufficiently large choice set, effects on the very long future dominate ex ante evaluations, even after taking into account the fact that further-future effects tend to be the most uncertain…“. We seem to agree on this assumption, but disagree on assumption 2. If I understand your argument against assumption 2, it assumes that there are no tradeoffs between optimizing for short-run outcomes and long-run outcomes. This assumption seems clearly false to us, and is implied to be false in “Longtermist institutional reform”. Consider fiscal policies for example: In the short run it could be beneficial to take all the savings in pension funds and spend them to boost the economy, but in the long run this is predictably harmful because many people will not afford to retire.<\/p>","datePublished":"2021-03-02T12:30:39.679Z","author":[{"@type":"Person","name":"Andreas_Massey","url":"https://forum.effectivealtruism.org/users/andreas_massey","interactionStatistic":[{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/CommentAction"},"userInteractionCount":4},{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/WriteAction"},"userInteractionCount":0}]}],"comment":[{"@type":"Comment","text":"<p>No I agree on 2! 
I'm just saying even from a longtermist perspective, it may not be as important and tractable as improving institutions in orthogonal ways.<\/p>","datePublished":"2021-03-04T00:49:47.045Z","author":[{"@type":"Person","name":"kbog","url":"https://forum.effectivealtruism.org/users/kbog","interactionStatistic":[{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/CommentAction"},"userInteractionCount":935},{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/WriteAction"},"userInteractionCount":64}]}]}]}]}]}]},{"@type":"Comment","text":"<p>Interesting writeup!<\/p><p>Depending on your intended audience, it might make sense to add more details for some of the proposals. For example, why is scenario planning a good idea compared to other methods of decision making? Is there a compelling story, or strong empirical evidence for its efficacy? <\/p><p>Some small nitpicks: <\/p><p>There seems to be a mistake here: <\/p><p>\"Bostrom argues in <a href=\"https://www.sciencedirect.com/science/article/pii/S0016328720300604\"><u>The Fragile World Hypothesis<\/u><\/a> that continuous technological development will increase systemic fragility, which can be a source of catastrophic or existential risk. In the Precipice, he estimates the chances of existential catastrophe within the next 100 years at one in six.\"<\/p><p>I also find this passage a bit odd: <\/p><p>\"One example of moral cluelessness is the repugnant conclusion, which assumes that by adding more people to the world, and proportionally staying above a given average in happiness, one can reach a state of minimal happiness for an infinitely large population.\"<\/p><p>The repugnant conclusion might motivate someone to think about cluelessness, but it does not really seem to be an example of cluelessness (the question whether we should accept it might or might not be). 
<\/p>","datePublished":"2021-02-12T16:59:58.944Z","author":[{"@type":"Person","name":"axioman","url":"https://forum.effectivealtruism.org/users/axioman","interactionStatistic":[{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/CommentAction"},"userInteractionCount":64},{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/WriteAction"},"userInteractionCount":1}]}],"comment":[{"@type":"Comment","text":"<p>Thank you for your feedback, Flodorner! <\/p><p>First, we certainly agree that a more detailed description could be productive for some of the topics in this piece, including your example on scenario planning and other decision making methods. At more than 6000 words this is already a long piece, so we were aiming to limit the level of detail to what we felt was necessary to explain the proposed framework, without necessarily justifying all nuances. Depending on what the community believes is most useful, we are happy to write follow-up pieces with either a higher level of detail for a selected few topics of particular interest (for a more technical discussion on e.g. decision making methods), or a summary piece covering all topics with a lower level of detail (to explain the same framework to non-experts). <\/p><p>As for your second issue you are completely correct, it has been corrected. <\/p><p>Regarding your last point, we also agree that the repugnant conclusion is not an example of cluelessness in itself. However, the lack of consensus about how to solve the repugnant conclusion is one example of how we still have things to figure out in terms of population ethics (i. e. 
are morally clueless in this area).<\/p>","datePublished":"2021-02-16T14:14:53.299Z","author":[{"@type":"Person","name":"Andreas_Massey","url":"https://forum.effectivealtruism.org/users/andreas_massey","interactionStatistic":[{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/CommentAction"},"userInteractionCount":4},{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/WriteAction"},"userInteractionCount":0}]}]}]}],"interactionStatistic":[{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/CommentAction"},"userInteractionCount":7},{"@type":"InteractionCounter","interactionType":{"@type":"http://schema.org/LikeAction"},"userInteractionCount":54}]}</script><meta name="twitter:card" content="summary"/><script>window.themeOptions = {"name":"auto"}</script><style id="jss-insertion-point"></style><style id="main-styles">@import url("/allStyles?hash=a6a9515db1ba16193f34056f0c011fc99b1ce185f6735396cfe30bb36e20ad83&theme=%7B%22name%22%3A%22default%22%7D") screen and (prefers-color-scheme: light); @import url("/allStyles?hash=1fd42b42bd8d553bd58b8c654a6043b754b1cd4f5545af624cf81556fee7bd22&theme=%7B%22name%22%3A%22dark%22%7D") screen and (prefers-color-scheme: dark); @import url("/allStyles?hash=a6a9515db1ba16193f34056f0c011fc99b1ce185f6735396cfe30bb36e20ad83&theme=%7B%22name%22%3A%22default%22%7D") print; </style></head> <body class="abTestNoEffect_group1 collectionsPageABTest_largeSequenceItemGroup booksProgressBarABTest_progressBar welcomeBoxABTest_welcomeBox twoLineEventsSidebar_control dialogueFacilitationMessages_optIn frontpageDialogueReciprocityRecommendations_show showOpinionsInReciprocity_noShow showRecommendedContentInMatchForm_show checkNotificationMessageContent_v3 newFrontpagePostFeedsWithRecommendationsOptIn_classicFrontpage"> <script>0</script><div id="react-app"><div class="wrapper" id="wrapper"><div></div><span></span><noscript class="noscript-warning"> This website requires javascript 
to properly function. Consider activating javascript to get access to all site functionality. </noscript><noscript><iframe src="https://www.googletagmanager.com/ns.html?id=GTM-5VK8D73" height="0" width="0" style="display:none;visibility:hidden"></iframe></noscript><div class="Header-root"><div style="height:66px" class="Header-headroom headroom-wrapper"><div class="headroom headroom--unfixed"><header class="Header-appBar Header-appBarDarkBackground" style="background:#fff"><div style="background-image:url(https://res.cloudinary.com/cea/image/upload/v1730143996/Rectangle_5069.jpg)" class="Header-gsBackground Header-gsBackgroundActive"></div><div class="MuiToolbar-root MuiToolbar-regular"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root MuiIconButton-colorInherit Header-menuButton" type="button" aria-label="Menu"><span class="MuiIconButton-label"><svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg" class="ForumIcon-root"><path d="M2 5H18M2 10.25H18M2 15.5H18" stroke="currentColor" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></span><span class="MuiTouchRipple-root"></span></button><h2 class="Typography-root Typography-title Header-title"><div class="Header-hideSmDown"><div class="Header-titleSubtitleContainer"><a class="Header-titleLink" href="/"><div class="Header-siteLogo"><div class="SiteLogo-icon"><svg version="1.1" x="0px" y="0px" viewBox="0 0 1200 1200"><g transform="translate(0.000000,1200.000000) scale(0.100000,-0.100000)" fill="currentColor" stroke="none"><path d="M5783 11560 c-712 -53 -1385 -317 -1948 -766 -141 -113 -426 -398 -539 -539 -286 -359 -508 -784 -631 -1205 -187 -645 -188 -1318 -5 -1959 134 -466 350 -874 675 -1276 220 -272 342 -531 510 -1080 144 -469 443 -749 903 -842 76 -16 147 -18 552 -18 l465 0 87 31 c300 107 650 497 1243 1384 211 315 288 446 410 695 343 700 460 1210 374 1638 -87 436 -381 741 -817 849 -120 30 -347 32 -442 4 -152 -44 -370 -168 -508 -290 
-35 -31 -66 -56 -69 -56 -3 0 -43 36 -89 79 -148 143 -265 218 -424 272 -90 31 -103 33 -235 33 -540 -1 -969 -332 -1100 -847 -107 -422 -1 -935 294 -1421 160 -265 407 -576 680 -859 119 -123 172 -152 281 -151 126 0 214 55 270 166 26 52 30 72 30 137 0 109 -24 153 -159 296 -560 590 -824 1064 -824 1485 0 133 15 204 63 306 83 174 254 274 465 274 142 -1 253 -64 390 -222 40 -45 41 -47 28 -83 -29 -83 -45 -209 -41 -320 3 -98 7 -117 34 -175 86 -181 269 -276 409 -211 97 45 189 161 227 285 18 59 20 84 15 175 -4 58 -15 142 -26 186 l-19 79 62 61 c111 109 278 197 400 211 122 14 277 -38 370 -126 106 -100 150 -224 149 -420 0 -283 -108 -621 -359 -1122 -255 -510 -577 -970 -1035 -1479 -202 -225 -240 -248 -414 -256 -167 -8 -633 13 -688 31 -179 58 -296 184 -360 391 -90 288 -124 391 -157 475 -140 351 -276 589 -490 855 -412 512 -627 1087 -647 1735 -15 478 82 932 288 1355 370 759 1070 1331 1879 1535 283 72 502 95 804 87 439 -12 804 -102 1201 -297 634 -311 1135 -854 1401 -1516 97 -242 159 -489 190 -760 20 -171 14 -570 -10 -734 -91 -603 -320 -1139 -666 -1555 -292 -351 -490 -748 -613 -1230 -80 -315 -114 -577 -129 -1005 -3 -59 -9 -88 -32 -135 -129 -264 -651 -476 -1416 -575 -305 -39 -798 -70 -1137 -70 -148 0 -209 -19 -275 -83 -141 -137 -124 -367 36 -476 81 -56 111 -59 686 -65 551 -5 758 -14 1080 -46 495 -49 772 -136 870 -272 34 -47 37 -65 12 -94 -84 -102 -373 -165 -882 -194 -102 -6 -518 -13 -925 -16 l-740 -4 -63 -31 c-230 -112 -227 -437 6 -551 l67 -33 555 -7 c666 -7 909 -25 1200 -84 402 -83 662 -241 860 -522 70 -100 152 -145 265 -146 128 0 242 76 286 192 27 74 23 186 -9 249 -62 120 -296 381 -424 473 -27 18 -48 36 -47 39 0 3 32 22 70 41 91 45 213 142 266 211 134 175 182 343 150 531 -18 103 -68 227 -128 314 -79 116 -233 235 -401 313 l-83 38 81 41 c277 139 495 345 600 567 68 142 83 223 94 495 17 437 79 757 210 1089 101 257 213 448 381 649 456 550 733 1224 820 1992 17 150 18 601 1 745 -20 169 -49 326 -87 482 -352 1415 -1555 2472 -3005 2638 -170 19 -488 26 -647 15z m197 -3504 c0 -2 -8 -10 -17 -17 -16 
-13 -17 -12 -4 4 13 16 21 21 21 13z"></path></g></svg></div></div>Effective Altruism Forum</a></div></div><div class="Header-hideMdUp"><a class="Header-titleLink" href="/"><div class="Header-siteLogo"><div class="SiteLogo-icon"><svg version="1.1" x="0px" y="0px" viewBox="0 0 1200 1200"><g transform="translate(0.000000,1200.000000) scale(0.100000,-0.100000)" fill="currentColor" stroke="none"><path d="M5783 11560 c-712 -53 -1385 -317 -1948 -766 -141 -113 -426 -398 -539 -539 -286 -359 -508 -784 -631 -1205 -187 -645 -188 -1318 -5 -1959 134 -466 350 -874 675 -1276 220 -272 342 -531 510 -1080 144 -469 443 -749 903 -842 76 -16 147 -18 552 -18 l465 0 87 31 c300 107 650 497 1243 1384 211 315 288 446 410 695 343 700 460 1210 374 1638 -87 436 -381 741 -817 849 -120 30 -347 32 -442 4 -152 -44 -370 -168 -508 -290 -35 -31 -66 -56 -69 -56 -3 0 -43 36 -89 79 -148 143 -265 218 -424 272 -90 31 -103 33 -235 33 -540 -1 -969 -332 -1100 -847 -107 -422 -1 -935 294 -1421 160 -265 407 -576 680 -859 119 -123 172 -152 281 -151 126 0 214 55 270 166 26 52 30 72 30 137 0 109 -24 153 -159 296 -560 590 -824 1064 -824 1485 0 133 15 204 63 306 83 174 254 274 465 274 142 -1 253 -64 390 -222 40 -45 41 -47 28 -83 -29 -83 -45 -209 -41 -320 3 -98 7 -117 34 -175 86 -181 269 -276 409 -211 97 45 189 161 227 285 18 59 20 84 15 175 -4 58 -15 142 -26 186 l-19 79 62 61 c111 109 278 197 400 211 122 14 277 -38 370 -126 106 -100 150 -224 149 -420 0 -283 -108 -621 -359 -1122 -255 -510 -577 -970 -1035 -1479 -202 -225 -240 -248 -414 -256 -167 -8 -633 13 -688 31 -179 58 -296 184 -360 391 -90 288 -124 391 -157 475 -140 351 -276 589 -490 855 -412 512 -627 1087 -647 1735 -15 478 82 932 288 1355 370 759 1070 1331 1879 1535 283 72 502 95 804 87 439 -12 804 -102 1201 -297 634 -311 1135 -854 1401 -1516 97 -242 159 -489 190 -760 20 -171 14 -570 -10 -734 -91 -603 -320 -1139 -666 -1555 -292 -351 -490 -748 -613 -1230 -80 -315 -114 -577 -129 -1005 -3 -59 -9 -88 -32 -135 -129 -264 -651 -476 -1416 -575 -305 -39 -798 -70 -1137 -70 
-148 0 -209 -19 -275 -83 -141 -137 -124 -367 36 -476 81 -56 111 -59 686 -65 551 -5 758 -14 1080 -46 495 -49 772 -136 870 -272 34 -47 37 -65 12 -94 -84 -102 -373 -165 -882 -194 -102 -6 -518 -13 -925 -16 l-740 -4 -63 -31 c-230 -112 -227 -437 6 -551 l67 -33 555 -7 c666 -7 909 -25 1200 -84 402 -83 662 -241 860 -522 70 -100 152 -145 265 -146 128 0 242 76 286 192 27 74 23 186 -9 249 -62 120 -296 381 -424 473 -27 18 -48 36 -47 39 0 3 32 22 70 41 91 45 213 142 266 211 134 175 182 343 150 531 -18 103 -68 227 -128 314 -79 116 -233 235 -401 313 l-83 38 81 41 c277 139 495 345 600 567 68 142 83 223 94 495 17 437 79 757 210 1089 101 257 213 448 381 649 456 550 733 1224 820 1992 17 150 18 601 1 745 -20 169 -49 326 -87 482 -352 1415 -1555 2472 -3005 2638 -170 19 -488 26 -647 15z m197 -3504 c0 -2 -8 -10 -17 -17 -16 -13 -17 -12 -4 4 13 16 21 21 21 13z"></path></g></svg></div></div>EA Forum</a></div></h2><div class="Header-gsBanner"><a href="/posts/srZEX2r9upbwfnRKw/giving-season-2024-announcement">GIVING SEASON 2024</a></div><div class="Header-rightHeaderItems Header-gsRightHeaderItems"><div class="SearchBar-root"><div class="SearchBar-rootChild"><div class="SearchBar-searchInputArea SearchBar-searchInputAreaSmall"><div><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root SearchBar-searchIcon SearchBar-searchIconSmall" type="button"><span class="MuiIconButton-label"><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" aria-hidden="true" class="ForumIcon-root"><path stroke-linecap="round" stroke-linejoin="round" d="M21 21l-5.197-5.197m0 0A7.5 7.5 0 105.196 5.196a7.5 7.5 0 0010.607 10.607z"></path></svg></span><span class="MuiTouchRipple-root"></span></button></div><div></div></div></div></div><div class="UsersAccountMenu-root"><button tabindex="0" class="MuiButtonBase-root MuiButton-root MuiButton-contained MuiButton-containedPrimary MuiButton-raised MuiButton-raisedPrimary EAButton-root UsersAccountMenu-login 
EAButton-variantContained EAButton-greyContained" type="button" data-testid="user-login-button"><span class="MuiButton-label">Login</span><span class="MuiTouchRipple-root"></span></button><button tabindex="0" class="MuiButtonBase-root MuiButton-root MuiButton-contained MuiButton-containedPrimary MuiButton-raised MuiButton-raisedPrimary EAButton-root UsersAccountMenu-signUp EAButton-variantContained" type="button" data-testid="user-signup-button"><span class="MuiButton-label">Sign up</span><span class="MuiTouchRipple-root"></span></button></div></div></div></header></div></div></div><div class="Layout-standaloneNavFlex"><div class="Layout-searchResultsArea"></div><div class="Layout-main Layout-whiteBackground Layout-mainNoFooter"><div class="flash-messages FlashMessages-root"></div><div><div class="PostsPage-readingProgressBar"></div><div class="ToCColumn-root ToCColumn-tocActivated"><div class="ToCColumn-hideTocButton ToCColumn-hideTocButtonHidden"><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" aria-hidden="true" class="ForumIcon-root"><path stroke-linecap="round" stroke-linejoin="round" d="M8.25 6.75h12M8.25 12h12m-12 5.25h12M3.75 6.75h.007v.008H3.75V6.75zm.375 0a.375.375 0 11-.75 0 .375.375 0 01.75 0zM3.75 12h.007v.008H3.75V12zm.375 0a.375.375 0 11-.75 0 .375.375 0 01.75 0zm-.375 5.25h.007v.008H3.75v-.008zm.375 0a.375.375 0 11-.75 0 .375.375 0 01.75 0z"></path></svg>Hide<!-- --> table of contents</div><div class="ToCColumn-header"><div class="PostsPage-title"><div class="PostsPage-centralColumn"><div class="PostsPagePostHeader-header"><div class="PostsPagePostHeader-headerLeft"><div><h1 class="Typography-root Typography-display3 PostsPageTitle-root"><a class="PostsPageTitle-link" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1">Objectives of longtermist policy<!-- --> <span class="PostsPageTitle-lastWord">making</span></a></h1></div><div 
class="PostsPagePostHeader-authorAndSecondaryInfo"><div class="PostsPagePostHeader-authorInfo"><div class="PostsPagePostHeader-authors"><span class="Typography-root Typography-body1 PostsAuthors-root">by <span class="PostsAuthors-authorName"><span><span><span class=""><a class="UsersNameDisplay-noColor" href="/users/henrik-oberg-myhre?from=post_header">Henrik Øberg Myhre</a></span></span></span>, <span><span><span class=""><a class="UsersNameDisplay-noColor" href="/users/andreas_massey?from=post_header">Andreas_Massey</a></span></span></span>, <span><span><span class=""><a class="UsersNameDisplay-noColor" href="/users/philiphand?from=post_header">Philip Hall Andersen</a></span></span></span>, <span><span><span class=""><a class="UsersNameDisplay-noColor" href="/users/jakob?from=post_header">Jakob</a></span></span></span>, <span><span><span class=""><a class="UsersNameDisplay-noColor" href="/users/sanna-baug-warholm?from=post_header">Sanna Baug Warholm</a></span></span></span><span class="PostsCoauthor-markers"><span class="LWTooltip-root UserCommentMarkers-iconWrapper"><svg width="16" height="14" viewBox="0 0 16 14" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="UserCommentMarkers-sproutIcon ForumIcon-root"><path d="M15.3149 0.657185C15.2929 0.557739 15.2031 0.48933 15.1013 0.491891C15.062 0.493247 14.1257 0.529711 12.9513 0.858488C11.3849 1.29711 10.1504 2.0538 9.38118 3.04691C8.61107 4.04121 8.2035 5.43606 8.20215 7.08091C8.20125 8.31314 8.42335 9.24148 8.43284 9.28036C8.45589 9.37529 8.54088 9.44144 8.63776 9.44144C8.64138 9.44144 8.64484 9.44129 8.64846 9.44113C8.68688 9.43918 9.60045 9.38976 10.7516 9.04788C11.8215 8.72995 13.3008 8.08806 14.2716 6.83458C15.0408 5.84147 15.4646 4.45688 15.4975 2.83067C15.5222 1.61156 15.3233 0.695708 15.3148 0.657289L15.3149 0.657185Z" fill="currentColor"></path><path d="M0.873781 1.87378C0.793923 1.87378 0.749926 1.87559 0.749926 1.87559C0.64837 1.87996 0.563991 1.95605 0.549676 2.05671C0.533705 2.16956 
0.174639 4.84 1.65068 6.47104C2.94242 7.89856 5.21902 8.00946 5.88603 8.00946C5.96574 8.00946 6.00973 8.00765 6.00973 8.00765C6.1137 8.00343 6.19944 7.92403 6.21104 7.82051C6.22385 7.70765 6.50637 5.03872 5.03274 3.41042C3.74235 1.9844 1.52318 1.87366 0.873762 1.87366L0.873781 1.87378Z" fill="currentColor"></path><path d="M13.3482 3.93867C13.3288 3.94922 11.7628 4.79423 10.3666 6.72363C9.35151 8.12689 8.263 10.3494 8.15257 13.5083C7.94042 13.4028 7.71546 13.3248 7.4834 13.2758C7.50705 12.7704 7.55557 12.275 7.62805 11.7907C7.69887 11.3194 7.79259 10.8588 7.90951 10.409C7.91675 10.3816 7.92428 10.3537 7.93151 10.3263C8.32508 8.85269 8.96771 7.49796 9.84105 6.29887C11.3316 4.25178 12.9647 3.37771 13.0336 3.34137C13.1986 3.25443 13.4023 3.31772 13.4892 3.48271C13.576 3.64785 13.5127 3.85157 13.3482 3.93849V3.93867Z" fill="currentColor"></path><path d="M7.9705 12.1186C7.81304 12.1186 7.67216 12.0078 7.63992 11.8475C6.91123 8.22266 2.65894 4.78127 2.61574 4.74694C2.47033 4.63046 2.44683 4.41801 2.5633 4.27244C2.67977 4.12704 2.89208 4.10339 3.03764 4.21986C3.08329 4.25632 4.16969 5.13038 5.36604 6.48724C6.98597 8.32445 7.97368 10.0832 8.30155 11.7143C8.33831 11.8971 8.21988 12.075 8.03711 12.1118C8.01481 12.1163 7.99236 12.1186 7.97036 12.1186L7.9705 12.1186Z" fill="currentColor"></path></svg></span></span></span></span></div></div><div class="PostsPagePostHeader-secondaryInfo"><div class="PostsPagePostHeader-secondaryInfoLeft"><span class="LWTooltip-root"><span class="PostsPageDate-date"><time dateTime="2021-02-10T18:26:30.881Z">Feb 10 2021</time></span></span><span class="LWTooltip-root"><span class="ReadTime-root">26<!-- --> min read</span></span><span class="LWTooltip-root"><a class="PostsPagePostHeader-secondaryInfoLink"><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" aria-hidden="true" class="PostsPagePostHeader-commentIcon ForumIcon-root"><path stroke-linecap="round" stroke-linejoin="round" d="M2.25 
12.76c0 1.6 1.123 2.994 2.707 3.227 1.087.16 2.185.283 3.293.369V21l4.076-4.076a1.526 1.526 0 011.037-.443 48.282 48.282 0 005.68-.494c1.584-.233 2.707-1.626 2.707-3.228V6.741c0-1.602-1.123-2.995-2.707-3.228A48.394 48.394 0 0012 3c-2.392 0-4.744.175-7.043.513C3.373 3.746 2.25 5.14 2.25 6.741v6.018z"></path></svg> <!-- -->7</a></span></div><div class="PostsPagePostHeader-secondaryInfoRight"><span class="LWTooltip-root"><span class="BookmarkButton-container"><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" aria-hidden="true" class="BookmarkButton-icon PostsPagePostHeader-bookmarkButton ForumIcon-root"><path stroke-linecap="round" stroke-linejoin="round" d="M17.593 3.322c1.1.128 1.907 1.077 1.907 2.185V21L12 17.25 4.5 21V5.507c0-1.108.806-2.057 1.907-2.185a48.507 48.507 0 0111.186 0z"></path></svg> </span></span><div class="SharePostButton-root"><div><span class="LWTooltip-root"><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" aria-hidden="true" class="SharePostButton-icon ForumIcon-root"><path stroke-linecap="round" stroke-linejoin="round" d="M3 16.5v2.25A2.25 2.25 0 005.25 21h13.5A2.25 2.25 0 0021 18.75V16.5m-13.5-9L12 3m0 0l4.5 4.5M12 3v13.5"></path></svg></span></div></div><span class="PostsPagePostHeader-actions"><div class="PostActionsButton-root"><div><svg class="MuiSvgIcon-root PostActionsButton-icon" focusable="false" viewBox="0 0 24 24" aria-hidden="true" role="presentation"><path fill="none" d="M0 0h24v24H0z"></path><path d="M6 10c-1.1 0-2 .9-2 2s.9 2 2 2 2-.9 2-2-.9-2-2-2zm12 0c-1.1 0-2 .9-2 2s.9 2 2 2 2-.9 2-2-.9-2-2-2zm-6 0c-1.1 0-2 .9-2 2s.9 2 2 2 2-.9 2-2-.9-2-2-2z"></path></svg></div></div></span></div></div></div></div><div class="PostsPagePostHeader-headerVote"><div class="PostsVoteDefault-voteBlock"><div class="PostsVoteDefault-upvote" title="Click-and-hold for strong vote (click twice on mobile)"><button tabindex="0" 
class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-up" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></div><div class="PostsVoteDefault-voteScores"><div title="39 Votes"><h1 class="Typography-root Typography-headline PostsVoteDefault-voteScore">54</h1></div></div><div class="PostsVoteDefault-downvote" title="Click-and-hold for strong vote (click twice on mobile)"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-down" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></div></div></div></div><div class="PostsPagePostHeader-headerFooter"><div class="PostsPagePostHeader-tagSection"><span class="FooterTagList-root FooterTagList-allowTruncate FooterTagList-overrideMargins"><span class=""><span class="FooterTag-root FooterTag-core"><a href="/topics/policy"><span 
class="FooterTag-coreIcon"><svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg" class=""><path fill-rule="evenodd" clip-rule="evenodd" d="M6.99876 1.16609L6.95986 1.14063C6.54083 0.880042 5.98417 0.990422 5.69981 1.39873L5.59176 1.55345L5.56449 1.59481C5.44261 1.79036 5.39827 2.02094 5.43963 2.24906C5.46988 2.41721 5.54704 2.5683 5.65703 2.69394L3.54908 5.71685L3.50694 5.70212C3.12603 5.57959 2.69396 5.7091 2.45394 6.05355L2.34627 6.208L2.32068 6.24665C2.05908 6.66389 2.17009 7.21902 2.57996 7.50298L5.66611 9.64092L5.71432 9.67219C5.86126 9.76138 6.02711 9.80765 6.19694 9.80765C6.25252 9.80765 6.30849 9.80261 6.36432 9.79253C6.60977 9.74846 6.82316 9.61196 6.96534 9.40775L7.07326 9.25329L7.10053 9.21193C7.22229 9.01638 7.26675 8.78567 7.22552 8.55754C7.19333 8.37957 7.11087 8.21942 6.99002 8.0903L7.47536 7.39455L13.1255 11.5954L13.1718 11.6257C13.3281 11.7212 13.5024 11.7675 13.6751 11.7675C13.981 11.7675 14.2823 11.6232 14.4698 11.3544L14.7549 10.9455L14.7832 10.9027C14.9092 10.7003 14.9552 10.4618 14.9124 10.2255C14.8667 9.97234 14.7248 9.75221 14.5058 9.60086L8.59227 5.79272L9.08926 5.08043L9.13772 5.09904C9.23557 5.13381 9.33806 5.1532 9.44276 5.1532C9.49795 5.1532 9.55404 5.14803 9.60975 5.13808C9.85533 5.09427 10.0687 4.95764 10.211 4.75343L10.3188 4.59897L10.3461 4.55761C10.468 4.36206 10.5123 4.13148 10.471 3.90335C10.4267 3.65857 10.2897 3.44594 10.0851 3.30402L6.99876 1.16609ZM7.76897 6.9736L8.29878 6.21373L14.22 10.0265L14.2518 10.0507C14.3332 10.1183 14.3876 10.2111 14.4066 10.3163C14.4279 10.4347 14.4017 10.5544 14.3328 10.653L14.0477 11.0619L14.0265 11.09C13.8791 11.2704 13.6141 11.3106 13.4261 11.1798L7.76897 6.9736ZM3.14569 6.17333C3.1709 6.16881 3.1961 6.16674 3.22104 6.16674C3.30583 6.16674 3.38842 6.19209 3.45938 6.24159L6.54576 8.37926L6.57523 8.40162C6.65072 8.46418 6.70151 8.55078 6.71909 8.64823C6.73912 8.75848 6.71469 8.86938 6.65072 8.96115L6.54292 9.1156L6.52043 9.14507C6.45748 9.22042 6.37088 
9.2707 6.27304 9.28828C6.16305 9.30857 6.05137 9.28415 5.95935 9.22042L2.87297 7.08249L2.84557 7.06181C2.67897 6.92492 2.64277 6.68052 2.76815 6.5006L2.87581 6.34614L2.8983 6.31667C2.96125 6.24132 3.04798 6.19091 3.14569 6.17333ZM3.98879 5.98396L6.06135 3.01184L8.65335 4.80741L6.58093 7.7794L3.98879 5.98396ZM6.14405 1.66177C6.20687 1.58642 6.29347 1.53601 6.39157 1.51856C6.50208 1.49905 6.61311 1.52283 6.70513 1.58668L9.79138 3.72435L9.82085 3.74671C9.89659 3.80927 9.94726 3.89561 9.96484 3.99332C9.98461 4.10357 9.96019 4.21447 9.89646 4.30624L9.7888 4.46044L9.76631 4.49004C9.70323 4.56552 9.61663 4.61593 9.51853 4.63338C9.4075 4.65315 9.29699 4.62885 9.20497 4.56526L6.11872 2.42733L6.08925 2.40496C6.01351 2.34241 5.96284 2.25607 5.94526 2.15835C5.92549 2.0481 5.94991 1.93746 6.01364 1.84544L6.12156 1.69124L6.14405 1.66177Z" fill="currentColor"></path><path d="M6.95986 1.14063L6.98725 1.09878L6.98626 1.09817L6.95986 1.14063ZM6.99876 1.16609L7.02725 1.12497L7.02615 1.12425L6.99876 1.16609ZM5.69981 1.39873L5.74081 1.42736L5.74085 1.42731L5.69981 1.39873ZM5.59176 1.55345L5.55075 1.52481L5.55002 1.52592L5.59176 1.55345ZM5.56449 1.59481L5.52273 1.56727L5.52206 1.56836L5.56449 1.59481ZM5.43963 2.24906L5.48884 2.24021L5.48883 2.24014L5.43963 2.24906ZM5.65703 2.69394L5.69805 2.72253L5.72043 2.69044L5.69465 2.661L5.65703 2.69394ZM3.54908 5.71685L3.53257 5.76405L3.56839 5.77657L3.59009 5.74545L3.54908 5.71685ZM3.50694 5.70212L3.52345 5.6549L3.52225 5.65452L3.50694 5.70212ZM2.45394 6.05355L2.49496 6.08214L2.49496 6.08213L2.45394 6.05355ZM2.34627 6.208L2.30524 6.1794L2.30458 6.1804L2.34627 6.208ZM2.32068 6.24665L2.27898 6.21904L2.27832 6.22009L2.32068 6.24665ZM2.57996 7.50298L2.55148 7.54408L2.55149 7.54408L2.57996 7.50298ZM5.66611 9.64092L5.63762 9.68204L5.63889 9.68286L5.66611 9.64092ZM5.71432 9.67219L5.68709 9.71416L5.68837 9.71494L5.71432 9.67219ZM6.36432 9.79253L6.35548 9.74332L6.35544 9.74333L6.36432 9.79253ZM6.96534 9.40775L6.92435 9.37911L6.9243 9.37918L6.96534 
9.40775ZM7.07326 9.25329L7.11426 9.28194L7.115 9.28081L7.07326 9.25329ZM7.10053 9.21193L7.14229 9.23946L7.14298 9.23836L7.10053 9.21193ZM7.22552 8.55754L7.27472 8.54865L7.27472 8.54864L7.22552 8.55754ZM6.99002 8.0903L6.94902 8.06169L6.92585 8.0949L6.95352 8.12447L6.99002 8.0903ZM7.47536 7.39455L7.5052 7.35443L7.46384 7.32368L7.43436 7.36595L7.47536 7.39455ZM13.1255 11.5954L13.0956 11.6356L13.0982 11.6373L13.1255 11.5954ZM13.1718 11.6257L13.1444 11.6676L13.1457 11.6684L13.1718 11.6257ZM14.4698 11.3544L14.4287 11.3258L14.4287 11.3258L14.4698 11.3544ZM14.7549 10.9455L14.7959 10.9741L14.7966 10.9731L14.7549 10.9455ZM14.7832 10.9027L14.8249 10.9303L14.8256 10.9291L14.7832 10.9027ZM14.9124 10.2255L14.8632 10.2344L14.8632 10.2344L14.9124 10.2255ZM14.5058 9.60086L14.5343 9.55971L14.5329 9.55882L14.5058 9.60086ZM8.59227 5.79272L8.55126 5.76411L8.52157 5.80667L8.5652 5.83476L8.59227 5.79272ZM9.08926 5.08043L9.10718 5.03375L9.07064 5.01972L9.04825 5.05182L9.08926 5.08043ZM9.13772 5.09904L9.11979 5.14574L9.12098 5.14616L9.13772 5.09904ZM9.60975 5.13808L9.60097 5.08885L9.60096 5.08886L9.60975 5.13808ZM10.211 4.75343L10.17 4.72481L10.17 4.72484L10.211 4.75343ZM10.3188 4.59897L10.3598 4.62759L10.3606 4.62649L10.3188 4.59897ZM10.3461 4.55761L10.3879 4.58514L10.3885 4.58406L10.3461 4.55761ZM10.471 3.90335L10.4217 3.91224L10.4218 3.91227L10.471 3.90335ZM10.0851 3.30402L10.1136 3.26294L10.1136 3.26292L10.0851 3.30402ZM8.29878 6.21373L8.32585 6.17169L8.28533 6.1456L8.25776 6.18513L8.29878 6.21373ZM7.76897 6.9736L7.72795 6.945L7.70022 6.98479L7.73914 7.01372L7.76897 6.9736ZM14.22 10.0265L14.2503 9.98656L14.247 9.98445L14.22 10.0265ZM14.2518 10.0507L14.2837 10.0122L14.282 10.0109L14.2518 10.0507ZM14.4066 10.3163L14.4558 10.3074L14.4558 10.3074L14.4066 10.3163ZM14.3328 10.653L14.2918 10.6243L14.2918 10.6244L14.3328 10.653ZM14.0477 11.0619L14.0876 11.0921L14.0887 11.0905L14.0477 11.0619ZM14.0265 11.09L14.0652 11.1216L14.0664 11.1201L14.0265 11.09ZM13.4261 11.1798L13.3962 11.2199L13.3975 
11.2208L13.4261 11.1798ZM3.14569 6.17333L3.13686 6.12412L3.13684 6.12412L3.14569 6.17333ZM3.45938 6.24159L3.43077 6.2826L3.43091 6.28269L3.45938 6.24159ZM6.54576 8.37926L6.57601 8.33939L6.57423 8.33816L6.54576 8.37926ZM6.57523 8.40162L6.60717 8.36309L6.60546 8.36179L6.57523 8.40162ZM6.71909 8.64823L6.66988 8.65711L6.66989 8.65717L6.71909 8.64823ZM6.65072 8.96115L6.69172 8.98976L6.69173 8.98974L6.65072 8.96115ZM6.54292 9.1156L6.5827 9.14597L6.58392 9.14422L6.54292 9.1156ZM6.52043 9.14507L6.55884 9.17716L6.56018 9.17541L6.52043 9.14507ZM6.27304 9.28828L6.2642 9.23907L6.26397 9.23911L6.27304 9.28828ZM5.95935 9.22042L5.93088 9.26153L5.93089 9.26153L5.95935 9.22042ZM2.87297 7.08249L2.84282 7.12243L2.8445 7.12359L2.87297 7.08249ZM2.84557 7.06181L2.8138 7.10047L2.81545 7.10172L2.84557 7.06181ZM2.76815 6.5006L2.72713 6.47201L2.72713 6.47201L2.76815 6.5006ZM2.87581 6.34614L2.83603 6.31578L2.8348 6.31755L2.87581 6.34614ZM2.8983 6.31667L2.85989 6.28459L2.85856 6.28634L2.8983 6.31667ZM6.06135 3.01184L6.08982 2.97074L6.04884 2.94235L6.02033 2.98324L6.06135 3.01184ZM3.98879 5.98396L3.94778 5.95536L3.91909 5.9965L3.96032 6.02506L3.98879 5.98396ZM8.65335 4.80741L8.69437 4.83601L8.72305 4.79487L8.68183 4.76631L8.65335 4.80741ZM6.58093 7.7794L6.55246 7.8205L6.59344 7.84888L6.62194 7.808L6.58093 7.7794ZM6.39157 1.51856L6.38287 1.46932L6.38281 1.46933L6.39157 1.51856ZM6.14405 1.66177L6.10561 1.62972L6.1043 1.63144L6.14405 1.66177ZM6.70513 1.58668L6.67663 1.62776L6.67666 1.62779L6.70513 1.58668ZM9.79138 3.72435L9.82163 3.68448L9.81985 3.68325L9.79138 3.72435ZM9.82085 3.74671L9.85271 3.70813L9.85107 3.70688L9.82085 3.74671ZM9.96484 3.99332L10.014 3.9845L10.014 3.98447L9.96484 3.99332ZM9.89646 4.30624L9.93746 4.33487L9.93753 4.33476L9.89646 4.30624ZM9.7888 4.46044L9.82864 4.49071L9.82979 4.48906L9.7888 4.46044ZM9.76631 4.49004L9.80471 4.52213L9.80612 4.52029L9.76631 4.49004ZM9.51853 4.63338L9.50977 4.58415L9.50976 4.58415L9.51853 4.63338ZM9.20497 4.56526L9.17649 4.60636L9.17654 
4.6064L9.20497 4.56526ZM6.11872 2.42733L6.08847 2.46719L6.09025 2.46843L6.11872 2.42733ZM6.08925 2.40496L6.05738 2.44355L6.05903 2.4448L6.08925 2.40496ZM5.94526 2.15835L5.89605 2.16718L5.89605 2.16721L5.94526 2.15835ZM6.01364 1.84544L5.97267 1.81677L5.97253 1.81697L6.01364 1.84544ZM6.12156 1.69124L6.08178 1.66088L6.0806 1.66257L6.12156 1.69124ZM6.93248 1.18246L6.97138 1.20793L7.02615 1.12425L6.98724 1.09879L6.93248 1.18246ZM5.74085 1.42731C6.00984 1.04105 6.53679 0.936408 6.93345 1.18309L6.98626 1.09817C6.54487 0.823677 5.9585 0.939794 5.65878 1.37016L5.74085 1.42731ZM5.63275 1.58208L5.74081 1.42736L5.65882 1.3701L5.55077 1.52482L5.63275 1.58208ZM5.60623 1.62233L5.6335 1.58097L5.55002 1.52592L5.52275 1.56728L5.60623 1.62233ZM5.48883 2.24014C5.44969 2.02423 5.49161 1.80626 5.60692 1.62125L5.52206 1.56836C5.3936 1.77446 5.34686 2.01764 5.39044 2.25798L5.48883 2.24014ZM5.69465 2.661C5.59015 2.54164 5.51736 2.39872 5.48884 2.24021L5.39042 2.25791C5.4224 2.43569 5.50393 2.59497 5.61941 2.72687L5.69465 2.661ZM3.59009 5.74545L5.69805 2.72253L5.61602 2.66534L3.50806 5.68825L3.59009 5.74545ZM3.49044 5.74931L3.53257 5.76405L3.56558 5.66965L3.52345 5.65492L3.49044 5.74931ZM2.49496 6.08213C2.72184 5.75654 3.13065 5.6336 3.49163 5.74971L3.52225 5.65452C3.1214 5.52558 2.66607 5.66166 2.41292 6.02496L2.49496 6.08213ZM2.38729 6.2366L2.49496 6.08214L2.41292 6.02496L2.30525 6.17941L2.38729 6.2366ZM2.36237 6.27426L2.38796 6.23561L2.30458 6.1804L2.27899 6.21905L2.36237 6.27426ZM2.60843 7.46188C2.22059 7.19318 2.1156 6.66788 2.36304 6.27321L2.27832 6.22009C2.00256 6.65991 2.11959 7.24486 2.55148 7.54408L2.60843 7.46188ZM5.69458 9.59981L2.60843 7.46188L2.55149 7.54408L5.63763 9.68202L5.69458 9.59981ZM5.74153 9.63025L5.69332 9.59897L5.63889 9.68286L5.6871 9.71414L5.74153 9.63025ZM6.19694 9.75765C6.03629 9.75765 5.87941 9.71391 5.74026 9.62945L5.68837 9.71494C5.84311 9.80885 6.01792 9.85765 6.19694 9.85765V9.75765ZM6.35544 9.74333C6.30249 9.75289 6.24949 9.75765 6.19694 
9.75765V9.85765C6.25555 9.85765 6.31449 9.85234 6.37321 9.84173L6.35544 9.74333ZM6.9243 9.37918C6.78973 9.57247 6.58795 9.70158 6.35548 9.74332L6.37316 9.84174C6.6316 9.79534 6.85659 9.65146 7.00637 9.43631L6.9243 9.37918ZM7.03228 9.22465L6.92435 9.37911L7.00632 9.43638L7.11425 9.28193L7.03228 9.22465ZM7.05879 9.18441L7.03152 9.22577L7.115 9.28081L7.14228 9.23945L7.05879 9.18441ZM7.17632 8.56643C7.21534 8.78235 7.17329 9.00047 7.05809 9.1855L7.14298 9.23836C7.27128 9.03228 7.31816 8.78899 7.27472 8.54865L7.17632 8.56643ZM6.95352 8.12447C7.06796 8.24674 7.14589 8.39817 7.17632 8.56644L7.27472 8.54864C7.24078 8.36096 7.15379 8.1921 7.02653 8.05613L6.95352 8.12447ZM7.43436 7.36595L6.94902 8.06169L7.03103 8.11891L7.51637 7.42316L7.43436 7.36595ZM13.1554 11.5553L7.5052 7.35443L7.44553 7.43468L13.0957 11.6356L13.1554 11.5553ZM13.1992 11.5838L13.1529 11.5536L13.0982 11.6373L13.1444 11.6675L13.1992 11.5838ZM13.6751 11.7175C13.5114 11.7175 13.3461 11.6736 13.1979 11.583L13.1457 11.6684C13.3101 11.7688 13.4935 11.8175 13.6751 11.8175V11.7175ZM14.4287 11.3258C14.2511 11.5806 13.9655 11.7175 13.6751 11.7175V11.8175C13.9966 11.8175 14.3136 11.6659 14.5108 11.383L14.4287 11.3258ZM14.7139 10.9169L14.4287 11.3258L14.5108 11.383L14.7959 10.9741L14.7139 10.9169ZM14.7415 10.8751L14.7132 10.9179L14.7966 10.9731L14.8249 10.9303L14.7415 10.8751ZM14.8632 10.2344C14.9038 10.4585 14.8602 10.6844 14.7407 10.8763L14.8256 10.9291C14.9582 10.7162 15.0066 10.4651 14.9616 10.2166L14.8632 10.2344ZM14.4774 9.64199C14.6856 9.78593 14.8199 9.99456 14.8632 10.2344L14.9616 10.2166C14.9135 9.95012 14.7639 9.71849 14.5342 9.55973L14.4774 9.64199ZM8.5652 5.83476L14.4787 9.6429L14.5329 9.55882L8.61934 5.75069L8.5652 5.83476ZM9.04825 5.05182L8.55126 5.76411L8.63327 5.82133L9.13026 5.10904L9.04825 5.05182ZM9.15565 5.05237L9.10718 5.03375L9.07133 5.12711L9.1198 5.14572L9.15565 5.05237ZM9.44276 5.1032C9.34428 5.1032 9.24745 5.08497 9.15446 5.05193L9.12098 5.14616C9.22369 5.18265 9.33184 5.2032 9.44276 
5.2032V5.1032ZM9.60096 5.08886C9.548 5.09832 9.49485 5.1032 9.44276 5.1032V5.2032C9.50105 5.2032 9.56009 5.19774 9.61854 5.1873L9.60096 5.08886ZM10.17 4.72484C10.0353 4.91816 9.8335 5.04737 9.60097 5.08885L9.61853 5.1873C9.87716 5.14116 10.1022 4.99712 10.2521 4.78201L10.17 4.72484ZM10.2778 4.57035L10.17 4.72481L10.252 4.78204L10.3598 4.62758L10.2778 4.57035ZM10.3044 4.53009L10.2771 4.57145L10.3606 4.62649L10.3878 4.58513L10.3044 4.53009ZM10.4218 3.91227C10.4609 4.12819 10.419 4.34615 10.3037 4.53116L10.3885 4.58406C10.517 4.37796 10.5637 4.13478 10.5202 3.89443L10.4218 3.91227ZM10.0566 3.34511C10.2504 3.47949 10.3799 3.68055 10.4217 3.91224L10.5202 3.89447C10.4736 3.63659 10.3291 3.4124 10.1136 3.26294L10.0566 3.34511ZM6.97029 1.20719L10.0567 3.34513L10.1136 3.26292L7.02724 1.12499L6.97029 1.20719ZM8.25776 6.18513L7.72795 6.945L7.80998 7.0022L8.33979 6.24232L8.25776 6.18513ZM14.247 9.98445L8.32585 6.17169L8.27171 6.25577L14.1929 10.0685L14.247 9.98445ZM14.282 10.0109L14.2502 9.98668L14.1897 10.0663L14.2215 10.0905L14.282 10.0109ZM14.4558 10.3074C14.4347 10.1906 14.3742 10.0873 14.2837 10.0122L14.2198 10.0891C14.2922 10.1492 14.3405 10.2315 14.3574 10.3252L14.4558 10.3074ZM14.3738 10.6816C14.4503 10.5721 14.4795 10.4389 14.4558 10.3074L14.3574 10.3251C14.3764 10.4304 14.3531 10.5366 14.2918 10.6243L14.3738 10.6816ZM14.0887 11.0905L14.3738 10.6816L14.2918 10.6244L14.0066 11.0333L14.0887 11.0905ZM14.0664 11.1201L14.0876 11.0921L14.0078 11.0318L13.9866 11.0598L14.0664 11.1201ZM13.3975 11.2208C13.6078 11.3671 13.9019 11.3215 14.0652 11.1216L13.9877 11.0583C13.8563 11.2192 13.6205 11.2541 13.4546 11.1387L13.3975 11.2208ZM7.73914 7.01372L13.3962 11.2199L13.4559 11.1397L7.7988 6.93348L7.73914 7.01372ZM3.22104 6.11674C3.19333 6.11674 3.16516 6.11904 3.13686 6.12412L3.15453 6.22255C3.17663 6.21858 3.19886 6.21674 3.22104 6.21674V6.11674ZM3.48799 6.20058C3.40853 6.14515 3.31593 6.11674 3.22104 6.11674V6.21674C3.29574 6.21674 3.36832 6.23902 3.43077 6.2826L3.48799 
6.20058ZM6.57423 8.33816L3.48785 6.20048L3.43091 6.28269L6.51729 8.42036L6.57423 8.33816ZM6.60546 8.36179L6.57599 8.33943L6.51554 8.41909L6.54501 8.44145L6.60546 8.36179ZM6.76829 8.63935C6.74862 8.53029 6.6917 8.4332 6.60714 8.36312L6.54333 8.44012C6.60974 8.49515 6.6544 8.57126 6.66988 8.65711L6.76829 8.63935ZM6.69173 8.98974C6.76328 8.88711 6.79071 8.76272 6.76828 8.63929L6.66989 8.65717C6.68753 8.75425 6.66611 8.85165 6.6097 8.93255L6.69173 8.98974ZM6.58392 9.14422L6.69172 8.98976L6.60971 8.93253L6.50192 9.08699L6.58392 9.14422ZM6.56018 9.17541L6.58267 9.14594L6.50318 9.08527L6.48068 9.11474L6.56018 9.17541ZM6.28188 9.33749C6.39119 9.31786 6.48827 9.26155 6.5588 9.17713L6.48206 9.11302C6.42669 9.17929 6.35058 9.22355 6.2642 9.23907L6.28188 9.33749ZM5.93089 9.26153C6.03373 9.33274 6.15889 9.36019 6.28211 9.33745L6.26397 9.23911C6.16721 9.25696 6.06902 9.23555 5.98781 9.17932L5.93089 9.26153ZM2.8445 7.12359L5.93088 9.26153L5.98782 9.17932L2.90144 7.04139L2.8445 7.12359ZM2.81545 7.10172L2.84285 7.1224L2.90309 7.04258L2.87569 7.0219L2.81545 7.10172ZM2.72713 6.47201C2.58669 6.67354 2.62728 6.94717 2.81383 7.10044L2.87731 7.02318C2.73065 6.90268 2.69886 6.68749 2.80917 6.52918L2.72713 6.47201ZM2.8348 6.31755L2.72713 6.47201L2.80917 6.52919L2.91683 6.37473L2.8348 6.31755ZM2.85856 6.28634L2.83607 6.31581L2.91556 6.37647L2.93805 6.34701L2.85856 6.28634ZM3.13684 6.12412C3.02764 6.14377 2.93044 6.20021 2.85993 6.28462L2.93668 6.34873C2.99206 6.28243 3.06831 6.23806 3.15454 6.22254L3.13684 6.12412ZM6.02033 2.98324L3.94778 5.95536L4.02981 6.01256L6.10236 3.04044L6.02033 2.98324ZM8.68183 4.76631L6.08982 2.97074L6.03287 3.05294L8.62488 4.84851L8.68183 4.76631ZM6.62194 7.808L8.69437 4.83601L8.61234 4.77881L6.53992 7.7508L6.62194 7.808ZM3.96032 6.02506L6.55246 7.8205L6.6094 7.7383L4.01726 5.94286L3.96032 6.02506ZM6.38281 1.46933C6.27316 1.48884 6.17604 1.54531 6.10565 1.62975L6.18245 1.69379C6.23769 1.62753 6.31377 1.58318 6.40032 1.56779L6.38281 1.46933ZM6.73364 1.5456C6.63061 
1.47412 6.50618 1.44755 6.38287 1.46932L6.40026 1.5678C6.49798 1.55054 6.5956 1.57154 6.67663 1.62776L6.73364 1.5456ZM9.81985 3.68325L6.7336 1.54558L6.67666 1.62779L9.76291 3.76546L9.81985 3.68325ZM9.85107 3.70688L9.8216 3.68452L9.76116 3.76419L9.79062 3.78655L9.85107 3.70688ZM10.014 3.98447C9.99437 3.87513 9.93756 3.77826 9.85269 3.70816L9.78901 3.78527C9.85562 3.84028 9.90014 3.91609 9.91563 4.00218L10.014 3.98447ZM9.93753 4.33476C10.0088 4.23216 10.0362 4.10784 10.014 3.9845L9.91562 4.00215C9.93305 4.09931 9.9116 4.19678 9.85539 4.27772L9.93753 4.33476ZM9.82979 4.48906L9.93746 4.33487L9.85547 4.27762L9.7478 4.43181L9.82979 4.48906ZM9.80612 4.52029L9.82861 4.49069L9.74898 4.43019L9.72649 4.45979L9.80612 4.52029ZM9.52729 4.6826C9.63695 4.6631 9.73404 4.60662 9.80467 4.5221L9.72794 4.45798C9.67242 4.52442 9.59631 4.56876 9.50977 4.58415L9.52729 4.6826ZM9.17654 4.6064C9.27939 4.67747 9.40327 4.70469 9.5273 4.6826L9.50976 4.58415C9.41174 4.60161 9.31459 4.58024 9.23339 4.52413L9.17654 4.6064ZM6.09025 2.46843L9.17649 4.60636L9.23344 4.52416L6.14719 2.38622L6.09025 2.46843ZM6.05903 2.4448L6.0885 2.46716L6.14894 2.38749L6.11947 2.36513L6.05903 2.4448ZM5.89605 2.16721C5.91572 2.27655 5.97254 2.37342 6.05741 2.44352L6.12109 2.36641C6.05448 2.3114 6.00996 2.23558 5.99447 2.1495L5.89605 2.16721ZM5.97253 1.81697C5.90135 1.91977 5.87392 2.0438 5.89605 2.16718L5.99448 2.14953C5.97706 2.0524 5.99848 1.95515 6.05474 1.8739L5.97253 1.81697ZM6.0806 1.66257L5.97267 1.81677L6.0546 1.87411L6.16252 1.71991L6.0806 1.66257ZM6.1043 1.63144L6.08181 1.66091L6.16131 1.72157L6.1838 1.6921L6.1043 1.63144Z" fill="currentColor"></path><path d="M8.8 12.4675H2.2C1.92386 12.4675 1.7 12.6913 1.7 12.9675V13.5675C1.7 13.8436 1.92386 14.0675 2.2 14.0675H8.8C9.07614 14.0675 9.3 13.8436 9.3 13.5675V12.9675C9.3 12.6913 9.07614 12.4675 8.8 12.4675Z" stroke="currentColor" fill="none" stroke-width="0.6"></path></svg></span><span class="FooterTag-name">Policy</span></a></span></span><span class=""><span 
class="FooterTag-root FooterTag-core"><a href="/topics/existential-risk"><span class="FooterTag-coreIcon"><svg width="16" height="16" viewBox="0 0 16 16" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class=""><path d="M7.70242 7.6811C7.70242 7.84813 7.83465 7.98476 7.99629 7.98476C8.15792 7.98476 8.29015 7.84813 8.29015 7.6811V5.25156C8.29015 5.08452 8.15792 4.9479 7.99629 4.9479C7.83465 4.9479 7.70242 5.08453 7.70242 5.25156V7.6811ZM11.376 12.5401C11.3173 11.7809 10.7201 11.6367 10 11.5C9.95592 11.2115 9.90657 10.475 9.75958 10.2169V7.98482H11.229C12.1989 7.98482 12.9923 7.16492 12.9923 6.16272C12.9923 5.20608 12.2723 4.41658 11.3759 4.34063C11.3172 3.58142 10.7587 2.9741 10.0387 2.83736C9.90654 1.80477 9.0396 1 7.99624 1C6.95287 1 6.08592 1.80477 5.9537 2.85249C5.24839 2.97398 4.68997 3.58142 4.61647 4.34063C3.70544 4.41658 3.00003 5.20619 3.00003 6.16272C3.00003 7.16492 3.79348 7.98482 4.76334 7.98482H6.23279V10.2321C6.0858 10.475 6.04409 11.2116 6 11.5C5.2948 11.6216 4.68995 11.7809 4.61645 12.5401C3.70542 12.6161 3 13.4057 3 14.3622C3 14.5292 3.13223 14.6659 3.29386 14.6659C3.4555 14.6659 3.58773 14.5292 3.58773 14.3622C3.58773 13.6941 4.11673 13.1474 4.76331 13.1474H4.92496C5.01311 13.1474 5.0866 13.117 5.14533 13.0411C5.20407 12.9804 5.23349 12.8892 5.21883 12.7981V12.6766C5.21883 12.0844 5.67441 12.1044 6.2475 12.1044C6.40914 12.1044 6.54136 11.9678 6.54136 11.8008C6.54136 11.4971 6.62943 10.7179 6.79107 10.4749C6.82049 10.4294 6.83515 10.3686 6.83515 10.3079L6.83535 7.68114V7.43817C6.83535 6.73964 6.30635 6.16271 5.65977 6.16271C5.49812 6.16271 5.3659 6.29935 5.3659 6.46637C5.3659 6.6334 5.49813 6.77003 5.65977 6.77003C5.9684 6.77003 6.21819 7.0433 6.2475 7.37735L4.76338 7.37746C4.11679 7.37746 3.58779 6.83082 3.58779 6.16268C3.58779 5.49455 4.11679 4.94791 4.76338 4.94791H4.92502C5.01317 4.94791 5.08667 4.91751 5.1454 4.84157C5.20413 4.78088 5.23355 4.68968 5.21889 4.59859V4.47711C5.21889 3.88492 5.67439 3.41423 6.24748 3.41423C6.40913 3.41423 
6.54134 3.2776 6.54134 3.11057C6.54134 2.27541 7.20257 1.59214 8.01079 1.59214C8.81902 1.59214 9.48025 2.27541 9.48025 3.11057C9.48025 3.2776 9.61247 3.41423 9.77411 3.41423C10.3472 3.41423 10.8027 3.88492 10.8027 4.47711V4.59859C10.788 4.68968 10.8174 4.76563 10.8762 4.84157C10.9349 4.90226 11.0084 4.94791 11.0966 4.94791H11.2289C11.8755 4.94791 12.4045 5.49455 12.4045 6.16268C12.4045 6.83082 11.8755 7.37746 11.2289 7.37746H9.75944C9.78886 7.0434 10.0387 6.77014 10.3472 6.77014C10.5088 6.77014 10.641 6.6335 10.641 6.46648C10.641 6.29945 10.5088 6.16282 10.3472 6.16282C9.70059 6.16282 9.17159 6.73985 9.17159 7.43827L9.17169 7.68114V10.3232C9.17169 10.3839 9.18635 10.4447 9.21576 10.4903C9.37741 10.7331 9.31453 11.4971 9.31453 11.8008C9.31453 11.9678 9.44676 12.1044 9.60839 12.1044C10.1815 12.1044 10.788 12.0997 10.788 12.6919V12.8134C10.7734 12.9044 10.8027 12.9804 10.8615 13.0563C10.9202 13.117 10.9937 13.1627 11.0819 13.1627H11.2289C11.8755 13.1627 12.4045 13.7093 12.4045 14.3774C12.4045 14.5445 12.5367 14.6811 12.6983 14.6811C12.86 14.6811 12.9922 14.5445 12.9922 14.3774C12.9922 13.4056 12.2724 12.616 11.376 12.5401Z"></path><rect x="4" y="9.29999" width="7.9" height="0.666667" rx="0.333333"></rect><rect x="3" y="14.3333" width="10" height="0.666667" rx="0.2"></rect></svg></span><span class="FooterTag-name">Existential risk</span></a></span></span><span class=""><span class="FooterTag-root"><a href="/topics/improving-institutional-decision-making"><span class="FooterTag-name">Improving institutional decision-making</span></a></span></span><span class=""><span class="FooterTag-root"><a href="/topics/longtermism"><span class="FooterTag-name">Longtermism</span></a></span></span><span class=""><span class="FooterTag-root"><a href="/topics/longtermist-institutional-reform"><span class="FooterTag-name">Longtermist institutional reform</span></a></span></span><span class=""><span class="FooterTag-root"><a href="/topics/long-term-future"><span 
class="FooterTag-name">Long-term future</span></a></span></span><a class="FooterTagList-postTypeLink" href="/about#Finding_content"><span class="LWTooltip-root"><div class="FooterTagList-frontpageOrPersonal">Frontpage</div></span></a></span></div></div></div></div></div><div class="ToCColumn-toc"><div class="ToCColumn-stickyBlockScroller"><div class="ToCColumn-stickyBlock"><div><div class="TableOfContentsRow-root TableOfContentsRow-level0 TableOfContentsRow-highlighted"><a href="#" class="TableOfContentsRow-link TableOfContentsRow-title TableOfContentsRow-highlightDot">Objectives of longtermist policy making</a></div><div class="TableOfContentsRow-root TableOfContentsRow-level1"><a href="#0_0_Introduction" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>0.0 Introduction</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level1"><a href="#1_0_Further_our_understanding_of_longtermism_and_adjacent_scientific_fields" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>1.0 Further our understanding of longtermism and adjacent scientific fields</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level2"><a href="#1_1_What_does_a_good_society_look_like_" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>1.1 What does a good society look like?</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level2"><a href="#1_2_How_do_we_create_a_good_society_" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>1.2 How do we create a good society?</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level1"><a href="#2_0_Shape_policy_making_institutions_for_future_generations" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>2.0 Shape policy making institutions for future generations</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level2"><a 
href="#2_1_Develop_epistemic_capabilities_for_long_term_policy_making" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>2.1 Develop epistemic capabilities for long-term policy making</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level2"><a href="#2_2_Motivate_policymakers_to_prioritize_future_generations" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>2.2 Motivate policymakers to prioritize future generations</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level2"><a href="#2_3_Remove_institutional_barriers_to_longtermist_policy_making" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>2.3 Remove institutional barriers to longtermist policy making</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level2"><a href="#2_4_Proposed_mechanisms" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>2.4 Proposed mechanisms</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level3"><a href="#Designated_stakeholders" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>Designated stakeholders</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level3"><a href="#Information_interventions" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>Information interventions</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level3"><a href="#Voting_mechanisms" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>Voting mechanisms</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level3"><a href="#Liability_mechanisms" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>Liability mechanisms</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level3"><a href="#Reallocation_of_resources" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>Reallocation of 
resources</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level1"><a href="#3_0_Directly_influence_the_future_trajectory_of_human_civilization" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>3.0 Directly influence the future trajectory of human civilization</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level2"><a href="#3_1_Mitigate_catastrophic_risk_and_build_resiliency_to_tail_events_and_unknown_unknowns" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>3.1 Mitigate catastrophic risk and build resiliency to tail events and unknown unknowns</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level3"><a href="#Reduce_the_probability_of_specific_risks" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>Reduce the probability of specific risks</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level3"><a href="#Improve_risk_management_frameworks" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>Improve risk management frameworks</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level3"><a href="#Increase_resilience_of_critical_systems" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>Increase resilience of critical systems</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level2"><a href="#3_2_Build_inclusive_progress_through_long_lasting_and_well_functioning_institutions" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>3.2 Build inclusive progress through long-lasting and well-functioning institutions</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level3"><a href="#There_is_still_much_we_don_t_know_about_progress" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>There is still much we don’t know about progress</span></a></div><div class="TableOfContentsRow-root 
TableOfContentsRow-level3"><a href="#General_ideas_for_how_to_increase_progress" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>General ideas for how to increase progress</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level3"><a href="#Specific_proposals_for_how_to_increase_inclusive_progress" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>Specific proposals for how to increase inclusive progress</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level2"><a href="#3_3_What_about_sustainability_" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>3.3 What about sustainability?</span></a></div><div class="TableOfContentsRow-root TableOfContentsRow-level1"><a href="#4_0_Summary" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>4.0 Summary</span></a></div><div class="TableOfContentsDivider-divider"></div><div class="TableOfContentsRow-root TableOfContentsRow-level0"><a href="#comments" class="TableOfContentsRow-link TableOfContentsRow-highlightDot"><span>7 comments</span></a></div></div></div></div></div><div class="ToCColumn-gap1"></div><div class="ToCColumn-content"><div id="postBody" class="PostsPage-centralColumn PostsPage-postBody PostsPage-audioPlayerHidden"><div class="PostsPage-postContent instapaper_body ContentStyles-base content ContentStyles-postBody"><div id="postContent"><div><p><i>Estimated reading time: 20-30 minutes</i></p><p><i>-We would like to thank the following for their excellent feedback and guidance throughout this article, in no particular order: Tyler M. 
John, Max Stauffer, Aksel Braanen Sterri, Eirik Mofoss, Samuel Hilton, Konrad Seifert, Tildy Stokes, Erik Aunvåg Matsen and Marcel Grewal Sommerfelt.</i></p><h1 id="0_0_Introduction"><strong>0.0 Introduction</strong></h1><p>This article is co-authored by five members of Effective Altruism Norway as a pilot project to test if we can contribute in a valuable way to the emerging field of longtermism and policy making.</p><p>In the article we summarize some of the work that is being done in the emerging field of longtermism, using a new structure to classify the different interventions (see Figure 1: Three objectives of longtermist policy making). Then, for each objective we describe related challenges and potential solutions, and give some examples of current ongoing work.</p><p>We hope that the new structure can help improve coordination in this emerging field, and enable improved prioritization of interventions. If this structure resonates well with established experts in the field, we are happy to write up a shorter version of this article that could serve as an introduction to longtermist policy making for non-experts. Already, at 17 pages this article is one fourth of the length of the <a href="https://globalprioritiesinstitute.org/wp-content/uploads/GPI-research-agenda-version-2.1.pdf"><u>GPI research agenda</u></a>, which covers many of the same topics. </p><p>Finally, we have emphasized some aspects of longtermist policy making that we believe have been underemphasized in the effective altruism- and longtermism communities in the past. Examples include scenario planning, robust decision making and redteaming among others, which we have described together with forecasting in section 2.1 as essential epistemic capabilities for long-term governance. 
These tools are complementary to forecasting-based epistemic capabilities that the EA/longtermist communities already promote, and we hope that they will receive increased attention going forward.</p><p>We hope to produce 1-3 further articles on similar topics through 2021, and welcome any experts who have capacity to provide feedback on our work.</p><p>--------------------------------------------------------------------</p><p>In 2019 William MacAskill proposed a definition of the term <a href="https://forum.effectivealtruism.org/posts/qZyshHCNkjs3TvSem/longtermism"><u>longtermism</u></a> as the<i> view that those who live at future times matter just as much, morally, as those who live today</i>. There are many reasons to believe that actions can have a substantial impact on the future. For instance, the economic growth seen in the past two centuries has lifted billions out of poverty. In addition to this, any long-term consequences of climate change caused by humans could decrease the life quality of several generations to come. Our generation is also one of the first who has had the technological potential to destroy civilization through e.g. nuclear weapons, and thereby eliminating all future of humanity. This means that actions we take today can improve the course of history for hundreds of generations to come.</p><p>Interest in the welfare of future generations precedes the MacAskill definition of longtermism from 2017. In 2005 the Future of Humanity Institute was established at Oxford University. In 2009, the <a href="https://www.csf.gov.sg/who-we-are/"><u>Centre for Strategic Futures</u></a> (CSF) was established by the Singaporean Government as a futures think tank. In 2017 William MacAskill started using the word “longtermism” as a term for the cluster of views that involved concern about ensuring the long-run future goes as well as possible. 
Since then, <a href="https://forum.effectivealtruism.org/tag/longtermism-philosophy"><u>many have contributed</u></a> to the development of the philosophical field. The <a href="https://globalprioritiesinstitute.org/"><u>Global Priorities Institute</u></a> (GPI) in Oxford was established in 2018 with the mission to <i>conduct and promote world-class, foundational academic research on how most effectively to do good</i>. In 2020 GPI published a new <a href="https://globalprioritiesinstitute.org/research-agenda/"><u>research agenda</u></a>, where one of its two sections was dedicated to longtermism. These are just some of several milestones in the short history of longtermism. </p><p>If we believe that the future is what matters most and that we can influence it through our policy making, then it follows that the long-run outcomes of enacted policies should be one of the key considerations of the policy making process. However, most political systems are not prioritising long-term planning sufficiently compared to the potential benefits just for existing generations – nevermind thinking about the moral importance of future generations. </p><p>There are examples of different institutions and policy makers that are putting longtermism on the agenda today, but the time frame they consider long-term differs. Time horizons of longtermist organizations that frequently interact with policy makers (e.g. <a href="https://www.appgfuturegenerations.com/"><u>APPG</u></a> and <a href="https://www.alpenglow.org.uk/"><u>Alpenglow</u></a>) are constrained by the norms in the current policy making process. Although academics talking about "longtermism" can look thousands of years ahead, actors seeking to practically influence policy organisations, including ourselves, are typically considering shorter time horizons, e.g. 20-30 years in the future. 
</p><p>This article will explore three categories of objectives for longtermist policy making and might serve as a guide towards shaping longtermist policy suggestions. These objectives are summarized in figure 1.</p><figure class="image image_resized" style="width:624px"><img src="http://res.cloudinary.com/cea/image/upload/v1667996044/mirroredImages/t4Lqh7GHBM9YyEDg8/dz3yy1a99m7ei9v9fcbb.png"><figcaption><strong>Figure 1</strong>: Representation of the three objectives longtermist policies should focus on. Objective 1 and 2 serve as foundations for the more direct objective(s) above them.</figcaption></figure><p>On top of the pyramid is the objective directly benefiting future generations - i.e. ensuring that there is a future for human civilization, and that it is as positive as possible. This objective builds on the condition that policy making institutions are enabled to develop such policies, which brings us to part two of the pyramid. This part describes three essential conditions to achieve successful behaviour change interventions; capability, motivation and opportunity, reflecting the <a href="https://link.springer.com/article/10.1186/1748-5908-6-42"><u>COM-B system for institutional reform</u></a> (Michie et. al. 2011). The two upper pieces of the pyramid both rest upon the fundamental part, which concerns the objective of <i>understanding longtermism</i>. Interventions focused on this objective have a more indirect impact mechanism.</p><p>A policy intervention should optimize for one or several of these objectives in order to qualify as a "longtermist policy proposal".</p><p>Note that the proposals in figure 1 are synergistic - if we improve our performance on one of the objectives, it may become easier to also improve on others. In general, objective one works as an enabler of objective two, and both objective one and two are enablers of the third objective. 
For instance, if a policy making institution is able to agree on a set of KPIs to measure the long-term quality of a society (as a partial solution to objective 1 in figure 1), then they can set up a forecasting infrastructure for these KPIs (developing capabilities to govern for the long term, as described in objective 2). With this forecasting infrastructure in place, long-term effects of proposed policies will be more visible to the electorate, creating stronger incentives for politicians to optimize for long-term outcomes (solving another part of objective 2; motivations). This will for instance make it easier to prioritize catastrophic risk mitigation (enabling investment in efforts focused on objective 3), etc.</p><p>Several of the ideas in each category of objectives would be familiar to experienced effective altruists due to the natural synergies of longtermism and effective altruism. However, even experienced effective altruists may not have encountered all of the topics in this article; examples of topics that the experienced reader may find interesting include:</p><ul><li>The three-layered model of objectives of longtermist policies in figure 1</li><li>The discussion of governance KPIs in section 1.1</li><li>Non-forecasting tools like e.g. scenario planning as essential epistemic capabilities in section 2.1, on par with forecasting</li><li>Structured examples of how policy making institutions can be reformed to benefit future generations in section 2.4</li><li>The discussion of sustainability as a way to either mitigate catastrophic risk or a way to boost inclusive progress in section 3.3</li></ul><p>While the objectives are relevant for policy makers in a broad range of governance models and in countries with different levels of democratic development, the examples in this article are primarily focused on policy making on national levels in industrialized, democratic countries. 
</p><h1 id="1_0_Further_our_understanding_of_longtermism_and_adjacent_scientific_fields"><strong>1.0 Further our understanding of longtermism and adjacent scientific fields</strong></h1><p>In the ongoing field of exploring strategic considerations related to longtermist policy making, there is a need for agreement of the meaning of the word. The bottom piece of the pyramid in figure 1 concerns our understanding of longtermism. William MacAskill <a href="https://forum.effectivealtruism.org/posts/qZyshHCNkjs3TvSem/longtermism#Strong_Longtermism"><u>proposes </u></a>three premises that make up what he calls the minimum definition of longtermism: (1) Those who live at future times matter as much, morally as those who live today, (2) society currently privileges those who live today above those who live in the future, and (3) we should take action to rectify that, and help ensure the long-run future goes well. Based on these premises, MacAskill and others have proposed political measures like <a href="https://philpapers.org/archive/JOHLIR.pdf"><u>future assemblies</u></a> or a <a href="https://drive.google.com/file/d/1lJHBKfIROiyc8yxVaZnKEWUEYfOg06Eh/view"><u>Ministry of the Future</u></a> (see section 2.4 for further elaboration). Organizations like the <a href="https://globalprioritiesinstitute.org/wp-content/uploads/gpi-research-agenda.pdf"><u>Global Priorities Institute</u></a> (GPI) and the <a href="https://www.fhi.ox.ac.uk/"><u>Future of Humanity Institute</u></a> (FHI) are currently working on establishing longtermism as a scientific field of inquiry. </p><h2 id="1_1_What_does_a_good_society_look_like_">1.1 What does a good society look like?</h2><p>Two important constraints on our current ability to positively influence the future are (i) uncertainty about what a good society looks like, i.e. moral cluelessness, and (ii) how we can best create one, i.e. strategic cluelessness. 
Different scientific and philosophical fields have attempted to investigate the first question in different ways. One example of moral cluelessness is the repugnant conclusion, which assumes that by adding more people to the world, and proportionally staying above a given average in happiness, one can reach a state of minimal happiness for an infinitely large population. However, we aren't completely clueless: here are some metrics that are commonly used to describe more or less positive aspects of a society. </p><p>Economists frequently use KPIs (Key Performance Indicators) to try to measure different facets of a successful society. GDP and GDP growth is perhaps the most common, while metrics like Gini-coefficients, average lifespan, GHG emissions, or the Human Development Index are used to describe inequality, health, sustainability and economic development, respectively.</p><p>While none of these metrics cover all that matters in a society on their own, a combination of such KPIs may capture most of the aspects that we care about. The “<a href="https://drive.google.com/file/d/1lJHBKfIROiyc8yxVaZnKEWUEYfOg06Eh/view"><u>Portugal we want</u></a>” project is an example of a collaborative effort to converge on a set of KPIs to use in governance for the long term. There are also other examples that similarly attempt to stake out the course for the future of the country, e.g. the “<a href="https://www.cynnalcymru.com/project/the-wales-we-want/"><u>Wales we want</u></a>”-project, or the japanese work on “<a href="https://www.japanpolicyforum.jp/society/pt20190109210522.html"><u>Future Design</u></a>”. </p><p>Another, more academically oriented example of projects that attempt to compile partial descriptions of a good society into more complete descriptions, is the <a href="https://globalprioritiesinstitute.org/wp-content/uploads/GPI-research-agenda-version-2.1.pdf"><u>GPI research agenda</u></a>. 
It lists several other partial approaches to measure broader social welfare through a set of KPIs, including informal discussions by <a href="http://www.stafforini.com/blog/bostrom/"><u>Bostrom </u></a>and <a href="http://reflectivedisequilibrium.blogspot.com/2013/12/what-proxies-to-use-for-flow-through.html"><u>Shulman</u></a>. </p><h2 id="1_2_How_do_we_create_a_good_society_">1.2 How do we create a good society?</h2><p>When we want to plan for a good society in the future we need to make prioritizations. This can be very important for the long-run trajectory of society as some efforts to improve society are much <a href="https://80000hours.org/problem-profiles/global-priorities-research/"><u>more effective than others</u></a>. <a href="https://80000hours.org/2013/12/a-framework-for-strategically-selecting-a-cause/"><u>Cause prioritization</u></a> is a philosophical field involved with evaluating and comparing different cause areas in their effectiveness. Some of the organizations working on cause prioritization are <a href="https://80000hours.org/articles/future-generations/"><u>80,000 Hours</u></a>, the <a href="https://www.openphilanthropy.org/blog/update-cause-prioritization-open-philanthropy"><u>Open Philanthropy Project</u></a>, and The Center for Reducing Suffering. The latter <a href="https://centerforreducingsuffering.org/the-benefits-of-cause-neutrality/"><u>proposes</u></a> that starting out with a cause-neutral attitude to longtermist policy making is crucial to succeed at the cause prioritization. 
To achieve this, effective institutions and organizations need to: </p><ol><li>Build a broad movement for longtermist policy change so that these efforts don’t get stuck in a specific cause area.</li><li>Explicitly work on prioritization research so that cause areas can be accurately compared, as well as induce attitude change in political and societal institutions (see the middle piece of the pyramid: <i>shape policy making institutions for future generations</i>).</li></ol><p>One important concept in cause prioritization is the notion of <i>crucial considerations</i> - which are strategic questions that can significantly change the optimal strategy when they are taken into consideration. Some of the crucial considerations of longtermist policy making include, but are not limited to, our evaluation of the <a href="https://forum.effectivealtruism.org/posts/XXLf6FmWujkxna3E6/are-we-living-at-the-most-influential-time-in-history-1"><u>hinge of history hypothesis</u></a> (HoH), as well as other considerations discussed in the Global Priorities Institute’s <a href="https://globalprioritiesinstitute.org/research-agenda-web-version/"><u>new research agenda</u></a>. The HoH assumes that this century, or perhaps especially the coming decades, is the most influential period in all of human history. Therefore, our evaluation of HoH’s likelihood is one of the determinants of how we should influence policy makers and the way we distribute the resources we have available today. If we believe that the coming century is merely as influential as a typical century, then we - like <a href="https://forum.effectivealtruism.org/posts/Eey2kTy3bAjNwG8b5/the-emerging-school-of-patient-longtermism"><u>patient longtermists</u></a> - will probably spend less of our philanthropic resources now, and save more to spend them later. However, if we believe that this period is the most “hingey” period of all of human history - e.g. 
because our current values could be locked in for generations to come (i.e. <i>value lock-in view</i>), or if we are living in a<i> time of perils </i>- then we should rather spend more of our philanthropic resources now to ensure the most impact. These considerations can be applied to our spending of any type of philanthropic capital - either money, political influence or other resources of value. If we don’t live at the HoH, it then seems most logical to spend the next decades focusing on building political influence, rather than spending political capital to influence specific decisions in the near future. </p><h1 id="2_0_Shape_policy_making_institutions_for_future_generations"><strong>2.0 Shape policy making institutions for future generations</strong></h1><p>So far, we have considered the problem of longtermism on a general level, and we will therefore describe in this part different measures and obstacles connected to developing and motivating longtermist policy making in institutions. This section reflects the second piece of the pyramid in figure 1, and further elaborates on the COM-B system to ensure successful interventions in behavioural change. We will first consider epistemic determinants and how we can develop epistemic <i>capabilities</i> like forecasting and scenario planning, as well as redteaming and robust decision making. Then we will look at how we can <i>motivate</i> policy makers to prioritize future generations, and in the last paragraph we will consider important institutional barriers to such policy making, and how to remove them in order to to create <i>opportunities</i> for long-termist policy making. 
This section is largely a summary of the work by John & MacAskill, so readers who've studied their work can skip it.</p><h2 id="2_1_Develop_epistemic_capabilities_for_long_term_policy_making">2.1 Develop epistemic capabilities for long-term policy making</h2><p>Lack of knowledge about the future is likely one of the main sources of political short-termism, also known as epistemic determinants in <a href="https://www.researchgate.net/publication/343345291_Longtermist_Institutional_Reform"><u>Longtermist Institutional Reform</u></a> by Tyler John and William MacAskill. These determinants lead to discounting of the value of long-term beneficial policies, making them less likely to be enacted. Some discounting is rational simply because there is a lot of uncertainty about the benefits of long-term policies. Irrational discounting is another source of short-termism which is caused by cognitive biases and attentional asymmetries between the future and nearby past. Vividness effects can make people react more strongly to vivid sources of information like news, videos and graphics compared to scientific research. People are also often over-confident in their ability to control and eliminate risks under situations of uncertainty. See <i>Thinking, fast and slow </i>(2011) by Daniel Kahneman for further details. Although these shortcomings are limiting politicians in their effectiveness, philosopher Christian Tarsney has also <a href="https://globalprioritiesinstitute.org/christian-tarsney-the-epistemic-challenge-to-longtermism/"><u>cast doubt</u></a> on the possibility of predicting the future at all.</p><p>Politicians work with the limitations of time and influence which can lead to attentional asymmetries, i.e. when determining the effectiveness of policies, they tend to focus too much on recent events, rather than basing it on future projections. The result of this asymmetry can be that politicians work with less accurate predictions. 
Furthermore, because of these reality constraints (i.e. time and power), politicians are forced to utilize heuristics like the planning fallacy, availability bias and the law of small numbers to tackle current and future issues. However, we have also seen that the long-term can be prioritized politically with the Paris Agreement, carbon tax (e.g. in <a href="https://web.archive.org/web/20100615055008/http://iea.org/publications/free_new_Desc.asp?PUBS_ID=1580"><u>Norway in 1991</u></a>), or the Danish <a href="https://klimaraadet.dk/en/about-danish-council-climate-change"><u>council on climate change</u></a>. </p><p>To deal with these problems, politicians need effective means of forecasting with different sources - e.g. using teams of <a href="https://goodjudgment.com/"><u>superforecasters</u></a> and domain experts, or market-based approaches like prediction markets, to obtain high-quality information about the future. This needs to be implemented to overcome the information barrier (knowledge about the future) and the attention barriers (making changes in future outcomes more salient) so that politicians can make informed decisions about the future. </p><p>To maximize the utility gained from this information, decision makers also need to invest in institutions and organizations that can develop epistemic capabilities beyond forecasting, e.g. scenario planning, robust decision making, and red teaming, among others. In <a href="https://www.smestrategy.net/blog/what-is-scenario-planning-and-how-to-use-it"><u>scenario planning</u></a> exercises, policy makers define a set of scenarios that jointly describe the possible futures that are likely enough to be considered, that differ depending on factors of high uncertainty, and with significant implications for the optimal policy choice. Then, policies are evaluated for how they perform across the range of scenarios. 
Depending on the risk preferences of the policy makers, they should choose a robust policy that both has a high expected value across scenarios, and fails as gracefully as possible in the worst scenarios. Scenario planning could also be supplemented with <a href="https://link.springer.com/chapter/10.1007/978-3-030-05252-2_2"><u>robust decision making</u></a> which especially emphasizes strategies that do well in worst-case scenarios. Additionally, <a href="https://www.synopsys.com/glossary/what-is-red-teaming.html"><u>red teaming</u></a> can provide a solid method of stress-testing the plans we make for the future by taking an adversarial approach. </p><p>Several researchers within the EA movement are working on these issues, e.g. Neil Dullaghan, Michael MacKenzie, and Eva Vivalt. Dullaghan <a href="https://forum.effectivealtruism.org/posts/kCkd9Mia2EmbZ3A9c/deliberation-may-improve-decision-making"><u>proposes</u></a> deliberation as a means of reaching better cooperation across party-lines and long-term thinking. He also claims that there may be a link between deliberation and long-term thinking; specifically in areas like climate change and the environment. Furthermore, MacKenzie <a href="https://www.oxfordhandbooks.com/view/10.1093/oxfordhb/9780198747369.001.0001/oxfordhb-9780198747369-e-7"><u>argues</u></a> that deliberation can help us overcome our cognitive biases by for instance appealing to the idea “saving future children” to ensure longtermist thinking. In order to gather all these findings within forecasting, Vivalt, a researcher at the Australian National University and University of Toronto, <a href="https://forum.effectivealtruism.org/posts/Z7RTJePkiWBH92qqo/eva-vivalt-forecasting-research-results"><u>proposes</u></a> a platform to coordinate the research and the ability of each researcher to forecast. These are only some examples of researchers that are working to improve institutional decision making among many more. 
Still, it is one of the top recommended career paths by <a href="https://80000hours.org/problem-profiles/improving-institutional-decision-making/"><u>80,000 Hours</u></a>, as “Improving the quality of decision-making in important institutions could improve our ability to solve almost all other problems”.</p><h2 id="2_2_Motivate_policymakers_to_prioritize_future_generations">2.2 Motivate policymakers to prioritize future generations</h2><p>Even if there are policymakers who have the necessary capabilities to improve the welfare of future generations, there are still several factors that discourage them from doing so. These factors are referred to as motivational determinants in the <a href="https://philpapers.org/archive/JOHLIR.pdf"><u>Longtermist Institutional Reform</u></a> by Tyler John and William MacAskill, on which the following three sections are heavily based.</p><p>People tend to have a high <a href="https://en.wikipedia.org/wiki/Time_preference"><u>time preference</u></a> for the present, leading to greater discounting of the value of long-term benefits, which makes policies more short-termist. This is a problem that affects both voters and people in power, although the severity of this problem is unclear.</p><p>Self-interest and relational favouritism are another source of short-termism, as many people care more about themselves and their relatives than future generations. Self-beneficial policies are generally short-termist as policymakers and their relatives will only live for a short amount of time compared to the potential lifespan of humanity.</p><p>Cognitive biases may also affect people’s political decisions; two known biases are the identifiable victim effect and procrastination. The <a href="https://en.wikipedia.org/wiki/Identifiable_victim_effect"><u>Identifiable victim effect</u></a> is the tendency to prioritize individuals that are visible over individuals that are statistical or theoretic. 
As future generations are invisible and haven’t been born yet, this naturally leads to short-termism. </p><p>Procrastination drives people to delay difficult problems until they become urgent and demand action. The further a long-term beneficial action is delayed, the less beneficial it is likely to be for future generations. Longtermism is especially prone to procrastination due to its extremely long timeframe.</p><p>Politicians are often even more short-termist than these factors would suggest, and they may frequently make extremely short-term decisions that have minimal benefits and significant costs within a few years, due to the various institutional factors discussed below. </p><h2 id="2_3_Remove_institutional_barriers_to_longtermist_policy_making">2.3 Remove institutional barriers to longtermist policy making</h2><p>Even policymakers that have the expertise and motivation to improve the welfare of future generations can be held back by institutional barriers that are preventing them from effectively advocating for longtermist policies. Many of these factors are due to the way today’s governmental institutions are designed; other sources include politicians’ economic dependencies and the media.</p><p>Most governments have short election cycles that incentivize short-term policy. 
Elected representatives naturally want to be re-elected, and one way to gain the favour of potential voters is to provide evidence that their previous time in office brought positive and immediate effects, which is predominantly achieved by initiating short-term policies.</p><p>Along with short election cycles, most performance measures mainly evaluate the short-term effects of policies, further discouraging policymakers from advocating for long-term policy.</p><p>Time inconsistency is also a problem in governmental institutions because subsequent policymakers can repeal previously enacted future-beneficial policies, as well as redirect investments that were originally intended for future generations. Most governments lack strong institutions dedicated to protecting the interests of future generations, which could help combat the problem of time inconsistency.</p><p>The media, which is largely focused on today’s current events, demand immediate reactions from policymakers. This pressures the policymakers to focus on short-term issues in order to build their reputation, as abstaining from doing so might lower their odds of re-election.</p><h2 id="2_4_Proposed_mechanisms">2.4 Proposed mechanisms</h2><p>To deal with the problems mentioned above (lacking capabilities, disincentivized policymakers and institutional barriers), there is a dire need for institutional reform. There are many different ways to go about this, and there is still a lot of uncertainty about what might be the best solutions. What follows is a list of various longtermist policy proposals chosen with help from Tyler John. The proposals are divided into five main categories, with examples below. 
A more comprehensive list can be found <a href="https://forum.effectivealtruism.org/posts/op93xvHkJ5KvCrKaj/institutions-for-future-generations#Four_branch_Model_of_Government"><u>here</u></a>.</p><p><strong id="Designated_stakeholders">Designated stakeholders</strong></p><p>Key decision-makers or their advisors are appointed as responsible for protecting the interests of future people. Some examples of these are:</p><ul><li>Ministers and Executive Departments</li><li>Ombudsperson for Future Generations</li><li>Parliamentary committees</li></ul><p><strong id="Information_interventions">Information interventions</strong></p><p>Affects how information about the impact of future policies is gained or made publicly available. Some examples of these are:</p><ul><li>In-government Think Tank</li><li>Posterity Impact Assessments</li><li>Intergenerational Deliberation Day</li></ul><p><strong id="Voting_mechanisms">Voting mechanisms</strong></p><p>Democratic election mechanisms and policy voting rules are redesigned to promote candidates that are expected to benefit future people. Some examples of these are:</p><ul><li>Choosing legislators via lottery</li><li>Demeny voting</li><li>Longer election cycles</li></ul><p><strong id="Liability_mechanisms">Liability mechanisms</strong></p><p>Mechanisms that hold current decision-makers liable if their decisions lead to poor outcomes in the future, including formal rights for future people. Some examples of these are:</p><ul><li>Intergenerational externality taxes</li><li>Making court systems more future-oriented</li><li>Pay for Long-term performance</li></ul><p><strong id="Reallocation_of_resources">Reallocation of resources</strong></p><p>Control of current resources is deferred to future people. 
Some examples of these are:</p><ul><li>Heritage funds</li><li>Financial Institutions for Intergenerational Borrowing</li><li>Lower social discount rate</li></ul><p>For more in-depth analysis of the various proposals, see “Longtermist Institutional Design Literature Review” by Tyler John.</p><p>In addition to the five categories above, another way to encourage long-term policy could be to influence society to be more long-term friendly. An example of this is Roman Krznaric’s writings where he establishes terms and concepts that could enable more longtermist thinking. </p><h1 id="3_0_Directly_influence_the_future_trajectory_of_human_civilization"><strong>3.0 Directly influence the future trajectory of human civilization</strong></h1><p>The top layer of the pyramid in figure 1 considers how one can influence the future of humanity in a more direct way than the objectives in layers 1 and 2 do. There are several methods to directly improve the future and positively shift the trajectory of civilization. One approach is to avoid the bad scenarios (as exemplified by the red scenarios in Figure 2), such as extinction and major catastrophes. Another approach is to boost the good scenarios (exemplified by the green scenarios in Figure 2) by increasing the rate of inclusive progress - either by increasing economic growth, by making progress more inclusive, or by increasing our ability to convert economic wealth into wellbeing. 
</p><figure class="image image_resized" style="width:624px"><img src="http://res.cloudinary.com/cea/image/upload/v1667996044/mirroredImages/t4Lqh7GHBM9YyEDg8/qcctw3cbjlfqdrff7mwq.png"><figcaption><strong>Figure 2</strong>: Illustration of positive and negative trajectories of civilization.</figcaption></figure><h2 id="3_1_Mitigate_catastrophic_risk_and_build_resiliency_to_tail_events_and_unknown_unknowns">3.1 Mitigate catastrophic risk and build resiliency to tail events and unknown unknowns</h2><p>In the effective altruism movement, one commonly recognized way to positively influence the future is to make sure that it actually exists and avoid <a href="https://longtermrisk.org/reducing-risks-of-astronomical-suffering-a-neglected-priority/#III_Reducing_s-risks_is_both_tractable_and_neglected"><u>scenarios of extreme suffering</u></a>, i.e. by avoiding existential risks. By developing longtermist policy and institutions, we can better prepare for the future by building resiliency to both known and unknown existential risks.</p><figure class="image image_resized" style="width:624px"><img src="http://res.cloudinary.com/cea/image/upload/v1667996044/mirroredImages/t4Lqh7GHBM9YyEDg8/si5ga5enygb19xnwiigi.png"><figcaption><strong>Figure 3</strong>: Examples of risks based on a <a href="https://www.existential-risk.org/concept.html"><u>figure</u></a> by Nick Bostrom</figcaption></figure><p>Let us start with some definitions. Bostrom explains the difference between existential risk and catastrophic risk in <a href="https://www.existential-risk.org/concept.html"><u>Existential Risk Prevention as Global Priority</u></a>. Existential risks are both pan-generational and crushing, which means that they drastically reduce the quality of life or cause death that humanity cannot recover from. Compared to this, risks that are merely globally catastrophic do not individually threaten the survival of humanity. 
Assuming that existence is preferable to non-existence, existential risks are considered significantly worse than global catastrophic risks because they affect all future generations. </p><p>However, global catastrophes may drastically weaken critical systems and our ability to tackle a second catastrophe. This argument is presented by the Global Catastrophic Risk Institute in a paper about <a href="http://gcrinstitute.org/papers/003_double-catastrophe.pdf"><u>double catastrophes</u></a> with a case study on how geoengineering may be severely affected by other catastrophes. Moreover, many of the practices that can help us avoid globally catastrophic risks are also useful to prevent existential risks. We have titled this section “mitigate catastrophic risk” to ensure that we cover as many of the risks that may significantly impact the long-term future of humanity as possible.</p><p>The list of already known existential risks includes both natural and anthropological risks. Today’s technological advancements have created more anthropological risks, and there are good reasons to believe that they will continue to do so. Bostrom argues in <a href="https://www.sciencedirect.com/science/article/pii/S0016328720300604"><u>The Fragile World Hypothesis</u></a> that continuous technological development will increase systemic fragility, which can be a source of catastrophic or existential risk. In the Precipice, Toby Ord estimates the chances of existential catastrophe within the next 100 years at one in six. We have already been dangerously close to global catastrophe, e.g. when <a href="https://80000hours.org/2012/02/26th-of-september-petrov-day/"><u>Stanislav Petrov</u></a> potentially singlehandedly avoided a global nuclear war in 1983 when he did not launch missiles in response to the warning system reporting a US missile launch. To prevent such close calls from happening in the future, we need to gain knowledge about both known and unknown risks and solutions to them. 
</p><p>In the Precipice, Ord proposes that reaching existential security is the first of three steps to optimize the future of human civilization. Reaching existential security includes eliminating immediate dangers, mitigating potential future risks, and establishing long-lasting safeguards. For example, switching to renewable energy sources, electric or hydrogen-based fuel, and clean meat, are ways to safeguard against catastrophic <a href="https://www.mckinsey.com/business-functions/sustainability/our-insights/climate-risk-and-response-physical-hazards-and-socioeconomic-impacts"><u>climate change</u></a>. This is one risk that 80,000 Hours include in their view of the world’s <a href="https://80000hours.org/problem-profiles/"><u>most pressing problems</u></a>. 80,000 Hours’ list also includes <a href="https://80000hours.org/problem-profiles/positively-shaping-artificial-intelligence/"><u>positively shaping the development of artificial intelligence</u></a>. This can be positively influenced by investing in technical research and improving governmental strategy. Another priority area is reaching <a href="https://80000hours.org/problem-profiles/nuclear-security/"><u>nuclear security</u></a>, which includes shrinking nuclear stockpiles and improving systems and communication to avoid depending on people acting like Petrov in the case of false warnings.<i> </i>Another priority catastrophic risk area in the EA movement is <a href="https://www.openphilanthropy.org/research/cause-reports/biosecurity"><u>biorisk and pandemic preparedness</u></a>, which is one of the focus areas of the Open Philanthropy Project. In addition to protecting against already known risks, humanity should research potential future risks and use forecasting principles to prepare for them. 
</p><p>When we have reached existential security, Ord proposes that the next steps should be </p><ol><li>a long reflection where we determine what kind of future we want to create and how to do so, and</li><li>achieving our full potential.</li></ol><p>Thus, Ord argues that existential security should take priority over other objectives described in this article, as it is more urgent.</p><p>There are a wide range of actions that can be taken to mitigate catastrophic and existential risks. As mentioned, these actions mainly include eliminating immediate dangers and establishing long-lasting safeguards. The lists below are partially based on the work by <a href="https://www.gcrpolicy.com/risk-management"><u>Global Catastrophic Risk Policy</u></a>. </p><p><strong id="Reduce_the_probability_of_specific_risks">Reduce the probability of specific risks</strong></p><p>The most direct course of action to avoid catastrophe is to reduce the probability of catastrophic or existential risks. Some suggestions to risks and how to reduce them are: </p><ul><li>Reducing the potential for both intentional and unintentional use of nuclear weapons through improving early warning systems, reducing the number of nuclear warheads and the number of people having access to them.</li><li>Strengthen preparedness against pandemics by improving early warning systems, implementing global procedures for limiting spread, and shorten vaccine development timelines. We can also prepare for pandemics by developing vaccines for diseases with high pandemic potential.</li><li>Mitigating climate change by curbing CO<sub>2</sub> emissions through technological development or policy changes. 
Other methods include climate engineering actions such as removing CO<sub>2</sub> from the atmosphere.</li></ul><p><strong id="Improve_risk_management_frameworks">Improve risk management frameworks</strong></p><p>Another approach is to improve risk management frameworks in such a way that we are prepared and able to react better to future risks. Some examples are: </p><ul><li>Developing a centralized all-hazard national risk assessment process that is adaptable to risks in a variety of domains.</li><li>Developing a risk prioritization framework to evaluate vulnerabilities, and the impact of possible adverse outcomes.</li><li>Deconflicting risk ownership between government stakeholders: Set one department or agency as the primary owner for each risk, with clear responsibilities for mitigation, preparation and response.</li><li>Appointing a “national risk officer” responsible for overseeing the national risk assessment process and coordinating mitigation efforts.</li></ul><p><strong id="Increase_resilience_of_critical_systems">Increase resilience of critical systems</strong></p><p>We can also limit the potential harm done by catastrophic risks or mitigate risks by increasing the resilience of critical systems. Some examples of how to increase critical system resilience are: </p><ul><li>Increasing emergency storage capacity of items like food, fuel and medicine at secure locations.</li><li>Developing more resilient crops and protecting critical infrastructure assets against disasters both natural and anthropogenic.</li><li>Diversifying sourcing to e.g. 
ensure that digital communication systems tolerate power failures.</li><li>Hardening assets such as crops by making them more resilient.</li></ul><h2 id="3_2_Build_inclusive_progress_through_long_lasting_and_well_functioning_institutions">3.2 Build inclusive progress through long-lasting and well-functioning institutions</h2><p>Another approach to positively shift the trajectory of civilization is to increase the rate of progress, and make progress more inclusive. Continuous progress can improve human life quality and create a flourishing future for people of diverse backgrounds. Collison and Cowen define <a href="https://www.theatlantic.com/science/archive/2019/07/we-need-new-science-progress/594946/"><u>progress</u></a> as economic, technological, scientific, cultural or organizational advancements that transform our lives and raise our living standard. This definition is broader than the typical economic definition focused on measuring GDP growth as a proxy for progress. In particular, it includes the opportunity to increase progress by increasing our ability to convert economic wealth into wellbeing. For this reason, we will use the term “economic progress” when referring to GDP growth, while “progress” alone will refer to the broader definition. Moreover, “wellbeing”, “welfare” and “happiness” are used interchangeably, and it is assumed that this is closer to a true measure of progress (in the broader sense) than purely economic metrics.</p><p><strong id="There_is_still_much_we_don_t_know_about_progress">There is still much we don’t know about progress</strong></p><p>There is an ongoing debate about whether there are fundamental limits to economic progress (and indeed <a href="https://www.researchgate.net/publication/348836201_What_is_the_Upper_Limit_of_Value"><u>if there are upper limits of progress overall</u></a>) - if, at some point in the future, GDP growth must slow down and approach zero. 
If there are limits to economic progress, then increasing the rate of economic progress will only speed up the arrival of a zero-growth world of abundance. This could severely limit the potential value of increasing the rate of economic progress.</p><p>If there is no immediate limit to economic progress, there are good reasons to believe that it could continue indefinitely, and improve human welfare in the process. Human quality of life has generally improved significantly since the Industrial Revolution. This strong correlation between GDP growth and improved life quality has been well documented by e.g. <a href="https://www.gapminder.org/"><u>Gapminder</u></a>. For example, the <a href="https://ourworldindata.org/a-history-of-global-living-conditions-in-5-charts"><u>percentage of people living in extreme poverty</u></a> has decreased from about 90% in 1820 to 10% in 2015. It is also argued that a <a href="https://www.worksinprogress.co/issue/securing-posterity/"><u>stagnation in growth is risky</u></a> in regards to existential risks. GDP growth is far from the only factor that influences progress. Other examples include improved economic distribution, sustainable development and effective transformation of economic growth to human welfare. </p><p>There are also ongoing discussions about how to best measure (a broader definition of) progress, if progress is slowing down or accelerating, and how existential risk is affected by the rate of economic progress. This is briefly covered in the <a href="https://globalprioritiesinstitute.org/wp-content/uploads/GPI-research-agenda-version-2.1.pdf"><u>GPI research agenda</u></a>, and somewhat more extensively in sources therein.</p><p>To improve our understanding of how progress occurs, Collison and Cowen have proposed to develop “<a href="https://www.theatlantic.com/science/archive/2019/07/we-need-new-science-progress/594946/"><u>Progress Studies</u></a>” as a field of research. 
According to Collison and Cowen, progress studies investigates successful institutions, people, organizations and cultures to find common factors that are linked to progress. If we succeed in finding common factors between Ancient Greece, the Industrial Revolution and Silicon Valley, we can improve progress by acting accordingly. Due to the immaturity of progress studies, we have yet to find such common factors. However, scientific reform and interventions as described above are seemingly very promising. </p><p><strong id="General_ideas_for_how_to_increase_progress">General ideas for how to increase progress</strong></p><p>There are three main paths to increasing inclusive progress: increasing economic growth, making progress more inclusive, and converting economic wealth into welfare. The first path has been promoted by e.g. <a href="https://80000hours.org/podcast/episodes/tyler-cowen-stubborn-attachments/"><u>Tyler Cowen, arguing</u></a> that it is among the most powerful tools to improve the future because economic growth compounds over time.</p><p>Making progress more inclusive by redistributing resources or social status can increase total human happiness. According to 80,000 Hours, <a href="https://80000hours.org/articles/money-and-happiness/"><u>happiness</u></a> increases <a href="https://www.pnas.org/content/118/4/e2016976118"><u>logarithmically </u></a>when one becomes wealthier, which means that it is a lot more cost-effective to increase the wealth of poor people. Therefore, redistribution of progress is also very important toward effectively and positively shifting the trajectory of humanity. </p><p>While there is a strong correlation between economic wealth and wellbeing, it is not all that matters. 
Some countries have higher levels of happiness than others, despite being poorer - for instance, self-reported <a href="https://ourworldindata.org/grapher/gdp-vs-happiness"><u>happiness levels in Costa Rica are higher than in Luxembourg, while GDP is 6x lower</u></a>. It is plausible that we can find ways to make happiness cheaper, so that a similar level of economic wealth can be translated into more welfare.</p><p>It is hard to know the counterfactual impact of interventions focused on any of these paths. While catastrophic risk mitigation is focused on changing the outcomes of forks in the path of civilization, interventions for progress to a larger degree rely on shifting long-term trends that are hard to reason about empirically. So far, hypotheses for effective interventions have been generated through the use of some heuristics, including:</p><ul><li>Institutions can coordinate the efforts of individuals, and thereby multiply their total impact. For this reason, changes in institutional designs are “hingey” - a limited effort to improve an institution can have lasting effects at scale</li><li>Some institutional reforms matter more than others. In particular, longer-lasting institutions (examples may include the American Constitution or Ivy League schools) can maintain their influence over time, so reforming these institutions is a way to have a more durable impact. This is a version of “<a href="https://www.effectivealtruism.org/articles/a-proposed-adjustment-to-the-astronomical-waste-argument-nick-beckstead/"><u>path-dependent trajectory changes</u></a>” advocated for by Nick Beckstead, and further discussed in e.g. Eliezer Yudkowsky’s <a href="https://equilibriabook.com/"><u>Inadequate Equilibria</u></a></li><li>Moreover, more influential institutions (e.g. measured in budget size, number of members or technological capabilities) typically offer a larger potential for impact.</li><li>Finally, reforms that create positive feedback loops (e.g. 
by improving processes that are essential for progress, like science, innovation or decision making) accumulate over time</li></ul><p><strong id="Specific_proposals_for_how_to_increase_inclusive_progress">Specific proposals for how to increase inclusive progress</strong></p><p>It is commonly argued that the scientific revolution has been one of the key drivers of progress in the last centuries, but today many scholars criticize the modern academic institutions for being sub-optimal. For this reason, interventions aiming to improve academic research may be one promising category to increase the rate of progress. Some examples among many interventions aiming to improve academic research include <a href="https://www.replicationmarkets.com/"><u>Replication Markets</u></a>, <a href="https://arxiv.org/"><u>ArXiv</u></a>, <a href="https://www.semanticscholar.org/"><u>Semantic Scholar</u></a> and <a href="https://ought.org/"><u>Ought</u></a>. Replication Markets use forecasting to estimate a research claim’s chance of replication. ArXiv and Semantic Scholar are archives with scientific papers, and Ought tries to figure out which questions humans can delegate to artificial intelligence. Additionally, “scientific research” is one of the top cause areas of the Open Philanthropy Project.</p><p>All of the abovementioned interventions are improving academic progress, but there are also non-academic interventions that may increase progress. Some examples from the US Policy focus area of Open Philanthropy Project (Open Phil) include:</p><ul><li><a href="https://www.foreignaffairs.com/articles/united-states/2020-09-14/americas-exceptional-housing-crisis"><u>Urban zoning/land use reform</u></a>, which is meant to reduce the costs of living in cities. 
This may increase progress because it allows people to move to areas with great economic opportunities</li><li><a href="https://www.openphilanthropy.org/focus/us-policy/macroeconomic-policy"><u>Macroeconomic stabilization policy</u></a>, where Open Philanthropy funds advocacy initiatives focused on emphasizing the importance of alleviating suffering and lost output from unemployment during economic crises</li><li><a href="https://www.openphilanthropy.org/focus/us-policy/immigration-policy"><u>Immigration policy reform</u></a>, which may both provide economic opportunities for people from lower-income countries and increase long-term economic growth</li><li><a href="https://forum.effectivealtruism.org/posts/8Rn2gw7escCc2Rmb7/thoughts-on-electoral-reform"><u>Electoral reform</u></a>: e.g. campaign financing rules, election security measures, and improved voting systems (e.g. <a href="https://electionscience.org/approval-voting-101/"><u>approval voting</u></a> or <a href="https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2003531"><u>quadratic voting</u></a>), to better ensure that elected officials represent the electorate and reduce the <a href="https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors"><u>risk of malevolent leaders</u></a></li></ul><h2 id="3_3_What_about_sustainability_">3.3 What about sustainability?</h2><p>Outside of the effective altruism movement, sustainability is one of the most common cause areas for people concerned about the welfare of future generations. Significant resources are invested in ensuring that our GHG emissions are brought down, that our depletion of natural resources and destruction of species habitats are slowed, and that state budgets are fiscally balanced across generations. 
Thus it may seem strange that sustainability has played such a small role in this article.</p><p>Our argument, borrowed from <a href="http://www.stafforini.com/blog/bostrom/"><u>Bostrom </u></a>and others in the EA movement, is that unsustainabilities are bad if they exacerbate catastrophic risk, or if they slow down the rate of inclusive progress. <a href="https://www.mckinsey.com/business-functions/sustainability/our-insights/climate-risk-and-response-physical-hazards-and-socioeconomic-impacts"><u>Research by the McKinsey Global Institute</u></a> shows that unmitigated climate change can be harmful in both of these ways. <a href="https://www.mckinsey.com/industries/public-and-social-sector/our-insights/the-social-contract-in-the-21st-century"><u>Further research</u></a> by the McKinsey Global Institute demonstrates that the social contract is eroding across developed economies, and that economic outcomes for individuals are worsening as a consequence. In cases like these where the unsustainabilities are expected to create large amounts of human suffering, we should work hard to become more sustainable.</p><h1 id="4_0_Summary"><strong>4.0 Summary</strong></h1><p>There are several objectives of longtermist policy making. We have presented three categories of objectives, where the objectives in the bottom layers are potential enablers of the upper objectives. All of them are relevant to the necessary prioritization of future generations, given that longtermism is plausible. </p><p>Each of the objectives and their sub-objectives are well covered in existing literature, but to our knowledge they have not been presented in this structure before. In this article we have summarized some of the relevant parts of the literature, in the hope of providing an accessible introduction to the field. Furthermore, we hope that some points in this article can serve as coordination points for more experienced longtermists - e.g. 
when referring to which parts of longtermist policy making they are attempting to improve, and why.</p></div></div></div></div><div class="PostsPage-centralColumn PostsPage-betweenPostAndComments"><div class="PostsPagePostFooter-footerSection"><div class="PostsPagePostFooter-voteBottom"><div class="EAEmojisVoteOnPost-root"><div class="PostsVoteDefault-voteBlockHorizontal"><div class="PostsVoteDefault-upvoteHorizontal" title="Click-and-hold for strong vote (click twice on mobile)"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-up" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></div><div class="PostsVoteDefault-voteScoresHorizontal"><div title="39 Votes"><h1 class="Typography-root Typography-headline PostsVoteDefault-voteScore">54</h1></div></div><div class="PostsVoteDefault-downvoteHorizontal" title="Click-and-hold for strong vote (click twice on mobile)"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-down" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow 
VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></div></div><div class="EAEmojisVoteOnPost-divider EAEmojisVoteOnPost-hideOnMobile"></div><div class="EAEmojisVoteOnPost-reacts EAEmojisVoteOnPost-hideOnMobile"><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button EAReactsSection-buttonLarge"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Vector (Stroke)" d="M11.5419 2.12049C11.7994 1.762 11.737 1.24987 11.3935 0.972329C11.0428 0.688906 10.5419 0.764291 10.2795 1.12957L4.54399 9.11368L1.65149 6.04587C1.34241 5.71806 0.836155 5.71806 0.52708 6.04587C0.224307 6.36699 0.224307 6.88303 0.527079 7.20416L4.06278 10.9541C4.22277 11.1238 4.44712 11.2146 4.67877 11.1981C4.91025 11.1816 5.11998 11.06 5.25616 10.8705L11.5419 2.12049Z" fill="currentColor" stroke="currentColor" stroke-width="0.4" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button EAReactsSection-buttonLarge"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Union" d="M2.28033 1.21967C1.98744 0.926777 1.51256 0.926777 1.21967 1.21967C0.926777 1.51256 0.926777 1.98744 1.21967 2.28033L4.93934 6L1.21967 9.71967C0.926777 10.0126 0.926777 10.4874 1.21967 10.7803C1.51256 11.0732 1.98744 11.0732 2.28033 10.7803L6 7.06066L9.71967 10.7803C10.0126 11.0732 10.4874 11.0732 10.7803 10.7803C11.0732 10.4874 11.0732 10.0126 10.7803 9.71967L7.06066 6L10.7803 2.28033C11.0732 1.98744 11.0732 1.51256 10.7803 1.21967C10.4874 0.926777 10.0126 0.926777 9.71967 1.21967L6 4.93934L2.28033 1.21967Z" fill="currentColor" 
stroke="currentColor" stroke-width="0.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><div role="button" class="EAReactsSection-button EAReactsSection-buttonLarge"><span class="LWTooltip-root"><svg width="20" height="18" viewBox="0 0 20 18" fill="none" xmlns="http://www.w3.org/2000/svg" class="EAReactsSection-addEmojiIcon EAReactsSection-addEmojiIconLarge"><rect x="14.75" width="1.5" height="7.5" rx="0.2" fill="currentColor"></rect><rect x="11.75" y="4.5" width="1.5" height="7.5" rx="0.2" transform="rotate(-90 11.75 4.5)" fill="currentColor"></rect><circle cx="6" cy="8.25" r="1.25" fill="currentColor"></circle><circle cx="11.5" cy="8.25" r="1.25" fill="currentColor"></circle><path d="M8.74999 14C9.28048 14 9.78913 13.7892 10.1643 13.4141C10.5392 13.0392 10.75 12.5305 10.75 12C10.7481 11.648 10.6522 11.3027 10.4726 11H7.02744C6.84783 11.3027 6.75192 11.648 6.75 12C6.75 12.5305 6.96083 13.0392 7.33575 13.4141C7.71084 13.7892 8.21951 14 8.74999 14Z" fill="currentColor"></path><path fill-rule="evenodd" clip-rule="evenodd" d="M8.75586 1.9375C8.75391 1.9375 8.75195 1.9375 8.75 1.9375C4.33172 1.9375 0.75 5.51922 0.75 9.9375C0.75 14.3558 4.33172 17.9375 8.75 17.9375C13.0061 17.9375 16.4859 14.6139 16.7357 10.4205H15.2323C14.9852 13.7848 12.1774 16.4375 8.75 16.4375C5.16015 16.4375 2.25 13.5274 2.25 9.9375C2.25 6.34765 5.16015 3.4375 8.75 3.4375C8.75195 3.4375 8.75391 3.4375 8.75586 3.4375V1.9375Z" fill="currentColor"></path></svg></span></div></div></div></div><div class="PostsPagePostFooter-secondaryInfoRight"><span class="LWTooltip-root"><span class="BookmarkButton-container"><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" aria-hidden="true" class="BookmarkButton-icon PostsPagePostFooter-bookmarkButton ForumIcon-root"><path stroke-linecap="round" stroke-linejoin="round" d="M17.593 3.322c1.1.128 1.907 1.077 1.907 2.185V21L12 17.25 4.5 21V5.507c0-1.108.806-2.057 
1.907-2.185a48.507 48.507 0 0111.186 0z"></path></svg> </span></span><div class="SharePostButton-root"><div><span class="LWTooltip-root"><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" aria-hidden="true" class="SharePostButton-icon ForumIcon-root"><path stroke-linecap="round" stroke-linejoin="round" d="M3 16.5v2.25A2.25 2.25 0 005.25 21h13.5A2.25 2.25 0 0021 18.75V16.5m-13.5-9L12 3m0 0l4.5 4.5M12 3v13.5"></path></svg></span></div></div><span class="PostsPagePostFooter-actions"><div class="PostActionsButton-root"><div><svg class="MuiSvgIcon-root PostActionsButton-icon" focusable="false" viewBox="0 0 24 24" aria-hidden="true" role="presentation"><path fill="none" d="M0 0h24v24H0z"></path><path d="M6 10c-1.1 0-2 .9-2 2s.9 2 2 2 2-.9 2-2-.9-2-2-2zm12 0c-1.1 0-2 .9-2 2s.9 2 2 2 2-.9 2-2-.9-2-2-2zm-6 0c-1.1 0-2 .9-2 2s.9 2 2 2 2-.9 2-2-.9-2-2-2z"></path></svg></div></div></span></div></div><div class="EAEmojisVoteOnPostSecondary-root"><div class="EAEmojisVoteOnPostSecondary-divider"></div><div class="SectionTitle-root"><h1 id="reactions" class="Typography-root Typography-display1 SectionTitle-title EAEmojisVoteOnPostSecondary-heading">Reactions</h1><div class="SectionTitle-children"></div></div><div class="EAEmojisVoteOnPostSecondary-reacts"><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button EAReactsSection-buttonLarge"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Vector (Stroke)" d="M11.5419 2.12049C11.7994 1.762 11.737 1.24987 11.3935 0.972329C11.0428 0.688906 10.5419 0.764291 10.2795 1.12957L4.54399 9.11368L1.65149 6.04587C1.34241 5.71806 0.836155 5.71806 0.52708 6.04587C0.224307 6.36699 0.224307 6.88303 0.527079 7.20416L4.06278 10.9541C4.22277 11.1238 4.44712 11.2146 4.67877 11.1981C4.91025 11.1816 5.11998 11.06 5.25616 10.8705L11.5419 2.12049Z" fill="currentColor" 
stroke="currentColor" stroke-width="0.4" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button EAReactsSection-buttonLarge"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Union" d="M2.28033 1.21967C1.98744 0.926777 1.51256 0.926777 1.21967 1.21967C0.926777 1.51256 0.926777 1.98744 1.21967 2.28033L4.93934 6L1.21967 9.71967C0.926777 10.0126 0.926777 10.4874 1.21967 10.7803C1.51256 11.0732 1.98744 11.0732 2.28033 10.7803L6 7.06066L9.71967 10.7803C10.0126 11.0732 10.4874 11.0732 10.7803 10.7803C11.0732 10.4874 11.0732 10.0126 10.7803 9.71967L7.06066 6L10.7803 2.28033C11.0732 1.98744 11.0732 1.51256 10.7803 1.21967C10.4874 0.926777 10.0126 0.926777 9.71967 1.21967L6 4.93934L2.28033 1.21967Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><div role="button" class="EAReactsSection-button EAReactsSection-buttonLarge"><span class="LWTooltip-root"><svg width="20" height="18" viewBox="0 0 20 18" fill="none" xmlns="http://www.w3.org/2000/svg" class="EAReactsSection-addEmojiIcon EAReactsSection-addEmojiIconLarge"><rect x="14.75" width="1.5" height="7.5" rx="0.2" fill="currentColor"></rect><rect x="11.75" y="4.5" width="1.5" height="7.5" rx="0.2" transform="rotate(-90 11.75 4.5)" fill="currentColor"></rect><circle cx="6" cy="8.25" r="1.25" fill="currentColor"></circle><circle cx="11.5" cy="8.25" r="1.25" fill="currentColor"></circle><path d="M8.74999 14C9.28048 14 9.78913 13.7892 10.1643 13.4141C10.5392 13.0392 10.75 12.5305 10.75 12C10.7481 11.648 10.6522 11.3027 10.4726 11H7.02744C6.84783 11.3027 6.75192 11.648 6.75 12C6.75 12.5305 6.96083 13.0392 7.33575 13.4141C7.71084 13.7892 8.21951 14 8.74999 14Z" fill="currentColor"></path><path fill-rule="evenodd" 
clip-rule="evenodd" d="M8.75586 1.9375C8.75391 1.9375 8.75195 1.9375 8.75 1.9375C4.33172 1.9375 0.75 5.51922 0.75 9.9375C0.75 14.3558 4.33172 17.9375 8.75 17.9375C13.0061 17.9375 16.4859 14.6139 16.7357 10.4205H15.2323C14.9852 13.7848 12.1774 16.4375 8.75 16.4375C5.16015 16.4375 2.25 13.5274 2.25 9.9375C2.25 6.34765 5.16015 3.4375 8.75 3.4375C8.75195 3.4375 8.75391 3.4375 8.75586 3.4375V1.9375Z" fill="currentColor"></path></svg></span></div></div></div><div class="PingbacksList-root"><div class="PingbacksList-title"><span class="LWTooltip-root"><span>Mentioned in</span></span></div><div class="PingbacksList-list"><div><div class="Pingback-root"><span class="Typography-root Typography-body2 PostsItem2MetaInfo-metaInfo Pingback-karma"><span class="LWTooltip-root">84</span></span><span class=""><span class="PostsTitle-root PostsTitle-wrap"><span><a href="/posts/7SjtFYo6sCe3588Tx/why-scale-is-overrated-the-case-for-increasing-ea-policy"><span>Why scale is overrated: The case for increasing EA policy efforts in smaller countries</span></a></span></span></span></div></div><div><div class="Pingback-root"><span class="Typography-root Typography-body2 PostsItem2MetaInfo-metaInfo Pingback-karma"><span class="LWTooltip-root">37</span></span><span class=""><span class="PostsTitle-root PostsTitle-wrap"><span><a href="/posts/yEKQQQoN2W3Jn2Mue/ea-updates-for-march-2021"><span>EA Updates for March 2021</span></a></span></span></span></div></div><div><div class="Pingback-root"><span class="Typography-root Typography-body2 PostsItem2MetaInfo-metaInfo Pingback-karma"><span class="LWTooltip-root">10</span></span><span class=""><span class="PostsTitle-root PostsTitle-wrap"><span><a href="/posts/Cct4uvs7frmpKx8Nb/gwwc-march-2021-newsletter"><span>GWWC March 2021 Newsletter</span></a></span></span></span></div></div></div></div><div class="PostsPage-recommendations"><div class="PostsPageRecommendationsList-root"><div class="SectionTitle-root"><h1 id="more-posts-like-this" 
class="Typography-root Typography-display1 SectionTitle-title PostsPageRecommendationsList-title">More posts like this</h1><div class="SectionTitle-children"></div></div><div class="PostsPageRecommendationsList-listWrapper"><div class="Loading-spinner"><div class="Loading-bounce1"></div><div class="Loading-bounce2"></div><div class="Loading-bounce3"></div></div></div></div></div></div><span><span><div class="PostsPage-commentsSection"><div class="CommentsListSection-root CommentsListSection-maxWidthRoot"><div id="comments"></div><div class="CommentsListSection-commentsHeadline">Comments<span class="CommentsListSection-commentCount">7</span></div><div id="posts-thread-new-comment" class="CommentsListSection-newComment"><div class="CommentsNewForm-root"> <div class="CommentsNewForm-form"><div><form class="vulcan-form document-new" id="new-comment-form"><div class="FormErrors-root form-errors"></div><div class="form-input input-contents form-component-EditorFormComponent"><div class="EditorFormComponent-root"><div><div class="EditorFormComponent-editor EditorFormComponent-commentBodyStyles ContentStyles-base content ContentStyles-commentBody"><div class="Loading-spinner"><div class="Loading-bounce1"></div><div class="Loading-bounce2"></div><div class="Loading-bounce3"></div></div></div></div></div></div><div class="CommentsNewForm-submit"><button tabindex="0" class="MuiButtonBase-root MuiButton-root MuiButton-contained MuiButton-containedPrimary MuiButton-raised MuiButton-raisedPrimary CommentsNewForm-formButton CommentsNewForm-submitButton" type="submit" id="new-comment-submit"><span class="MuiButton-label">Comment</span><span class="MuiTouchRipple-root"></span></button></div></form></div></div> </div></div><div class="CommentsListMeta-root"><span class="Typography-root Typography-body2 CommentsListSection-inline">Sorted by <div class="InlineSelect-root"><a class="InlineSelect-link">New & upvoted</a></div></span><span class="Typography-root Typography-body2 
CommentsListSection-clickToHighlightNewSince">Click to highlight new comments since: <a class="CommentsListSection-button"><time dateTime="2024-11-28T03:34:30.539Z">Today at 3:34 AM</time></a></span></div><div class=""><div><div class="comments-node CommentFrame-commentsNodeRoot comments-node-root comments-node-odd CommentFrame-node" id="LxHcNDY5SaqQwkg5G"><div><div class="CommentsItem-root recent-comments-node"><div class="CommentsItem-postTitleRow"></div><div class="CommentsItem-body"><div class="CommentsItemMeta-root"><a class="CommentsItemMeta-collapse"><svg width="20" height="20" viewBox="0 0 20 20" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="CommentsItemMeta-collapseChevron CommentsItemMeta-collapseChevronOpen ForumIcon-root"><path fill-rule="evenodd" clip-rule="evenodd" d="M7.20938 14.7698C6.92228 14.4713 6.93159 13.9965 7.23017 13.7094L11.1679 10L7.23017 6.29062C6.93159 6.00353 6.92228 5.52875 7.20938 5.23017C7.49647 4.93159 7.97125 4.92228 8.26983 5.20937L12.7698 9.45937C12.9169 9.60078 13 9.79599 13 10C13 10.204 12.9169 10.3992 12.7698 10.5406L8.26983 14.7906C7.97125 15.0777 7.49647 15.0684 7.20938 14.7698Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></a><span class="LWTooltip-root"><a class="CommentUserName-mainWrapper CommentUserName-fullWrapper CommentsItemMeta-username" href="/users/kbog"><div class="CommentUserName-profileImagePlaceholder"></div><span class="UsersNameDisplay-color CommentUserName-author">kbog</span></a></span><span class="CommentsItemDate-root CommentsItemDate-date"><a rel="nofollow" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1?commentId=LxHcNDY5SaqQwkg5G"><span class="LWTooltip-root"><time dateTime="2021-02-14T05:15:27.605Z">Feb 14 2021</time></span></a></span><span class="OverallVoteAxis-vote"><span class="OverallVoteAxis-overallSection EAEmojisVoteOnComment-overallAxis OverallVoteAxis-overallSectionBox"><span 
class="LWTooltip-root"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-left" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></span><span class="LWTooltip-root"><span class="OverallVoteAxis-voteScore">13</span></span><span class="LWTooltip-root"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-right" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></span></span></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Vector (Stroke)" d="M11.5419 2.12049C11.7994 1.762 11.737 1.24987 11.3935 0.972329C11.0428 0.688906 10.5419 0.764291 10.2795 1.12957L4.54399 
9.11368L1.65149 6.04587C1.34241 5.71806 0.836155 5.71806 0.52708 6.04587C0.224307 6.36699 0.224307 6.88303 0.527079 7.20416L4.06278 10.9541C4.22277 11.1238 4.44712 11.2146 4.67877 11.1981C4.91025 11.1816 5.11998 11.06 5.25616 10.8705L11.5419 2.12049Z" fill="currentColor" stroke="currentColor" stroke-width="0.4" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Union" d="M2.28033 1.21967C1.98744 0.926777 1.51256 0.926777 1.21967 1.21967C0.926777 1.51256 0.926777 1.98744 1.21967 2.28033L4.93934 6L1.21967 9.71967C0.926777 10.0126 0.926777 10.4874 1.21967 10.7803C1.51256 11.0732 1.98744 11.0732 2.28033 10.7803L6 7.06066L9.71967 10.7803C10.0126 11.0732 10.4874 11.0732 10.7803 10.7803C11.0732 10.4874 11.0732 10.0126 10.7803 9.71967L7.06066 6L10.7803 2.28033C11.0732 1.98744 11.0732 1.51256 10.7803 1.21967C10.4874 0.926777 10.0126 0.926777 9.71967 1.21967L6 4.93934L2.28033 1.21967Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><div role="button" class="EAReactsSection-button"><span class="LWTooltip-root"><svg width="20" height="18" viewBox="0 0 20 18" fill="none" xmlns="http://www.w3.org/2000/svg" class="EAReactsSection-addEmojiIcon"><rect x="14.75" width="1.5" height="7.5" rx="0.2" fill="currentColor"></rect><rect x="11.75" y="4.5" width="1.5" height="7.5" rx="0.2" transform="rotate(-90 11.75 4.5)" fill="currentColor"></rect><circle cx="6" cy="8.25" r="1.25" fill="currentColor"></circle><circle cx="11.5" cy="8.25" r="1.25" fill="currentColor"></circle><path d="M8.74999 14C9.28048 14 9.78913 13.7892 10.1643 13.4141C10.5392 13.0392 10.75 12.5305 10.75 12C10.7481 11.648 10.6522 11.3027 10.4726 
11H7.02744C6.84783 11.3027 6.75192 11.648 6.75 12C6.75 12.5305 6.96083 13.0392 7.33575 13.4141C7.71084 13.7892 8.21951 14 8.74999 14Z" fill="currentColor"></path><path fill-rule="evenodd" clip-rule="evenodd" d="M8.75586 1.9375C8.75391 1.9375 8.75195 1.9375 8.75 1.9375C4.33172 1.9375 0.75 5.51922 0.75 9.9375C0.75 14.3558 4.33172 17.9375 8.75 17.9375C13.0061 17.9375 16.4859 14.6139 16.7357 10.4205H15.2323C14.9852 13.7848 12.1774 16.4375 8.75 16.4375C5.16015 16.4375 2.25 13.5274 2.25 9.9375C2.25 6.34765 5.16015 3.4375 8.75 3.4375C8.75195 3.4375 8.75391 3.4375 8.75586 3.4375V1.9375Z" fill="currentColor"></path></svg></span></div><span class="CommentsItemMeta-rightSection"><a rel="nofollow" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1?commentId=LxHcNDY5SaqQwkg5G"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" fill="currentColor" aria-hidden="true" class="CommentsItemMeta-linkIcon ForumIcon-root"><path d="M12.232 4.232a2.5 2.5 0 013.536 3.536l-1.225 1.224a.75.75 0 001.061 1.06l1.224-1.224a4 4 0 00-5.656-5.656l-3 3a4 4 0 00.225 5.865.75.75 0 00.977-1.138 2.5 2.5 0 01-.142-3.667l3-3z"></path><path d="M11.603 7.963a.75.75 0 00-.977 1.138 2.5 2.5 0 01.142 3.667l-3 3a2.5 2.5 0 01-3.536-3.536l1.225-1.224a.75.75 0 00-1.061-1.06l-1.224 1.224a4 4 0 105.656 5.656l3-3a4 4 0 00-.225-5.865z"></path></svg></a></span></div><div class="CommentBody-root ContentStyles-base content ContentStyles-commentBody"><div class="CommentBody-commentStyling"><p>I'm skeptical of this framework because in reality part 2 seems optional - we don't need to reshape the political system to be more longtermist in order to make progress. 
For instance, those Open Phil recommendations like land use reform can be promoted thru conventional forms of lobbying and coalition building.</p><p>In fact, a vibrant and policy-engaged EA community that focuses on understandable short and medium term problems can itself become a fairly effective long-run institution, thus reducing the needs in part 1.</p><p>Additionally, while substantively defining a good society for the future may be difficult, we also have the option of defining it procedurally. The simplest example is that we can promote things like democracy or other mechanisms which tend to produce good outcomes. Or we can increase levels of compassion and rationality so that the architects of future societies will act better. This is sort of what you describe in part 2, but I'd emphasize that we can make political institutions which are generically better rather than specifically making them more longtermist.</p><p>This is not to say that anything in this post is a bad idea, just that there are more options for meeting longtermist goals.</p></div></div><div class="CommentBottom-bottom"><a class="comments-item-reply-link CommentsItem-replyLink">Reply</a></div></div></div></div><div class="CommentsNode-children"><div class="CommentsNode-parentScroll"></div><div><div class="comments-node comments-node-even CommentFrame-node CommentFrame-child" id="boTao4obr2YurBCd5"><div><div class="CommentsItem-root recent-comments-node"><div class="CommentsItem-postTitleRow"></div><div class="CommentsItem-body"><div class="CommentsItemMeta-root"><a class="CommentsItemMeta-collapse"><svg width="20" height="20" viewBox="0 0 20 20" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="CommentsItemMeta-collapseChevron CommentsItemMeta-collapseChevronOpen ForumIcon-root"><path fill-rule="evenodd" clip-rule="evenodd" d="M7.20938 14.7698C6.92228 14.4713 6.93159 13.9965 7.23017 13.7094L11.1679 10L7.23017 6.29062C6.93159 6.00353 6.92228 5.52875 7.20938 5.23017C7.49647 
4.93159 7.97125 4.92228 8.26983 5.20937L12.7698 9.45937C12.9169 9.60078 13 9.79599 13 10C13 10.204 12.9169 10.3992 12.7698 10.5406L8.26983 14.7906C7.97125 15.0777 7.49647 15.0684 7.20938 14.7698Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></a><span class="LWTooltip-root"><a class="CommentUserName-mainWrapper CommentUserName-fullWrapper CommentsItemMeta-username" href="/users/andreas_massey"><div class="CommentUserName-profileImagePlaceholder"></div><span class="UsersNameDisplay-color CommentUserName-author">Andreas_Massey</span></a></span><span class="CommentsItemMeta-userMarkers"><span class="LWTooltip-root UserCommentMarkers-iconWrapper"><svg width="16" height="16" viewBox="0 0 16 16" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="UserCommentMarkers-postAuthorIcon ForumIcon-root"><path d="M7.93765 3.68928C7.93765 5.23289 6.68626 6.48428 5.14265 6.48428C3.59905 6.48428 2.34766 5.23289 2.34766 3.68928C2.34766 2.14568 3.59905 0.894287 5.14265 0.894287C6.68626 0.894287 7.93765 2.14568 7.93765 3.68928Z" fill="currentColor"></path><path d="M8.79027 8.01598C8.45861 7.68432 8.06384 7.44751 7.62172 7.28955C6.9269 7.77904 6.05841 8.06333 5.14252 8.06333C4.22662 8.06333 3.37398 7.77915 2.66331 7.28955C1.40002 7.73169 0.5 8.916 0.5 10.3214L0.50011 11.9321C0.50011 12.2479 0.768539 12.5163 1.08434 12.5163H7.81121C7.59014 11.8215 7.55853 10.8898 7.98491 9.67396C8.20609 9.1056 8.47451 8.56863 8.7903 8.01601L8.79027 8.01598Z" fill="currentColor"></path><path d="M15.3748 3.02607C15.1222 2.89973 13.7326 3.98931 12.3114 5.52101C12.2957 5.53676 12.2798 5.55262 12.2798 5.58413L11.7903 8.12644L11.4113 6.83154C11.3797 6.75257 11.2692 6.72095 11.2219 6.79993C10.7797 7.35266 10.3692 7.92113 10.0218 8.4896C10.006 8.50535 10.006 8.53696 10.006 8.56857L10.3534 11.174L9.45331 10.0213C9.40594 9.95818 9.29535 9.97393 9.26385 10.0529C8.82171 11.2372 8.82171 12.2952 9.65862 12.99L8.5533 14.8059C8.47432 
14.948 8.56905 15.1059 8.727 15.1059H9.64287C9.73759 15.1059 9.81657 15.0428 9.83232 14.948C9.89544 14.6638 10.0691 14.0163 10.3377 13.3847C11.7903 13.9375 12.9589 12.7216 13.7959 10.9372C13.8275 10.8582 13.7642 10.7635 13.6853 10.7792L12.2325 10.9845L14.459 9.216C14.4748 9.20025 14.4907 9.18439 14.4907 9.16864C14.7117 8.47381 14.9012 7.74752 15.0591 7.06846C15.0749 6.98949 15.0118 6.92637 14.9328 6.94212L13.3695 7.24217L15.2959 5.59999C15.3117 5.58424 15.3276 5.55263 15.3276 5.53688C15.5486 4.14726 15.5486 3.12078 15.3749 3.02606L15.3748 3.02607Z" fill="currentColor"></path></svg></span></span><span class="CommentsItemDate-root CommentsItemDate-date"><a rel="nofollow" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1?commentId=boTao4obr2YurBCd5"><span class="LWTooltip-root"><time dateTime="2021-02-16T12:38:28.508Z">Feb 16 2021</time></span></a></span><span class="OverallVoteAxis-vote"><span class="OverallVoteAxis-overallSection EAEmojisVoteOnComment-overallAxis OverallVoteAxis-overallSectionBox"><span class="LWTooltip-root"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-left" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></span><span class="LWTooltip-root"><span class="OverallVoteAxis-voteScore">6</span></span><span class="LWTooltip-root"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root 
VoteArrowIconHollow-root VoteArrowIconHollow-right" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></span></span></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Vector (Stroke)" d="M11.5419 2.12049C11.7994 1.762 11.737 1.24987 11.3935 0.972329C11.0428 0.688906 10.5419 0.764291 10.2795 1.12957L4.54399 9.11368L1.65149 6.04587C1.34241 5.71806 0.836155 5.71806 0.52708 6.04587C0.224307 6.36699 0.224307 6.88303 0.527079 7.20416L4.06278 10.9541C4.22277 11.1238 4.44712 11.2146 4.67877 11.1981C4.91025 11.1816 5.11998 11.06 5.25616 10.8705L11.5419 2.12049Z" fill="currentColor" stroke="currentColor" stroke-width="0.4" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Union" d="M2.28033 1.21967C1.98744 0.926777 1.51256 0.926777 1.21967 1.21967C0.926777 1.51256 0.926777 1.98744 1.21967 2.28033L4.93934 6L1.21967 9.71967C0.926777 10.0126 0.926777 10.4874 1.21967 10.7803C1.51256 11.0732 1.98744 11.0732 2.28033 10.7803L6 7.06066L9.71967 10.7803C10.0126 11.0732 10.4874 11.0732 10.7803 
10.7803C11.0732 10.4874 11.0732 10.0126 10.7803 9.71967L7.06066 6L10.7803 2.28033C11.0732 1.98744 11.0732 1.51256 10.7803 1.21967C10.4874 0.926777 10.0126 0.926777 9.71967 1.21967L6 4.93934L2.28033 1.21967Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><div role="button" class="EAReactsSection-button"><span class="LWTooltip-root"><svg width="20" height="18" viewBox="0 0 20 18" fill="none" xmlns="http://www.w3.org/2000/svg" class="EAReactsSection-addEmojiIcon"><rect x="14.75" width="1.5" height="7.5" rx="0.2" fill="currentColor"></rect><rect x="11.75" y="4.5" width="1.5" height="7.5" rx="0.2" transform="rotate(-90 11.75 4.5)" fill="currentColor"></rect><circle cx="6" cy="8.25" r="1.25" fill="currentColor"></circle><circle cx="11.5" cy="8.25" r="1.25" fill="currentColor"></circle><path d="M8.74999 14C9.28048 14 9.78913 13.7892 10.1643 13.4141C10.5392 13.0392 10.75 12.5305 10.75 12C10.7481 11.648 10.6522 11.3027 10.4726 11H7.02744C6.84783 11.3027 6.75192 11.648 6.75 12C6.75 12.5305 6.96083 13.0392 7.33575 13.4141C7.71084 13.7892 8.21951 14 8.74999 14Z" fill="currentColor"></path><path fill-rule="evenodd" clip-rule="evenodd" d="M8.75586 1.9375C8.75391 1.9375 8.75195 1.9375 8.75 1.9375C4.33172 1.9375 0.75 5.51922 0.75 9.9375C0.75 14.3558 4.33172 17.9375 8.75 17.9375C13.0061 17.9375 16.4859 14.6139 16.7357 10.4205H15.2323C14.9852 13.7848 12.1774 16.4375 8.75 16.4375C5.16015 16.4375 2.25 13.5274 2.25 9.9375C2.25 6.34765 5.16015 3.4375 8.75 3.4375C8.75195 3.4375 8.75391 3.4375 8.75586 3.4375V1.9375Z" fill="currentColor"></path></svg></span></div><span class="CommentsItemMeta-rightSection"><a rel="nofollow" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1?commentId=boTao4obr2YurBCd5"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" fill="currentColor" aria-hidden="true" class="CommentsItemMeta-linkIcon ForumIcon-root"><path d="M12.232 
4.232a2.5 2.5 0 013.536 3.536l-1.225 1.224a.75.75 0 001.061 1.06l1.224-1.224a4 4 0 00-5.656-5.656l-3 3a4 4 0 00.225 5.865.75.75 0 00.977-1.138 2.5 2.5 0 01-.142-3.667l3-3z"></path><path d="M11.603 7.963a.75.75 0 00-.977 1.138 2.5 2.5 0 01.142 3.667l-3 3a2.5 2.5 0 01-3.536-3.536l1.225-1.224a.75.75 0 00-1.061-1.06l-1.224 1.224a4 4 0 105.656 5.656l3-3a4 4 0 00-.225-5.865z"></path></svg></a></span></div><div class="CommentBody-root ContentStyles-base content ContentStyles-commentBody"><div class="CommentBody-commentStyling"><p>Thank you for your feedback kbog.</p><p>First, we certainly agree that there are other options that have a limited influence on the future, however, for this article we wanted to only cover areas with a potential for outsized impact on the future. That is the reason we have confined ourselves to so few categories. </p><p>Second, there may be categories of interventions that are not addressed in our framework that are as important for improving the future as the interventions we list. If so, we welcome discussion on this topic, and hope that the framework can encourage productive discussion to identify such “intervention X”’s. </p><p>Third, I'm a bit confused about how we would focus on “processes that produce good outcomes” without first defining what we mean by good outcomes, and how to measure them?</p><p>Fourth, your point on taking the “individual more in focus” by emphasizing rationality and altruism improvement is a great suggestion. Admittedly, this may indeed be a potential lever to improve the future that we haven't sufficiently covered in our post as we were mostly concerned with improving institutions.
</p><p>Lastly, as for improving political institutions more broadly, see our part on progress.</p></div></div><div class="CommentBottom-bottom"><a class="comments-item-reply-link CommentsItem-replyLink">Reply</a></div></div></div></div><div class="CommentsNode-children"><div class="CommentsNode-parentScroll"></div><div><div class="comments-node comments-node-odd CommentFrame-node CommentFrame-child" id="kevgdPgWssSsk5fmi"><div><div class="CommentsItem-root recent-comments-node"><div class="CommentsItem-postTitleRow"></div><div class="CommentsItem-body"><div class="CommentsItemMeta-root"><a class="CommentsItemMeta-collapse"><svg width="20" height="20" viewBox="0 0 20 20" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="CommentsItemMeta-collapseChevron CommentsItemMeta-collapseChevronOpen ForumIcon-root"><path fill-rule="evenodd" clip-rule="evenodd" d="M7.20938 14.7698C6.92228 14.4713 6.93159 13.9965 7.23017 13.7094L11.1679 10L7.23017 6.29062C6.93159 6.00353 6.92228 5.52875 7.20938 5.23017C7.49647 4.93159 7.97125 4.92228 8.26983 5.20937L12.7698 9.45937C12.9169 9.60078 13 9.79599 13 10C13 10.204 12.9169 10.3992 12.7698 10.5406L8.26983 14.7906C7.97125 15.0777 7.49647 15.0684 7.20938 14.7698Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></a><span class="LWTooltip-root"><a class="CommentUserName-mainWrapper CommentUserName-fullWrapper CommentsItemMeta-username" href="/users/kbog"><div class="CommentUserName-profileImagePlaceholder"></div><span class="UsersNameDisplay-color CommentUserName-author">kbog</span></a></span><span class="CommentsItemDate-root CommentsItemDate-date"><a rel="nofollow" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1?commentId=kevgdPgWssSsk5fmi"><span class="LWTooltip-root"><time dateTime="2021-02-21T03:51:58.089Z">Feb 21 2021</time></span></a></span><span class="OverallVoteAxis-vote"><span class="OverallVoteAxis-overallSection 
EAEmojisVoteOnComment-overallAxis OverallVoteAxis-overallSectionBox"><span class="LWTooltip-root"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-left" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></span><span class="LWTooltip-root"><span class="OverallVoteAxis-voteScore">1</span></span><span class="LWTooltip-root"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-right" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></span></span></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Vector (Stroke)" d="M11.5419 2.12049C11.7994 1.762 11.737 1.24987 11.3935 
0.972329C11.0428 0.688906 10.5419 0.764291 10.2795 1.12957L4.54399 9.11368L1.65149 6.04587C1.34241 5.71806 0.836155 5.71806 0.52708 6.04587C0.224307 6.36699 0.224307 6.88303 0.527079 7.20416L4.06278 10.9541C4.22277 11.1238 4.44712 11.2146 4.67877 11.1981C4.91025 11.1816 5.11998 11.06 5.25616 10.8705L11.5419 2.12049Z" fill="currentColor" stroke="currentColor" stroke-width="0.4" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Union" d="M2.28033 1.21967C1.98744 0.926777 1.51256 0.926777 1.21967 1.21967C0.926777 1.51256 0.926777 1.98744 1.21967 2.28033L4.93934 6L1.21967 9.71967C0.926777 10.0126 0.926777 10.4874 1.21967 10.7803C1.51256 11.0732 1.98744 11.0732 2.28033 10.7803L6 7.06066L9.71967 10.7803C10.0126 11.0732 10.4874 11.0732 10.7803 10.7803C11.0732 10.4874 11.0732 10.0126 10.7803 9.71967L7.06066 6L10.7803 2.28033C11.0732 1.98744 11.0732 1.51256 10.7803 1.21967C10.4874 0.926777 10.0126 0.926777 9.71967 1.21967L6 4.93934L2.28033 1.21967Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><div role="button" class="EAReactsSection-button"><span class="LWTooltip-root"><svg width="20" height="18" viewBox="0 0 20 18" fill="none" xmlns="http://www.w3.org/2000/svg" class="EAReactsSection-addEmojiIcon"><rect x="14.75" width="1.5" height="7.5" rx="0.2" fill="currentColor"></rect><rect x="11.75" y="4.5" width="1.5" height="7.5" rx="0.2" transform="rotate(-90 11.75 4.5)" fill="currentColor"></rect><circle cx="6" cy="8.25" r="1.25" fill="currentColor"></circle><circle cx="11.5" cy="8.25" r="1.25" fill="currentColor"></circle><path d="M8.74999 14C9.28048 14 9.78913 13.7892 10.1643 13.4141C10.5392 13.0392 10.75 
12.5305 10.75 12C10.7481 11.648 10.6522 11.3027 10.4726 11H7.02744C6.84783 11.3027 6.75192 11.648 6.75 12C6.75 12.5305 6.96083 13.0392 7.33575 13.4141C7.71084 13.7892 8.21951 14 8.74999 14Z" fill="currentColor"></path><path fill-rule="evenodd" clip-rule="evenodd" d="M8.75586 1.9375C8.75391 1.9375 8.75195 1.9375 8.75 1.9375C4.33172 1.9375 0.75 5.51922 0.75 9.9375C0.75 14.3558 4.33172 17.9375 8.75 17.9375C13.0061 17.9375 16.4859 14.6139 16.7357 10.4205H15.2323C14.9852 13.7848 12.1774 16.4375 8.75 16.4375C5.16015 16.4375 2.25 13.5274 2.25 9.9375C2.25 6.34765 5.16015 3.4375 8.75 3.4375C8.75195 3.4375 8.75391 3.4375 8.75586 3.4375V1.9375Z" fill="currentColor"></path></svg></span></div><span class="CommentsItemMeta-rightSection"><a rel="nofollow" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1?commentId=kevgdPgWssSsk5fmi"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" fill="currentColor" aria-hidden="true" class="CommentsItemMeta-linkIcon ForumIcon-root"><path d="M12.232 4.232a2.5 2.5 0 013.536 3.536l-1.225 1.224a.75.75 0 001.061 1.06l1.224-1.224a4 4 0 00-5.656-5.656l-3 3a4 4 0 00.225 5.865.75.75 0 00.977-1.138 2.5 2.5 0 01-.142-3.667l3-3z"></path><path d="M11.603 7.963a.75.75 0 00-.977 1.138 2.5 2.5 0 01.142 3.667l-3 3a2.5 2.5 0 01-3.536-3.536l1.225-1.224a.75.75 0 00-1.061-1.06l-1.224 1.224a4 4 0 105.656 5.656l3-3a4 4 0 00-.225-5.865z"></path></svg></a></span></div><div class="CommentBody-root ContentStyles-base content ContentStyles-commentBody"><div class="CommentBody-commentStyling"><p>I think it's really not clear that reforming institutions to be more longtermist has an outsized long run impact compared to many other axes of institutional reform.</p><p>We know what constitutes good outcomes in the short run, so if we can design institutions to produce better short run outcomes, that will be beneficial in the long run insofar as those institutions endure into the long run. 
Institutional changes are inherently long-run.</p></div></div><div class="CommentBottom-bottom"><a class="comments-item-reply-link CommentsItem-replyLink">Reply</a></div></div></div></div><div class="CommentsNode-children"><div class="CommentsNode-parentScroll"></div><div><div class="comments-node comments-node-even CommentFrame-node CommentFrame-child" id="ppRR7jeL9dxBaNvhY"><div><div class="CommentsItem-root recent-comments-node"><div class="CommentsItem-postTitleRow"></div><div class="CommentsItem-body"><div class="CommentsItemMeta-root"><a class="CommentsItemMeta-collapse"><svg width="20" height="20" viewBox="0 0 20 20" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="CommentsItemMeta-collapseChevron CommentsItemMeta-collapseChevronOpen ForumIcon-root"><path fill-rule="evenodd" clip-rule="evenodd" d="M7.20938 14.7698C6.92228 14.4713 6.93159 13.9965 7.23017 13.7094L11.1679 10L7.23017 6.29062C6.93159 6.00353 6.92228 5.52875 7.20938 5.23017C7.49647 4.93159 7.97125 4.92228 8.26983 5.20937L12.7698 9.45937C12.9169 9.60078 13 9.79599 13 10C13 10.204 12.9169 10.3992 12.7698 10.5406L8.26983 14.7906C7.97125 15.0777 7.49647 15.0684 7.20938 14.7698Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></a><span class="LWTooltip-root"><a class="CommentUserName-mainWrapper CommentUserName-fullWrapper CommentsItemMeta-username" href="/users/andreas_massey"><div class="CommentUserName-profileImagePlaceholder"></div><span class="UsersNameDisplay-color CommentUserName-author">Andreas_Massey</span></a></span><span class="CommentsItemMeta-userMarkers"><span class="LWTooltip-root UserCommentMarkers-iconWrapper"><svg width="16" height="16" viewBox="0 0 16 16" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="UserCommentMarkers-postAuthorIcon ForumIcon-root"><path d="M7.93765 3.68928C7.93765 5.23289 6.68626 6.48428 5.14265 6.48428C3.59905 6.48428 2.34766 5.23289 2.34766 3.68928C2.34766 
2.14568 3.59905 0.894287 5.14265 0.894287C6.68626 0.894287 7.93765 2.14568 7.93765 3.68928Z" fill="currentColor"></path><path d="M8.79027 8.01598C8.45861 7.68432 8.06384 7.44751 7.62172 7.28955C6.9269 7.77904 6.05841 8.06333 5.14252 8.06333C4.22662 8.06333 3.37398 7.77915 2.66331 7.28955C1.40002 7.73169 0.5 8.916 0.5 10.3214L0.50011 11.9321C0.50011 12.2479 0.768539 12.5163 1.08434 12.5163H7.81121C7.59014 11.8215 7.55853 10.8898 7.98491 9.67396C8.20609 9.1056 8.47451 8.56863 8.7903 8.01601L8.79027 8.01598Z" fill="currentColor"></path><path d="M15.3748 3.02607C15.1222 2.89973 13.7326 3.98931 12.3114 5.52101C12.2957 5.53676 12.2798 5.55262 12.2798 5.58413L11.7903 8.12644L11.4113 6.83154C11.3797 6.75257 11.2692 6.72095 11.2219 6.79993C10.7797 7.35266 10.3692 7.92113 10.0218 8.4896C10.006 8.50535 10.006 8.53696 10.006 8.56857L10.3534 11.174L9.45331 10.0213C9.40594 9.95818 9.29535 9.97393 9.26385 10.0529C8.82171 11.2372 8.82171 12.2952 9.65862 12.99L8.5533 14.8059C8.47432 14.948 8.56905 15.1059 8.727 15.1059H9.64287C9.73759 15.1059 9.81657 15.0428 9.83232 14.948C9.89544 14.6638 10.0691 14.0163 10.3377 13.3847C11.7903 13.9375 12.9589 12.7216 13.7959 10.9372C13.8275 10.8582 13.7642 10.7635 13.6853 10.7792L12.2325 10.9845L14.459 9.216C14.4748 9.20025 14.4907 9.18439 14.4907 9.16864C14.7117 8.47381 14.9012 7.74752 15.0591 7.06846C15.0749 6.98949 15.0118 6.92637 14.9328 6.94212L13.3695 7.24217L15.2959 5.59999C15.3117 5.58424 15.3276 5.55263 15.3276 5.53688C15.5486 4.14726 15.5486 3.12078 15.3749 3.02606L15.3748 3.02607Z" fill="currentColor"></path></svg></span></span><span class="CommentsItemDate-root CommentsItemDate-date"><a rel="nofollow" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1?commentId=ppRR7jeL9dxBaNvhY"><span class="LWTooltip-root"><time dateTime="2021-03-02T12:30:39.679Z">Mar 2 2021</time></span></a></span><span class="OverallVoteAxis-vote"><span class="OverallVoteAxis-overallSection EAEmojisVoteOnComment-overallAxis 
OverallVoteAxis-overallSectionBox"><span class="LWTooltip-root"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-left" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></span><span class="LWTooltip-root"><span class="OverallVoteAxis-voteScore">8</span></span><span class="LWTooltip-root"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-right" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></span></span></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Vector (Stroke)" d="M11.5419 2.12049C11.7994 1.762 11.737 1.24987 11.3935 0.972329C11.0428 0.688906 10.5419 
0.764291 10.2795 1.12957L4.54399 9.11368L1.65149 6.04587C1.34241 5.71806 0.836155 5.71806 0.52708 6.04587C0.224307 6.36699 0.224307 6.88303 0.527079 7.20416L4.06278 10.9541C4.22277 11.1238 4.44712 11.2146 4.67877 11.1981C4.91025 11.1816 5.11998 11.06 5.25616 10.8705L11.5419 2.12049Z" fill="currentColor" stroke="currentColor" stroke-width="0.4" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Union" d="M2.28033 1.21967C1.98744 0.926777 1.51256 0.926777 1.21967 1.21967C0.926777 1.51256 0.926777 1.98744 1.21967 2.28033L4.93934 6L1.21967 9.71967C0.926777 10.0126 0.926777 10.4874 1.21967 10.7803C1.51256 11.0732 1.98744 11.0732 2.28033 10.7803L6 7.06066L9.71967 10.7803C10.0126 11.0732 10.4874 11.0732 10.7803 10.7803C11.0732 10.4874 11.0732 10.0126 10.7803 9.71967L7.06066 6L10.7803 2.28033C11.0732 1.98744 11.0732 1.51256 10.7803 1.21967C10.4874 0.926777 10.0126 0.926777 9.71967 1.21967L6 4.93934L2.28033 1.21967Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><div role="button" class="EAReactsSection-button"><span class="LWTooltip-root"><svg width="20" height="18" viewBox="0 0 20 18" fill="none" xmlns="http://www.w3.org/2000/svg" class="EAReactsSection-addEmojiIcon"><rect x="14.75" width="1.5" height="7.5" rx="0.2" fill="currentColor"></rect><rect x="11.75" y="4.5" width="1.5" height="7.5" rx="0.2" transform="rotate(-90 11.75 4.5)" fill="currentColor"></rect><circle cx="6" cy="8.25" r="1.25" fill="currentColor"></circle><circle cx="11.5" cy="8.25" r="1.25" fill="currentColor"></circle><path d="M8.74999 14C9.28048 14 9.78913 13.7892 10.1643 13.4141C10.5392 13.0392 10.75 12.5305 10.75 12C10.7481 11.648 
10.6522 11.3027 10.4726 11H7.02744C6.84783 11.3027 6.75192 11.648 6.75 12C6.75 12.5305 6.96083 13.0392 7.33575 13.4141C7.71084 13.7892 8.21951 14 8.74999 14Z" fill="currentColor"></path><path fill-rule="evenodd" clip-rule="evenodd" d="M8.75586 1.9375C8.75391 1.9375 8.75195 1.9375 8.75 1.9375C4.33172 1.9375 0.75 5.51922 0.75 9.9375C0.75 14.3558 4.33172 17.9375 8.75 17.9375C13.0061 17.9375 16.4859 14.6139 16.7357 10.4205H15.2323C14.9852 13.7848 12.1774 16.4375 8.75 16.4375C5.16015 16.4375 2.25 13.5274 2.25 9.9375C2.25 6.34765 5.16015 3.4375 8.75 3.4375C8.75195 3.4375 8.75391 3.4375 8.75586 3.4375V1.9375Z" fill="currentColor"></path></svg></span></div><span class="CommentsItemMeta-rightSection"><a rel="nofollow" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1?commentId=ppRR7jeL9dxBaNvhY"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" fill="currentColor" aria-hidden="true" class="CommentsItemMeta-linkIcon ForumIcon-root"><path d="M12.232 4.232a2.5 2.5 0 013.536 3.536l-1.225 1.224a.75.75 0 001.061 1.06l1.224-1.224a4 4 0 00-5.656-5.656l-3 3a4 4 0 00.225 5.865.75.75 0 00.977-1.138 2.5 2.5 0 01-.142-3.667l3-3z"></path><path d="M11.603 7.963a.75.75 0 00-.977 1.138 2.5 2.5 0 01.142 3.667l-3 3a2.5 2.5 0 01-3.536-3.536l1.225-1.224a.75.75 0 00-1.061-1.06l-1.224 1.224a4 4 0 105.656 5.656l3-3a4 4 0 00-.225-5.865z"></path></svg></a></span></div><div class="CommentBody-root ContentStyles-base content ContentStyles-commentBody"><div class="CommentBody-commentStyling"><p>The part of the article that you are referring to is in part inspired by John and MacAskill's paper “longtermist institutional reform”, where they propose reforms that are built to tackle political short-termism. The case for this relies on two assumptions:</p><p>1. Long term consequences have an outsized moral importance, despite the uncertainty of long-term effects.<br>2. Because of this, political decision making should be designed to optimize for longterm outcomes.
</p><p>Greaves and MacAskill have written a <a href="https://globalprioritiesinstitute.org/hilary-greaves-william-macaskill-the-case-for-strong-longtermism/">paper</a> arguing for assumption 1: "Because of the vast number of expected people in the future, it is quite plausible that for options that are appropriately chosen from a sufficiently large choice set, effects on the very long future dominate ex ante evaluations, even after taking into account the fact that further-future effects tend to be the most uncertain…“. We seem to agree on this assumption, but disagree on assumption 2. If I understand your argument against assumption 2, it assumes that there are no tradeoffs between optimizing for short-run outcomes and long-run outcomes. This assumption seems clearly false to us, and is implied to be false in “Longtermist institutional reform”. Consider fiscal policies for example: In the short run it could be beneficial to take all the savings in pension funds and spend them to boost the economy, but in the long run this is predictably harmful because many people will not afford to retire.</p></div></div><div class="CommentBottom-bottom"><a class="comments-item-reply-link CommentsItem-replyLink">Reply</a></div></div></div></div><div class="CommentsNode-children"><div class="CommentsNode-parentScroll"></div><div><div class="comments-node comments-node-odd CommentFrame-node CommentFrame-child CommentFrame-answerLeafComment" id="pqw82ArBvWqz6QjRB"><div><div class="CommentsItem-root recent-comments-node"><div class="CommentsItem-postTitleRow"></div><div class="CommentsItem-body"><div class="CommentsItemMeta-root"><a class="CommentsItemMeta-collapse"><svg width="20" height="20" viewBox="0 0 20 20" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="CommentsItemMeta-collapseChevron CommentsItemMeta-collapseChevronOpen ForumIcon-root"><path fill-rule="evenodd" clip-rule="evenodd" d="M7.20938 14.7698C6.92228 14.4713 6.93159 13.9965 7.23017 13.7094L11.1679 
10L7.23017 6.29062C6.93159 6.00353 6.92228 5.52875 7.20938 5.23017C7.49647 4.93159 7.97125 4.92228 8.26983 5.20937L12.7698 9.45937C12.9169 9.60078 13 9.79599 13 10C13 10.204 12.9169 10.3992 12.7698 10.5406L8.26983 14.7906C7.97125 15.0777 7.49647 15.0684 7.20938 14.7698Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></a><span class="LWTooltip-root"><a class="CommentUserName-mainWrapper CommentUserName-fullWrapper CommentsItemMeta-username" href="/users/kbog"><div class="CommentUserName-profileImagePlaceholder"></div><span class="UsersNameDisplay-color CommentUserName-author">kbog</span></a></span><span class="CommentsItemDate-root CommentsItemDate-date"><a rel="nofollow" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1?commentId=pqw82ArBvWqz6QjRB"><span class="LWTooltip-root"><time dateTime="2021-03-04T00:49:47.045Z">Mar 4 2021</time></span></a></span><span class="OverallVoteAxis-vote"><span class="OverallVoteAxis-overallSection EAEmojisVoteOnComment-overallAxis OverallVoteAxis-overallSectionBox"><span class="LWTooltip-root"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-left" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></span><span class="LWTooltip-root"><span class="OverallVoteAxis-voteScore">5</span></span><span class="LWTooltip-root"><button 
tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-right" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></span></span></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Vector (Stroke)" d="M11.5419 2.12049C11.7994 1.762 11.737 1.24987 11.3935 0.972329C11.0428 0.688906 10.5419 0.764291 10.2795 1.12957L4.54399 9.11368L1.65149 6.04587C1.34241 5.71806 0.836155 5.71806 0.52708 6.04587C0.224307 6.36699 0.224307 6.88303 0.527079 7.20416L4.06278 10.9541C4.22277 11.1238 4.44712 11.2146 4.67877 11.1981C4.91025 11.1816 5.11998 11.06 5.25616 10.8705L11.5419 2.12049Z" fill="currentColor" stroke="currentColor" stroke-width="0.4" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Union" d="M2.28033 1.21967C1.98744 0.926777 1.51256 0.926777 1.21967 1.21967C0.926777 1.51256 0.926777 1.98744 1.21967 2.28033L4.93934 6L1.21967 9.71967C0.926777 10.0126 0.926777 10.4874 1.21967 10.7803C1.51256 11.0732 1.98744 11.0732 2.28033 10.7803L6 
7.06066L9.71967 10.7803C10.0126 11.0732 10.4874 11.0732 10.7803 10.7803C11.0732 10.4874 11.0732 10.0126 10.7803 9.71967L7.06066 6L10.7803 2.28033C11.0732 1.98744 11.0732 1.51256 10.7803 1.21967C10.4874 0.926777 10.0126 0.926777 9.71967 1.21967L6 4.93934L2.28033 1.21967Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><div role="button" class="EAReactsSection-button"><span class="LWTooltip-root"><svg width="20" height="18" viewBox="0 0 20 18" fill="none" xmlns="http://www.w3.org/2000/svg" class="EAReactsSection-addEmojiIcon"><rect x="14.75" width="1.5" height="7.5" rx="0.2" fill="currentColor"></rect><rect x="11.75" y="4.5" width="1.5" height="7.5" rx="0.2" transform="rotate(-90 11.75 4.5)" fill="currentColor"></rect><circle cx="6" cy="8.25" r="1.25" fill="currentColor"></circle><circle cx="11.5" cy="8.25" r="1.25" fill="currentColor"></circle><path d="M8.74999 14C9.28048 14 9.78913 13.7892 10.1643 13.4141C10.5392 13.0392 10.75 12.5305 10.75 12C10.7481 11.648 10.6522 11.3027 10.4726 11H7.02744C6.84783 11.3027 6.75192 11.648 6.75 12C6.75 12.5305 6.96083 13.0392 7.33575 13.4141C7.71084 13.7892 8.21951 14 8.74999 14Z" fill="currentColor"></path><path fill-rule="evenodd" clip-rule="evenodd" d="M8.75586 1.9375C8.75391 1.9375 8.75195 1.9375 8.75 1.9375C4.33172 1.9375 0.75 5.51922 0.75 9.9375C0.75 14.3558 4.33172 17.9375 8.75 17.9375C13.0061 17.9375 16.4859 14.6139 16.7357 10.4205H15.2323C14.9852 13.7848 12.1774 16.4375 8.75 16.4375C5.16015 16.4375 2.25 13.5274 2.25 9.9375C2.25 6.34765 5.16015 3.4375 8.75 3.4375C8.75195 3.4375 8.75391 3.4375 8.75586 3.4375V1.9375Z" fill="currentColor"></path></svg></span></div><span class="CommentsItemMeta-rightSection"><a rel="nofollow" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1?commentId=pqw82ArBvWqz6QjRB"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" fill="currentColor" aria-hidden="true" 
class="CommentsItemMeta-linkIcon ForumIcon-root"><path d="M12.232 4.232a2.5 2.5 0 013.536 3.536l-1.225 1.224a.75.75 0 001.061 1.06l1.224-1.224a4 4 0 00-5.656-5.656l-3 3a4 4 0 00.225 5.865.75.75 0 00.977-1.138 2.5 2.5 0 01-.142-3.667l3-3z"></path><path d="M11.603 7.963a.75.75 0 00-.977 1.138 2.5 2.5 0 01.142 3.667l-3 3a2.5 2.5 0 01-3.536-3.536l1.225-1.224a.75.75 0 00-1.061-1.06l-1.224 1.224a4 4 0 105.656 5.656l3-3a4 4 0 00-.225-5.865z"></path></svg></a></span></div><div class="CommentBody-root ContentStyles-base content ContentStyles-commentBody"><div class="CommentBody-commentStyling"><p>No I agree on 2! I'm just saying even from a longtermist perspective, it may not be as important and tractable as improving institutions in orthogonal ways.</p></div></div><div class="CommentBottom-bottom"><a class="comments-item-reply-link CommentsItem-replyLink">Reply</a></div></div></div></div></div></div></div></div></div></div></div></div></div></div></div></div></div></div><div><div class="comments-node CommentFrame-commentsNodeRoot comments-node-root comments-node-odd CommentFrame-node" id="nyFELFCfQSwaQHX7N"><div><div class="CommentsItem-root recent-comments-node"><div class="CommentsItem-postTitleRow"></div><div class="CommentsItem-body"><div class="CommentsItemMeta-root"><a class="CommentsItemMeta-collapse"><svg width="20" height="20" viewBox="0 0 20 20" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="CommentsItemMeta-collapseChevron CommentsItemMeta-collapseChevronOpen ForumIcon-root"><path fill-rule="evenodd" clip-rule="evenodd" d="M7.20938 14.7698C6.92228 14.4713 6.93159 13.9965 7.23017 13.7094L11.1679 10L7.23017 6.29062C6.93159 6.00353 6.92228 5.52875 7.20938 5.23017C7.49647 4.93159 7.97125 4.92228 8.26983 5.20937L12.7698 9.45937C12.9169 9.60078 13 9.79599 13 10C13 10.204 12.9169 10.3992 12.7698 10.5406L8.26983 14.7906C7.97125 15.0777 7.49647 15.0684 7.20938 14.7698Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" 
stroke-linejoin="round"></path></svg></a><span class="LWTooltip-root"><a class="CommentUserName-mainWrapper CommentUserName-fullWrapper CommentsItemMeta-username" href="/users/axioman"><div class="CommentUserName-profileImagePlaceholder"></div><span class="UsersNameDisplay-color CommentUserName-author">axioman</span></a></span><span class="CommentsItemDate-root CommentsItemDate-date"><a rel="nofollow" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1?commentId=nyFELFCfQSwaQHX7N"><span class="LWTooltip-root"><time dateTime="2021-02-12T16:59:58.944Z">Feb 12 2021</time></span></a></span><span class="OverallVoteAxis-vote"><span class="OverallVoteAxis-overallSection EAEmojisVoteOnComment-overallAxis OverallVoteAxis-overallSectionBox"><span class="LWTooltip-root"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-left" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></span><span class="LWTooltip-root"><span class="OverallVoteAxis-voteScore">11</span></span><span class="LWTooltip-root"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-right" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 
4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></span></span></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Vector (Stroke)" d="M11.5419 2.12049C11.7994 1.762 11.737 1.24987 11.3935 0.972329C11.0428 0.688906 10.5419 0.764291 10.2795 1.12957L4.54399 9.11368L1.65149 6.04587C1.34241 5.71806 0.836155 5.71806 0.52708 6.04587C0.224307 6.36699 0.224307 6.88303 0.527079 7.20416L4.06278 10.9541C4.22277 11.1238 4.44712 11.2146 4.67877 11.1981C4.91025 11.1816 5.11998 11.06 5.25616 10.8705L11.5419 2.12049Z" fill="currentColor" stroke="currentColor" stroke-width="0.4" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Union" d="M2.28033 1.21967C1.98744 0.926777 1.51256 0.926777 1.21967 1.21967C0.926777 1.51256 0.926777 1.98744 1.21967 2.28033L4.93934 6L1.21967 9.71967C0.926777 10.0126 0.926777 10.4874 1.21967 10.7803C1.51256 11.0732 1.98744 11.0732 2.28033 10.7803L6 7.06066L9.71967 10.7803C10.0126 11.0732 10.4874 11.0732 10.7803 10.7803C11.0732 10.4874 11.0732 10.0126 10.7803 9.71967L7.06066 6L10.7803 2.28033C11.0732 1.98744 11.0732 1.51256 10.7803 1.21967C10.4874 0.926777 10.0126 0.926777 9.71967 1.21967L6 4.93934L2.28033 1.21967Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" 
stroke-linejoin="round"></path></svg></div><div>0</div></div></span><div role="button" class="EAReactsSection-button"><span class="LWTooltip-root"><svg width="20" height="18" viewBox="0 0 20 18" fill="none" xmlns="http://www.w3.org/2000/svg" class="EAReactsSection-addEmojiIcon"><rect x="14.75" width="1.5" height="7.5" rx="0.2" fill="currentColor"></rect><rect x="11.75" y="4.5" width="1.5" height="7.5" rx="0.2" transform="rotate(-90 11.75 4.5)" fill="currentColor"></rect><circle cx="6" cy="8.25" r="1.25" fill="currentColor"></circle><circle cx="11.5" cy="8.25" r="1.25" fill="currentColor"></circle><path d="M8.74999 14C9.28048 14 9.78913 13.7892 10.1643 13.4141C10.5392 13.0392 10.75 12.5305 10.75 12C10.7481 11.648 10.6522 11.3027 10.4726 11H7.02744C6.84783 11.3027 6.75192 11.648 6.75 12C6.75 12.5305 6.96083 13.0392 7.33575 13.4141C7.71084 13.7892 8.21951 14 8.74999 14Z" fill="currentColor"></path><path fill-rule="evenodd" clip-rule="evenodd" d="M8.75586 1.9375C8.75391 1.9375 8.75195 1.9375 8.75 1.9375C4.33172 1.9375 0.75 5.51922 0.75 9.9375C0.75 14.3558 4.33172 17.9375 8.75 17.9375C13.0061 17.9375 16.4859 14.6139 16.7357 10.4205H15.2323C14.9852 13.7848 12.1774 16.4375 8.75 16.4375C5.16015 16.4375 2.25 13.5274 2.25 9.9375C2.25 6.34765 5.16015 3.4375 8.75 3.4375C8.75195 3.4375 8.75391 3.4375 8.75586 3.4375V1.9375Z" fill="currentColor"></path></svg></span></div><span class="CommentsItemMeta-rightSection"><a rel="nofollow" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1?commentId=nyFELFCfQSwaQHX7N"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" fill="currentColor" aria-hidden="true" class="CommentsItemMeta-linkIcon ForumIcon-root"><path d="M12.232 4.232a2.5 2.5 0 013.536 3.536l-1.225 1.224a.75.75 0 001.061 1.06l1.224-1.224a4 4 0 00-5.656-5.656l-3 3a4 4 0 00.225 5.865.75.75 0 00.977-1.138 2.5 2.5 0 01-.142-3.667l3-3z"></path><path d="M11.603 7.963a.75.75 0 00-.977 1.138 2.5 2.5 0 01.142 3.667l-3 3a2.5 2.5 0 
01-3.536-3.536l1.225-1.224a.75.75 0 00-1.061-1.06l-1.224 1.224a4 4 0 105.656 5.656l3-3a4 4 0 00-.225-5.865z"></path></svg></a></span></div><div class="CommentBody-root ContentStyles-base content ContentStyles-commentBody"><div class="CommentBody-commentStyling"><p>Interesting writeup!</p><p>Depending on your intended audience, it might make sense to add more details for some of the proposals. For example, why is scenario planning a good idea compared to other methods of decision making? Is there a compelling story, or strong empirical evidence for its efficacy? </p><p>Some small nitpicks: </p><p>There seems to be a mistake here: </p><p>"Bostrom argues in <a href="https://www.sciencedirect.com/science/article/pii/S0016328720300604"><u>The Fragile World Hypothesis</u></a> that continuous technological development will increase systemic fragility, which can be a source of catastrophic or existential risk. In the Precipice, he estimates the chances of existential catastrophe within the next 100 years at one in six."</p><p>I also find this passage a bit odd: </p><p>"One example of moral cluelessness is the repugnant conclusion, which assumes that by adding more people to the world, and proportionally staying above a given average in happiness, one can reach a state of minimal happiness for an infinitely large population."</p><p>The repugnant conclusion might motivate someone to think about cluelessness, but it does not really seem to be an example of cluelessness (the question whether we should accept it might or might not be). 
</p></div></div><div class="CommentBottom-bottom"><a class="comments-item-reply-link CommentsItem-replyLink">Reply</a></div></div></div></div><div class="CommentsNode-children"><div class="CommentsNode-parentScroll"></div><div><div class="comments-node comments-node-even CommentFrame-node CommentFrame-child CommentFrame-answerLeafComment" id="nPwgzekqfoNXFpvZJ"><div><div class="CommentsItem-root recent-comments-node"><div class="CommentsItem-postTitleRow"></div><div class="CommentsItem-body"><div class="CommentsItemMeta-root"><a class="CommentsItemMeta-collapse"><svg width="20" height="20" viewBox="0 0 20 20" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="CommentsItemMeta-collapseChevron CommentsItemMeta-collapseChevronOpen ForumIcon-root"><path fill-rule="evenodd" clip-rule="evenodd" d="M7.20938 14.7698C6.92228 14.4713 6.93159 13.9965 7.23017 13.7094L11.1679 10L7.23017 6.29062C6.93159 6.00353 6.92228 5.52875 7.20938 5.23017C7.49647 4.93159 7.97125 4.92228 8.26983 5.20937L12.7698 9.45937C12.9169 9.60078 13 9.79599 13 10C13 10.204 12.9169 10.3992 12.7698 10.5406L8.26983 14.7906C7.97125 15.0777 7.49647 15.0684 7.20938 14.7698Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></a><span class="LWTooltip-root"><a class="CommentUserName-mainWrapper CommentUserName-fullWrapper CommentsItemMeta-username" href="/users/andreas_massey"><div class="CommentUserName-profileImagePlaceholder"></div><span class="UsersNameDisplay-color CommentUserName-author">Andreas_Massey</span></a></span><span class="CommentsItemMeta-userMarkers"><span class="LWTooltip-root UserCommentMarkers-iconWrapper"><svg width="16" height="16" viewBox="0 0 16 16" fill="currentColor" xmlns="http://www.w3.org/2000/svg" class="UserCommentMarkers-postAuthorIcon ForumIcon-root"><path d="M7.93765 3.68928C7.93765 5.23289 6.68626 6.48428 5.14265 6.48428C3.59905 6.48428 2.34766 5.23289 2.34766 3.68928C2.34766 2.14568 3.59905 
0.894287 5.14265 0.894287C6.68626 0.894287 7.93765 2.14568 7.93765 3.68928Z" fill="currentColor"></path><path d="M8.79027 8.01598C8.45861 7.68432 8.06384 7.44751 7.62172 7.28955C6.9269 7.77904 6.05841 8.06333 5.14252 8.06333C4.22662 8.06333 3.37398 7.77915 2.66331 7.28955C1.40002 7.73169 0.5 8.916 0.5 10.3214L0.50011 11.9321C0.50011 12.2479 0.768539 12.5163 1.08434 12.5163H7.81121C7.59014 11.8215 7.55853 10.8898 7.98491 9.67396C8.20609 9.1056 8.47451 8.56863 8.7903 8.01601L8.79027 8.01598Z" fill="currentColor"></path><path d="M15.3748 3.02607C15.1222 2.89973 13.7326 3.98931 12.3114 5.52101C12.2957 5.53676 12.2798 5.55262 12.2798 5.58413L11.7903 8.12644L11.4113 6.83154C11.3797 6.75257 11.2692 6.72095 11.2219 6.79993C10.7797 7.35266 10.3692 7.92113 10.0218 8.4896C10.006 8.50535 10.006 8.53696 10.006 8.56857L10.3534 11.174L9.45331 10.0213C9.40594 9.95818 9.29535 9.97393 9.26385 10.0529C8.82171 11.2372 8.82171 12.2952 9.65862 12.99L8.5533 14.8059C8.47432 14.948 8.56905 15.1059 8.727 15.1059H9.64287C9.73759 15.1059 9.81657 15.0428 9.83232 14.948C9.89544 14.6638 10.0691 14.0163 10.3377 13.3847C11.7903 13.9375 12.9589 12.7216 13.7959 10.9372C13.8275 10.8582 13.7642 10.7635 13.6853 10.7792L12.2325 10.9845L14.459 9.216C14.4748 9.20025 14.4907 9.18439 14.4907 9.16864C14.7117 8.47381 14.9012 7.74752 15.0591 7.06846C15.0749 6.98949 15.0118 6.92637 14.9328 6.94212L13.3695 7.24217L15.2959 5.59999C15.3117 5.58424 15.3276 5.55263 15.3276 5.53688C15.5486 4.14726 15.5486 3.12078 15.3749 3.02606L15.3748 3.02607Z" fill="currentColor"></path></svg></span></span><span class="CommentsItemDate-root CommentsItemDate-date"><a rel="nofollow" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1?commentId=nPwgzekqfoNXFpvZJ"><span class="LWTooltip-root"><time dateTime="2021-02-16T14:14:53.299Z">Feb 16 2021</time></span></a></span><span class="OverallVoteAxis-vote"><span class="OverallVoteAxis-overallSection EAEmojisVoteOnComment-overallAxis 
OverallVoteAxis-overallSectionBox"><span class="LWTooltip-root"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-left" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></span><span class="LWTooltip-root"><span class="OverallVoteAxis-voteScore">5</span></span><span class="LWTooltip-root"><button tabindex="0" class="MuiButtonBase-root MuiIconButton-root VoteArrowIconHollow-root VoteArrowIconHollow-right" type="button"><span class="MuiIconButton-label"><svg class="MuiSvgIcon-root VoteArrowIconHollow-smallArrow" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation" style="color:inherit"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg><svg class="MuiSvgIcon-root VoteArrowIconHollow-bigArrow VoteArrowIconHollow-exited" focusable="false" viewBox="6 6 12 12" aria-hidden="true" role="presentation"><path d="M7.41 15.41L12 10.83l4.59 4.58L18 14l-6-6-6 6z"></path><path fill="none" d="M0 0h24v24H0z"></path></svg></span></button></span></span></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Vector (Stroke)" d="M11.5419 2.12049C11.7994 1.762 11.737 1.24987 11.3935 0.972329C11.0428 0.688906 10.5419 
0.764291 10.2795 1.12957L4.54399 9.11368L1.65149 6.04587C1.34241 5.71806 0.836155 5.71806 0.52708 6.04587C0.224307 6.36699 0.224307 6.88303 0.527079 7.20416L4.06278 10.9541C4.22277 11.1238 4.44712 11.2146 4.67877 11.1981C4.91025 11.1816 5.11998 11.06 5.25616 10.8705L11.5419 2.12049Z" fill="currentColor" stroke="currentColor" stroke-width="0.4" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><span class="LWTooltip-root"><div role="button" class="EAReactsSection-button"><div class="EAReactsSection-emojiPreview"><svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg"><path id="Union" d="M2.28033 1.21967C1.98744 0.926777 1.51256 0.926777 1.21967 1.21967C0.926777 1.51256 0.926777 1.98744 1.21967 2.28033L4.93934 6L1.21967 9.71967C0.926777 10.0126 0.926777 10.4874 1.21967 10.7803C1.51256 11.0732 1.98744 11.0732 2.28033 10.7803L6 7.06066L9.71967 10.7803C10.0126 11.0732 10.4874 11.0732 10.7803 10.7803C11.0732 10.4874 11.0732 10.0126 10.7803 9.71967L7.06066 6L10.7803 2.28033C11.0732 1.98744 11.0732 1.51256 10.7803 1.21967C10.4874 0.926777 10.0126 0.926777 9.71967 1.21967L6 4.93934L2.28033 1.21967Z" fill="currentColor" stroke="currentColor" stroke-width="0.5" stroke-linecap="round" stroke-linejoin="round"></path></svg></div><div>0</div></div></span><div role="button" class="EAReactsSection-button"><span class="LWTooltip-root"><svg width="20" height="18" viewBox="0 0 20 18" fill="none" xmlns="http://www.w3.org/2000/svg" class="EAReactsSection-addEmojiIcon"><rect x="14.75" width="1.5" height="7.5" rx="0.2" fill="currentColor"></rect><rect x="11.75" y="4.5" width="1.5" height="7.5" rx="0.2" transform="rotate(-90 11.75 4.5)" fill="currentColor"></rect><circle cx="6" cy="8.25" r="1.25" fill="currentColor"></circle><circle cx="11.5" cy="8.25" r="1.25" fill="currentColor"></circle><path d="M8.74999 14C9.28048 14 9.78913 13.7892 10.1643 13.4141C10.5392 13.0392 10.75 12.5305 10.75 12C10.7481 11.648 
10.6522 11.3027 10.4726 11H7.02744C6.84783 11.3027 6.75192 11.648 6.75 12C6.75 12.5305 6.96083 13.0392 7.33575 13.4141C7.71084 13.7892 8.21951 14 8.74999 14Z" fill="currentColor"></path><path fill-rule="evenodd" clip-rule="evenodd" d="M8.75586 1.9375C8.75391 1.9375 8.75195 1.9375 8.75 1.9375C4.33172 1.9375 0.75 5.51922 0.75 9.9375C0.75 14.3558 4.33172 17.9375 8.75 17.9375C13.0061 17.9375 16.4859 14.6139 16.7357 10.4205H15.2323C14.9852 13.7848 12.1774 16.4375 8.75 16.4375C5.16015 16.4375 2.25 13.5274 2.25 9.9375C2.25 6.34765 5.16015 3.4375 8.75 3.4375C8.75195 3.4375 8.75391 3.4375 8.75586 3.4375V1.9375Z" fill="currentColor"></path></svg></span></div><span class="CommentsItemMeta-rightSection"><a rel="nofollow" href="/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1?commentId=nPwgzekqfoNXFpvZJ"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" fill="currentColor" aria-hidden="true" class="CommentsItemMeta-linkIcon ForumIcon-root"><path d="M12.232 4.232a2.5 2.5 0 013.536 3.536l-1.225 1.224a.75.75 0 001.061 1.06l1.224-1.224a4 4 0 00-5.656-5.656l-3 3a4 4 0 00.225 5.865.75.75 0 00.977-1.138 2.5 2.5 0 01-.142-3.667l3-3z"></path><path d="M11.603 7.963a.75.75 0 00-.977 1.138 2.5 2.5 0 01.142 3.667l-3 3a2.5 2.5 0 01-3.536-3.536l1.225-1.224a.75.75 0 00-1.061-1.06l-1.224 1.224a4 4 0 105.656 5.656l3-3a4 4 0 00-.225-5.865z"></path></svg></a></span></div><div class="CommentBody-root ContentStyles-base content ContentStyles-commentBody"><div class="CommentBody-commentStyling"><p>Thank you for your feedback, Flodorner! </p><p>First, we certainly agree that a more detailed description could be productive for some of the topics in this piece, including your example on scenario planning and other decision making methods. At more than 6000 words this is already a long piece, so we were aiming to limit the level of detail to what we felt was necessary to explain the proposed framework, without necessarily justifying all nuances. 
Depending on what the community believes is most useful, we are happy to write follow-up pieces with either a higher level of detail for a selected few topics of particular interest (for a more technical discussion on e.g. decision making methods), or a summary piece covering all topics with a lower level of detail (to explain the same framework to non-experts). </p><p>As for your second issue you are completely correct, it has been corrected. </p><p>Regarding your last point, we also agree that the repugnant conclusion is not an example of cluelessness in itself. However, the lack of consensus about how to solve the repugnant conclusion is one example of how we still have things to figure out in terms of population ethics (i. e. are morally clueless in this area).</p></div></div><div class="CommentBottom-bottom"><a class="comments-item-reply-link CommentsItem-replyLink">Reply</a></div></div></div></div></div></div></div></div></div></div></div></div></span></span></div><div class="ToCColumn-gap2"></div><div class="ToCColumn-rhs"></div><div class="ToCColumn-gap3"></div></div><span><div class="PostBottomRecommendations-root"><div class="ToCColumn-root ToCColumn-tocActivated"><div class="ToCColumn-header"></div><div class="ToCColumn-toc"><div class="ToCColumn-stickyBlockScroller"><div class="ToCColumn-stickyBlock"><div></div></div></div></div><div class="ToCColumn-gap1"></div><div class="ToCColumn-content"><div><div class="PostBottomRecommendations-section"><div class="PostBottomRecommendations-sectionHeading">More from<!-- --> <span class=""><a href="/users/henrik-oberg-myhre">Henrik Øberg Myhre</a></span></div><div class="Loading-spinner"><div class="Loading-bounce1"></div><div class="Loading-bounce2"></div><div class="Loading-bounce3"></div></div><div class="PostBottomRecommendations-viewMore"><a href="/users/henrik-oberg-myhre">View more</a></div></div><div class="PostBottomRecommendations-section"><div class="PostBottomRecommendations-sectionHeading">Curated and 
popular this week</div><div class="Loading-spinner"><div class="Loading-bounce1"></div><div class="Loading-bounce2"></div><div class="Loading-bounce3"></div></div></div><div class="PostBottomRecommendations-section"><div class="PostBottomRecommendations-sectionHeading">Relevant<!-- --> opportunities</div><div class="Loading-spinner"><div class="Loading-bounce1"></div><div class="Loading-bounce2"></div><div class="Loading-bounce3"></div></div><div class="PostBottomRecommendations-viewMore"><a href="/topics/opportunities-to-take-action">View more</a></div></div></div></div><div class="ToCColumn-gap2"></div><div class="ToCColumn-gap3"></div></div></div></span></div></div></div></div></div><script> try { function prettyScrollTo(element) { const rect = element.getBoundingClientRect(); const offset = 25; // See commentIdToLandmark const elementY = rect.top + offset; const scrollPosition = elementY + window.scrollY - (window.innerHeight / 5) + 1; window.scrollTo({ top: scrollPosition }); } // Function to scroll to the comment specified in the query parameter function scrollFocusOnQueryComment() { const urlParams = new URLSearchParams(window.location.search); let commentId = urlParams.get("commentId"); if (!commentId) { const hash = window.location.hash; if (hash.startsWith("#")) { commentId = hash.substring(1); } } const element = document.getElementById(commentId); if (!element) { return; } prettyScrollTo(element); const ref = { cleanup: function() {} }; const observer = new MutationObserver(function() { // Check if the element is still in the DOM if (!element.isConnected) { ref.cleanup(); return; } prettyScrollTo(element); }); observer.observe(document.body, { attributes: true, childList: true, subtree: true }); const userEventListener = function() { ref.cleanup(); }; // Event listeners for user-initiated actions ["mousedown", "keydown", "wheel", "touchstart"].forEach(function(eventType) { window.addEventListener(eventType, userEventListener, { passive: true }); }); 
ref.cleanup = function() { observer.disconnect(); ["mousedown", "keydown", "wheel", "touchstart"].forEach(function(eventType) { window.removeEventListener(eventType, userEventListener); }); window.killPreloadScroll = null; }; // Expose the cleanup function on the window object so the proper version in scrollUtils can take over window.killPreloadScroll = ref.cleanup; setTimeout(ref.cleanup, 5000); } scrollFocusOnQueryComment(); } catch (e) { console.error(e) if (false) { // Note: condition will be constant in rendered html alert("Error in preloaded scrollFocusOnQueryComment script, see renderPage.tsx (this error will only appear on dev)."); } } </script> </body> <script>window.ssrRenderedAt = "2024-11-28T03:34:30.539Z"</script> <script>window.ssrMetadata = {"renderedAt":"2024-11-28T03:34:30.539Z","cacheFriendly":true,"timezone":"GMT"}</script> <script>window.__APOLLO_STATE__ = {"ROOT_QUERY":{"__typename":"Query","currentUser":null,"unreadNotificationCounts":{"__typename":"NotificationCounts","unreadNotifications":0,"unreadPrivateMessages":0,"faviconBadgeNumber":0,"checkedAt":"2024-11-28T03:34:30.572Z"},"GivingSeason2024VoteCounts":{"3":{"TiCYBcRyAFKJipaRu":115,"bSvonumaPthGvoFQt":101,"ukvmmGrP5xFGWJSzw":103},"4":{"TiCYBcRyAFKJipaRu":90,"bSvonumaPthGvoFQt":93,"ukvmmGrP5xFGWJSzw":93,"CZ2NEgro46EwKuRLo":79},"5":{"TiCYBcRyAFKJipaRu":72,"bSvonumaPthGvoFQt":81,"erCuFfkLdNkj2QGnn":66,"ukvmmGrP5xFGWJSzw":90,"CZ2NEgro46EwKuRLo":72},"6":{"TiCYBcRyAFKJipaRu":62,"bSvonumaPthGvoFQt":63,"erCuFfkLdNkj2QGnn":64,"FwFmveQAjE8QBxYx2":49,"ukvmmGrP5xFGWJSzw":79,"CZ2NEgro46EwKuRLo":71},"7":{"TiCYBcRyAFKJipaRu":51,"bSvonumaPthGvoFQt":61,"erCuFfkLdNkj2QGnn":53,"kEt7wQAantCRd4xb9":35,"FwFmveQAjE8QBxYx2":47,"ukvmmGrP5xFGWJSzw":79,"CZ2NEgro46EwKuRLo":69},"8":{"TiCYBcRyAFKJipaRu":47,"bSvonumaPthGvoFQt":59,"erCuFfkLdNkj2QGnn":49,"kEt7wQAantCRd4xb9":34,"FwFmveQAjE8QBxYx2":46,"ukvmmGrP5xFGWJSzw":78,"zn8yuZxuYQsS9G7cL":31,"CZ2NEgro46EwKuRLo":60},"9":{"TiCYBcRyAFKJipaRu":41,"bSvonumaPthGvoFQt":45,"e
rCuFfkLdNkj2QGnn":48,"kEt7wQAantCRd4xb9":33,"e2DtufQ8yWqWSn7Zz":31,"ukvmmGrP5xFGWJSzw":76,"zn8yuZxuYQsS9G7cL":31,"CZ2NEgro46EwKuRLo":60,"FwFmveQAjE8QBxYx2":39},"10":{"TiCYBcRyAFKJipaRu":38,"bSvonumaPthGvoFQt":43,"erCuFfkLdNkj2QGnn":48,"kEt7wQAantCRd4xb9":33,"e2DtufQ8yWqWSn7Zz":31,"ukvmmGrP5xFGWJSzw":66,"FjCwXXFSkjAfceQuC":27,"zn8yuZxuYQsS9G7cL":30,"CZ2NEgro46EwKuRLo":59,"FwFmveQAjE8QBxYx2":35},"11":{"TiCYBcRyAFKJipaRu":36,"bSvonumaPthGvoFQt":41,"jKQzswu9pcf7mN2rf":25,"erCuFfkLdNkj2QGnn":46,"kEt7wQAantCRd4xb9":29,"e2DtufQ8yWqWSn7Zz":31,"ukvmmGrP5xFGWJSzw":66,"FjCwXXFSkjAfceQuC":27,"zn8yuZxuYQsS9G7cL":26,"CZ2NEgro46EwKuRLo":59,"FwFmveQAjE8QBxYx2":31},"12":{"TiCYBcRyAFKJipaRu":35,"bSvonumaPthGvoFQt":40,"jKQzswu9pcf7mN2rf":25,"erCuFfkLdNkj2QGnn":45,"kEt7wQAantCRd4xb9":29,"e2DtufQ8yWqWSn7Zz":28,"ukvmmGrP5xFGWJSzw":61,"FjCwXXFSkjAfceQuC":26,"zn8yuZxuYQsS9G7cL":26,"CZ2NEgro46EwKuRLo":56,"p77QopbWzXeRPr4jZ":21,"FwFmveQAjE8QBxYx2":28},"13":{"TiCYBcRyAFKJipaRu":34,"bSvonumaPthGvoFQt":39,"jKQzswu9pcf7mN2rf":22,"erCuFfkLdNkj2QGnn":44,"kEt7wQAantCRd4xb9":27,"e2DtufQ8yWqWSn7Zz":28,"ukvmmGrP5xFGWJSzw":61,"FjCwXXFSkjAfceQuC":26,"zn8yuZxuYQsS9G7cL":21,"CZ2NEgro46EwKuRLo":55,"p77QopbWzXeRPr4jZ":20,"v9sCG54LbZnm9pAxu":18,"FwFmveQAjE8QBxYx2":28},"14":{"TiCYBcRyAFKJipaRu":34,"bSvonumaPthGvoFQt":39,"jKQzswu9pcf7mN2rf":22,"erCuFfkLdNkj2QGnn":43,"2tAsABJxEpbhpzpAc":16,"kEt7wQAantCRd4xb9":27,"e2DtufQ8yWqWSn7Zz":28,"ukvmmGrP5xFGWJSzw":60,"FjCwXXFSkjAfceQuC":26,"zn8yuZxuYQsS9G7cL":21,"CZ2NEgro46EwKuRLo":45,"p77QopbWzXeRPr4jZ":19,"v9sCG54LbZnm9pAxu":18,"FwFmveQAjE8QBxYx2":27},"15":{"TiCYBcRyAFKJipaRu":33,"bSvonumaPthGvoFQt":38,"jKQzswu9pcf7mN2rf":22,"erCuFfkLdNkj2QGnn":43,"2tAsABJxEpbhpzpAc":16,"kEt7wQAantCRd4xb9":27,"e2DtufQ8yWqWSn7Zz":27,"ukvmmGrP5xFGWJSzw":58,"FjCwXXFSkjAfceQuC":19,"zn8yuZxuYQsS9G7cL":21,"CZ2NEgro46EwKuRLo":45,"p77QopbWzXeRPr4jZ":18,"bsNBRfEoh65dT7kDj":15,"v9sCG54LbZnm9pAxu":18,"FwFmveQAjE8QBxYx2":26},"16":{"TiCYBcRyAFKJipaRu":33,"ZjtdEFrexgz8jA2bM":14,"jKQzswu9pcf7mN2rf":2
2,"erCuFfkLdNkj2QGnn":43,"bSvonumaPthGvoFQt":36,"2tAsABJxEpbhpzpAc":16,"kEt7wQAantCRd4xb9":27,"e2DtufQ8yWqWSn7Zz":27,"ukvmmGrP5xFGWJSzw":58,"FjCwXXFSkjAfceQuC":17,"zn8yuZxuYQsS9G7cL":21,"CZ2NEgro46EwKuRLo":45,"p77QopbWzXeRPr4jZ":14,"bsNBRfEoh65dT7kDj":14,"v9sCG54LbZnm9pAxu":18,"FwFmveQAjE8QBxYx2":23},"17":{"TiCYBcRyAFKJipaRu":31,"ZjtdEFrexgz8jA2bM":14,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":42,"bSvonumaPthGvoFQt":36,"2tAsABJxEpbhpzpAc":15,"kEt7wQAantCRd4xb9":26,"e2DtufQ8yWqWSn7Zz":27,"ukvmmGrP5xFGWJSzw":58,"FjCwXXFSkjAfceQuC":17,"zn8yuZxuYQsS9G7cL":20,"CZ2NEgro46EwKuRLo":45,"p77QopbWzXeRPr4jZ":14,"bsNBRfEoh65dT7kDj":14,"v9sCG54LbZnm9pAxu":14,"FwFmveQAjE8QBxYx2":22,"JNWNHKhaoz4T8Qz7b":12},"18":{"TiCYBcRyAFKJipaRu":29,"ZjtdEFrexgz8jA2bM":14,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":42,"bSvonumaPthGvoFQt":35,"2tAsABJxEpbhpzpAc":15,"kEt7wQAantCRd4xb9":23,"e2DtufQ8yWqWSn7Zz":27,"ukvmmGrP5xFGWJSzw":58,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":12,"zn8yuZxuYQsS9G7cL":20,"CZ2NEgro46EwKuRLo":44,"p77QopbWzXeRPr4jZ":13,"bsNBRfEoh65dT7kDj":14,"FwFmveQAjE8QBxYx2":22,"v9sCG54LbZnm9pAxu":13,"JNWNHKhaoz4T8Qz7b":12},"19":{"TiCYBcRyAFKJipaRu":29,"ZjtdEFrexgz8jA2bM":14,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":40,"bSvonumaPthGvoFQt":35,"2tAsABJxEpbhpzpAc":15,"kEt7wQAantCRd4xb9":20,"e2DtufQ8yWqWSn7Zz":26,"ukvmmGrP5xFGWJSzw":57,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":11,"zn8yuZxuYQsS9G7cL":20,"CZ2NEgro46EwKuRLo":44,"p77QopbWzXeRPr4jZ":13,"bsNBRfEoh65dT7kDj":14,"FwFmveQAjE8QBxYx2":22,"v9sCG54LbZnm9pAxu":13,"JNWNHKhaoz4T8Qz7b":11,"yFw9NHjjcmNwQFctc":9},"20":{"TiCYBcRyAFKJipaRu":28,"ZjtdEFrexgz8jA2bM":13,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":40,"bSvonumaPthGvoFQt":34,"2tAsABJxEpbhpzpAc":15,"kEt7wQAantCRd4xb9":20,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":56,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":11,"zn8yuZxuYQsS9G7cL":19,"CZ2NEgro46EwKuRLo":43,"p77QopbWzXeRPr4jZ":12,"bsNBRfEoh65dT7kDj":14,"FwFmveQAjE8QBxYx2":22,"v9sCG54LbZnm9pAxu":13,"zmEAPdA2zgZnXS5Pp":9,"JNWNHKhaoz4T8Qz7
b":11,"yFw9NHjjcmNwQFctc":9},"21":{"TiCYBcRyAFKJipaRu":28,"ZjtdEFrexgz8jA2bM":13,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":40,"bSvonumaPthGvoFQt":33,"2tAsABJxEpbhpzpAc":13,"kEt7wQAantCRd4xb9":20,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":56,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":19,"CZ2NEgro46EwKuRLo":41,"p77QopbWzXeRPr4jZ":12,"bsNBRfEoh65dT7kDj":14,"FwFmveQAjE8QBxYx2":22,"jtTDH5tDeZEnvaL7t":9,"zmEAPdA2zgZnXS5Pp":9,"JNWNHKhaoz4T8Qz7b":10,"yFw9NHjjcmNwQFctc":9,"v9sCG54LbZnm9pAxu":11},"22":{"TiCYBcRyAFKJipaRu":28,"ZjtdEFrexgz8jA2bM":13,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":40,"bSvonumaPthGvoFQt":33,"2tAsABJxEpbhpzpAc":11,"kEt7wQAantCRd4xb9":20,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":55,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":18,"D5z3scedaxjnRjtaZ":8,"CZ2NEgro46EwKuRLo":40,"bsNBRfEoh65dT7kDj":14,"p77QopbWzXeRPr4jZ":11,"FwFmveQAjE8QBxYx2":21,"jtTDH5tDeZEnvaL7t":9,"zmEAPdA2zgZnXS5Pp":9,"JNWNHKhaoz4T8Qz7b":10,"yFw9NHjjcmNwQFctc":9,"v9sCG54LbZnm9pAxu":11},"23":{"TiCYBcRyAFKJipaRu":28,"ZjtdEFrexgz8jA2bM":13,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":40,"bSvonumaPthGvoFQt":33,"2tAsABJxEpbhpzpAc":11,"kEt7wQAantCRd4xb9":17,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":55,"okCHp62fiy5WPdktd":8,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":16,"D5z3scedaxjnRjtaZ":8,"CZ2NEgro46EwKuRLo":40,"bsNBRfEoh65dT7kDj":14,"p77QopbWzXeRPr4jZ":11,"FwFmveQAjE8QBxYx2":21,"jtTDH5tDeZEnvaL7t":9,"zmEAPdA2zgZnXS5Pp":9,"JNWNHKhaoz4T8Qz7b":9,"yFw9NHjjcmNwQFctc":9,"v9sCG54LbZnm9pAxu":11},"24":{"TiCYBcRyAFKJipaRu":27,"bcZAGKRMqjExFyM2X":6,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":40,"bSvonumaPthGvoFQt":33,"2tAsABJxEpbhpzpAc":11,"ZjtdEFrexgz8jA2bM":11,"kEt7wQAantCRd4xb9":17,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":55,"okCHp62fiy5WPdktd":8,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":16,"D5z3scedaxjnRjtaZ":7,"CZ2NEgro46EwKuRLo":40,"bsNBRfEoh65dT7kDj":14,"p77QopbWzXeRPr4jZ":10,"FwFmveQAjE8QBxYx2":21,"jtTD
H5tDeZEnvaL7t":9,"zmEAPdA2zgZnXS5Pp":8,"JNWNHKhaoz4T8Qz7b":9,"yFw9NHjjcmNwQFctc":9,"v9sCG54LbZnm9pAxu":11},"25":{"TiCYBcRyAFKJipaRu":27,"bcZAGKRMqjExFyM2X":6,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":40,"bSvonumaPthGvoFQt":33,"2tAsABJxEpbhpzpAc":11,"ZjtdEFrexgz8jA2bM":11,"kEt7wQAantCRd4xb9":16,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":54,"okCHp62fiy5WPdktd":8,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":16,"D5z3scedaxjnRjtaZ":7,"CZ2NEgro46EwKuRLo":40,"bsNBRfEoh65dT7kDj":14,"p77QopbWzXeRPr4jZ":10,"FwFmveQAjE8QBxYx2":20,"jtTDH5tDeZEnvaL7t":8,"zmEAPdA2zgZnXS5Pp":8,"JNWNHKhaoz4T8Qz7b":9,"yFw9NHjjcmNwQFctc":9,"v9sCG54LbZnm9pAxu":11,"mFHAiABbYmPaZFHXN":4},"26":{"TiCYBcRyAFKJipaRu":26,"bcZAGKRMqjExFyM2X":6,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":40,"bSvonumaPthGvoFQt":33,"2tAsABJxEpbhpzpAc":10,"ZjtdEFrexgz8jA2bM":11,"kEt7wQAantCRd4xb9":16,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":54,"okCHp62fiy5WPdktd":8,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":15,"D5z3scedaxjnRjtaZ":7,"CZ2NEgro46EwKuRLo":39,"bsNBRfEoh65dT7kDj":14,"p77QopbWzXeRPr4jZ":10,"FwFmveQAjE8QBxYx2":20,"jtTDH5tDeZEnvaL7t":8,"zmEAPdA2zgZnXS5Pp":8,"JNWNHKhaoz4T8Qz7b":9,"yFw9NHjjcmNwQFctc":9,"v9sCG54LbZnm9pAxu":11,"gAkcrp4eymBpfknxc":4,"mFHAiABbYmPaZFHXN":4},"27":{"TiCYBcRyAFKJipaRu":25,"bcZAGKRMqjExFyM2X":6,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":40,"bSvonumaPthGvoFQt":33,"2tAsABJxEpbhpzpAc":9,"ZjtdEFrexgz8jA2bM":11,"kEt7wQAantCRd4xb9":16,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":54,"okCHp62fiy5WPdktd":8,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":15,"D5z3scedaxjnRjtaZ":7,"CZ2NEgro46EwKuRLo":38,"bsNBRfEoh65dT7kDj":14,"p77QopbWzXeRPr4jZ":10,"FwFmveQAjE8QBxYx2":20,"jtTDH5tDeZEnvaL7t":8,"zmEAPdA2zgZnXS5Pp":8,"JNWNHKhaoz4T8Qz7b":9,"yFw9NHjjcmNwQFctc":9,"v9sCG54LbZnm9pAxu":11,"LyP2AmPH8XdAczSww":4,"gAkcrp4eymBpfknxc":4,"mFHAiABbYmPaZFHXN":4},"28":{"TiCYBcRyAFKJipaRu":25,"bcZAGKRMqjExFyM2X":6,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":40,"bSvonuma
PthGvoFQt":33,"2tAsABJxEpbhpzpAc":8,"ZjtdEFrexgz8jA2bM":11,"kEt7wQAantCRd4xb9":16,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":54,"okCHp62fiy5WPdktd":8,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":14,"D5z3scedaxjnRjtaZ":7,"CZ2NEgro46EwKuRLo":38,"bsNBRfEoh65dT7kDj":14,"p77QopbWzXeRPr4jZ":10,"FwFmveQAjE8QBxYx2":20,"jtTDH5tDeZEnvaL7t":8,"zmEAPdA2zgZnXS5Pp":8,"JNWNHKhaoz4T8Qz7b":9,"FZjQTgbDcMNdBFeoF":4,"yFw9NHjjcmNwQFctc":8,"v9sCG54LbZnm9pAxu":10,"LyP2AmPH8XdAczSww":4,"gAkcrp4eymBpfknxc":4,"mFHAiABbYmPaZFHXN":4},"29":{"TiCYBcRyAFKJipaRu":25,"bcZAGKRMqjExFyM2X":6,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":40,"bSvonumaPthGvoFQt":33,"2tAsABJxEpbhpzpAc":8,"ZjtdEFrexgz8jA2bM":11,"kEt7wQAantCRd4xb9":16,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":53,"okCHp62fiy5WPdktd":8,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":14,"D5z3scedaxjnRjtaZ":7,"CZ2NEgro46EwKuRLo":38,"bsNBRfEoh65dT7kDj":14,"p77QopbWzXeRPr4jZ":10,"FwFmveQAjE8QBxYx2":20,"MZ6dAxCjAQRegQKK8":4,"jtTDH5tDeZEnvaL7t":5,"zmEAPdA2zgZnXS5Pp":8,"JNWNHKhaoz4T8Qz7b":9,"FZjQTgbDcMNdBFeoF":4,"yFw9NHjjcmNwQFctc":8,"v9sCG54LbZnm9pAxu":10,"LyP2AmPH8XdAczSww":4,"gAkcrp4eymBpfknxc":4,"mFHAiABbYmPaZFHXN":4},"30":{"TiCYBcRyAFKJipaRu":24,"bcZAGKRMqjExFyM2X":6,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":39,"bSvonumaPthGvoFQt":33,"2tAsABJxEpbhpzpAc":8,"ZjtdEFrexgz8jA2bM":11,"kEt7wQAantCRd4xb9":16,"qZRJFqPK7Abk3hGZa":4,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":53,"okCHp62fiy5WPdktd":7,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":14,"D5z3scedaxjnRjtaZ":7,"CZ2NEgro46EwKuRLo":38,"bsNBRfEoh65dT7kDj":14,"p77QopbWzXeRPr4jZ":10,"FwFmveQAjE8QBxYx2":20,"MZ6dAxCjAQRegQKK8":4,"jtTDH5tDeZEnvaL7t":4,"zmEAPdA2zgZnXS5Pp":8,"JNWNHKhaoz4T8Qz7b":9,"FZjQTgbDcMNdBFeoF":4,"yFw9NHjjcmNwQFctc":8,"v9sCG54LbZnm9pAxu":10,"LyP2AmPH8XdAczSww":4,"gAkcrp4eymBpfknxc":4,"mFHAiABbYmPaZFHXN":4},"31":{"TiCYBcRyAFKJipaRu":24,"bcZAGKRMqjExFyM2X":5,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":39,"bSvonumaPthGvoFQt":33,"2t
AsABJxEpbhpzpAc":8,"ZjtdEFrexgz8jA2bM":10,"kEt7wQAantCRd4xb9":16,"qZRJFqPK7Abk3hGZa":4,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":52,"okCHp62fiy5WPdktd":7,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":14,"D5z3scedaxjnRjtaZ":7,"CZ2NEgro46EwKuRLo":38,"bsNBRfEoh65dT7kDj":14,"yPLbkCqKN6fd6t7FD":3,"p77QopbWzXeRPr4jZ":10,"FwFmveQAjE8QBxYx2":20,"MZ6dAxCjAQRegQKK8":4,"jtTDH5tDeZEnvaL7t":4,"zmEAPdA2zgZnXS5Pp":8,"JNWNHKhaoz4T8Qz7b":9,"FZjQTgbDcMNdBFeoF":4,"yFw9NHjjcmNwQFctc":8,"v9sCG54LbZnm9pAxu":10,"LyP2AmPH8XdAczSww":4,"gAkcrp4eymBpfknxc":4,"mFHAiABbYmPaZFHXN":4},"32":{"TiCYBcRyAFKJipaRu":24,"bcZAGKRMqjExFyM2X":5,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":39,"bSvonumaPthGvoFQt":32,"2tAsABJxEpbhpzpAc":8,"ZjtdEFrexgz8jA2bM":10,"kEt7wQAantCRd4xb9":16,"qZRJFqPK7Abk3hGZa":4,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":51,"okCHp62fiy5WPdktd":7,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":14,"D5z3scedaxjnRjtaZ":7,"CZ2NEgro46EwKuRLo":38,"bsNBRfEoh65dT7kDj":14,"yPLbkCqKN6fd6t7FD":3,"p77QopbWzXeRPr4jZ":10,"FwFmveQAjE8QBxYx2":20,"MZ6dAxCjAQRegQKK8":4,"jtTDH5tDeZEnvaL7t":4,"zmEAPdA2zgZnXS5Pp":8,"JNWNHKhaoz4T8Qz7b":9,"FZjQTgbDcMNdBFeoF":4,"yFw9NHjjcmNwQFctc":8,"JCfD2nugxvfSW5sFL":2,"v9sCG54LbZnm9pAxu":10,"LyP2AmPH8XdAczSww":4,"gAkcrp4eymBpfknxc":4,"mFHAiABbYmPaZFHXN":4},"33":{"TiCYBcRyAFKJipaRu":24,"bcZAGKRMqjExFyM2X":5,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":39,"bSvonumaPthGvoFQt":32,"2tAsABJxEpbhpzpAc":8,"ZjtdEFrexgz8jA2bM":10,"kEt7wQAantCRd4xb9":16,"qZRJFqPK7Abk3hGZa":4,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":51,"okCHp62fiy5WPdktd":7,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":14,"D5z3scedaxjnRjtaZ":6,"CZ2NEgro46EwKuRLo":38,"bsNBRfEoh65dT7kDj":14,"yPLbkCqKN6fd6t7FD":3,"p77QopbWzXeRPr4jZ":10,"FwFmveQAjE8QBxYx2":20,"MZ6dAxCjAQRegQKK8":4,"jtTDH5tDeZEnvaL7t":4,"zmEAPdA2zgZnXS5Pp":8,"JNWNHKhaoz4T8Qz7b":9,"FZjQTgbDcMNdBFeoF":4,"yFw9NHjjcmNwQFctc":8,"JCfD2nugxvfSW5sFL":2,"v9sCG54LbZnm9pAxu":10,"b9kzQ5SKuCxoNyKRz":1,"LyP2AmPH
8XdAczSww":4,"gAkcrp4eymBpfknxc":4,"mFHAiABbYmPaZFHXN":4},"34":{"TiCYBcRyAFKJipaRu":24,"bcZAGKRMqjExFyM2X":5,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":39,"bSvonumaPthGvoFQt":32,"2tAsABJxEpbhpzpAc":8,"ZjtdEFrexgz8jA2bM":10,"kEt7wQAantCRd4xb9":16,"qZRJFqPK7Abk3hGZa":4,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":51,"okCHp62fiy5WPdktd":7,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":14,"D5z3scedaxjnRjtaZ":6,"CZ2NEgro46EwKuRLo":38,"bsNBRfEoh65dT7kDj":14,"yPLbkCqKN6fd6t7FD":3,"p77QopbWzXeRPr4jZ":10,"FwFmveQAjE8QBxYx2":20,"MZ6dAxCjAQRegQKK8":3,"jtTDH5tDeZEnvaL7t":4,"zmEAPdA2zgZnXS5Pp":8,"JNWNHKhaoz4T8Qz7b":9,"FZjQTgbDcMNdBFeoF":4,"yFw9NHjjcmNwQFctc":8,"JCfD2nugxvfSW5sFL":2,"pFzAbnkFAfYjveKZx":1,"v9sCG54LbZnm9pAxu":10,"b9kzQ5SKuCxoNyKRz":1,"LyP2AmPH8XdAczSww":4,"gAkcrp4eymBpfknxc":4,"mFHAiABbYmPaZFHXN":4},"35":{"TiCYBcRyAFKJipaRu":24,"bcZAGKRMqjExFyM2X":5,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":39,"bSvonumaPthGvoFQt":32,"2tAsABJxEpbhpzpAc":8,"ZjtdEFrexgz8jA2bM":10,"kEt7wQAantCRd4xb9":16,"qZRJFqPK7Abk3hGZa":4,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":51,"okCHp62fiy5WPdktd":7,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":14,"D5z3scedaxjnRjtaZ":6,"CZ2NEgro46EwKuRLo":38,"bsNBRfEoh65dT7kDj":14,"yPLbkCqKN6fd6t7FD":2,"p77QopbWzXeRPr4jZ":10,"FwFmveQAjE8QBxYx2":20,"MZ6dAxCjAQRegQKK8":3,"jtTDH5tDeZEnvaL7t":4,"zmEAPdA2zgZnXS5Pp":8,"JNWNHKhaoz4T8Qz7b":9,"FZjQTgbDcMNdBFeoF":4,"yFw9NHjjcmNwQFctc":8,"JCfD2nugxvfSW5sFL":2,"dr5pMu5Piwh3pMabN":1,"pFzAbnkFAfYjveKZx":1,"v9sCG54LbZnm9pAxu":10,"b9kzQ5SKuCxoNyKRz":1,"LyP2AmPH8XdAczSww":4,"gAkcrp4eymBpfknxc":4,"mFHAiABbYmPaZFHXN":4},"36":{"TiCYBcRyAFKJipaRu":24,"bcZAGKRMqjExFyM2X":5,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":39,"bSvonumaPthGvoFQt":32,"2tAsABJxEpbhpzpAc":8,"ZjtdEFrexgz8jA2bM":9,"kEt7wQAantCRd4xb9":16,"qZRJFqPK7Abk3hGZa":4,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":51,"okCHp62fiy5WPdktd":7,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":14,"D5z3scedaxjnRjtaZ":6,"CZ2NEgro4
6EwKuRLo":38,"bsNBRfEoh65dT7kDj":14,"yPLbkCqKN6fd6t7FD":2,"p77QopbWzXeRPr4jZ":10,"FwFmveQAjE8QBxYx2":20,"MZ6dAxCjAQRegQKK8":3,"jtTDH5tDeZEnvaL7t":4,"zmEAPdA2zgZnXS5Pp":8,"JNWNHKhaoz4T8Qz7b":9,"FZjQTgbDcMNdBFeoF":4,"QcsgDnoPMnEdfS4cz":1,"yFw9NHjjcmNwQFctc":8,"JCfD2nugxvfSW5sFL":2,"dr5pMu5Piwh3pMabN":1,"pFzAbnkFAfYjveKZx":1,"v9sCG54LbZnm9pAxu":10,"b9kzQ5SKuCxoNyKRz":1,"LyP2AmPH8XdAczSww":4,"gAkcrp4eymBpfknxc":4,"mFHAiABbYmPaZFHXN":4},"37":{"TiCYBcRyAFKJipaRu":24,"bcZAGKRMqjExFyM2X":5,"jKQzswu9pcf7mN2rf":21,"erCuFfkLdNkj2QGnn":39,"bSvonumaPthGvoFQt":32,"2tAsABJxEpbhpzpAc":8,"ZjtdEFrexgz8jA2bM":9,"kEt7wQAantCRd4xb9":16,"qZRJFqPK7Abk3hGZa":4,"e2DtufQ8yWqWSn7Zz":25,"ukvmmGrP5xFGWJSzw":50,"okCHp62fiy5WPdktd":7,"FjCwXXFSkjAfceQuC":17,"kwJiy4oSiwNtTnuzt":10,"zn8yuZxuYQsS9G7cL":14,"D5z3scedaxjnRjtaZ":6,"CZ2NEgro46EwKuRLo":38,"bsNBRfEoh65dT7kDj":14,"yPLbkCqKN6fd6t7FD":2,"p77QopbWzXeRPr4jZ":10,"FwFmveQAjE8QBxYx2":20,"MZ6dAxCjAQRegQKK8":3,"jtTDH5tDeZEnvaL7t":4,"zmEAPdA2zgZnXS5Pp":8,"JNWNHKhaoz4T8Qz7b":9,"FdwwzsEauxrgekHCd":1,"FZjQTgbDcMNdBFeoF":4,"QcsgDnoPMnEdfS4cz":1,"yFw9NHjjcmNwQFctc":8,"JCfD2nugxvfSW5sFL":2,"dr5pMu5Piwh3pMabN":1,"pFzAbnkFAfYjveKZx":1,"v9sCG54LbZnm9pAxu":10,"b9kzQ5SKuCxoNyKRz":1,"LyP2AmPH8XdAczSww":4,"gAkcrp4eymBpfknxc":4,"mFHAiABbYmPaZFHXN":4}},"spotlights({\"input\":{\"enableCache\":false,\"enableTotal\":false,\"terms\":{\"limit\":1,\"view\":\"mostRecentlyPromotedSpotlights\"}}})":{"__typename":"MultiSpotlightOutput","results":[],"totalCount":null},"post({\"input\":{\"selector\":{\"documentId\":\"t4Lqh7GHBM9YyEDg8\"}}})":{"__typename":"SinglePostOutput","result":{"__ref":"Post:t4Lqh7GHBM9YyEDg8"}},"forumEvents({\"input\":{\"enableCache\":false,\"enableTotal\":false,\"terms\":{\"limit\":10,\"view\":\"currentForumEvent\"}}})":{"__typename":"MultiForumEventOutput","results":[{"__ref":"ForumEvent:Lfc2yYJcxY7mM79FK"}],"totalCount":null},"forumEvent({\"input\":{\"selector\":{\"documentId\":\"BkpY8huZKGykawEG9\"}}})":{"__typename":"SingleForumEventOutput","result"
:{"__ref":"ForumEvent:BkpY8huZKGykawEG9"}},"GivingSeason2024DonationTotal":9373.86,"comments({\"input\":{\"enableCache\":false,\"enableTotal\":true,\"terms\":{\"limit\":1000,\"postId\":\"t4Lqh7GHBM9YyEDg8\",\"view\":\"postCommentsMagic\"}}})":{"__typename":"MultiCommentOutput","results":[{"__ref":"Comment:LxHcNDY5SaqQwkg5G"},{"__ref":"Comment:nyFELFCfQSwaQHX7N"},{"__ref":"Comment:ppRR7jeL9dxBaNvhY"},{"__ref":"Comment:boTao4obr2YurBCd5"},{"__ref":"Comment:pqw82ArBvWqz6QjRB"},{"__ref":"Comment:nPwgzekqfoNXFpvZJ"},{"__ref":"Comment:kevgdPgWssSsk5fmi"}],"totalCount":7},"post({\"input\":{\"resolverArgs\":{\"batchKey\":\"singlePost\",\"sequenceId\":null},\"selector\":{\"documentId\":\"t4Lqh7GHBM9YyEDg8\"}}})":{"__typename":"SinglePostOutput","result":{"__ref":"Post:t4Lqh7GHBM9YyEDg8"}},"posts({\"input\":{\"enableCache\":false,\"enableTotal\":true,\"terms\":{\"limit\":5,\"postId\":\"t4Lqh7GHBM9YyEDg8\",\"view\":\"pingbackPosts\"}}})":{"__typename":"MultiPostOutput","results":[{"__ref":"Post:7SjtFYo6sCe3588Tx"},{"__ref":"Post:yEKQQQoN2W3Jn2Mue"},{"__ref":"Post:Cct4uvs7frmpKx8Nb"}],"totalCount":3}},"Post:t4Lqh7GHBM9YyEDg8":{"_id":"t4Lqh7GHBM9YyEDg8","__typename":"Post","url":null,"postedAt":"2021-02-10T18:26:30.881Z","createdAt":null,"sticky":false,"metaSticky":false,"stickyPriority":2,"status":2,"frontpageDate":"2021-02-10T20:59:08.528Z","meta":false,"deletedDraft":false,"postCategory":"post","tagRelevance":{"FdbA8vts5JPKCEou8":5,"d4bQXgZhDP43eJMwp":7,"ee66CtAMYurQreWBH":6,"of9xBvR3wpbp6qsZC":7,"t2L2RziMDLEuHBWNF":7,"uDAGFwZLscHKfoubc":9},"shareWithUsers":["q6ZLMygEtBW5mhgGY","ee4HWBeSdeYsRCLG5","ZkxGhRzN5NKcmWCWq","atWycpyDNsFN4ep9z","j7H8ri59wkf9zeGPg"],"sharingSettings":null,"linkSharingKey":null,"contents_latest":"6ktFytXhM7qfFie7d","commentCount":7,"voteCount":39,"baseScore":54,"extendedScore":null,"emojiReactors":{},"unlisted":false,"score":0.015543255023658276,"lastVisitedAt":null,"isFuture":false,"isRead":false,"lastCommentedAt":"2021-03-04T00:49:47.423Z","lastComme
ntPromotedAt":null,"canonicalCollectionSlug":null,"curatedDate":null,"commentsLocked":null,"commentsLockedToAccountsCreatedAfter":null,"debate":false,"question":false,"hiddenRelatedQuestion":false,"originalPostRelationSourceId":null,"userId":"LFsPKPZ36ZTZQXHct","location":null,"googleLocation":null,"onlineEvent":false,"globalEvent":false,"startTime":null,"endTime":null,"localStartTime":null,"localEndTime":null,"eventRegistrationLink":null,"joinEventLink":null,"facebookLink":null,"meetupLink":null,"website":null,"contactInfo":null,"isEvent":false,"eventImageId":null,"eventType":null,"types":[],"groupId":null,"reviewedByUserId":"2kBP4gThRsNXB3WWX","suggestForCuratedUserIds":null,"suggestForCuratedUsernames":null,"reviewForCuratedUserId":null,"authorIsUnreviewed":false,"afDate":null,"suggestForAlignmentUserIds":null,"reviewForAlignmentUserId":null,"afBaseScore":1,"afExtendedScore":null,"afCommentCount":0,"afLastCommentedAt":"2021-02-06T12:39:00.736Z","afSticky":false,"hideAuthor":false,"moderationStyle":null,"ignoreRateLimits":null,"submitToFrontpage":true,"shortform":false,"onlyVisibleToLoggedIn":false,"onlyVisibleToEstablishedAccounts":false,"reviewCount":0,"reviewVoteCount":0,"positiveReviewVoteCount":0,"manifoldReviewMarketId":null,"annualReviewMarketProbability":0,"annualReviewMarketIsResolved":false,"annualReviewMarketYear":0,"annualReviewMarketUrl":"0","group":null,"podcastEpisodeId":null,"forceAllowType3Audio":false,"nominationCount2019":0,"reviewCount2019":0,"votingSystem":"eaEmojis","disableRecommendation":false,"slug":"objectives-of-longtermist-policy-making-1","title":"Objectives of longtermist policy 
making","draft":null,"hideCommentKarma":false,"af":false,"currentUserReviewVote":null,"coauthorStatuses":[{"userId":"q6ZLMygEtBW5mhgGY","confirmed":true,"requested":false},{"userId":"ZkxGhRzN5NKcmWCWq","confirmed":true,"requested":false},{"userId":"atWycpyDNsFN4ep9z","confirmed":true,"requested":false},{"userId":"j7H8ri59wkf9zeGPg","confirmed":true,"requested":false}],"hasCoauthorPermission":true,"rejected":false,"collabEditorDialogue":false,"tableOfContents":{"html":"<p><i>Estimated reading time: 20-30 minutes<\/i><\/p><p><i>-We would like to thank the following for their excellent feedback and guidance throughout this article, in no particular order: Tyler M. John, Max Stauffer, Aksel Braanen Sterri, Eirik Mofoss, Samuel Hilton, Konrad Seifert, Tildy Stokes, Erik Aunvåg Matsen and Marcel Grewal Sommerfelt.<\/i><\/p><h1 id=\"0_0_Introduction\"><strong>0.0 Introduction<\/strong><\/h1><p>This article is co-authored by five members of Effective Altruism Norway as a pilot project to test if we can contribute in a valuable way to the emerging field of longtermism and policy making.<\/p><p>In the article we summarize some of the work that is being done in the emerging field of longtermism, using a new structure to classify the different interventions (see Figure 1: Three objectives of longtermist policy making). Then, for each objective we describe related challenges and potential solutions, and give some examples of current ongoing work.<\/p><p>We hope that the new structure can help improve coordination in this emerging field, and enable improved prioritization of interventions. If this structure resonates well with established experts in the field, we are happy to write up a shorter version of this article that could serve as an introduction to longtermist policy making for non-experts. 
Already, at 17 pages this article is one fourth of the length of the <a href=\"https://globalprioritiesinstitute.org/wp-content/uploads/GPI-research-agenda-version-2.1.pdf\"><u>GPI research agenda<\/u><\/a>, which covers many of the same topics. <\/p><p>Finally, we have emphasized some aspects of longtermist policy making that we believe have been underemphasized in the effective altruism- and longtermism communities in the past. Examples include scenario planning, robust decision making and redteaming among others, which we have described together with forecasting in section 2.1 as essential epistemic capabilities for long-term governance. These tools are complementary to forecasting-based epistemic capabilities that the EA/longtermist communities already promote, and we hope that they will receive increased attention going forward.<\/p><p>We hope to produce 1-3 further articles on similar topics through 2021, and welcome any experts who have capacity to provide feedback on our work.<\/p><p>--------------------------------------------------------------------<\/p><p>In 2019 William MacAskill proposed a definition of the term <a href=\"https://forum.effectivealtruism.org/posts/qZyshHCNkjs3TvSem/longtermism\"><u>longtermism<\/u><\/a> as the<i> view that those who live at future times matter just as much, morally, as those who live today<\/i>. There are many reasons to believe that actions can have a substantial impact on the future. For instance, the economic growth seen in the past two centuries has lifted billions out of poverty. In addition to this, any long-term consequences of climate change caused by humans could decrease the life quality of several generations to come. Our generation is also one of the first who has had the technological potential to destroy civilization through e.g. nuclear weapons, and thereby eliminating all future of humanity. 
This means that actions we take today can improve the course of history for hundreds of generations to come.<\/p><p>Interest in the welfare of future generations precedes the MacAskill definition of longtermism from 2017. In 2005 the Future of Humanity Institute was established at Oxford university. In 2009, the <a href=\"https://www.csf.gov.sg/who-we-are/\"><u>Centre for Strategic Futures<\/u><\/a> (CSF) was established by the Singaporian Government as a futures think tank. In 2017 William MacAskill started using the word “longtermism” as a term for the cluster of views that involved concern about ensuring the long-run future goes as well as possible. Since then, <a href=\"https://forum.effectivealtruism.org/tag/longtermism-philosophy\"><u>many have contributed<\/u><\/a> to the development of the philosophical field. The <a href=\"https://globalprioritiesinstitute.org/\"><u>Global Priorities Institute<\/u><\/a> (GPI) in Oxford was established in 2018 with the mission to <i>conduct and promote world-class, foundational academic research on how most effectively to do good<\/i>. In 2020 GPI published a new <a href=\"https://globalprioritiesinstitute.org/research-agenda/\"><u>research agenda<\/u><\/a>, where one of its two sections was dedicated to longtermism. These are just some of several milestones in the short history of longtermism. <\/p><p>If we believe that the future is what matters most and that we can influence it through our policy making, then it follows that the long-run outcomes of enacted policies should be one of the key considerations of the policy making process. However, most political systems are not prioritising long-term planning sufficiently compared to the potential benefits just for existing generations – nevermind thinking about the moral importance of future generations. <\/p><p>There are examples of different institutions and policy makers that are putting longtermism on the agenda today, but the time frame they consider long-term differs. 
Time horizons of longtermist organizations that frequently interact with policy makers (e.g. <a href=\"https://www.appgfuturegenerations.com/\"><u>APPG<\/u><\/a> and <a href=\"https://www.alpenglow.org.uk/\"><u>Alpenglow<\/u><\/a>) are constrained by the norms in the current policy making process. Although academics talking about \"longtermism\" can look thousands of years ahead, actors seeking to practically influence policy organisations, including ourselves, are typically considering shorter time horizons, e.g. 20-30 years in the future. <\/p><p>This article will explore three categories of objectives for longtermist policy making and might serve as a guide towards shaping longtermist policy suggestions. These objectives are summarized in figure 1.<\/p><figure class=\"image image_resized\" style=\"width:624px\"><img src=\"http://res.cloudinary.com/cea/image/upload/v1667996044/mirroredImages/t4Lqh7GHBM9YyEDg8/dz3yy1a99m7ei9v9fcbb.png\"><figcaption><strong>Figure 1<\/strong>: Representation of the three objectives longtermist policies should focus on. Objective 1 and 2 serve as foundations for the more direct objective(s) above them.<\/figcaption><\/figure><p>On top of the pyramid is the objective directly benefiting future generations - i.e. ensuring that there is a future for human civilization, and that it is as positive as possible. This objective builds on the condition that policy making institutions are enabled to develop such policies, which brings us to part two of the pyramid. This part describes three essential conditions to achieve successful behaviour change interventions; capability, motivation and opportunity, reflecting the <a href=\"https://link.springer.com/article/10.1186/1748-5908-6-42\"><u>COM-B system for institutional reform<\/u><\/a> (Michie et. al. 2011). The two upper pieces of the pyramid both rest upon the fundamental part, which concerns the objective of <i>understanding longtermism<\/i>. 
Interventions focused on this objective have a more indirect impact mechanism.<\/p><p>A policy intervention should optimize for one or several of these objectives in order to qualify as a \"longtermist policy proposal\".<\/p><p>Note that the proposals in figure 1 are synergistic - if we improve our performance on one of the objectives, it may become easier to also improve on others. In general, objective one works as an enabler of objective two, and both objective one and two are enablers of the third objective. For instance, if a policy making institution is able to agree on a set of KPIs to measure the long-term quality of a society (as a partial solution to objective 1 in figure 1), then they can set up a forecasting infrastructure for these KPIs (developing capabilities to govern for the long term, as described in objective 2). With this forecasting infrastructure in place, long-term effects of proposed policies will be more visible to the electorate, creating stronger incentives for politicians to optimize for long-term outcomes (solving another part of objective 2; motivations). This will for instance make it easier to prioritize catastrophic risk mitigation (enabling investment in efforts focused on objective 3), etc.<\/p><p>Several of the ideas in each category of objectives would be familiar to experienced effective altruists due to the natural synergies of longtermism and effective altruism. However, even experienced effective altruists may not have encountered all of the topics in this article; examples of topics that the experienced reader may find interesting include:<\/p><ul><li>The three-layered model of objectives of longtermist policies in figure 1<\/li><li>The discussion of governance KPIs in section 1.1<\/li><li>Non-forecasting tools like e.g. 
scenario planning as essential epistemic capabilities in section 2.1, on par with forecasting<\/li><li>Structured examples of how policy making institutions can be reformed to benefit future generations in section 2.4<\/li><li>The discussion of sustainability as a way to either mitigate catastrophic risk or a way to boost inclusive progress in section 3.3<\/li><\/ul><p>While the objectives are relevant for policy makers in a broad range of governance models and in countries with different levels of democratic development, the examples in this article are primarily focused on policy making on national levels in industrialized, democratic countries. <\/p><h1 id=\"1_0_Further_our_understanding_of_longtermism_and_adjacent_scientific_fields\"><strong>1.0 Further our understanding of longtermism and adjacent scientific fields<\/strong><\/h1><p>In the ongoing field of exploring strategic considerations related to longtermist policy making, there is a need for agreement of the meaning of the word. The bottom piece of the pyramid in figure 1 concerns our understanding of longtermism. William MacAskill <a href=\"https://forum.effectivealtruism.org/posts/qZyshHCNkjs3TvSem/longtermism#Strong_Longtermism\"><u>proposes <\/u><\/a>three premises that make up what he calls the minimum definition of longtermism: (1) Those who live at future times matter as much, morally as those who live today, (2) society currently privileges those who live today above those who live in the future, and (3) we should take action to rectify that, and help ensure the long-run future goes well. Based on these premises, MacAskill and others have proposed political measures like <a href=\"https://philpapers.org/archive/JOHLIR.pdf\"><u>future assemblies<\/u><\/a> or a <a href=\"https://drive.google.com/file/d/1lJHBKfIROiyc8yxVaZnKEWUEYfOg06Eh/view\"><u>Ministry of the Future<\/u><\/a> (see section 2.4 for further elaboration). 
Organizations like the <a href=\"https://globalprioritiesinstitute.org/wp-content/uploads/gpi-research-agenda.pdf\"><u>Global Priorities Institute<\/u><\/a> (GPI) and the <a href=\"https://www.fhi.ox.ac.uk/\"><u>Future of Humanity Institute<\/u><\/a> (FHI) are currently working on establishing longtermism as a scientific field of inquiry. <\/p><h2 id=\"1_1_What_does_a_good_society_look_like_\">1.1 What does a good society look like?<\/h2><p>Two important constraints on our current ability to positively influence the future are (i) uncertainty about what a good society looks like, i.e. moral cluelessness, and (ii) how we can best create one, i.e. strategic cluelessness. Different scientific and philosophical fields have attempted to investigate the first question in different ways. One example of moral cluelessness is the repugnant conclusion, which assumes that by adding more people to the world, and proportionally staying above a given average in happiness, one can reach a state of minimal happiness for an infinitely large population. However, we aren't completely clueless: here are some metrics that are commonly used to describe more or less positive aspects of a society. <\/p><p>Economists frequently use KPIs (Key Performance Indicators) to try to measure different facets of a successful society. GDP and GDP growth is perhaps the most common, while metrics like Gini-coefficients, average lifespan, GHG emissions, or the Human Development Index are used to describe inequality, health, sustainability and economic development, respectively.<\/p><p>While none of these metrics cover all that matters in a society on their own, a combination of such KPIs may capture most of the aspects that we care about. The “<a href=\"https://drive.google.com/file/d/1lJHBKfIROiyc8yxVaZnKEWUEYfOg06Eh/view\"><u>Portugal we want<\/u><\/a>” project is an example of a collaborative effort to converge on a set of KPIs to use in governance for the long term. 
There are also other examples that similarly attempt to stake out the course for the future of the country, e.g. the “<a href=\"https://www.cynnalcymru.com/project/the-wales-we-want/\"><u>Wales we want<\/u><\/a>”-project, or the japanese work on “<a href=\"https://www.japanpolicyforum.jp/society/pt20190109210522.html\"><u>Future Design<\/u><\/a>”. <\/p><p>Another, more academically oriented example of projects that attempt to compile partial descriptions of a good society into more complete descriptions, is the <a href=\"https://globalprioritiesinstitute.org/wp-content/uploads/GPI-research-agenda-version-2.1.pdf\"><u>GPI research agenda<\/u><\/a>. It lists several other partial approaches to measure broader social welfare through a set of KPIs, including informal discussions by <a href=\"http://www.stafforini.com/blog/bostrom/\"><u>Bostrom <\/u><\/a>and <a href=\"http://reflectivedisequilibrium.blogspot.com/2013/12/what-proxies-to-use-for-flow-through.html\"><u>Shulman<\/u><\/a>. <\/p><h2 id=\"1_2_How_do_we_create_a_good_society_\">1.2 How do we create a good society?<\/h2><p>When we want to plan for a good society in the future we need to make prioritizations. This can be very important for the long-run trajectory of society as some efforts to improve society are much <a href=\"https://80000hours.org/problem-profiles/global-priorities-research/\"><u>more effective than others<\/u><\/a>. <a href=\"https://80000hours.org/2013/12/a-framework-for-strategically-selecting-a-cause/\"><u>Cause prioritization<\/u><\/a> is a philosophical field involved with evaluating and comparing different cause areas in their effectiveness. Some of the organizations working on cause prioritization are <a href=\"https://80000hours.org/articles/future-generations/\"><u>80,000 Hours<\/u><\/a>, the <a href=\"https://www.openphilanthropy.org/blog/update-cause-prioritization-open-philanthropy\"><u>Open Philanthropy Project<\/u><\/a>, and The Center for Reducing Suffering. 
The latter <a href=\"https://centerforreducingsuffering.org/the-benefits-of-cause-neutrality/\"><u>proposes<\/u><\/a> that starting out with a cause-neutral attitude to longtermist policy making is crucial to succeed at the cause prioritization. To achieve this, effective institutions and organizations need to: <\/p><ol><li>Build a broad movement for longtermist policy change so that these efforts don’t get stuck in a specific cause area.<\/li><li>Explicitly work on prioritization research so that cause areas can be accurately compared, as well as induce attitude change in political and societal institutions (see the middle piece of the pyramid: <i>shape policy making institutions for future generations<\/i>).<\/li><\/ol><p>One important concept in cause prioritization is the notion of <i>crucial considerations<\/i> - which are strategic questions that can significantly change the optimal strategy when they are taken into consideration. Some of the crucial consideration of longtermist policy making includes, but is not limited to, our evaluation of the <a href=\"https://forum.effectivealtruism.org/posts/XXLf6FmWujkxna3E6/are-we-living-at-the-most-influential-time-in-history-1\"><u>hinge of history hypothesis<\/u><\/a> (HoH), as well as other considerations discussed in the Global Priorities Institute’s <a href=\"https://globalprioritiesinstitute.org/research-agenda-web-version/\"><u>new research agenda<\/u><\/a>. The HoH assumes that this century, or perhaps especially the coming decades, is the most influential period in all of human history. Therefore, our evaluation of HoH’s likelihood is one of the determinants of how we should influence policy makers and the way we distribute the resources we have available today. 
If we believe that the coming century is merely as influential as a typical century, then we - like <a href=\"https://forum.effectivealtruism.org/posts/Eey2kTy3bAjNwG8b5/the-emerging-school-of-patient-longtermism\"><u>patient longtermist<\/u><\/a> - will probably spend less of our philanthropic resources now, and save more to spend them later. However, if we believe that this period is the most “hingey” period of all of human history - e.g. because our current values could be locked in for generations to come (i.e. <i>value lock-in view<\/i>), or if we are living in a<i> time of perils <\/i>- then we should rather spend more of our philanthropic resources now to ensure the most impact. These considerations can be applied to our spending of any type of philanthropic capital - either money, political influence or other resources of value. If we don’t live at the HoH, it then seems most logical to spend the next decades focusing on building political influence, rather than spending political capital to influence specific decisions in the near future. <\/p><h1 id=\"2_0_Shape_policy_making_institutions_for_future_generations\"><strong>2.0 Shape policy making institutions for future generations<\/strong><\/h1><p>So far, we have considered the problem of longtermism on a general level, and we will therefore describe in this part different measures and obstacles connected to developing and motivating longtermist policy making in institutions. This section reflects the second piece of the pyramid in figure 1, and further elaborates on the COM-B system to ensure successful interventions in behavioural change. We will first consider epistemic determinants and how we can develop epistemic <i>capabilities<\/i> like forecasting and scenario planning, as well as redteaming and robust decision making. 
Then we will look at how we can <i>motivate<\/i> policy makers to prioritize future generations, and in the last paragraph we will consider important institutional barriers to such policy making, and how to remove them in order to to create <i>opportunities<\/i> for long-termist policy making. This section is largely a summary of the work by John & MacAskill, so readers who've studied their work can skip it.<\/p><h2 id=\"2_1_Develop_epistemic_capabilities_for_long_term_policy_making\">2.1 Develop epistemic capabilities for long-term policy making<\/h2><p>Lack of knowledge about the future is likely one of the main sources of political short-termism, also known as epistemic determinants in <a href=\"https://www.researchgate.net/publication/343345291_Longtermist_Institutional_Reform\"><u>Longtermist Institutional Reform<\/u><\/a> by Tyler John and William MacAskill. These determinants lead to discounting of the value of long-term beneficial policies, making them less likely to be enacted. Some discounting is rational simply because there is a lot of uncertainty about the benefits of long-term policies. Irrational discounting is another source of short-termism which is caused by cognitive biases and attentional asymmetries between the future and nearby past. Vividness effects can make people react more strongly to vivid sources of information like news, videos and graphics compared to scientific research. People are also often over-confident in their ability to control and eliminate risks under situations of uncertainty. See <i>Thinking, fast and slow <\/i>(2011) by Daniel Kahneman for further details. 
Although these shortcomings are limiting politicians in their effectiveness, philosopher Christian Tarsney has also <a href=\"https://globalprioritiesinstitute.org/christian-tarsney-the-epistemic-challenge-to-longtermism/\"><u>cast doubt<\/u><\/a> on the possibility of predicting the future at all.
<\/p><p>To maximize the utility gained from this information, decision makers also need to invest in institutions and organizations that can develop epistemic capabilities beyond forecasting, e.g. scenario planning, robust decision making, and red teaming, among others. In <a href=\"https://www.smestrategy.net/blog/what-is-scenario-planning-and-how-to-use-it\"><u>scenario planning<\/u><\/a> exercises, policy makers define a set of scenarios that jointly describe the possible futures that are likely enough to be considered, that differ depending on factors of high uncertainty, and with significant implications for the optimal policy choice. Then, policies are evaluated for how they perform across the range of scenarios. Depending on the risk preferences of the policy makers, they should choose a robust policy that both has a high expected value across scenarios, and fails as gracefully as possible in the worst scenarios. Scenario planning could also be supplemented with <a href=\"https://link.springer.com/chapter/10.1007/978-3-030-05252-2_2\"><u>robust decision making<\/u><\/a> which especially emphasizes strategies that do well in worst-case scenarios. Additionally, <a href=\"https://www.synopsys.com/glossary/what-is-red-teaming.html\"><u>red teaming<\/u><\/a> can provide a solid method of stress-testing the plans we make for the future by taking an adversarial approach. <\/p><p>Several researchers within the EA movement are working on these issues, e.g. Neil Dullaghan, Michael MacKenzie, and Eva Vivalt. Dullaghan <a href=\"https://forum.effectivealtruism.org/posts/kCkd9Mia2EmbZ3A9c/deliberation-may-improve-decision-making\"><u>proposes<\/u><\/a> deliberation as a means of reaching better cooperation across party-lines and long-term thinking. He also claims that there may be a link between deliberation and long-term thinking; specifically in areas like climate change and the environment. 
Furthermore, MacKenzie <a href=\"https://www.oxfordhandbooks.com/view/10.1093/oxfordhb/9780198747369.001.0001/oxfordhb-9780198747369-e-7\"><u>argues<\/u><\/a> that deliberation can help us overcome our cognitive biases by for instance appealing to the idea “saving future children'' to ensure longtermist thinking. In order to gather all these findings within forecasting, Vivalt, a researcher at the Australian National University and University of Toronto, <a href=\"https://forum.effectivealtruism.org/posts/Z7RTJePkiWBH92qqo/eva-vivalt-forecasting-research-results\"><u>proposes<\/u><\/a> a platform to coordinate the research and the ability of each researcher to forecast. These are only some examples of researchers that are working to improve institutional decision making among many more. Still, it is one of the top recommended career paths by <a href=\"https://80000hours.org/problem-profiles/improving-institutional-decision-making/\"><u>80000 Hours<\/u><\/a>, as “Improving the quality of decision-making in important institutions could improve our ability to solve almost all other problems”.<\/p><h2 id=\"2_2_Motivate_policymakers_to_prioritize_future_generations\">2.2 Motivate policymakers to prioritize future generations<\/h2><p>Even if there are policymakers who have the necessary capabilities to improve the welfare of future generations, there are still several factors that discourage them from doing so. These factors are referred to as motivational determinants in the <a href=\"https://philpapers.org/archive/JOHLIR.pdf\"><u>Longtermist Institutional Reform<\/u><\/a> by Tyler John and William MacAskill, from which the following three sections are heavily based on.<\/p><p>People tend to have a high <a href=\"https://en.wikipedia.org/wiki/Time_preference\"><u>time preference<\/u><\/a> for the present, leading to greater discounting of the value of long-term benefits, which makes  policies more short-termist. 
This is a problem that affects both voters and people in power, although the severity of this problem is unclear.<\/p><p>Self-interest and relational favouritism another source of short-termism, as many people care more about themselves and their relatives than future generations. Self-beneficial policies are generally short-termist as policymakers and their relatives will only live for a short amount of time compared to the potential lifespan of humanity.<\/p><p>Cognitive biases may also affect people’s political decisions, two known biases are the identifiable victim effect and procrastination. The <a href=\"https://en.wikipedia.org/wiki/Identifiable_victim_effect\"><u>Identifiable victim effect<\/u><\/a> is the tendency to prioritize individuals that are visible over individuals that are statistical or theoretic. As future generations are invisible and haven’t been born yet, this naturally leads short-termism. <\/p><p>Procrastination drives people to delay difficult problems until they become urgent and demand action. The further a long-term beneficial action is delayed, the less beneficial it is likely to be for future generations. Longtermism is especially prone to procrastination due to its extremely long timeframe.<\/p><p>Politicians are often even more short-termist than these factors would suggest, and they may frequently make extremely short-term decisions that have minimal benefits and significant costs within a few years, due to the various institutional factors discussed below. <\/p><h2 id=\"2_3_Remove_institutional_barriers_to_longtermist_policy_making\">2.3 Remove institutional barriers to longtermist policy making<\/h2><p>Even policymakers that have the expertise and motivation to improve the welfare of future generations can be held back by institutional barriers that are preventing them from effectively advocating for longtermist policies. 
Many of these factors are due to the way today’s governmental institutions are designed, other sources include politicians’ economic dependencies and the media.<\/p><p>Most governments have short election cycles that incentivize short-term policy. Elected representatives naturally want to be re-elected, and one way to gain the favour of potential voters is to provide evidence that their previous time in office brought positive and immediate effects, which is predominantly achieved by initiating short-term policies.<\/p><p>Along with short election cycles, most performance measures mainly evaluate the short-term effects of policies, further discouraging policymakers from advocating for long-term policy.<\/p><p>Time inconsistency is also a problem in governmental institutions because subsequent policymakers can repeal previously enacted future-beneficial policies, as well as redirect investments that were originally intended for future generations. Most governments lack strong institutions dedicated to protecting the interests of future generations, which could help combat the problem of time inconsistency.<\/p><p>The media, which is largely focused on today’s current events, demand immediate reactions from policymakers. This pressures the policymakers to focus on short-term issues in order to build their reputation, as abstaining from doing so might lower their odds of re-election.<\/p><h2 id=\"2_4_Proposed_mechanisms\">2.4 Proposed mechanisms<\/h2><p>To deal with the problems mentioned above (lacking capabilities, disincentivized policymakers and institutional barriers), there is a dire need for institutional reform. There are many different ways to go about this, and there is still a lot of uncertainty about what might be the best solutions. What follows is a list of various longtermist policy proposals chosen with help from Tyler John. The proposals are divided into five main categories, with examples below. 
A more comprehensive list can be found <a href=\"https://forum.effectivealtruism.org/posts/op93xvHkJ5KvCrKaj/institutions-for-future-generations#Four_branch_Model_of_Government\"><u>here<\/u><\/a>.<\/p><p><strong id=\"Designated_stakeholders\">Designated stakeholders<\/strong><\/p><p>Key decision-makers or their advisors are appointed as responsible for protecting the interests of future people. Some examples of these are:<\/p><ul><li>Ministers and Executive Departments<\/li><li>Ombudsperson for Future Generations<\/li><li>Parliamentary committees<\/li><\/ul><p><strong id=\"Information_interventions\">Information interventions<\/strong><\/p><p>Affects how information about the impact of future policies is gained or made publicly available. Some examples of these are:<\/p><ul><li>In-government Think Tank<\/li><li>Posterity Impact Assessments<\/li><li>Intergenerational Deliberation Day<\/li><\/ul><p><strong id=\"Voting_mechanisms\">Voting mechanisms<\/strong><\/p><p>Democratic election mechanisms and policy voting rules are redesigned to promote candidates that are expected to benefit future people. Some examples of these are:<\/p><ul><li>Choosing legislators via lottery<\/li><li>Demeny voting<\/li><li>Longer election cycles<\/li><\/ul><p><strong id=\"Liability_mechanisms\">Liability mechanisms<\/strong><\/p><p>Mechanisms that hold current decision-makers liable if their decisions lead to poor outcomes in the future, including formal rights for future people. Some examples of these are:<\/p><ul><li>Intergenerational externality taxes<\/li><li>Making court systems more future-oriented<\/li><li>Pay for Long-term performance<\/li><\/ul><p><strong id=\"Reallocation_of_resources\">Reallocation of resources<\/strong><\/p><p>Control of current resources is deferred to future people. 
Some examples of these are:<\/p><ul><li>Heritage funds<\/li><li>Financial Institutions for Intergenerational Borrowing<\/li><li>Lower social discount rate<\/li><\/ul><p>For more in-depth analysis of the various proposals, see “Longtermist Institutional Design Literature Review” by Tyler John.’<\/p><p>In addition to the five categories above, another way to encourage long-term policy could be to influence society to be more long-term friendly. An example of this is Roman Krznaric’s writings where he establishes terms and concepts that could enable more longtermist thinking. <\/p><h1 id=\"3_0_Directly_influence_the_future_trajectory_of_human_civilization\"><strong>3.0 Directly influence the future trajectory of human civilization<\/strong><\/h1><p>The top layer of the pyramid in figure 1 considers how one can influence the future of humanity in a more direct way than the objectives in layer 1 and 2 does. There are several methods to directly improve the future and positively shift the trajectory of civilization. One approach is to avoid the bad scenarios (as exemplified by the red scenarios in Figure 2), such as extinction and major catastrophes. Another approach is to boost the good scenarios (exemplified by the green scenarios in Figure 2) by increasing the rate of inclusive progress - either by increasing economic growth, by making progress more inclusive, or by increasing our ability to convert economic wealth into wellbeing. 
<\/p><figure class=\"image image_resized\" style=\"width:624px\"><img src=\"http://res.cloudinary.com/cea/image/upload/v1667996044/mirroredImages/t4Lqh7GHBM9YyEDg8/qcctw3cbjlfqdrff7mwq.png\"><figcaption><strong>Figure 2<\/strong>: Illustration of positive and negative trajectories of civilization.<\/figcaption><\/figure><h2 id=\"3_1_Mitigate_catastrophic_risk_and_build_resiliency_to_tail_events_and_unknown_unknowns\">3.1 Mitigate catastrophic risk and build resiliency to tail events and unknown unknowns<\/h2><p>In the effective altruism movement, one commonly recognized way to positively influence the future is to make sure that it actually exists and avoid <a href=\"https://longtermrisk.org/reducing-risks-of-astronomical-suffering-a-neglected-priority/#III_Reducing_s-risks_is_both_tractable_and_neglected\"><u>scenarios of extreme suffering<\/u><\/a>, i.e. by avoiding existential risks. By developing longtermist policy and institutions, we can better prepare for the future by building resiliency to both known and unknown existential risks.<\/p><figure class=\"image image_resized\" style=\"width:624px\"><img src=\"http://res.cloudinary.com/cea/image/upload/v1667996044/mirroredImages/t4Lqh7GHBM9YyEDg8/si5ga5enygb19xnwiigi.png\"><figcaption><strong>Figure 3<\/strong>: Examples of risks based on a <a href=\"https://www.existential-risk.org/concept.html\"><u>figure<\/u><\/a> by Nick Bostrom<\/figcaption><\/figure><p>Let us start with some definitions. Bostrom explains the difference between existential risk and catastrophic risk in <a href=\"https://www.existential-risk.org/concept.html\"><u>Existential Risk Prevention as Global Priority<\/u><\/a>. Existential risks are both pan-generational and crushing, which means that they drastically reduce the quality of life or cause death that humanity cannot recover from. Compared to this, risks that are merely globally catastrophic do not individually threaten the survival of humanity. 
Assuming that existence is preferable to non-existence, existential risks are considered significantly worse than global catastrophic risks because they affect all future generations. <\/p><p>However, global catastrophes may drastically weaken critical systems and our ability to tackle a second catastrophe. This argument is presented by the Global Catastrophic Risk Institute in a paper about <a href=\"http://gcrinstitute.org/papers/003_double-catastrophe.pdf\"><u>double catastrophes<\/u><\/a> with a case study on how geoengineering may be severely affected by other catastrophes. Moreover, many of the practices that can help us avoid globally catastrophic risks are also useful to prevent existential risks. We have titled this section “mitigate catastrophic risk” to ensure that we cover as many of the risks that may significantly impact the long-term future of humanity as possible.<\/p><p>The list of already known existential risks includes both natural and anthropological risks. Today’s technological advancements have created more anthropological risks, and there are good reasons to believe that they will continue to do so. Bostrom argues in <a href=\"https://www.sciencedirect.com/science/article/pii/S0016328720300604\"><u>The Fragile World Hypothesis<\/u><\/a> that continuous technological development will increase systemic fragility, which can be a source of catastrophic or existential risk. In the Precipice, Toby Ord estimates the chances of existential catastrophe within the next 100 years at one in six. We have already been dangerously close to global catastrophe, e.g. when <a href=\"https://80000hours.org/2012/02/26th-of-september-petrov-day/\"><u>Stanislav Petrov<\/u><\/a> potentially singlehandedly avoided a global nuclear war in 1983 when he did not launch missiles in response to the warning system reporting a US missile launch. 
To prevent such close calls from happening in the future, we need to gain knowledge about both known and unknown risks and solutions to them. <\/p><p>In the Precipice, Ord proposes that reaching existential security is the first of three steps to optimize the future of human civilization. Reaching existential security includes both eliminating immediate dangers, potential future risks, and establishing long-lasting safeguards. For example, switching to renewable energy sources, electric or hydrogen-based fuel, and clean meat, are ways to safeguard against catastrophic <a href=\"https://www.mckinsey.com/business-functions/sustainability/our-insights/climate-risk-and-response-physical-hazards-and-socioeconomic-impacts\"><u>climate change<\/u><\/a>. This is one risk that 80,000 Hours include in their view of the world’s <a href=\"https://80000hours.org/problem-profiles/\"><u>most pressing problems<\/u><\/a>. 80,000 Hours’ list also includes <a href=\"https://80000hours.org/problem-profiles/positively-shaping-artificial-intelligence/\"><u>positively shaping the development of artificial intelligence<\/u><\/a>. This can be positively influenced by investing in technical research and improving governmental strategy. Another priority area is reaching <a href=\"https://80000hours.org/problem-profiles/nuclear-security/\"><u>nuclear security<\/u><\/a>, which includes shrinking nuclear stockpiles and improving systems and communication to avoid depending on people acting like Petrov in the case of false warnings.<i> <\/i>Another priority catastrophic risk area in the EA movement is <a href=\"https://www.openphilanthropy.org/research/cause-reports/biosecurity\"><u>biorisk and pandemic preparedness<\/u><\/a>, which is one of the focus areas of the Open Philanthropy Project. In addition to protecting against already known risks, humanity should research potential future risks and use forecasting principles to prepare for them. 
<\/p><p>When we have reached existential security, Ord proposes that the next steps should be <\/p><ol><li>a long reflection where we determine what kind of future we want to create and how to do so, and<\/li><li>achieving our full potential.<\/li><\/ol><p>Thus, Ord argues that existential security should take priority over other objectives described in this article, as it is more urgent.<\/p><p>There are a wide range of actions that can be taken to mitigate catastrophic and existential risks. As mentioned, these actions mainly include eliminating immediate dangers and establishing long-lasting safeguards. The lists below are partially based on the work by <a href=\"https://www.gcrpolicy.com/risk-management\"><u>Global Catastrophic Risk Policy<\/u><\/a>. <\/p><p><strong id=\"Reduce_the_probability_of_specific_risks\">Reduce the probability of specific risks<\/strong><\/p><p>The most direct course of action to avoid catastrophe is to reduce the probability of catastrophic or existential risks. Some suggestions to risks and how to reduce them are: <\/p><ul><li>Reducing the potential for both intentional and unintentional use of nuclear weapons through improving early warning systems, reducing the number of nuclear warheads and the number of people having access to them.<\/li><li>Strengthen preparedness against pandemics by improving early warning systems, implementing global procedures for limiting spread, and shorten vaccine development timelines. We can also prepare for pandemics by developing vaccines for diseases with high pandemic potential.<\/li><li>Mitigating climate change by curbing CO<sub>2<\/sub> emissions through technological development or policy changes. 
Other methods include climate engineering actions such as removing CO<sub>2<\/sub> from the atmosphere.<\/li><\/ul><p><strong id=\"Improve_risk_management_frameworks\">Improve risk management frameworks<\/strong><\/p><p>Another approach is to improve risk management frameworks in such a way that we are prepared and able to react better to future risks. Some examples are: <\/p><ul><li>Developing a centralized all-hazard national risk assessment process that is adaptable to risks in a variety of domains.<\/li><li>Developing a risk prioritization framework to evaluate vulnerabilities, and the impact of possible adverse outcomes.<\/li><li>Deconflicting risk ownership between government stakeholders: Set one department or agency as the primary owner for each risk, with clear responsibilities for mitigation, preparation and response.<\/li><li>Appointing a “national risk officer’ responsible for overseeing the national risk assessment process and coordinating mitigation efforts.<\/li><\/ul><p><strong id=\"Increase_resilience_of_critical_systems\">Increase resilience of critical systems<\/strong><\/p><p>We can also limit the potential harm done by catastrophic risks or mitigate risks by increasing the resilience of critical systems. Some examples of how to increase critical system resilience are: <\/p><ul><li>Increasing emergency storage capacity of items like food, fuel and medicine at secure locations.<\/li><li>Developing more resilient crops and protecting critical infrastructure assets against disasters both natural and anthropogenic.<\/li><li>Diversifying sourcing to e.g. 
ensure that digital communication systems tolerate power failures.<\/li><li>Hardening assets such as crops by making them more resilient.<\/li><\/ul><h2 id=\"3_2_Build_inclusive_progress_through_long_lasting_and_well_functioning_institutions\">3.2 Build inclusive progress through long-lasting and well-functioning institutions<\/h2><p>Another approach to positively shift the trajectory of civilization is to increase the rate of progress, and make progress more inclusive. Continuous progress can improve human life quality and create a flourishing future for people of diverse backgrounds. Collison and Cohen define <a href=\"https://www.theatlantic.com/science/archive/2019/07/we-need-new-science-progress/594946/\"><u>progress<\/u><\/a> as economic, technological, scientific, cultural or organizational advancements that transform our lives and raise our living standard. This definition is broader than the typical economic definition focused on measuring GDP growth as a proxy or progress. In particular, it includes the opportunity to increase progress by increasing our ability to convert economic wealth into wellbeing. For this reason, we will use the term “economic progress” when referring to GDP growth, while “progress” alone will refer to the broader definition. Moreover, “wellbeing”, “welfare” and “happiness” are used interchangeably, and it is assumed that this is closer to a true measure of progress (in the broader sense) than purely economic metrics.<\/p><p><strong id=\"There_is_still_much_we_don_t_know_about_progress\">There is still much we don’t know about progress<\/strong><\/p><p>There is an ongoing debate about whether there are fundamental limits to economic progress (and indeed <a href=\"https://www.researchgate.net/publication/348836201_What_is_the_Upper_Limit_of_Value\"><u>if there are upper limits of progress overall<\/u><\/a>) - if, at some point in the future, GDP growth must slow down and approach zero. 
If there are limits to economic progress, then increasing the rate of economic progress will only speed up the arrival of a zero-growth world of abundance. This could severely limit the potential value of increasing the rate of economic progress.<\/p><p>If there is no immediate limit to economic progress, there are good reasons to believe that it could continue indefinitely, and improve human welfare in the process. Human quality of life has generally improved significantly since the Industrial Revolution. This strong correlation between GDP growth and improved life quality has been well documented by e.g. <a href=\"https://www.gapminder.org/\"><u>Gapminder<\/u><\/a>. For example,  the <a href=\"https://ourworldindata.org/a-history-of-global-living-conditions-in-5-charts\"><u>percentage of people living in extreme poverty<\/u><\/a> has decreased from about 90% in 1820 to 10% in 2015. It is also argued that a <a href=\"https://www.worksinprogress.co/issue/securing-posterity/\"><u>stagnation in growth is risky<\/u><\/a> in regards to existential risks. GDP growth is far from the only factor that influences progress. Other examples include improved economic distribution, sustainable development and effective transforming of economic growth to human welfare. <\/p><p>There are also ongoing discussions about how to best measure (a broader definition of) progress, if progress is slowing down or accelerating, and how existential risk is affected by the rate of economic progress. This is briefly covered in the <a href=\"https://globalprioritiesinstitute.org/wp-content/uploads/GPI-research-agenda-version-2.1.pdf\"><u>GPI research agenda<\/u><\/a>, and somewhat more extensively in sources therein.<\/p><p>To improve our understanding of how progress occurs, Collision and Cowen have proposed to develop “<a href=\"https://www.theatlantic.com/science/archive/2019/07/we-need-new-science-progress/594946/\"><u>Progress Studies<\/u><\/a>” as a field of research. 
According to Collison and Cowen, progress studies investigates
Some countries have higher levels of happiness than others, despite being poorer - for instance, self-reported <a href=\"https://ourworldindata.org/grapher/gdp-vs-happiness\"><u>happiness levels in Costa Rica are higher than in Luxembourg, while GDP is 6x lower<\/u><\/a>. It is plausible that we can find ways to make happiness cheaper, so that a similar level of economic wealth can be translated into more welfare.<\/p><p>It is hard to know the counterfactual impact of interventions focused on any of these paths. While catastrophic risk mitigation is focused on changing the outcomes of forks in the path of civilization, interventions for progress to a larger degree rely on shifting long-term trends that are hard to reason about empirically. So far, hypotheses for effective interventions have been generated through the use of some heuristics, including:<\/p><ul><li>Institutions can coordinate the efforts of individuals, and thereby multiply their total impact. For this reason, changes in institutional designs are “hingey” - a limited effort to improve an institution can have lasting effects at scale<\/li><li>Some institutional reforms matter more than others. In particular, longer-lasting institutions (examples may include the American Constitution or Ivy League schools) can maintain their influence over time, so reforming these institutions is a way to have a more durable impact. This is a version of “<a href=\"https://www.effectivealtruism.org/articles/a-proposed-adjustment-to-the-astronomical-waste-argument-nick-beckstead/\"><u>path-dependent trajectory changes<\/u><\/a>” advocated for by Nick Beckstead, and further discussed in e.g. Eliezer Yudkowsky’s <a href=\"https://equilibriabook.com/\"><u>Inadequate Equilibria<\/u><\/a><\/li><li>Moreover, more influential institutions (e.g. 
measured in budget size, number of members or technological capabilities) typically offer a larger potential for impact.<\/li><li>Finally, reforms that create positive feedback loops (e.g. by improving processes that are essential for progress, like science, innovation or decision making) accumulate over time<\/li><\/ul><p><strong id=\"Specific_proposals_for_how_to_increase_inclusive_progress\">Specific proposals for how to increase inclusive progress<\/strong><\/p><p>It is commonly argued that the scientific revolution has been one of the key drivers of progress in the last centuries, but today many scholars criticize the modern academic institutions for being sub-optimal. For this reason, interventions aiming to improve academic research may be one promising category to increase the rate of progress. Some examples among many interventions aiming to improve academic research include <a href=\"https://www.replicationmarkets.com/\"><u>Replication Markets<\/u><\/a>, <a href=\"https://arxiv.org/\"><u>ArXiv<\/u><\/a>, <a href=\"https://www.semanticscholar.org/\"><u>Semantic Scholar<\/u><\/a> and <a href=\"https://ought.org/\"><u>Ought<\/u><\/a>. Replication Markets use forecasting to estimate a research claims chance of replication. ArXiv and Semantic Scholar are archives with scientific papers, and Ought tries to figure out which questions humans can delegate to artificial intelligence. Additionally, “scientific research” is one of the top cause areas of the Open Philanthropy Project.<\/p><p>All of the abovementioned interventions are improving academic progress, but there are also non-academic interventions that may increase progress. Some examples from the US Policy focus area of Open Philanthropy Project (Open Phil) include:<\/p><ul><li><a href=\"https://www.foreignaffairs.com/articles/united-states/2020-09-14/americas-exceptional-housing-crisis\"><u>Urban zoning/land use reform<\/u><\/a>, which is meant to reduce the costs of living in cities. 
This may increase progress because it allows people to move to areas with great economic opportunities<\/li><li><a href=\"https://www.openphilanthropy.org/focus/us-policy/macroeconomic-policy\"><u>Macroeconomic stabilization policy<\/u><\/a>, where Open Philanthropy funds advocacy initiatives focused on emphasizing the importance of alleviating suffering and lost output from unemployment during economic crises<\/li><li><a href=\"https://www.openphilanthropy.org/focus/us-policy/immigration-policy\"><u>Immigration policy reform<\/u><\/a>, which may both provide economic opportunities for people from lower-income countries and increase long-term economic growth<\/li><li><a href=\"https://forum.effectivealtruism.org/posts/8Rn2gw7escCc2Rmb7/thoughts-on-electoral-reform\"><u>Electoral reform<\/u><\/a>: e.g. campaign financing rules, election security measures, and improved voting systems (e.g. <a href=\"https://electionscience.org/approval-voting-101/\"><u>approval voting<\/u><\/a> or <a href=\"https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2003531\"><u>quadratic voting<\/u><\/a>), to better ensure that elected officials represent the electorate and reduce the <a href=\"https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors\"><u>risk of malevolent leaders<\/u><\/a><\/li><\/ul><h2 id=\"3_3_What_about_sustainability_\">3.3 What about sustainability?<\/h2><p>Outside of the effective altruism movement, sustainability is one of the most common cause areas for people concerned about the welfare of future generations. Significant resources are invested in ensuring that our GHG emissions are brought down, that our depletion of natural resources and destruction of species habitats are slowed, and that state budgets are fiscally balanced across generations. 
Thus it may seem strange that sustainability has played such a small role in this article.<\/p><p>Our argument, borrowed from <a href=\"http://www.stafforini.com/blog/bostrom/\"><u>Bostrom <\/u><\/a>and others in the EA movement, is that unsustainabilities are bad if they exacerbate catastrophic risk, or if they slow down the rate of inclusive progress. <a href=\"https://www.mckinsey.com/business-functions/sustainability/our-insights/climate-risk-and-response-physical-hazards-and-socioeconomic-impacts\"><u>Research by the McKinsey Global Institute<\/u><\/a> shows that unmitigated climate change can be harmful in both of these ways. <a href=\"https://www.mckinsey.com/industries/public-and-social-sector/our-insights/the-social-contract-in-the-21st-century\"><u>Further research<\/u><\/a> by the McKinsey Global Institute demonstrates that the social contract is eroding across developed economies, and that economic outcomes for individuals are worsening as a consequence. In cases like these where the unsustainabilities are expected to create large amounts of human suffering, we should work hard to become more sustainable.<\/p><h1 id=\"4_0_Summary\"><strong>4.0 Summary<\/strong><\/h1><p>There are several objectives of longtermist policy making. We have presented three categories of objectives, where the objectives in the bottom layers are potential enablers of the upper objectives. All of them are relevant to the necessary prioritization of future generations, given that longtermism is plausible. <\/p><p>Each of the objectives and their sub-objectives are well covered in existing literature, but to our knowledge they have not been presented in this structure before. In this article we have summarized some of the relevant parts of the literature, in the hope of providing an accessible introduction to the field. Furthermore, we hope that some points in this article can serve as coordination points for more experienced longtermists - e.g. 
when referring to which parts of longtermist policy making they are attempting to improve, and why.<\/p>","sections":[{"title":"0.0 Introduction","anchor":"0_0_Introduction","level":1},{"title":"1.0 Further our understanding of longtermism and adjacent scientific fields","anchor":"1_0_Further_our_understanding_of_longtermism_and_adjacent_scientific_fields","level":1},{"title":"1.1 What does a good society look like?","anchor":"1_1_What_does_a_good_society_look_like_","level":2},{"title":"1.2 How do we create a good society?","anchor":"1_2_How_do_we_create_a_good_society_","level":2},{"title":"2.0 Shape policy making institutions for future generations","anchor":"2_0_Shape_policy_making_institutions_for_future_generations","level":1},{"title":"2.1 Develop epistemic capabilities for long-term policy making","anchor":"2_1_Develop_epistemic_capabilities_for_long_term_policy_making","level":2},{"title":"2.2 Motivate policymakers to prioritize future generations","anchor":"2_2_Motivate_policymakers_to_prioritize_future_generations","level":2},{"title":"2.3 Remove institutional barriers to longtermist policy making","anchor":"2_3_Remove_institutional_barriers_to_longtermist_policy_making","level":2},{"title":"2.4 Proposed mechanisms","anchor":"2_4_Proposed_mechanisms","level":2},{"title":"Designated stakeholders","anchor":"Designated_stakeholders","level":3},{"title":"Information interventions","anchor":"Information_interventions","level":3},{"title":"Voting mechanisms","anchor":"Voting_mechanisms","level":3},{"title":"Liability mechanisms","anchor":"Liability_mechanisms","level":3},{"title":"Reallocation of resources","anchor":"Reallocation_of_resources","level":3},{"title":"3.0 Directly influence the future trajectory of human civilization","anchor":"3_0_Directly_influence_the_future_trajectory_of_human_civilization","level":1},{"title":"3.1 Mitigate catastrophic risk and build resiliency to tail events and unknown 
unknowns","anchor":"3_1_Mitigate_catastrophic_risk_and_build_resiliency_to_tail_events_and_unknown_unknowns","level":2},{"title":"Reduce the probability of specific risks","anchor":"Reduce_the_probability_of_specific_risks","level":3},{"title":"Improve risk management frameworks","anchor":"Improve_risk_management_frameworks","level":3},{"title":"Increase resilience of critical systems","anchor":"Increase_resilience_of_critical_systems","level":3},{"title":"3.2 Build inclusive progress through long-lasting and well-functioning institutions","anchor":"3_2_Build_inclusive_progress_through_long_lasting_and_well_functioning_institutions","level":2},{"title":"There is still much we don’t know about progress","anchor":"There_is_still_much_we_don_t_know_about_progress","level":3},{"title":"General ideas for how to increase progress","anchor":"General_ideas_for_how_to_increase_progress","level":3},{"title":"Specific proposals for how to increase inclusive progress","anchor":"Specific_proposals_for_how_to_increase_inclusive_progress","level":3},{"title":"3.3 What about sustainability?","anchor":"3_3_What_about_sustainability_","level":2},{"title":"4.0 Summary","anchor":"4_0_Summary","level":1},{"divider":true,"level":0,"anchor":"postHeadingsDivider"},{"anchor":"comments","level":0,"title":"7 
comments"}]},"reviewWinner":null,"version":"1.0.0","contents":{"__ref":"Revision:6ktFytXhM7qfFie7d"},"customHighlight":null,"myEditorAccess":"none","sequence({\"sequenceId\":null})":null,"prevPost({\"sequenceId\":null})":null,"nextPost({\"sequenceId\":null})":null,"canonicalSource":null,"noIndex":false,"viewCount":null,"socialPreviewData":{"__ref":"SocialPreviewType:t4Lqh7GHBM9YyEDg8"},"commentSortOrder":null,"sideCommentVisibility":null,"collectionTitle":null,"canonicalPrevPostSlug":null,"canonicalNextPostSlug":null,"canonicalSequenceId":null,"canonicalBookId":null,"canonicalSequence":null,"canonicalBook":null,"canonicalCollection":null,"podcastEpisode":null,"bannedUserIds":null,"currentUserVote":null,"currentUserExtendedVote":null,"feedLink":null,"feed":null,"sourcePostRelations":[],"targetPostRelations":[],"rsvps":null,"activateRSVPs":null,"fmCrosspost":{"isCrosspost":false},"glossary":[],"readTimeMinutes":26,"rejectedReason":null,"lastPromotedComment":null,"bestAnswer":null,"tags":[{"__ref":"Tag:of9xBvR3wpbp6qsZC"},{"__ref":"Tag:ee66CtAMYurQreWBH"},{"__ref":"Tag:uDAGFwZLscHKfoubc"},{"__ref":"Tag:t2L2RziMDLEuHBWNF"},{"__ref":"Tag:d4bQXgZhDP43eJMwp"},{"__ref":"Tag:FdbA8vts5JPKCEou8"}],"feedId":null,"totalDialogueResponseCount":0,"unreadDebateResponseCount":0,"dialogTooltipPreview":null,"disableSidenotes":false,"user":{"__ref":"User:LFsPKPZ36ZTZQXHct"},"coauthors":[{"__ref":"User:q6ZLMygEtBW5mhgGY"},{"__ref":"User:ZkxGhRzN5NKcmWCWq"},{"__ref":"User:atWycpyDNsFN4ep9z"},{"__ref":"User:j7H8ri59wkf9zeGPg"}]},"Tag:d5RTBWgyJoDGh2hxM":{"_id":"d5RTBWgyJoDGh2hxM","__typename":"Tag","userId":"PSBFYGLmnNYkxe7Lx","name":"Donation 
Election","shortName":null,"slug":"donation-election","core":false,"postCount":12,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2024-11-07T16:27:40.211Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Revision:Lfc2yYJcxY7mM79FK_frontpageDescription":{"_id":"Lfc2yYJcxY7mM79FK_frontpageDescription","__typename":"Revision","html":""},"Revision:Lfc2yYJcxY7mM79FK_frontpageDescriptionMobile":{"_id":"Lfc2yYJcxY7mM79FK_frontpageDescriptionMobile","__typename":"Revision","html":""},"Revision:Lfc2yYJcxY7mM79FK_postPageDescription":{"_id":"Lfc2yYJcxY7mM79FK_postPageDescription","__typename":"Revision","html":""},"ForumEvent:Lfc2yYJcxY7mM79FK":{"_id":"Lfc2yYJcxY7mM79FK","__typename":"ForumEvent","publicData":null,"voteCount":0,"post":null,"tag":{"__ref":"Tag:d5RTBWgyJoDGh2hxM"},"frontpageDescription":{"__ref":"Revision:Lfc2yYJcxY7mM79FK_frontpageDescription"},"frontpageDescriptionMobile":{"__ref":"Revision:Lfc2yYJcxY7mM79FK_frontpageDescriptionMobile"},"postPageDescription":{"__ref":"Revision:Lfc2yYJcxY7mM79FK_postPageDescription"},"title":"Donation Election","startDate":"2024-11-18T08:00:00.000Z","endDate":"2024-12-03T08:00:00.000Z","darkColor":"#ae070a","lightColor":"#f6e7e1","contrastColor":null,"tagId":"d5RTBWgyJoDGh2hxM","postId":null,"bannerImageId":null,"includesPoll":false,"customComponent":"GivingSeason2024Banner"},"Tag:Dvs6cEeHqvRvAfG2c":{"_id":"Dvs6cEeHqvRvAfG2c","__typename":"Tag","userId":"PSBFYGLmnNYkxe7Lx","name":"Marginal Funding 
Week","shortName":null,"slug":"marginal-funding-week","core":false,"postCount":52,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2024-11-07T16:21:13.332Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Revision:BkpY8huZKGykawEG9_frontpageDescription":{"_id":"BkpY8huZKGykawEG9_frontpageDescription","__typename":"Revision","html":""},"Revision:BkpY8huZKGykawEG9_frontpageDescriptionMobile":{"_id":"BkpY8huZKGykawEG9_frontpageDescriptionMobile","__typename":"Revision","html":""},"Revision:BkpY8huZKGykawEG9_postPageDescription":{"_id":"BkpY8huZKGykawEG9_postPageDescription","__typename":"Revision","html":""},"ForumEvent:BkpY8huZKGykawEG9":{"_id":"BkpY8huZKGykawEG9","__typename":"ForumEvent","publicData":null,"voteCount":0,"post":null,"tag":{"__ref":"Tag:Dvs6cEeHqvRvAfG2c"},"frontpageDescription":{"__ref":"Revision:BkpY8huZKGykawEG9_frontpageDescription"},"frontpageDescriptionMobile":{"__ref":"Revision:BkpY8huZKGykawEG9_frontpageDescriptionMobile"},"postPageDescription":{"__ref":"Revision:BkpY8huZKGykawEG9_postPageDescription"},"title":"Marginal Funding Week","startDate":"2024-11-11T10:00:00.000Z","endDate":"2024-11-18T08:00:00.000Z","darkColor":"#da3700","lightColor":"#f5e7e4","contrastColor":null,"tagId":"Dvs6cEeHqvRvAfG2c","postId":null,"bannerImageId":null,"includesPoll":false,"customComponent":"GivingSeason2024Banner"},"Revision:LxHcNDY5SaqQwkg5G_contents":{"_id":"LxHcNDY5SaqQwkg5G_contents","__typename":"Revision","html":"<p>I'm skeptical of this framework because in reality part 2 seems optional - we don't need to reshape the political system to be more longtermist in order to make progress. 
For instance, those Open Phil recommendations like land use reform can be promoted thru conventional forms of lobbying and coalition building.<\/p><p>In fact, a vibrant and policy-engaged EA community that focuses on understandable short and medium term problems can itself become a fairly effective long-run institution, thus reducing the needs in part 1.<\/p><p>Additionally, while substantively defining a good society for the future may be difficult, we also have the option of defining it procedurally. The simplest example is that we can promote things like democracy or other mechanisms which tend to produce good outcomes. Or we can increase levels of compassion and rationality so that the architects of future societies will act better. This is sort of what you describe in part 2, but I'd emphasize that we can make political institutions which are generically better rather than specifically making them more longtermist.<\/p><p>This is not to say that anything in this post is a bad idea, just that there are more options for meeting longtermist goals.<\/p>","plaintextMainText":"I'm skeptical of this framework because in reality part 2 seems optional - we don't need to reshape the political system to be more longtermist in order to make progress. For instance, those Open Phil recommendations like land use reform can be promoted thru conventional forms of lobbying and coalition building.\n\nIn fact, a vibrant and policy-engaged EA community that focuses on understandable short and medium term problems can itself become a fairly effective long-run institution, thus reducing the needs in part 1.\n\nAdditionally, while substantively defining a good society for the future may be difficult, we also have the option of defining it procedurally. The simplest example is that we can promote things like democracy or other mechanisms which tend to produce good outcomes. 
Or we can increase levels of compassion and rationality so that the architects of future societies will act better. This is sort of what you describe in part 2, but I'd emphasize that we can make political institutions which are generically better rather than specifically making them more longtermist.\n\nThis is not to say that anything in this post is a bad idea, just that there are more options for meeting longtermist goals.","wordCount":195},"User:3LR86ZAiczoXGku6t":{"_id":"3LR86ZAiczoXGku6t","__typename":"User","slug":"kbog","createdAt":"2015-07-14T03:12:26.267Z","username":"kbog","displayName":"kbog","profileImageId":null,"previousDisplayName":null,"fullName":null,"karma":2859,"afKarma":0,"deleted":false,"isAdmin":false,"htmlBio":"<p>We truly do live in interesting times<\/p>\n","jobTitle":null,"organization":null,"postCount":64,"commentCount":935,"sequenceCount":0,"afPostCount":0,"afCommentCount":0,"spamRiskScore":1,"tagRevisionCount":0,"reviewedByUserId":"9qZsZAzbC2zxsPHzN","givingSeason2024DonatedFlair":false,"givingSeason2024VotedFlair":false},"Comment:LxHcNDY5SaqQwkg5G":{"_id":"LxHcNDY5SaqQwkg5G","__typename":"Comment","postId":"t4Lqh7GHBM9YyEDg8","tagId":null,"tag":null,"relevantTagIds":[],"relevantTags":[],"tagCommentType":"DISCUSSION","parentCommentId":null,"topLevelCommentId":null,"descendentCount":4,"title":null,"contents":{"__ref":"Revision:LxHcNDY5SaqQwkg5G_contents"},"postedAt":"2021-02-14T05:15:27.605Z","repliesBlockedUntil":null,"userId":"3LR86ZAiczoXGku6t","deleted":false,"deletedPublic":false,"deletedByUserId":null,"deletedReason":null,"hideAuthor":false,"authorIsUnreviewed":false,"user":{"__ref":"User:3LR86ZAiczoXGku6t"},"currentUserVote":null,"currentUserExtendedVote":null,"baseScore":13,"extendedScore":{},"score":0.0033051481004804373,"voteCount":7,"emojiReactors":{},"af":false,"afDate":null,"moveToAlignmentUserId":null,"afBaseScore":0,"afExtendedScore":{},"suggestForAlignmentUserIds":[],"reviewForAlignmentUserId":null,"needsReview":null,"an
swer":false,"parentAnswerId":null,"retracted":false,"postVersion":"0.15.0","reviewedByUserId":null,"shortform":false,"shortformFrontpage":true,"lastSubthreadActivity":"2021-03-04T00:49:47.472Z","moderatorHat":false,"hideModeratorHat":null,"nominatedForReview":null,"reviewingForReview":null,"promoted":null,"promotedByUser":null,"directChildrenCount":1,"votingSystem":"eaEmojis","isPinnedOnProfile":false,"debateResponse":null,"rejected":false,"rejectedReason":null,"modGPTRecommendation":null,"originalDialogueId":null},"Revision:nyFELFCfQSwaQHX7N_contents":{"_id":"nyFELFCfQSwaQHX7N_contents","__typename":"Revision","html":"<p>Interesting writeup!<\/p><p>Depending on your intended audience, it might make sense to add more details for some of the proposals. For example, why is scenario planning a good idea compared to other methods of decision making? Is there a compelling story, or strong empirical evidence for its efficacy? <\/p><p>Some small nitpicks: <\/p><p>There seems to be a mistake here: <\/p><p>\"Bostrom argues in <a href=\"https://www.sciencedirect.com/science/article/pii/S0016328720300604\"><u>The Fragile World Hypothesis<\/u><\/a> that continuous technological development will increase systemic fragility, which can be a source of catastrophic or existential risk. In the Precipice, he estimates the chances of existential catastrophe within the next 100 years at one in six.\"<\/p><p>I also find this passage a bit odd: <\/p><p>\"One example of moral cluelessness is the repugnant conclusion, which assumes that by adding more people to the world, and proportionally staying above a given average in happiness, one can reach a state of minimal happiness for an infinitely large population.\"<\/p><p>The repugnant conclusion might motivate someone to think about cluelessness, but it does not really seem to be an example of cluelessness (the question whether we should accept it might or might not be). 
<\/p>","plaintextMainText":"Interesting writeup!\n\nDepending on your intended audience, it might make sense to add more details for some of the proposals. For example, why is scenario planning a good idea compared to other methods of decision making? Is there a compelling story, or strong empirical evidence for its efficacy? \n\nSome small nitpicks: \n\nThere seems to be a mistake here: \n\n\"Bostrom argues in The Fragile World Hypothesis that continuous technological development will increase systemic fragility, which can be a source of catastrophic or existential risk. In the Precipice, he estimates the chances of existential catastrophe within the next 100 years at one in six.\"\n\nI also find this passage a bit odd: \n\n\"One example of moral cluelessness is the repugnant conclusion, which assumes that by adding more people to the world, and proportionally staying above a given average in happiness, one can reach a state of minimal happiness for an infinitely large population.\"\n\nThe repugnant conclusion might motivate someone to think about cluelessness, but it does not really seem to be an example of cluelessness (the question whether we should accept it might or might not be). 
","wordCount":178},"User:3xmv6cuKvMgnBdGAu":{"_id":"3xmv6cuKvMgnBdGAu","__typename":"User","slug":"axioman","createdAt":"2018-05-16T20:04:57.354Z","username":"Flodorner","displayName":"axioman","profileImageId":null,"previousDisplayName":null,"fullName":null,"karma":234,"afKarma":0,"deleted":false,"isAdmin":false,"htmlBio":"","jobTitle":null,"organization":null,"postCount":1,"commentCount":64,"sequenceCount":0,"afPostCount":0,"afCommentCount":0,"spamRiskScore":1,"tagRevisionCount":0,"reviewedByUserId":"9qZsZAzbC2zxsPHzN","givingSeason2024DonatedFlair":false,"givingSeason2024VotedFlair":false},"Comment:nyFELFCfQSwaQHX7N":{"_id":"nyFELFCfQSwaQHX7N","__typename":"Comment","postId":"t4Lqh7GHBM9YyEDg8","tagId":null,"tag":null,"relevantTagIds":[],"relevantTags":[],"tagCommentType":"DISCUSSION","parentCommentId":null,"topLevelCommentId":null,"descendentCount":1,"title":null,"contents":{"__ref":"Revision:nyFELFCfQSwaQHX7N_contents"},"postedAt":"2021-02-12T16:59:58.944Z","repliesBlockedUntil":null,"userId":"3xmv6cuKvMgnBdGAu","deleted":false,"deletedPublic":false,"deletedByUserId":null,"deletedReason":null,"hideAuthor":false,"authorIsUnreviewed":false,"user":{"__ref":"User:3xmv6cuKvMgnBdGAu"},"currentUserVote":null,"currentUserExtendedVote":null,"baseScore":11,"extendedScore":null,"score":0.0027157566510140896,"voteCount":7,"emojiReactors":{},"af":false,"afDate":null,"moveToAlignmentUserId":null,"afBaseScore":0,"afExtendedScore":null,"suggestForAlignmentUserIds":[],"reviewForAlignmentUserId":null,"needsReview":true,"answer":false,"parentAnswerId":null,"retracted":false,"postVersion":"0.14.0","reviewedByUserId":null,"shortform":false,"shortformFrontpage":true,"lastSubthreadActivity":"2021-02-16T14:14:54.327Z","moderatorHat":false,"hideModeratorHat":null,"nominatedForReview":null,"reviewingForReview":null,"promoted":null,"promotedByUser":null,"directChildrenCount":1,"votingSystem":"eaEmojis","isPinnedOnProfile":false,"debateResponse":null,"rejected":false,"rejectedReason":null
,"modGPTRecommendation":null,"originalDialogueId":null},"Revision:ppRR7jeL9dxBaNvhY_contents":{"_id":"ppRR7jeL9dxBaNvhY_contents","__typename":"Revision","html":"<p>The part of the article that you are referring to is in part inspired by John and MacAskills paper “longtermist institutional reform”, where they propose reforms that are built to tackle political short-termism. The case for this relies on two assumptions:<\/p><p>1. Long term consequences have an outsized moral importance, despite the uncertainty of long-term effects.<br>2. Because of this, political decision making should be designed to optimize for longterm outcomes. <\/p><p>Greaves and MacAskill have written a <a href=\"https://globalprioritiesinstitute.org/hilary-greaves-william-macaskill-the-case-for-strong-longtermism/\">paper<\/a> arguing for assumption 1: \"Because of the vast number of expected people in the future, it is quite plausible that for options that are appropriately chosen from a sufficiently large choice set, effects on the very long future dominate ex ante evaluations, even after taking into account the fact that further-future effects tend to be the most uncertain…“. We seem to agree on this assumption, but disagree on assumption 2. If I understand your argument against assumption 2, it assumes that there are no tradeoffs between optimizing for short-run outcomes and long-run outcomes. This assumption seems clearly false to us, and is implied to be false in “Longtermist institutional reform”. Consider fiscal policies for example: In the short run it could be beneficial to take all the savings in pension funds and spend them to boost the economy, but in the long run this is predictably harmful because many people will not afford to retire.<\/p>","plaintextMainText":"The part of the article that you are referring to is in part inspired by John and MacAskills paper “longtermist institutional reform”, where they propose reforms that are built to tackle political short-termism. 
The case for this relies on two assumptions:\n\n1. Long term consequences have an outsized moral importance, despite the uncertainty of long-term effects.\n2. Because of this, political decision making should be designed to optimize for longterm outcomes. \n\nGreaves and MacAskill have written a paper arguing for assumption 1: \"Because of the vast number of expected people in the future, it is quite plausible that for options that are appropriately chosen from a sufficiently large choice set, effects on the very long future dominate ex ante evaluations, even after taking into account the fact that further-future effects tend to be the most uncertain…“. We seem to agree on this assumption, but disagree on assumption 2. If I understand your argument against assumption 2, it assumes that there are no tradeoffs between optimizing for short-run outcomes and long-run outcomes. This assumption seems clearly false to us, and is implied to be false in “Longtermist institutional reform”. Consider fiscal policies for example: In the short run it could be beneficial to take all the savings in pension funds and spend them to boost the economy, but in the long run this is predictably harmful because many people will not afford to retire.","wordCount":230},"User:q6ZLMygEtBW5mhgGY":{"_id":"q6ZLMygEtBW5mhgGY","__typename":"User","slug":"andreas_massey","createdAt":"2021-01-31T09:58:25.462Z","username":"Andreas_Massey","displayName":"Andreas_Massey","profileImageId":"Profile/up6t6yzpt4pexagomrth","previousDisplayName":null,"fullName":null,"karma":54,"afKarma":0,"deleted":false,"isAdmin":false,"htmlBio":"<p>BA in Psychology at NTNU<\/p><p>MA in Cognitive Neuroscience at UiO<\/p><p>Curricular activities: Climbing, chess, tennis, gaming, video editing, cave exploration, and music production.<\/p>","jobTitle":"ML Doctorate 
researcher","organization":"NMBU","postCount":0,"commentCount":4,"sequenceCount":0,"afPostCount":0,"afCommentCount":0,"spamRiskScore":1,"tagRevisionCount":0,"reviewedByUserId":"9qZsZAzbC2zxsPHzN","givingSeason2024DonatedFlair":false,"givingSeason2024VotedFlair":false},"Comment:ppRR7jeL9dxBaNvhY":{"_id":"ppRR7jeL9dxBaNvhY","__typename":"Comment","postId":"t4Lqh7GHBM9YyEDg8","tagId":null,"tag":null,"relevantTagIds":[],"relevantTags":[],"tagCommentType":"DISCUSSION","parentCommentId":"kevgdPgWssSsk5fmi","topLevelCommentId":"LxHcNDY5SaqQwkg5G","descendentCount":1,"title":null,"contents":{"__ref":"Revision:ppRR7jeL9dxBaNvhY_contents"},"postedAt":"2021-03-02T12:30:39.679Z","repliesBlockedUntil":null,"userId":"q6ZLMygEtBW5mhgGY","deleted":false,"deletedPublic":false,"deletedByUserId":null,"deletedReason":null,"hideAuthor":false,"authorIsUnreviewed":false,"user":{"__ref":"User:q6ZLMygEtBW5mhgGY"},"currentUserVote":null,"currentUserExtendedVote":null,"baseScore":8,"extendedScore":null,"score":0.0021183229982852936,"voteCount":6,"emojiReactors":{},"af":false,"afDate":null,"moveToAlignmentUserId":null,"afBaseScore":0,"afExtendedScore":null,"suggestForAlignmentUserIds":[],"reviewForAlignmentUserId":null,"needsReview":true,"answer":false,"parentAnswerId":null,"retracted":false,"postVersion":"0.15.0","reviewedByUserId":null,"shortform":false,"shortformFrontpage":true,"lastSubthreadActivity":"2021-03-04T00:49:47.045Z","moderatorHat":false,"hideModeratorHat":null,"nominatedForReview":null,"reviewingForReview":null,"promoted":null,"promotedByUser":null,"directChildrenCount":1,"votingSystem":"eaEmojis","isPinnedOnProfile":false,"debateResponse":null,"rejected":false,"rejectedReason":null,"modGPTRecommendation":null,"originalDialogueId":null},"Revision:boTao4obr2YurBCd5_contents":{"_id":"boTao4obr2YurBCd5_contents","__typename":"Revision","html":"<p>Thank you for your feedback kbog.<\/p><p>First, we certainly agree that there are other options that have a limited influence on the 
future, however, for this article we wanted to only cover areas with a potential for outsized impact on the future. That is the reason we have confined ourselves to so few categories. <\/p><p>Second, there may be categories of interventions that are not addressed in our framework that are as important for improving the future as the interventions we list. If so, we welcome discussion on this topic, and hope that the framework can encourage productive discussion to identify such “intervention X”’s. <\/p><p>Third, I'm a bit confused about how we would focus on “processes that produce good outcomes” without first defining what we mean with good outcomes, and how to measure them?<\/p><p>Fourth, your point on taking the “individual more in focus” by emphasizing rationality and altruism improvement is a great suggestion. Admittedly, this may indeed be a potential lever to improve the future that we haven't sufficiently covered in our post as we were mostly concerned with improving institutions. <\/p><p>Lastly, as for improving political institutions more broadly, see our part on progress.<\/p>","plaintextMainText":"Thank you for your feedback kbog.\n\nFirst, we certainly agree that there are other options that have a limited influence on the future, however, for this article we wanted to only cover areas with a potential for outsized impact on the future. That is the reason we have confined ourselves to so few categories. \n\nSecond, there may be categories of interventions that are not addressed in our framework that are as important for improving the future as the interventions we list. If so, we welcome discussion on this topic, and hope that the framework can encourage productive discussion to identify such “intervention X”’s. 
\n\nThird, I'm a bit confused about how we would focus on “processes that produce good outcomes” without first defining what we mean with good outcomes, and how to measure them?\n\nFourth, your point on taking the “individual more in focus” by emphasizing rationality and altruism improvement is a great suggestion. Admittedly, this may indeed be a potential lever to improve the future that we haven't sufficiently covered in our post as we were mostly concerned with improving institutions. \n\nLastly, as for improving political institutions more broadly, see our part on progress.","wordCount":189},"Comment:boTao4obr2YurBCd5":{"_id":"boTao4obr2YurBCd5","__typename":"Comment","postId":"t4Lqh7GHBM9YyEDg8","tagId":null,"tag":null,"relevantTagIds":[],"relevantTags":[],"tagCommentType":"DISCUSSION","parentCommentId":"LxHcNDY5SaqQwkg5G","topLevelCommentId":"LxHcNDY5SaqQwkg5G","descendentCount":3,"title":null,"contents":{"__ref":"Revision:boTao4obr2YurBCd5_contents"},"postedAt":"2021-02-16T12:38:28.508Z","repliesBlockedUntil":null,"userId":"q6ZLMygEtBW5mhgGY","deleted":false,"deletedPublic":false,"deletedByUserId":null,"deletedReason":null,"hideAuthor":false,"authorIsUnreviewed":false,"user":{"__ref":"User:q6ZLMygEtBW5mhgGY"},"currentUserVote":null,"currentUserExtendedVote":null,"baseScore":6,"extendedScore":null,"score":0.0015154731227084994,"voteCount":4,"emojiReactors":{},"af":false,"afDate":null,"moveToAlignmentUserId":null,"afBaseScore":0,"afExtendedScore":null,"suggestForAlignmentUserIds":[],"reviewForAlignmentUserId":null,"needsReview":true,"answer":false,"parentAnswerId":null,"retracted":false,"postVersion":"0.15.0","reviewedByUserId":null,"shortform":false,"shortformFrontpage":true,"lastSubthreadActivity":"2021-03-04T00:49:47.045Z","moderatorHat":false,"hideModeratorHat":null,"nominatedForReview":null,"reviewingForReview":null,"promoted":null,"promotedByUser":null,"directChildrenCount":1,"votingSystem":"eaEmojis","isPinnedOnProfile":false,"debateResponse":null,"rejec
ted":false,"rejectedReason":null,"modGPTRecommendation":null,"originalDialogueId":null},"Revision:pqw82ArBvWqz6QjRB_contents":{"_id":"pqw82ArBvWqz6QjRB_contents","__typename":"Revision","html":"<p>No I agree on 2! I'm just saying even from a longtermist perspective, it may not be as important and tractable as improving institutions in orthogonal ways.<\/p>","plaintextMainText":"No I agree on 2! I'm just saying even from a longtermist perspective, it may not be as important and tractable as improving institutions in orthogonal ways.","wordCount":27},"Comment:pqw82ArBvWqz6QjRB":{"_id":"pqw82ArBvWqz6QjRB","__typename":"Comment","postId":"t4Lqh7GHBM9YyEDg8","tagId":null,"tag":null,"relevantTagIds":[],"relevantTags":[],"tagCommentType":"DISCUSSION","parentCommentId":"ppRR7jeL9dxBaNvhY","topLevelCommentId":"LxHcNDY5SaqQwkg5G","descendentCount":0,"title":null,"contents":{"__ref":"Revision:pqw82ArBvWqz6QjRB_contents"},"postedAt":"2021-03-04T00:49:47.045Z","repliesBlockedUntil":null,"userId":"3LR86ZAiczoXGku6t","deleted":false,"deletedPublic":false,"deletedByUserId":null,"deletedReason":null,"hideAuthor":false,"authorIsUnreviewed":false,"user":{"__ref":"User:3LR86ZAiczoXGku6t"},"currentUserVote":null,"currentUserExtendedVote":null,"baseScore":5,"extendedScore":null,"score":0.001242061727680266,"voteCount":4,"emojiReactors":{},"af":false,"afDate":null,"moveToAlignmentUserId":null,"afBaseScore":0,"afExtendedScore":null,"suggestForAlignmentUserIds":[],"reviewForAlignmentUserId":null,"needsReview":null,"answer":false,"parentAnswerId":null,"retracted":false,"postVersion":"0.15.0","reviewedByUserId":null,"shortform":false,"shortformFrontpage":true,"lastSubthreadActivity":"2021-03-04T00:49:47.051Z","moderatorHat":false,"hideModeratorHat":null,"nominatedForReview":null,"reviewingForReview":null,"promoted":null,"promotedByUser":null,"directChildrenCount":0,"votingSystem":"eaEmojis","isPinnedOnProfile":false,"debateResponse":null,"rejected":false,"rejectedReason":null,"modGPTRecommend
ation":null,"originalDialogueId":null},"Revision:nPwgzekqfoNXFpvZJ_contents":{"_id":"nPwgzekqfoNXFpvZJ_contents","__typename":"Revision","html":"<p>Thank you for your feedback, Flodorner! <\/p><p>First, we certainly agree that a more detailed description could be productive for some of the topics in this piece, including your example on scenario planning and other decision making methods. At more than 6000 words this is already a long piece, so we were aiming to limit the level of detail to what we felt was necessary to explain the proposed framework, without necessarily justifying all nuances. Depending on what the community believes is most useful, we are happy to write follow-up pieces with either a higher level of detail for a selected few topics of particular interest (for a more technical discussion on e.g. decision making methods), or a summary piece covering all topics with a lower level of detail (to explain the same framework to non-experts). <\/p><p>As for your second issue you are completely correct, it has been corrected. <\/p><p>Regarding your last point, we also agree that the repugnant conclusion is not an example of cluelessness in itself. However, the lack of consensus about how to solve the repugnant conclusion is one example of how we still have things to figure out in terms of population ethics (i. e. are morally clueless in this area).<\/p>","plaintextMainText":"Thank you for your feedback, Flodorner! \n\nFirst, we certainly agree that a more detailed description could be productive for some of the topics in this piece, including your example on scenario planning and other decision making methods. At more than 6000 words this is already a long piece, so we were aiming to limit the level of detail to what we felt was necessary to explain the proposed framework, without necessarily justifying all nuances. 
Depending on what the community believes is most useful, we are happy to write follow-up pieces with either a higher level of detail for a selected few topics of particular interest (for a more technical discussion on e.g. decision making methods), or a summary piece covering all topics with a lower level of detail (to explain the same framework to non-experts). \n\nAs for your second issue you are completely correct, it has been corrected. \n\nRegarding your last point, we also agree that the repugnant conclusion is not an example of cluelessness in itself. However, the lack of consensus about how to solve the repugnant conclusion is one example of how we still have things to figure out in terms of population ethics (i. e. are morally clueless in this area).","wordCount":201},"Comment:nPwgzekqfoNXFpvZJ":{"_id":"nPwgzekqfoNXFpvZJ","__typename":"Comment","postId":"t4Lqh7GHBM9YyEDg8","tagId":null,"tag":null,"relevantTagIds":[],"relevantTags":[],"tagCommentType":"DISCUSSION","parentCommentId":"nyFELFCfQSwaQHX7N","topLevelCommentId":"nyFELFCfQSwaQHX7N","descendentCount":0,"title":null,"contents":{"__ref":"Revision:nPwgzekqfoNXFpvZJ_contents"},"postedAt":"2021-02-16T14:14:53.299Z","repliesBlockedUntil":null,"userId":"q6ZLMygEtBW5mhgGY","deleted":false,"deletedPublic":false,"deletedByUserId":null,"deletedReason":null,"hideAuthor":false,"authorIsUnreviewed":false,"user":{"__ref":"User:q6ZLMygEtBW5mhgGY"},"currentUserVote":null,"currentUserExtendedVote":null,"baseScore":5,"extendedScore":null,"score":0.0012277899077162147,"voteCount":4,"emojiReactors":{},"af":false,"afDate":null,"moveToAlignmentUserId":null,"afBaseScore":0,"afExtendedScore":null,"suggestForAlignmentUserIds":[],"reviewForAlignmentUserId":null,"needsReview":true,"answer":false,"parentAnswerId":null,"retracted":false,"postVersion":"0.15.0","reviewedByUserId":null,"shortform":false,"shortformFrontpage":true,"lastSubthreadActivity":"2021-02-16T14:14:53.306Z","moderatorHat":false,"hideModeratorHat":null,"nominate
dForReview":null,"reviewingForReview":null,"promoted":null,"promotedByUser":null,"directChildrenCount":0,"votingSystem":"eaEmojis","isPinnedOnProfile":false,"debateResponse":null,"rejected":false,"rejectedReason":null,"modGPTRecommendation":null,"originalDialogueId":null},"Revision:kevgdPgWssSsk5fmi_contents":{"_id":"kevgdPgWssSsk5fmi_contents","__typename":"Revision","html":"<p>I think it's really not clear that reforming institutions to be more longtermist has an outsized long run impact compared to many other axes of institutional reform.<\/p><p>We know what constitutes good outcomes in the short run, so if we can design institutions to produce better short run outcomes, that will be beneficial in the long run insofar as those institutions endure into the long run. Institutional changes are inherently long-run.<\/p>","plaintextMainText":"I think it's really not clear that reforming institutions to be more longtermist has an outsized long run impact compared to many other axes of institutional reform.\n\nWe know what constitutes good outcomes in the short run, so if we can design institutions to produce better short run outcomes, that will be beneficial in the long run insofar as those institutions endure into the long run. 
Institutional changes are inherently long-run.","wordCount":70},"Comment:kevgdPgWssSsk5fmi":{"_id":"kevgdPgWssSsk5fmi","__typename":"Comment","postId":"t4Lqh7GHBM9YyEDg8","tagId":null,"tag":null,"relevantTagIds":[],"relevantTags":[],"tagCommentType":"DISCUSSION","parentCommentId":"boTao4obr2YurBCd5","topLevelCommentId":"LxHcNDY5SaqQwkg5G","descendentCount":2,"title":null,"contents":{"__ref":"Revision:kevgdPgWssSsk5fmi_contents"},"postedAt":"2021-02-21T03:51:58.089Z","repliesBlockedUntil":null,"userId":"3LR86ZAiczoXGku6t","deleted":false,"deletedPublic":false,"deletedByUserId":null,"deletedReason":null,"hideAuthor":false,"authorIsUnreviewed":false,"user":{"__ref":"User:3LR86ZAiczoXGku6t"},"currentUserVote":null,"currentUserExtendedVote":null,"baseScore":1,"extendedScore":null,"score":0.000011006528438883834,"voteCount":5,"emojiReactors":{},"af":false,"afDate":null,"moveToAlignmentUserId":null,"afBaseScore":0,"afExtendedScore":null,"suggestForAlignmentUserIds":[],"reviewForAlignmentUserId":null,"needsReview":null,"answer":false,"parentAnswerId":null,"retracted":false,"postVersion":"0.15.0","reviewedByUserId":null,"shortform":false,"shortformFrontpage":true,"lastSubthreadActivity":"2021-03-04T00:49:47.045Z","moderatorHat":false,"hideModeratorHat":null,"nominatedForReview":null,"reviewingForReview":null,"promoted":null,"promotedByUser":null,"directChildrenCount":1,"votingSystem":"eaEmojis","isPinnedOnProfile":false,"debateResponse":null,"rejected":false,"rejectedReason":null,"modGPTRecommendation":null,"originalDialogueId":null},"Revision:6ktFytXhM7qfFie7d":{"_id":"6ktFytXhM7qfFie7d","__typename":"Revision","version":"1.0.0","updateType":"patch","editedAt":"2022-11-09T12:14:04.022+00:00","userId":"LFsPKPZ36ZTZQXHct","html":"<p><i>Estimated reading time: 20-30 minutes<\/i><\/p><p><i>-We would like to thank the following for their excellent feedback and guidance throughout this article, in no particular order: Tyler M. 
John, Max Stauffer, Aksel Braanen Sterri, Eirik Mofoss, Samuel Hilton, Konrad Seifert, Tildy Stokes, Erik Aunvåg Matsen and Marcel Grewal Sommerfelt.<\/i><\/p><h1><strong>0.0 Introduction<\/strong><\/h1><p>This article is co-authored by five members of Effective Altruism Norway as a pilot project to test if we can contribute in a valuable way to the emerging field of longtermism and policy making.<\/p><p>In the article we summarize some of the work that is being done in the emerging field of longtermism, using a new structure to classify the different interventions (see Figure 1: Three objectives of longtermist policy making). Then, for each objective we describe related challenges and potential solutions, and give some examples of current ongoing work.<\/p><p>We hope that the new structure can help improve coordination in this emerging field, and enable improved prioritization of interventions. If this structure resonates well with established experts in the field, we are happy to write up a shorter version of this article that could serve as an introduction to longtermist policy making for non-experts. Already, at 17 pages this article is one fourth of the length of the <a href=\"https://globalprioritiesinstitute.org/wp-content/uploads/GPI-research-agenda-version-2.1.pdf\"><u>GPI research agenda<\/u><\/a>, which covers many of the same topics. <\/p><p>Finally, we have emphasized some aspects of longtermist policy making that we believe have been underemphasized in the effective altruism- and longtermism communities in the past. Examples include scenario planning, robust decision making and redteaming among others, which we have described together with forecasting in section 2.1 as essential epistemic capabilities for long-term governance. 
These tools are complementary to forecasting-based epistemic capabilities that the EA/longtermist communities already promote, and we hope that they will receive increased attention going forward.<\/p><p>We hope to produce 1-3 further articles on similar topics through 2021, and welcome any experts who have capacity to provide feedback on our work.<\/p><p>--------------------------------------------------------------------<\/p><p>In 2019 William MacAskill proposed a definition of the term <a href=\"https://forum.effectivealtruism.org/posts/qZyshHCNkjs3TvSem/longtermism\"><u>longtermism<\/u><\/a> as the<i> view that those who live at future times matter just as much, morally, as those who live today<\/i>. There are many reasons to believe that actions can have a substantial impact on the future. For instance, the economic growth seen in the past two centuries has lifted billions out of poverty. In addition to this, any long-term consequences of climate change caused by humans could decrease the life quality of several generations to come. Our generation is also one of the first who has had the technological potential to destroy civilization through e.g. nuclear weapons, and thereby eliminating all future of humanity. This means that actions we take today can improve the course of history for hundreds of generations to come.<\/p><p>Interest in the welfare of future generations precedes the MacAskill definition of longtermism from 2017. In 2005 the Future of Humanity Institute was established at Oxford university. In 2009, the <a href=\"https://www.csf.gov.sg/who-we-are/\"><u>Centre for Strategic Futures<\/u><\/a> (CSF) was established by the Singaporian Government as a futures think tank. In 2017 William MacAskill started using the word “longtermism” as a term for the cluster of views that involved concern about ensuring the long-run future goes as well as possible. 
Since then, <a href=\"https://forum.effectivealtruism.org/tag/longtermism-philosophy\"><u>many have contributed<\/u><\/a> to the development of the philosophical field. The <a href=\"https://globalprioritiesinstitute.org/\"><u>Global Priorities Institute<\/u><\/a> (GPI) in Oxford was established in 2018 with the mission to <i>conduct and promote world-class, foundational academic research on how most effectively to do good<\/i>. In 2020 GPI published a new <a href=\"https://globalprioritiesinstitute.org/research-agenda/\"><u>research agenda<\/u><\/a>, where one of its two sections was dedicated to longtermism. These are just some of several milestones in the short history of longtermism. <\/p><p>If we believe that the future is what matters most and that we can influence it through our policy making, then it follows that the long-run outcomes of enacted policies should be one of the key considerations of the policy making process. However, most political systems are not prioritising long-term planning sufficiently compared to the potential benefits just for existing generations – nevermind thinking about the moral importance of future generations. <\/p><p>There are examples of different institutions and policy makers that are putting longtermism on the agenda today, but the time frame they consider long-term differs. Time horizons of longtermist organizations that frequently interact with policy makers (e.g. <a href=\"https://www.appgfuturegenerations.com/\"><u>APPG<\/u><\/a> and <a href=\"https://www.alpenglow.org.uk/\"><u>Alpenglow<\/u><\/a>) are constrained by the norms in the current policy making process. Although academics talking about \"longtermism\" can look thousands of years ahead, actors seeking to practically influence policy organisations, including ourselves, are typically considering shorter time horizons, e.g. 20-30 years in the future. 
<\/p><p>This article will explore three categories of objectives for longtermist policy making and might serve as a guide towards shaping longtermist policy suggestions. These objectives are summarized in figure 1.<\/p><figure class=\"image image_resized\" style=\"width:624px\"><img src=\"http://res.cloudinary.com/cea/image/upload/v1667996044/mirroredImages/t4Lqh7GHBM9YyEDg8/dz3yy1a99m7ei9v9fcbb.png\"><figcaption><strong>Figure 1<\/strong>: Representation of the three objectives longtermist policies should focus on. Objective 1 and 2 serve as foundations for the more direct objective(s) above them.<\/figcaption><\/figure><p>On top of the pyramid is the objective directly benefiting future generations - i.e. ensuring that there is a future for human civilization, and that it is as positive as possible. This objective builds on the condition that policy making institutions are enabled to develop such policies, which brings us to part two of the pyramid. This part describes three essential conditions to achieve successful behaviour change interventions; capability, motivation and opportunity, reflecting the <a href=\"https://link.springer.com/article/10.1186/1748-5908-6-42\"><u>COM-B system for institutional reform<\/u><\/a> (Michie et. al. 2011). The two upper pieces of the pyramid both rest upon the fundamental part, which concerns the objective of <i>understanding longtermism<\/i>. Interventions focused on this objective have a more indirect impact mechanism.<\/p><p>A policy intervention should optimize for one or several of these objectives in order to qualify as a \"longtermist policy proposal\".<\/p><p>Note that the proposals in figure 1 are synergistic - if we improve our performance on one of the objectives, it may become easier to also improve on others. In general, objective one works as an enabler of objective two, and both objective one and two are enablers of the third objective. 
For instance, if a policy making institution is able to agree on a set of KPIs to measure the long-term quality of a society (as a partial solution to objective 1 in figure 1), then they can set up a forecasting infrastructure for these KPIs (developing capabilities to govern for the long term, as described in objective 2). With this forecasting infrastructure in place, long-term effects of proposed policies will be more visible to the electorate, creating stronger incentives for politicians to optimize for long-term outcomes (solving another part of objective 2; motivations). This will for instance make it easier to prioritize catastrophic risk mitigation (enabling investment in efforts focused on objective 3), etc.<\/p><p>Several of the ideas in each category of objectives would be familiar to experienced effective altruists due to the natural synergies of longtermism and effective altruism. However, even experienced effective altruists may not have encountered all of the topics in this article; examples of topics that the experienced reader may find interesting include:<\/p><ul><li>The three-layered model of objectives of longtermist policies in figure 1<\/li><li>The discussion of governance KPIs in section 1.1<\/li><li>Non-forecasting tools like e.g. scenario planning as essential epistemic capabilities in section 2.1, on par with forecasting<\/li><li>Structured examples of how policy making institutions can be reformed to benefit future generations in section 2.4<\/li><li>The discussion of sustainability as a way to either mitigate catastrophic risk or a way to boost inclusive progress in section 3.3<\/li><\/ul><p>While the objectives are relevant for policy makers in a broad range of governance models and in countries with different levels of democratic development, the examples in this article are primarily focused on policy making on national levels in industrialized, democratic countries. 
<\/p><h1><strong>1.0 Further our understanding of longtermism and adjacent scientific fields<\/strong><\/h1><p>In the ongoing field of exploring strategic considerations related to longtermist policy making, there is a need for agreement of the meaning of the word. The bottom piece of the pyramid in figure 1 concerns our understanding of longtermism. William MacAskill <a href=\"https://forum.effectivealtruism.org/posts/qZyshHCNkjs3TvSem/longtermism#Strong_Longtermism\"><u>proposes <\/u><\/a>three premises that make up what he calls the minimum definition of longtermism: (1) Those who live at future times matter as much, morally as those who live today, (2) society currently privileges those who live today above those who live in the future, and (3) we should take action to rectify that, and help ensure the long-run future goes well. Based on these premises, MacAskill and others have proposed political measures like <a href=\"https://philpapers.org/archive/JOHLIR.pdf\"><u>future assemblies<\/u><\/a> or a <a href=\"https://drive.google.com/file/d/1lJHBKfIROiyc8yxVaZnKEWUEYfOg06Eh/view\"><u>Ministry of the Future<\/u><\/a> (see section 2.4 for further elaboration). Organizations like the <a href=\"https://globalprioritiesinstitute.org/wp-content/uploads/gpi-research-agenda.pdf\"><u>Global Priorities Institute<\/u><\/a> (GPI) and the <a href=\"https://www.fhi.ox.ac.uk/\"><u>Future of Humanity Institute<\/u><\/a> (FHI) are currently working on establishing longtermism as a scientific field of inquiry. <\/p><h2>1.1 What does a good society look like?<\/h2><p>Two important constraints on our current ability to positively influence the future are (i) uncertainty about what a good society looks like, i.e. moral cluelessness, and (ii) how we can best create one, i.e. strategic cluelessness. Different scientific and philosophical fields have attempted to investigate the first question in different ways. 
One example of moral cluelessness is the repugnant conclusion, which assumes that by adding more people to the world, and proportionally staying above a given average in happiness, one can reach a state of minimal happiness for an infinitely large population. However, we aren't completely clueless: here are some metrics that are commonly used to describe more or less positive aspects of a society. <\/p><p>Economists frequently use KPIs (Key Performance Indicators) to try to measure different facets of a successful society. GDP and GDP growth is perhaps the most common, while metrics like Gini-coefficients, average lifespan, GHG emissions, or the Human Development Index are used to describe inequality, health, sustainability and economic development, respectively.<\/p><p>While none of these metrics cover all that matters in a society on their own, a combination of such KPIs may capture most of the aspects that we care about. The “<a href=\"https://drive.google.com/file/d/1lJHBKfIROiyc8yxVaZnKEWUEYfOg06Eh/view\"><u>Portugal we want<\/u><\/a>” project is an example of a collaborative effort to converge on a set of KPIs to use in governance for the long term. There are also other examples that similarly attempt to stake out the course for the future of the country, e.g. the “<a href=\"https://www.cynnalcymru.com/project/the-wales-we-want/\"><u>Wales we want<\/u><\/a>”-project, or the japanese work on “<a href=\"https://www.japanpolicyforum.jp/society/pt20190109210522.html\"><u>Future Design<\/u><\/a>”. <\/p><p>Another, more academically oriented example of projects that attempt to compile partial descriptions of a good society into more complete descriptions, is the <a href=\"https://globalprioritiesinstitute.org/wp-content/uploads/GPI-research-agenda-version-2.1.pdf\"><u>GPI research agenda<\/u><\/a>. 
It lists several other partial approaches to measure broader social welfare through a set of KPIs, including informal discussions by <a href=\"http://www.stafforini.com/blog/bostrom/\"><u>Bostrom <\/u><\/a>and <a href=\"http://reflectivedisequilibrium.blogspot.com/2013/12/what-proxies-to-use-for-flow-through.html\"><u>Shulman<\/u><\/a>. <\/p><h2>1.2 How do we create a good society?<\/h2><p>When we want to plan for a good society in the future we need to make prioritizations. This can be very important for the long-run trajectory of society as some efforts to improve society are much <a href=\"https://80000hours.org/problem-profiles/global-priorities-research/\"><u>more effective than others<\/u><\/a>. <a href=\"https://80000hours.org/2013/12/a-framework-for-strategically-selecting-a-cause/\"><u>Cause prioritization<\/u><\/a> is a philosophical field involved with evaluating and comparing different cause areas in their effectiveness. Some of the organizations working on cause prioritization are <a href=\"https://80000hours.org/articles/future-generations/\"><u>80,000 Hours<\/u><\/a>, the <a href=\"https://www.openphilanthropy.org/blog/update-cause-prioritization-open-philanthropy\"><u>Open Philanthropy Project<\/u><\/a>, and The Center for Reducing Suffering. The latter <a href=\"https://centerforreducingsuffering.org/the-benefits-of-cause-neutrality/\"><u>proposes<\/u><\/a> that starting out with a cause-neutral attitude to longtermist policy making is crucial to succeed at the cause prioritization. 
To achieve this, effective institutions and organizations need to: <\/p><ol><li>Build a broad movement for longtermist policy change so that these efforts don’t get stuck in a specific cause area.<\/li><li>Explicitly work on prioritization research so that cause areas can be accurately compared, as well as induce attitude change in political and societal institutions (see the middle piece of the pyramid: <i>shape policy making institutions for future generations<\/i>).<\/li><\/ol><p>One important concept in cause prioritization is the notion of <i>crucial considerations<\/i> - which are strategic questions that can significantly change the optimal strategy when they are taken into consideration. Some of the crucial consideration of longtermist policy making includes, but is not limited to, our evaluation of the <a href=\"https://forum.effectivealtruism.org/posts/XXLf6FmWujkxna3E6/are-we-living-at-the-most-influential-time-in-history-1\"><u>hinge of history hypothesis<\/u><\/a> (HoH), as well as other considerations discussed in the Global Priorities Institute’s <a href=\"https://globalprioritiesinstitute.org/research-agenda-web-version/\"><u>new research agenda<\/u><\/a>. The HoH assumes that this century, or perhaps especially the coming decades, is the most influential period in all of human history. Therefore, our evaluation of HoH’s likelihood is one of the determinants of how we should influence policy makers and the way we distribute the resources we have available today. If we believe that the coming century is merely as influential as a typical century, then we - like <a href=\"https://forum.effectivealtruism.org/posts/Eey2kTy3bAjNwG8b5/the-emerging-school-of-patient-longtermism\"><u>patient longtermist<\/u><\/a> - will probably spend less of our philanthropic resources now, and save more to spend them later. However, if we believe that this period is the most “hingey” period of all of human history - e.g. 
because our current values could be locked in for generations to come (i.e. <i>value lock-in view<\/i>), or if we are living in a<i> time of perils <\/i>- then we should rather spend more of our philanthropic resources now to ensure the most impact. These considerations can be applied to our spending of any type of philanthropic capital - either money, political influence or other resources of value. If we don’t live at the HoH, it then seems most logical to spend the next decades focusing on building political influence, rather than spending political capital to influence specific decisions in the near future. <\/p><h1><strong>2.0 Shape policy making institutions for future generations<\/strong><\/h1><p>So far, we have considered the problem of longtermism on a general level, and we will therefore describe in this part different measures and obstacles connected to developing and motivating longtermist policy making in institutions. This section reflects the second piece of the pyramid in figure 1, and further elaborates on the COM-B system to ensure successful interventions in behavioural change. We will first consider epistemic determinants and how we can develop epistemic <i>capabilities<\/i> like forecasting and scenario planning, as well as redteaming and robust decision making. Then we will look at how we can <i>motivate<\/i> policy makers to prioritize future generations, and in the last paragraph we will consider important institutional barriers to such policy making, and how to remove them in order to to create <i>opportunities<\/i> for long-termist policy making. 
This section is largely a summary of the work by John & MacAskill, so readers who've studied their work can skip it.<\/p><h2>2.1 Develop epistemic capabilities for long-term policy making<\/h2><p>Lack of knowledge about the future is likely one of the main sources of political short-termism, also known as epistemic determinants in <a href=\"https://www.researchgate.net/publication/343345291_Longtermist_Institutional_Reform\"><u>Longtermist Institutional Reform<\/u><\/a> by Tyler John and William MacAskill. These determinants lead to discounting of the value of long-term beneficial policies, making them less likely to be enacted. Some discounting is rational simply because there is a lot of uncertainty about the benefits of long-term policies. Irrational discounting is another source of short-termism which is caused by cognitive biases and attentional asymmetries between the future and nearby past. Vividness effects can make people react more strongly to vivid sources of information like news, videos and graphics compared to scientific research. People are also often over-confident in their ability to control and eliminate risks under situations of uncertainty. See <i>Thinking, fast and slow <\/i>(2011) by Daniel Kahneman for further details. Although these shortcomings are limiting politicians in their effectiveness, there has also been <a href=\"https://globalprioritiesinstitute.org/christian-tarsney-the-epistemic-challenge-to-longtermism/\"><u>cast doubt<\/u><\/a> on the possibility of predicting the future at all by philosopher Christian Tarsney.<\/p><p>Politicians work with the limitations of time and influence which can lead to attentional asymmetries, i.e. when determining the effectiveness of policies, they tend to focus too much on recent events, rather than basing it on future projections. The result of this asymmetry can be that politicians work with less accurate predictions. Furthermore, because of these reality constraints (i.e. 
time and power), politicians are forced to utilize heuristics like planning fallacy, availability bias and the law of small numbers to tackle current and future issues. However, we have also seen that the long-term can be prioritized politically with the Paris Agreement, carbon tax (e.g. in <a href=\"https://web.archive.org/web/20100615055008/http://iea.org/publications/free_new_Desc.asp?PUBS_ID=1580\"><u>Norway in 1991<\/u><\/a>), or the Danish <a href=\"https://klimaraadet.dk/en/about-danish-council-climate-change\"><u>council on climate change<\/u><\/a>. <\/p><p>To deal with these problems, politicians need effective means of forecasting with different sources - e.g. using teams of <a href=\"https://goodjudgment.com/\"><u>superforecasters<\/u><\/a> and domain experts, or market-based approaches like prediction markets, to obtain high-quality information about the future.This needs to be implemented to overcome the information barrier (knowledge about the future) and the attention barriers (making changes in future outcomes more salient) so that politicians can make informed decisions about the future. <\/p><p>To maximize the utility gained from this information, decision makers also need to invest in institutions and organizations that can develop epistemic capabilities beyond forecasting, e.g. scenario planning, robust decision making, and red teaming, among others. In <a href=\"https://www.smestrategy.net/blog/what-is-scenario-planning-and-how-to-use-it\"><u>scenario planning<\/u><\/a> exercises, policy makers define a set of scenarios that jointly describe the possible futures that are likely enough to be considered, that differ depending on factors of high uncertainty, and with significant implications for the optimal policy choice. Then, policies are evaluated for how they perform across the range of scenarios. 
Depending on the risk preferences of the policy makers, they should choose a robust policy that both has a high expected value across scenarios, and fails as gracefully as possible in the worst scenarios. Scenario planning could also be supplemented with <a href=\"https://link.springer.com/chapter/10.1007/978-3-030-05252-2_2\"><u>robust decision making<\/u><\/a> which especially emphasizes strategies that do well in worst-case scenarios. Additionally, <a href=\"https://www.synopsys.com/glossary/what-is-red-teaming.html\"><u>red teaming<\/u><\/a> can provide a solid method of stress-testing the plans we make for the future by taking an adversarial approach. <\/p><p>Several researchers within the EA movement are working on these issues, e.g. Neil Dullaghan, Michael MacKenzie, and Eva Vivalt. Dullaghan <a href=\"https://forum.effectivealtruism.org/posts/kCkd9Mia2EmbZ3A9c/deliberation-may-improve-decision-making\"><u>proposes<\/u><\/a> deliberation as a means of reaching better cooperation across party-lines and long-term thinking. He also claims that there may be a link between deliberation and long-term thinking; specifically in areas like climate change and the environment. Furthermore, MacKenzie <a href=\"https://www.oxfordhandbooks.com/view/10.1093/oxfordhb/9780198747369.001.0001/oxfordhb-9780198747369-e-7\"><u>argues<\/u><\/a> that deliberation can help us overcome our cognitive biases by for instance appealing to the idea “saving future children'' to ensure longtermist thinking. In order to gather all these findings within forecasting, Vivalt, a researcher at the Australian National University and University of Toronto, <a href=\"https://forum.effectivealtruism.org/posts/Z7RTJePkiWBH92qqo/eva-vivalt-forecasting-research-results\"><u>proposes<\/u><\/a> a platform to coordinate the research and the ability of each researcher to forecast. These are only some examples of researchers that are working to improve institutional decision making among many more. 
Still, it is one of the top recommended career paths by <a href=\"https://80000hours.org/problem-profiles/improving-institutional-decision-making/\"><u>80000 Hours<\/u><\/a>, as “Improving the quality of decision-making in important institutions could improve our ability to solve almost all other problems”.<\/p><h2>2.2 Motivate policymakers to prioritize future generations<\/h2><p>Even if there are policymakers who have the necessary capabilities to improve the welfare of future generations, there are still several factors that discourage them from doing so. These factors are referred to as motivational determinants in the <a href=\"https://philpapers.org/archive/JOHLIR.pdf\"><u>Longtermist Institutional Reform<\/u><\/a> by Tyler John and William MacAskill, from which the following three sections are heavily based on.<\/p><p>People tend to have a high <a href=\"https://en.wikipedia.org/wiki/Time_preference\"><u>time preference<\/u><\/a> for the present, leading to greater discounting of the value of long-term benefits, which makes policies more short-termist. This is a problem that affects both voters and people in power, although the severity of this problem is unclear.<\/p><p>Self-interest and relational favouritism another source of short-termism, as many people care more about themselves and their relatives than future generations. Self-beneficial policies are generally short-termist as policymakers and their relatives will only live for a short amount of time compared to the potential lifespan of humanity.<\/p><p>Cognitive biases may also affect people’s political decisions, two known biases are the identifiable victim effect and procrastination. The <a href=\"https://en.wikipedia.org/wiki/Identifiable_victim_effect\"><u>Identifiable victim effect<\/u><\/a> is the tendency to prioritize individuals that are visible over individuals that are statistical or theoretic. 
As future generations are invisible and haven’t been born yet, this naturally leads to short-termism. <\/p><p>Procrastination drives people to delay difficult problems until they become urgent and demand action. The further a long-term beneficial action is delayed, the less beneficial it is likely to be for future generations. Longtermism is especially prone to procrastination due to its extremely long timeframe.<\/p><p>Politicians are often even more short-termist than these factors would suggest, and they may frequently make extremely short-term decisions that have minimal benefits and significant costs within a few years, due to the various institutional factors discussed below. <\/p><h2>2.3 Remove institutional barriers to longtermist policy making<\/h2><p>Even policymakers that have the expertise and motivation to improve the welfare of future generations can be held back by institutional barriers that are preventing them from effectively advocating for longtermist policies. Many of these factors are due to the way today’s governmental institutions are designed; other sources include politicians’ economic dependencies and the media.<\/p><p>Most governments have short election cycles that incentivize short-term policy. Elected representatives naturally want to be re-elected, and one way to gain the favour of potential voters is to provide evidence that their previous time in office brought positive and immediate effects, which is predominantly achieved by initiating short-term policies.<\/p><p>Along with short election cycles, most performance measures mainly evaluate the short-term effects of policies, further discouraging policymakers from advocating for long-term policy.<\/p><p>Time inconsistency is also a problem in governmental institutions because subsequent policymakers can repeal previously enacted future-beneficial policies, as well as redirect investments that were originally intended for future generations. 
Most governments lack strong institutions dedicated to protecting the interests of future generations, which could help combat the problem of time inconsistency.<\/p><p>The media, which is largely focused on today’s current events, demand immediate reactions from policymakers. This pressures the policymakers to focus on short-term issues in order to build their reputation, as abstaining from doing so might lower their odds of re-election.<\/p><h2>2.4 Proposed mechanisms<\/h2><p>To deal with the problems mentioned above (lacking capabilities, disincentivized policymakers and institutional barriers), there is a dire need for institutional reform. There are many different ways to go about this, and there is still a lot of uncertainty about what might be the best solutions. What follows is a list of various longtermist policy proposals chosen with help from Tyler John. The proposals are divided into five main categories, with examples below. A more comprehensive list can be found <a href=\"https://forum.effectivealtruism.org/posts/op93xvHkJ5KvCrKaj/institutions-for-future-generations#Four_branch_Model_of_Government\"><u>here<\/u><\/a>.<\/p><p><strong>Designated stakeholders<\/strong><\/p><p>Key decision-makers or their advisors are appointed as responsible for protecting the interests of future people. Some examples of these are:<\/p><ul><li>Ministers and Executive Departments<\/li><li>Ombudsperson for Future Generations<\/li><li>Parliamentary committees<\/li><\/ul><p><strong>Information interventions<\/strong><\/p><p>Affects how information about the impact of future policies is gained or made publicly available. Some examples of these are:<\/p><ul><li>In-government Think Tank<\/li><li>Posterity Impact Assessments<\/li><li>Intergenerational Deliberation Day<\/li><\/ul><p><strong>Voting mechanisms<\/strong><\/p><p>Democratic election mechanisms and policy voting rules are redesigned to promote candidates that are expected to benefit future people. 
Some examples of these are:<\/p><ul><li>Choosing legislators via lottery<\/li><li>Demeny voting<\/li><li>Longer election cycles<\/li><\/ul><p><strong>Liability mechanisms<\/strong><\/p><p>Mechanisms that hold current decision-makers liable if their decisions lead to poor outcomes in the future, including formal rights for future people. Some examples of these are:<\/p><ul><li>Intergenerational externality taxes<\/li><li>Making court systems more future-oriented<\/li><li>Pay for Long-term performance<\/li><\/ul><p><strong>Reallocation of resources<\/strong><\/p><p>Control of current resources is deferred to future people. Some examples of these are:<\/p><ul><li>Heritage funds<\/li><li>Financial Institutions for Intergenerational Borrowing<\/li><li>Lower social discount rate<\/li><\/ul><p>For more in-depth analysis of the various proposals, see “Longtermist Institutional Design Literature Review” by Tyler John.’<\/p><p>In addition to the five categories above, another way to encourage long-term policy could be to influence society to be more long-term friendly. An example of this is Roman Krznaric’s writings where he establishes terms and concepts that could enable more longtermist thinking. <\/p><h1><strong>3.0 Directly influence the future trajectory of human civilization<\/strong><\/h1><p>The top layer of the pyramid in figure 1 considers how one can influence the future of humanity in a more direct way than the objectives in layer 1 and 2 does. There are several methods to directly improve the future and positively shift the trajectory of civilization. One approach is to avoid the bad scenarios (as exemplified by the red scenarios in Figure 2), such as extinction and major catastrophes. 
Another approach is to boost the good scenarios (exemplified by the green scenarios in Figure 2) by increasing the rate of inclusive progress - either by increasing economic growth, by making progress more inclusive, or by increasing our ability to convert economic wealth into wellbeing. <\/p><figure class=\"image image_resized\" style=\"width:624px\"><img src=\"http://res.cloudinary.com/cea/image/upload/v1667996044/mirroredImages/t4Lqh7GHBM9YyEDg8/qcctw3cbjlfqdrff7mwq.png\"><figcaption><strong>Figure 2<\/strong>: Illustration of positive and negative trajectories of civilization.<\/figcaption><\/figure><h2>3.1 Mitigate catastrophic risk and build resiliency to tail events and unknown unknowns<\/h2><p>In the effective altruism movement, one commonly recognized way to positively influence the future is to make sure that it actually exists and avoid <a href=\"https://longtermrisk.org/reducing-risks-of-astronomical-suffering-a-neglected-priority/#III_Reducing_s-risks_is_both_tractable_and_neglected\"><u>scenarios of extreme suffering<\/u><\/a>, i.e. by avoiding existential risks. By developing longtermist policy and institutions, we can better prepare for the future by building resiliency to both known and unknown existential risks.<\/p><figure class=\"image image_resized\" style=\"width:624px\"><img src=\"http://res.cloudinary.com/cea/image/upload/v1667996044/mirroredImages/t4Lqh7GHBM9YyEDg8/si5ga5enygb19xnwiigi.png\"><figcaption><strong>Figure 3<\/strong>: Examples of risks based on a <a href=\"https://www.existential-risk.org/concept.html\"><u>figure<\/u><\/a> by Nick Bostrom<\/figcaption><\/figure><p>Let us start with some definitions. Bostrom explains the difference between existential risk and catastrophic risk in <a href=\"https://www.existential-risk.org/concept.html\"><u>Existential Risk Prevention as Global Priority<\/u><\/a>. 
Existential risks are both pan-generational and crushing, which means that they drastically reduce the quality of life or cause death that humanity cannot recover from. Compared to this, risks that are merely globally catastrophic do not individually threaten the survival of humanity. Assuming that existence is preferable to non-existence, existential risks are considered significantly worse than global catastrophic risks because they affect all future generations. <\/p><p>However, global catastrophes may drastically weaken critical systems and our ability to tackle a second catastrophe. This argument is presented by the Global Catastrophic Risk Institute in a paper about <a href=\"http://gcrinstitute.org/papers/003_double-catastrophe.pdf\"><u>double catastrophes<\/u><\/a> with a case study on how geoengineering may be severely affected by other catastrophes. Moreover, many of the practices that can help us avoid globally catastrophic risks are also useful to prevent existential risks. We have titled this section “mitigate catastrophic risk” to ensure that we cover as many of the risks that may significantly impact the long-term future of humanity as possible.<\/p><p>The list of already known existential risks includes both natural and anthropological risks. Today’s technological advancements have created more anthropological risks, and there are good reasons to believe that they will continue to do so. Bostrom argues in <a href=\"https://www.sciencedirect.com/science/article/pii/S0016328720300604\"><u>The Fragile World Hypothesis<\/u><\/a> that continuous technological development will increase systemic fragility, which can be a source of catastrophic or existential risk. In the Precipice, Toby Ord estimates the chances of existential catastrophe within the next 100 years at one in six. We have already been dangerously close to global catastrophe, e.g. 
when <a href=\"https://80000hours.org/2012/02/26th-of-september-petrov-day/\"><u>Stanislav Petrov<\/u><\/a> potentially singlehandedly avoided a global nuclear war in 1983 when he did not launch missiles in response to the warning system reporting a US missile launch. To prevent such close calls from happening in the future, we need to gain knowledge about both known and unknown risks and solutions to them. <\/p><p>In the Precipice, Ord proposes that reaching existential security is the first of three steps to optimize the future of human civilization. Reaching existential security includes both eliminating immediate dangers, potential future risks, and establishing long-lasting safeguards. For example, switching to renewable energy sources, electric or hydrogen-based fuel, and clean meat, are ways to safeguard against catastrophic <a href=\"https://www.mckinsey.com/business-functions/sustainability/our-insights/climate-risk-and-response-physical-hazards-and-socioeconomic-impacts\"><u>climate change<\/u><\/a>. This is one risk that 80,000 Hours include in their view of the world’s <a href=\"https://80000hours.org/problem-profiles/\"><u>most pressing problems<\/u><\/a>. 80,000 Hours’ list also includes <a href=\"https://80000hours.org/problem-profiles/positively-shaping-artificial-intelligence/\"><u>positively shaping the development of artificial intelligence<\/u><\/a>. This can be positively influenced by investing in technical research and improving governmental strategy. 
Another priority area is reaching <a href=\"https://80000hours.org/problem-profiles/nuclear-security/\"><u>nuclear security<\/u><\/a>, which includes shrinking nuclear stockpiles and improving systems and communication to avoid depending on people acting like Petrov in the case of false warnings.<i> <\/i>Another priority catastrophic risk area in the EA movement is <a href=\"https://www.openphilanthropy.org/research/cause-reports/biosecurity\"><u>biorisk and pandemic preparedness<\/u><\/a>, which is one of the focus areas of the Open Philanthropy Project. In addition to protecting against already known risks, humanity should research potential future risks and use forecasting principles to prepare for them. <\/p><p>When we have reached existential security, Ord proposes that the next steps should be <\/p><ol><li>a long reflection where we determine what kind of future we want to create and how to do so, and<\/li><li>achieving our full potential.<\/li><\/ol><p>Thus, Ord argues that existential security should take priority over other objectives described in this article, as it is more urgent.<\/p><p>There are a wide range of actions that can be taken to mitigate catastrophic and existential risks. As mentioned, these actions mainly include eliminating immediate dangers and establishing long-lasting safeguards. The lists below are partially based on the work by <a href=\"https://www.gcrpolicy.com/risk-management\"><u>Global Catastrophic Risk Policy<\/u><\/a>. <\/p><p><strong>Reduce the probability of specific risks<\/strong><\/p><p>The most direct course of action to avoid catastrophe is to reduce the probability of catastrophic or existential risks. 
Some suggestions of risks and how to reduce them are: <\/p><ul><li>Reducing the potential for both intentional and unintentional use of nuclear weapons through improving early warning systems, reducing the number of nuclear warheads and the number of people having access to them.<\/li><li>Strengthening preparedness against pandemics by improving early warning systems, implementing global procedures for limiting spread, and shortening vaccine development timelines. We can also prepare for pandemics by developing vaccines for diseases with high pandemic potential.<\/li><li>Mitigating climate change by curbing CO<sub>2<\/sub> emissions through technological development or policy changes. Other methods include climate engineering actions such as removing CO<sub>2<\/sub> from the atmosphere.<\/li><\/ul><p><strong>Improve risk management frameworks<\/strong><\/p><p>Another approach is to improve risk management frameworks in such a way that we are prepared and able to react better to future risks. Some examples are: <\/p><ul><li>Developing a centralized all-hazard national risk assessment process that is adaptable to risks in a variety of domains.<\/li><li>Developing a risk prioritization framework to evaluate vulnerabilities, and the impact of possible adverse outcomes.<\/li><li>Deconflicting risk ownership between government stakeholders: Set one department or agency as the primary owner for each risk, with clear responsibilities for mitigation, preparation and response.<\/li><li>Appointing a “national risk officer” responsible for overseeing the national risk assessment process and coordinating mitigation efforts.<\/li><\/ul><p><strong>Increase resilience of critical systems<\/strong><\/p><p>We can also limit the potential harm done by catastrophic risks or mitigate risks by increasing the resilience of critical systems. 
Some examples of how to increase critical system resilience are: <\/p><ul><li>Increasing emergency storage capacity of items like food, fuel and medicine at secure locations.<\/li><li>Developing more resilient crops and protecting critical infrastructure assets against disasters both natural and anthropogenic.<\/li><li>Diversifying sourcing to e.g. ensure that digital communication systems tolerate power failures.<\/li><li>Hardening assets such as crops by making them more resilient.<\/li><\/ul><h2>3.2 Build inclusive progress through long-lasting and well-functioning institutions<\/h2><p>Another approach to positively shift the trajectory of civilization is to increase the rate of progress, and make progress more inclusive. Continuous progress can improve human life quality and create a flourishing future for people of diverse backgrounds. Collison and Cowen define <a href=\"https://www.theatlantic.com/science/archive/2019/07/we-need-new-science-progress/594946/\"><u>progress<\/u><\/a> as economic, technological, scientific, cultural or organizational advancements that transform our lives and raise our living standard. This definition is broader than the typical economic definition focused on measuring GDP growth as a proxy for progress. In particular, it includes the opportunity to increase progress by increasing our ability to convert economic wealth into wellbeing. For this reason, we will use the term “economic progress” when referring to GDP growth, while “progress” alone will refer to the broader definition. 
Moreover, “wellbeing”, “welfare” and “happiness” are used interchangeably, and it is assumed that this is closer to a true measure of progress (in the broader sense) than purely economic metrics.<\/p><p><strong>There is still much we don’t know about progress<\/strong><\/p><p>There is an ongoing debate about whether there are fundamental limits to economic progress (and indeed <a href=\"https://www.researchgate.net/publication/348836201_What_is_the_Upper_Limit_of_Value\"><u>if there are upper limits of progress overall<\/u><\/a>) - if, at some point in the future, GDP growth must slow down and approach zero. If there are limits to economic progress, then increasing the rate of economic progress will only speed up the arrival of a zero-growth world of abundance. This could severely limit the potential value of increasing the rate of economic progress.<\/p><p>If there is no immediate limit to economic progress, there are good reasons to believe that it could continue indefinitely, and improve human welfare in the process. Human quality of life has generally improved significantly since the Industrial Revolution. This strong correlation between GDP growth and improved life quality has been well documented by e.g. <a href=\"https://www.gapminder.org/\"><u>Gapminder<\/u><\/a>. For example, the <a href=\"https://ourworldindata.org/a-history-of-global-living-conditions-in-5-charts\"><u>percentage of people living in extreme poverty<\/u><\/a> has decreased from about 90% in 1820 to 10% in 2015. It is also argued that a <a href=\"https://www.worksinprogress.co/issue/securing-posterity/\"><u>stagnation in growth is risky<\/u><\/a> in regards to existential risks. GDP growth is far from the only factor that influences progress. Other examples include improved economic distribution, sustainable development and effective transforming of economic growth to human welfare. 
<\/p><p>There are also ongoing discussions about how to best measure (a broader definition of) progress, if progress is slowing down or accelerating, and how existential risk is affected by the rate of economic progress. This is briefly covered in the <a href=\"https://globalprioritiesinstitute.org/wp-content/uploads/GPI-research-agenda-version-2.1.pdf\"><u>GPI research agenda<\/u><\/a>, and somewhat more extensively in sources therein.<\/p><p>To improve our understanding of how progress occurs, Collison and Cowen have proposed to develop “<a href=\"https://www.theatlantic.com/science/archive/2019/07/we-need-new-science-progress/594946/\"><u>Progress Studies<\/u><\/a>” as a field of research. According to Collison and Cowen, progress studies investigates successful institutions, people, organizations and cultures to find common factors that are linked to progress. If we succeed in finding common factors between Ancient Greece, The Industrial Revolution and Silicon Valley, we can improve progress by acting accordingly. Due to the immaturity of progress studies, we have yet to find such common factors. However, scientific reform and interventions as described above are seemingly very promising. <\/p><p><strong>General ideas for how to increase progress<\/strong><\/p><p>There are three main paths to increasing inclusive progress: increasing economic growth, making progress more inclusive, and converting economic wealth into welfare. The first path has been promoted by e.g. <a href=\"https://80000hours.org/podcast/episodes/tyler-cowen-stubborn-attachments/\"><u>Tyler Cowen, arguing<\/u><\/a> that it is among the most powerful tools to improve the future because economic growth compounds over time.<\/p><p>Making progress more inclusive by redistributing resources or social status can increase total human happiness. 
According to 80,000 Hours, <a href=\"https://80000hours.org/articles/money-and-happiness/\"><u>happiness<\/u><\/a> increases <a href=\"https://www.pnas.org/content/118/4/e2016976118\"><u>logarithmically <\/u><\/a>when one becomes wealthier, which means that it is a lot more cost-effective to increase the wealth of poor people. Therefore, redistribution of progress is also very important toward effectively and positively shifting the trajectory of humanity. <\/p><p>While there is a strong correlation between economic wealth and wellbeing, it is not all that matters. Some countries have higher levels of happiness than others, despite being poorer - for instance, self-reported <a href=\"https://ourworldindata.org/grapher/gdp-vs-happiness\"><u>happiness levels in Costa Rica are higher than in Luxembourg, while GDP is 6x lower<\/u><\/a>. It is plausible that we can find ways to make happiness cheaper, so that a similar level of economic wealth can be translated into more welfare.<\/p><p>It is hard to know the counterfactual impact of interventions focused on any of these paths. While catastrophic risk mitigation is focused on changing the outcomes of forks in the path of civilization, interventions for progress to a larger degree rely on shifting long-term trends that are hard to reason about empirically. So far, hypotheses for effective interventions have been generated through the use of some heuristics, including:<\/p><ul><li>Institutions can coordinate the efforts of individuals, and thereby multiply their total impact. For this reason, changes in institutional designs are “hingey” - a limited effort to improve an institution can have lasting effects at scale<\/li><li>Some institutional reforms matter more than others. In particular, longer-lasting institutions (examples may include the American Constitution or Ivy League schools) can maintain their influence over time, so reforming these institutions is a way to have a more durable impact. 
This is a version of “<a href=\"https://www.effectivealtruism.org/articles/a-proposed-adjustment-to-the-astronomical-waste-argument-nick-beckstead/\"><u>path-dependent trajectory changes<\/u><\/a>” advocated for by Nick Beckstead, and further discussed in e.g. Eliezer Yudkowsky’s <a href=\"https://equilibriabook.com/\"><u>Inadequate Equilibria<\/u><\/a><\/li><li>Moreover, more influential institutions (e.g. measured in budget size, number of members or technological capabilities) typically offer a larger potential for impact.<\/li><li>Finally, reforms that create positive feedback loops (e.g. by improving processes that are essential for progress, like science, innovation or decision making) accumulate over time<\/li><\/ul><p><strong>Specific proposals for how to increase inclusive progress<\/strong><\/p><p>It is commonly argued that the scientific revolution has been one of the key drivers of progress in the last centuries, but today many scholars criticize the modern academic institutions for being sub-optimal. For this reason, interventions aiming to improve academic research may be one promising category to increase the rate of progress. Some examples among many interventions aiming to improve academic research include <a href=\"https://www.replicationmarkets.com/\"><u>Replication Markets<\/u><\/a>, <a href=\"https://arxiv.org/\"><u>ArXiv<\/u><\/a>, <a href=\"https://www.semanticscholar.org/\"><u>Semantic Scholar<\/u><\/a> and <a href=\"https://ought.org/\"><u>Ought<\/u><\/a>. Replication Markets use forecasting to estimate a research claim’s chance of replication. ArXiv and Semantic Scholar are archives with scientific papers, and Ought tries to figure out which questions humans can delegate to artificial intelligence. 
Additionally, “scientific research” is one of the top cause areas of the Open Philanthropy Project.<\/p><p>All of the abovementioned interventions are improving academic progress, but there are also non-academic interventions that may increase progress. Some examples from the US Policy focus area of Open Philanthropy Project (Open Phil) include:<\/p><ul><li><a href=\"https://www.foreignaffairs.com/articles/united-states/2020-09-14/americas-exceptional-housing-crisis\"><u>Urban zoning/land use reform<\/u><\/a>, which is meant to reduce the costs of living in cities. This may increase progress because it allows people to move to areas with great economic opportunities<\/li><li><a href=\"https://www.openphilanthropy.org/focus/us-policy/macroeconomic-policy\"><u>Macroeconomic stabilization policy<\/u><\/a>, where Open Philanthropy funds advocacy initiatives focused on emphasizing the importance of alleviating suffering and lost output from unemployment during economic crises<\/li><li><a href=\"https://www.openphilanthropy.org/focus/us-policy/immigration-policy\"><u>Immigration policy reform<\/u><\/a>, which may both provide economic opportunities for people from lower-income countries and increase long-term economic growth<\/li><li><a href=\"https://forum.effectivealtruism.org/posts/8Rn2gw7escCc2Rmb7/thoughts-on-electoral-reform\"><u>Electoral reform<\/u><\/a>: e.g. campaign financing rules, election security measures, and improved voting systems (e.g. 
<a href=\"https://electionscience.org/approval-voting-101/\"><u>approval voting<\/u><\/a> or <a href=\"https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2003531\"><u>quadratic voting<\/u><\/a>), to better ensure that elected officials represent the electorate and reduce the <a href=\"https://forum.effectivealtruism.org/posts/LpkXtFXdsRd4rG8Kb/reducing-long-term-risks-from-malevolent-actors\"><u>risk of malevolent leaders<\/u><\/a><\/li><\/ul><h2>3.3 What about sustainability?<\/h2><p>Outside of the effective altruism movement, sustainability is one of the most common cause areas for people concerned about the welfare of future generations. Significant resources are invested in ensuring that our GHG emissions are brought down, that our depletion of natural resources and destruction of species habitats are slowed, and that state budgets are fiscally balanced across generations. Thus it may seem strange that sustainability has played such a small role in this article.<\/p><p>Our argument, borrowed from <a href=\"http://www.stafforini.com/blog/bostrom/\"><u>Bostrom <\/u><\/a>and others in the EA movement, is that unsustainabilities are bad if they exacerbate catastrophic risk, or if they slow down the rate of inclusive progress. <a href=\"https://www.mckinsey.com/business-functions/sustainability/our-insights/climate-risk-and-response-physical-hazards-and-socioeconomic-impacts\"><u>Research by the McKinsey Global Institute<\/u><\/a> shows that unmitigated climate change can be harmful in both of these ways. <a href=\"https://www.mckinsey.com/industries/public-and-social-sector/our-insights/the-social-contract-in-the-21st-century\"><u>Further research<\/u><\/a> by the McKinsey Global Institute demonstrates that the social contract is eroding across developed economies, and that economic outcomes for individuals are worsening as a consequence. 
In cases like these where the unsustainabilities are expected to create large amounts of human suffering, we should work hard to become more sustainable.<\/p><h1><strong>4.0 Summary<\/strong><\/h1><p>There are several objectives of longtermist policy making. We have presented three categories of objectives, where the objectives in the bottom layers are potential enablers of the upper objectives. All of them are relevant to the necessary prioritization of future generations, given that longtermism is plausible. <\/p><p>Each of the objectives and their sub-objectives are well covered in existing literature, but to our knowledge they have not been presented in this structure before. In this article we have summarized some of the relevant parts of the literature, in the hope of providing an accessible introduction to the field. Furthermore, we hope that some points in this article can serve as coordination points for more experienced longtermists - e.g. when referring to which parts of longtermist policy making they are attempting to improve, and why.<\/p>","commitMessage":"Move images to CDN","wordCount":6543,"htmlHighlight":"<p><i>Estimated reading time: 20-30 minutes<\/i><\/p><p><i>-We would like to thank the following for their excellent feedback and guidance throughout this article, in no particular order: Tyler M. John, Max Stauffer, Aksel Braanen Sterri, Eirik Mofoss, Samuel Hilton, Konrad Seifert, Tildy Stokes, Erik Aunvåg Matsen and Marcel Grewal Sommerfelt.<\/i><\/p><h1><strong>0.0 Introduction<\/strong><\/h1><p>This article is co-authored by five members of Effective Altruism Norway as a pilot project to test if we can contribute in a valuable way to the emerging field of longtermism and policy making.<\/p><p>In the article we summarize some of the work that is being done in the emerging field of longtermism, using a new structure to classify the different interventions (see Figure 1: Three objectives of longtermist policy making). 
Then, for each objective we describe related challenges and potential solutions, and give some examples of current ongoing work.<\/p><p>We hope that the new structure can help improve coordination in this emerging field, and enable improved prioritization of interventions. If this structure resonates well with established experts in the field, we are happy to write up a shorter version of this article that could serve as an introduction to longtermist policy making for non-experts. Already, at 17 pages this article is one fourth of the length of the <a href=\"https://globalprioritiesinstitute.org/wp-content/uploads/GPI-research-agenda-version-2.1.pdf\"><u>GPI research agenda<\/u><\/a>, which covers many of the same topics. <\/p><p>Finally, we have emphasized some aspects of longtermist policy making that we believe have been underemphasized in the effective altruism- and longtermism communities in the past. Examples include scenario planning, robust decision making and redteaming among others, which we have described together with forecasting in section 2.1 as essential epistemic capabilities for long-term governance. These tools are complementary to forecasting-based epistemic capabilities that the EA/longtermist communities already promote, and we hope that they will receive increased attention going forward.<\/p><p>We hope to produce 1-3 further articles on similar topics through 2021, and welcome any experts who have capacity to provide feedback on our work.<\/p><p>--------------------------------------------------------------------<\/p><p>In 2019 William MacAskill proposed a definition of the term <a href=\"https://forum.effectivealtruism.org/posts/qZyshHCNkjs3TvSem/longtermism\"><u>longtermism<\/u><\/a> as the<i> view that those who live at future times matter just as much, morally, as those who live today<\/i>. There are many reasons to believe that actions ... 
<\/p>","plaintextDescription":"Estimated reading time: 20-30 minutes\n\n-We would like to thank the following for their excellent feedback and guidance throughout this article, in no particular order: Tyler M. John, Max Stauffer, Aksel Braanen Sterri, Eirik Mofoss, Samuel Hilton, Konrad Seifert, Tildy Stokes, Erik Aunvåg Matsen and Marcel Grewal Sommerfelt.\n\n\n0.0 Introduction\nThis article is co-authored by five members of Effective Altruism Norway as a pilot project to test if we can contribute in a valuable way to the emerging field of longtermism and policy making.\n\nIn the article we summarize some of the work that is being done in the emerging field of longtermism, using a new structure to classify the different interventions (see Figure 1: Three objectives of longtermist policy making). Then, for each objective we describe related challenges and potential solutions, and give some examples of current ongoing work.\n\nWe hope that the new structure can help improve coordination in this emerging field, and enable improved prioritization of interventions. If this structure resonates well with established experts in the field, we are happy to write up a shorter version of this article that could serve as an introduction to longtermist policy making for non-experts. Already, at 17 pages this article is one fourth of the length of the GPI research agenda, which covers many of the same topics. \n\nFinally, we have emphasized some aspects of longtermist policy making that we believe have been underemphasized in the effective altruism- and longtermism communities in the past. Examples include scenario planning, robust decision making and redteaming among others, which we have described together with forecasting in section 2.1 as essential epistemic capabilities for long-term governance. 
These tools are complementary to forecasting-based epistemic capabilities that the EA/longtermist communities already promote, and we hope that they will receive increased attention going forward.\n\nWe hope to produce 1-3 furt"},"SocialPreviewType:t4Lqh7GHBM9YyEDg8":{"_id":"t4Lqh7GHBM9YyEDg8","__typename":"SocialPreviewType","text":null,"imageUrl":"https://lh3.googleusercontent.com/7clnMCxK9m8ZkWkReSGxs3SZLE4JBN_bfqwimeeqPGFagVamXY4sZA7byYDRDPx2-_Z0167i_SyRowKy6z3FbYC93dfteYXUJhQfbLlx8HjHSW-ggicqZWu0ZLenDX9P2-_PlRUF"},"Tag:tYPXXENLbJxuMAaxq":{"_id":"tYPXXENLbJxuMAaxq","__typename":"Tag","userId":"ytdLMasx3XrLz8oBA","name":"US policy","shortName":null,"slug":"us-policy","core":false,"postCount":156,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-07-04T18:10:51.019Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:zvREBuHHKFwxgQ9qb":{"_id":"zvREBuHHKFwxgQ9qb","__typename":"Tag","userId":"ytdLMasx3XrLz8oBA","name":"UK policy","shortName":null,"slug":"uk-policy","core":false,"postCount":44,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-07-04T18:18:13.993Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:qHXSQkqM7ynEc5qx9":{"_id":"qHXSQkqM7ynEc5qx9","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Global governance","shortName":null,"slug":"global-governance","core":false,"postCount":95,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-08-25T07:24:42.156Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:Gtv3Reg6kzMZsh5Gs":{"_id":"Gtv3Reg6kzMZsh5Gs","__typename":"Tag","userId":"LKQAieBfNe2Jjo4EG","name":"Electoral 
politics","shortName":null,"slug":"electoral-politics","core":false,"postCount":114,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-04-17T23:47:45.060Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:MDSk2m6ZtzqdJaS36":{"_id":"MDSk2m6ZtzqdJaS36","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Political polarization","shortName":null,"slug":"political-polarization","core":false,"postCount":52,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-08-04T09:09:16.426Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:PJhKREgw5K77hKXAp":{"_id":"PJhKREgw5K77hKXAp","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Democracy","shortName":null,"slug":"democracy","core":false,"postCount":68,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-07-29T14:40:27.469Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:KJv36NfEfPRLXiAkQ":{"_id":"KJv36NfEfPRLXiAkQ","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Electoral reform","shortName":null,"slug":"electoral-reform","core":false,"postCount":48,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-06-26T05:13:48.078Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:d4bQXgZhDP43eJMwp":{"_id":"d4bQXgZhDP43eJMwp","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Longtermist institutional 
reform","shortName":null,"slug":"longtermist-institutional-reform","core":false,"postCount":45,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-11-23T01:31:15.831Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false,"isRead":false,"parentTag":{"__ref":"Tag:of9xBvR3wpbp6qsZC"},"subTags":[],"description":{"__ref":"Revision:d4bQXgZhDP43eJMwp_description"},"canVoteOnRels":null},"Tag:iW9mRiqT3xvmaCbHz":{"_id":"iW9mRiqT3xvmaCbHz","__typename":"Tag","userId":"LMgZyi4w3XoYz3tM5","name":"International relations","shortName":null,"slug":"international-relations","core":false,"postCount":112,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-06-05T09:59:31.192Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:i3yarKsx6qW9jrvLo":{"_id":"i3yarKsx6qW9jrvLo","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"California effect","shortName":null,"slug":"california-effect","core":false,"postCount":15,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-06-12T08:43:11.285Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:uDAGFwZLscHKfoubc":{"_id":"uDAGFwZLscHKfoubc","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Improving institutional 
decision-making","shortName":null,"slug":"improving-institutional-decision-making","core":false,"postCount":249,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-08-25T07:16:54.345Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false,"isRead":false,"parentTag":{"__ref":"Tag:of9xBvR3wpbp6qsZC"},"subTags":[],"description":{"__ref":"Revision:uDAGFwZLscHKfoubc_description"},"canVoteOnRels":null},"Tag:9QRdwZ2DDGaHQmJHx":{"_id":"9QRdwZ2DDGaHQmJHx","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Law","shortName":null,"slug":"law","core":false,"postCount":159,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-08-31T06:58:37.501Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Revision:of9xBvR3wpbp6qsZC_description":{"_id":"of9xBvR3wpbp6qsZC_description","__typename":"Revision","htmlHighlight":"<p>The <strong>policy<\/strong> topic is very broad, covering any post about improving government policy (in developing and developed countries alike).<\/p><h2>Improving policy<\/h2><p>Governments are typically committed to the notion that their policies should be effective. This means that members of the effective altruist community can be in a good position to help governments reach their aims. Moreover, the fact that governments are very powerful, and control significant proportions of world GDP, suggests that helping policy-makers can be a high-value strategy. 
This strategy can be pursued either from the <i>outside<\/i>—by effective altruist organizations which advise policy-makers—or from the <i>inside<\/i>—by policy-makers who try to do the most good possible.<\/p><p>Some of the highest-impact reforms affect people who are less able to advocate for their own interests, such as <a href=\"https://forum.effectivealtruism.org/tag/criminal-justice-reform\">prisoners<\/a> or <a href=\"https://forum.effectivealtruism.org/tag/immigration-reform\">migrants<\/a>. Other policies, like <a href=\"https://forum.effectivealtruism.org/tag/macroeconomic-policy\">macroeconomic policy<\/a> and <a href=\"https://forum.effectivealtruism.org/tag/land-use-reform\">land use reform<\/a>, have effects that are somewhat diffuse and non-obvious, which makes it difficult to assemble groups to lobby for change. The more mainstream focus areas of <a href=\"https://forum.effectivealtruism.org/tag/global-poverty\">global poverty and health<\/a>, <a href=\"https://forum.effectivealtruism.org/tag/animal-welfare-1\">animal welfare<\/a> and <a href=\"https://forum.effectivealtruism.org/tag/existential-risk\">existential risk<\/a> could also be addressed using political advocacy.<\/p><h2>Further reading<\/h2><p>Bowerman, Niel (2014) <a href=\"https://forum.effectivealtruism.org/posts/n5CNeo9jxDsCit9dj/good-policy-ideas-that-won-t-happen-yet\">Good policy ideas that won’t happen (yet)<\/a>, <i>Effective Altruism Forum<\/i>, September 14.<br><i>A look at the viability of changing public policy on certain issues.<\/i><\/p><p>Clough, Emily (2015) <a href=\"https://bostonreview.net/world/emily-clough-effective-altruism-ngos\">Effective altruism’s political blind spot<\/a>, <i>Boston Review<\/i>, July 14.<br><i>An example of one of the main criticisms of effective altruism: that it paid insufficient attention to political advocacy in the past.<\/i><\/p><p>Farquhar, Sebastian (2016) <a 
href=\"https://www.youtube.com/watch?v=NB_edlOrPOU&list=PLwp9xeoX5p8P_O5rQg-SNMwQOIvOPF5U2&index=10\">Should EAs do policy?<\/a>, <i>Effective Altruism Global<\/i>, August 5.<br><i>A talk at EA Global 2016 with an overview of why policy work might be effective.<\/i><\/p><p>Global Priorities Project (2015) <a href=\"http://globalprioritiesproject.org/2015/12/new-uk-aid-strategy-prioritising-research-and-crisis-response/\">New UK aid strategy – prioritising research and crisis response<\/a>, <i>Global Priorities Project<\/i>, December 2.<br><i>An example of effective altruist policy work.<\/i><\/p><p>Karnofsky, Holden (2013) <a href=\"https://www.openphilanthropy.org/blog/track-record-policy-oriented-philanthropy\">The track record of policy-oriented philanthropy<\/a>, <i>Open Philanthropy<\/i>, November 6.<br><i>Articles on Open Philanthropy about policy and philanthropy.<\/i><\/p><p>Open Philanthropy (2016) <a href=\"https://www.openphilanthropy.org/focus/us-policy\">U.S. policy<\/a>, <i>Open Philanthropy<\/i>.<br><i>The Philanthropy Project's assesment of policy as a focus area<\/i><\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/appg-on-future-generations\">APPG on Future Generations<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/democracy-defense-fund\">Democracy Defense Fund<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/improving-institutional-decision-making\">improving institutional decision-making<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/longtermist-institutional-reform\">longtermist institutional reform<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/standards-and-regulation\">standards and regul<\/a>... 
<\/p>"},"Tag:of9xBvR3wpbp6qsZC":{"_id":"of9xBvR3wpbp6qsZC","__typename":"Tag","isRead":false,"parentTag":null,"subTags":[{"__ref":"Tag:tYPXXENLbJxuMAaxq"},{"__ref":"Tag:zvREBuHHKFwxgQ9qb"},{"__ref":"Tag:qHXSQkqM7ynEc5qx9"},{"__ref":"Tag:Gtv3Reg6kzMZsh5Gs"},{"__ref":"Tag:MDSk2m6ZtzqdJaS36"},{"__ref":"Tag:PJhKREgw5K77hKXAp"},{"__ref":"Tag:KJv36NfEfPRLXiAkQ"},{"__ref":"Tag:d4bQXgZhDP43eJMwp"},{"__ref":"Tag:iW9mRiqT3xvmaCbHz"},{"__ref":"Tag:i3yarKsx6qW9jrvLo"},{"__ref":"Tag:uDAGFwZLscHKfoubc"},{"__ref":"Tag:9QRdwZ2DDGaHQmJHx"}],"description":{"__ref":"Revision:of9xBvR3wpbp6qsZC_description"},"canVoteOnRels":null,"userId":"jd3Bs7YAT2KqnLxYD","name":"Policy","shortName":null,"slug":"policy","core":true,"postCount":1928,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-22T09:52:14.574Z","wikiOnly":false,"deleted":false,"isSubforum":true,"noindex":false},"Tag:4neBBrvXansMnDdfi":{"_id":"4neBBrvXansMnDdfi","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Great power conflict","shortName":null,"slug":"great-power-conflict","core":false,"postCount":68,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-05-13T15:39:21.158Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:KdxaNG2Fwkgh4ywFm":{"_id":"KdxaNG2Fwkgh4ywFm","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Nuclear security","shortName":null,"slug":"nuclear-security","core":false,"postCount":262,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-03-30T18:09:40.448Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:uu4eksgjZX5Ru8XHm":{"_id":"uu4eksgjZX5Ru8XHm","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Nuclear 
winter","shortName":null,"slug":"nuclear-winter","core":false,"postCount":96,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-22T13:59:34.576Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:BWHXesTAEwQNmfFbN":{"_id":"BWHXesTAEwQNmfFbN","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Nuclear disarmament movement","shortName":null,"slug":"nuclear-disarmament-movement","core":false,"postCount":8,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-22T13:58:00.173Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:AMEgA2KBBEXzjK5Cm":{"_id":"AMEgA2KBBEXzjK5Cm","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Weapons of mass destruction","shortName":null,"slug":"weapons-of-mass-destruction","core":false,"postCount":38,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-05-27T12:17:46.221Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:DwJY53daQyQq8eWo7":{"_id":"DwJY53daQyQq8eWo7","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Armed 
conflict","shortName":null,"slug":"armed-conflict","core":false,"postCount":108,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-08-19T12:29:49.098Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:zHqdAsiiXiyaS3zw9":{"_id":"zHqdAsiiXiyaS3zw9","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Asteroids","shortName":null,"slug":"asteroids","core":false,"postCount":17,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-12-22T13:50:39.993Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:srLqySqxyZRhz8DJX":{"_id":"srLqySqxyZRhz8DJX","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Supervolcano","shortName":null,"slug":"supervolcano","core":false,"postCount":15,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-26T15:05:48.484Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:kWNnWtggZuzbatasx":{"_id":"kWNnWtggZuzbatasx","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Geomagnetic storms","shortName":null,"slug":"geomagnetic-storms","core":false,"postCount":11,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-05-17T11:58:03.676Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:JsjbmcRdJheubi2F3":{"_id":"JsjbmcRdJheubi2F3","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Risks from malevolent 
actors","shortName":null,"slug":"risks-from-malevolent-actors","core":false,"postCount":31,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-07-30T14:09:40.384Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:wvxSKgKkxqptdenFm":{"_id":"wvxSKgKkxqptdenFm","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Existential risk factor","shortName":null,"slug":"existential-risk-factor","core":false,"postCount":35,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-12-28T13:59:19.157Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:DGhyx7Bk33Ghi5koD":{"_id":"DGhyx7Bk33Ghi5koD","__typename":"Tag","userId":"QNJvSiGxi54JMpEMb","name":"Autonomous weapon","shortName":null,"slug":"autonomous-weapon","core":false,"postCount":12,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-11-12T01:44:44.469Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:RysvCQBYTie5u62rH":{"_id":"RysvCQBYTie5u62rH","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Civilizational collapse","shortName":null,"slug":"civilizational-collapse","core":false,"postCount":117,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-08-04T09:23:11.475Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:MY2hgLkhKPj2bb3tj":{"_id":"MY2hgLkhKPj2bb3tj","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Human 
extinction","shortName":null,"slug":"human-extinction","core":false,"postCount":33,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-12-22T13:38:21.354Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:pdKwPrcTEyJ6BgD99":{"_id":"pdKwPrcTEyJ6BgD99","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Dystopia","shortName":null,"slug":"dystopia","core":false,"postCount":40,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-08-04T09:36:45.913Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:Gedmfw35JZLHM63oM":{"_id":"Gedmfw35JZLHM63oM","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Existential catastrophe","shortName":null,"slug":"existential-catastrophe-1","core":false,"postCount":5,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-06-12T13:51:08.931Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:QvoqEEmb3FAAa5Cki":{"_id":"QvoqEEmb3FAAa5Cki","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Hellish existential catastrophe","shortName":null,"slug":"hellish-existential-catastrophe","core":false,"postCount":5,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-12-29T13:37:18.439Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:X6ktJyvZu4BSCviZF":{"_id":"X6ktJyvZu4BSCviZF","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Natural existential 
risk","shortName":null,"slug":"natural-existential-risk","core":false,"postCount":9,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-21T13:47:56.022Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:bTuCPLohLYPAeorzD":{"_id":"bTuCPLohLYPAeorzD","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Resilient food","shortName":null,"slug":"resilient-food","core":false,"postCount":61,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-06-26T15:46:04.323Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:yc6Tim37shrJbubQk":{"_id":"yc6Tim37shrJbubQk","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Differential progress","shortName":null,"slug":"differential-progress","core":false,"postCount":73,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-08-04T09:31:22.673Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:qSY46CSmJxwWxjDon":{"_id":"qSY46CSmJxwWxjDon","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Broad vs. 
narrow interventions","shortName":null,"slug":"broad-vs-narrow-interventions","core":false,"postCount":7,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-12-22T14:13:42.731Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:xGGyQvqoisJw7hhqi":{"_id":"xGGyQvqoisJw7hhqi","__typename":"Tag","userId":"9TTdmC4N3iwGcCnhW","name":"Emergency response","shortName":null,"slug":"emergency-response","core":false,"postCount":14,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-04-08T23:48:29.548Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:kcjHcrJJoz5rQX4am":{"_id":"kcjHcrJJoz5rQX4am","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Vulnerable world hypothesis","shortName":null,"slug":"vulnerable-world-hypothesis","core":false,"postCount":26,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-27T13:19:26.203Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:xSZDegEMcbexn84K5":{"_id":"xSZDegEMcbexn84K5","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Warning shot","shortName":null,"slug":"warning-shot","core":false,"postCount":16,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-27T13:20:15.967Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:gP4rpeK3CoE7mfrxK":{"_id":"gP4rpeK3CoE7mfrxK","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Criticism of longtermism and existential risk 
studies","shortName":null,"slug":"criticism-of-longtermism-and-existential-risk-studies","core":false,"postCount":109,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-05-30T19:45:44.439Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:4u8AJBtxz9zPpLnRr":{"_id":"4u8AJBtxz9zPpLnRr","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Space colonization","shortName":null,"slug":"space-colonization","core":false,"postCount":71,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-25T13:54:33.362Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:3yv56vTrjpGn79Pko":{"_id":"3yv56vTrjpGn79Pko","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"S-risk","shortName":null,"slug":"s-risk","core":false,"postCount":114,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-12-22T14:07:44.632Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Revision:ee66CtAMYurQreWBH_description":{"_id":"ee66CtAMYurQreWBH_description","__typename":"Revision","htmlHighlight":"<p>An <strong>existential risk<\/strong> is a risk that threatens the destruction of the long-term potential of life.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefs39fj4bj7yr\"><sup><a href=\"#fns39fj4bj7yr\">[1]<\/a><\/sup><\/span> An existential risk could threaten the <a href=\"https://forum.effectivealtruism.org/topics/human-extinction\">extinction of humans<\/a> (and other sentient beings), or it could threaten some other unrecoverable <a href=\"https://forum.effectivealtruism.org/topics/civilizational-collapse\">collapse<\/a> or permanent failure to achieve a potential good state. 
<a href=\"https://forum.effectivealtruism.org/tag/natural-existential-risk\">Natural risks<\/a> such as those posed by <a href=\"https://forum.effectivealtruism.org/tag/asteroids\">asteroids<\/a> or <a href=\"https://forum.effectivealtruism.org/tag/supervolcano\">supervolcanoes<\/a> could be existential risks, as could <a href=\"https://forum.effectivealtruism.org/tag/anthropogenic-existential-risks\">anthropogenic (human-caused) risks<\/a> like accidents from <a href=\"https://forum.effectivealtruism.org/tag/global-catastrophic-biological-risk\">synthetic biology<\/a> or <a href=\"https://forum.effectivealtruism.org/topics/ai-alignment\">unaligned<\/a> <a href=\"https://forum.effectivealtruism.org/tag/ai-risk\">artificial intelligence<\/a>. <\/p><p><a href=\"https://forum.effectivealtruism.org/topics/estimation-of-existential-risk\">Estimating the probability of existential risk<\/a> from different <a href=\"https://forum.effectivealtruism.org/topics/existential-risk-factor\">factors<\/a> is difficult, but there are some estimates.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefs39fj4bj7yr\"><sup><a href=\"#fns39fj4bj7yr\">[1]<\/a><\/sup><\/span> <\/p><p>Some view reducing existential risks as a key moral priority, for a variety of reasons.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefflve249rqxn\"><sup><a href=\"#fnflve249rqxn\">[2]<\/a><\/sup><\/span> Some people simply view the current estimates of existential risk as unacceptably high. 
Other authors argue that existential risks are especially important because the <a href=\"https://forum.effectivealtruism.org/tag/longtermism\">long-run future of humanity<\/a> matters a great deal.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefehnudz7v1f\"><sup><a href=\"#fnehnudz7v1f\">[3]<\/a><\/sup><\/span> Many believe that there is <a href=\"https://forum.effectivealtruism.org/tag/temporal-discounting\">no intrinsic moral difference<\/a> between the importance of a life today and one in a hundred years. However, there may be many more people in the future than there are now. Given these assumptions, existential risks threaten not only the beings alive right now, but also the enormous number of lives yet to be lived. One objection to this argument is that people have a special responsibility to other people currently alive that they do not have to people who have not yet been born.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefql2k3envp7\"><sup><a href=\"#fnql2k3envp7\">[4]<\/a><\/sup><\/span> Another objection is that, although it would in principle be important to manage, the risks are currently so unlikely and poorly understood that existential risk reduction is less cost-effective than work on other promising areas.<\/p><p>In <a href=\"https://forum.effectivealtruism.org/tag/the-precipice\"><i>The Precipice: Existential Risk and the Future of Humanity<\/i><\/a>, <a href=\"https://forum.effectivealtruism.org/tag/toby-ord\">Toby Ord<\/a> offers several <a href=\"https://forum.effectivealtruism.org/tag/policy\">policy<\/a> and <a href=\"https://forum.effectivealtruism.org/tag/research\">research<\/a> recommendations for handling existential risks:<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref52gvr4dqg9p\"><sup><a href=\"#fn52gvr4dqg9p\">[5]<\/a><\/sup><\/span><\/p><ul><li>Explore options for new <a href=\"https://forum.effectivealtruism.org/tag/global-governance\">international institutions<\/a> aimed 
at reducing existential risk, both incremental and revolutionary.<\/li><li>Investigate possibilities for making the deliberate or reckless imposition of <a href=\"https://forum.effectivealtruism.org/tag/human-extinction\">human extinction<\/a> risk an international crime.<\/li><li>Investigate possibilities for bringing the <a href=\"https://forum.effectivealtruism.org/topics/longtermist-institutional-reform\">representation of future generations<\/a> into national and international democratic institutions.<\/li><li>Each major world power should have an appointed senior government position responsible for registering and responding to existential risks that<\/li><\/ul>... "},"Tag:ee66CtAMYurQreWBH":{"_id":"ee66CtAMYurQreWBH","__typename":"Tag","isRead":false,"parentTag":null,"subTags":[{"__ref":"Tag:4neBBrvXansMnDdfi"},{"__ref":"Tag:KdxaNG2Fwkgh4ywFm"},{"__ref":"Tag:uu4eksgjZX5Ru8XHm"},{"__ref":"Tag:BWHXesTAEwQNmfFbN"},{"__ref":"Tag:AMEgA2KBBEXzjK5Cm"},{"__ref":"Tag:DwJY53daQyQq8eWo7"},{"__ref":"Tag:zHqdAsiiXiyaS3zw9"},{"__ref":"Tag:srLqySqxyZRhz8DJX"},{"__ref":"Tag:kWNnWtggZuzbatasx"},{"__ref":"Tag:JsjbmcRdJheubi2F3"},{"__ref":"Tag:wvxSKgKkxqptdenFm"},{"__ref":"Tag:DGhyx7Bk33Ghi5koD"},{"__ref":"Tag:RysvCQBYTie5u62rH"},{"__ref":"Tag:MY2hgLkhKPj2bb3tj"},{"__ref":"Tag:pdKwPrcTEyJ6BgD99"},{"__ref":"Tag:Gedmfw35JZLHM63oM"},{"__ref":"Tag:QvoqEEmb3FAAa5Cki"},{"__ref":"Tag:X6ktJyvZu4BSCviZF"},{"__ref":"Tag:bTuCPLohLYPAeorzD"},{"__ref":"Tag:yc6Tim37shrJbubQk"},{"__ref":"Tag:qSY46CSmJxwWxjDon"},{"__ref":"Tag:xGGyQvqoisJw7hhqi"},{"__ref":"Tag:kcjHcrJJoz5rQX4am"},{"__ref":"Tag:xSZDegEMcbexn84K5"},{"__ref":"Tag:gP4rpeK3CoE7mfrxK"},{"__ref":"Tag:4u8AJBtxz9zPpLnRr"},{"__ref":"Tag:3yv56vTrjpGn79Pko"}],"description":{"__ref":"Revision:ee66CtAMYurQreWBH_description"},"canVoteOnRels":null,"userId":"2kBP4gThRsNXB3WWX","name":"Existential 
risk","shortName":null,"slug":"existential-risk","core":true,"postCount":2717,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-28T05:56:21.918Z","wikiOnly":false,"deleted":false,"isSubforum":true,"noindex":false},"Revision:uDAGFwZLscHKfoubc_description":{"_id":"uDAGFwZLscHKfoubc_description","__typename":"Revision","htmlHighlight":"<p><strong>Improving institutional decision-making<\/strong> is a cause that focuses on increasing the technical quality and <a href=\"https://forum.effectivealtruism.org/topics/effective-altruism\">effective altruism<\/a> alignment of the most important decisions made by the world’s most important decision-making bodies.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefpg98poczmg\"><sup><a href=\"#fnpg98poczmg\">[1]<\/a><\/sup><\/span><\/p><h2>Improving institutions<\/h2><p>Institutions such as governments, companies, and charities control significant resources. One potentially effective way to do good, therefore, is to help institutions use these resources in more productive ways.<\/p><p>Members of the effective altruism community have employed this method extensively. For instance, they have tried to <a href=\"https://forum.effectivealtruism.org/tag/policy-change\">increase the attention policy-makers give<\/a> to <a href=\"https://forum.effectivealtruism.org/tag/existential-risk\">existential risk<\/a>. Similarly, an important goal of effective altruist charity recommendations is to increase the effectiveness of nonprofit organizations. 
<a href=\"https://forum.effectivealtruism.org/tag/influencing-for-profits\">Within the for-profit sector<\/a>, altruists have sought to shape the incentives of businesses to make them more aligned with social value, and have also tried to create social value themselves by engaging in social entrepreneurship.<\/p><p>Institutions can be improved in two different ways: from the <i>outside<\/i> and from the <i>inside<\/i>. Effective altruism organizations try to improve institutions from the outside by giving them advice or, in the case of charities, by evaluating them, whereas individual members of the effective altruism community may work within institutions to help them achieve their ends more effectively.<\/p><p>One approach to improving decisions is to set up institutional structures that are conducive to good decision-making.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefusvfm9rnves\"><sup><a href=\"#fnusvfm9rnves\">[2]<\/a><\/sup><\/span><span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref2d3u96edm2a\"><sup><a href=\"#fn2d3u96edm2a\">[3]<\/a><\/sup><\/span> This way, institutions like national governments might encourage people to make better decisions (e.g. saving for retirement) or make better decisions themselves (e.g. 
improving health policy).<\/p><h2>Evaluation<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/80-000-hours\">80,000 Hours<\/a> rates improving institutional decision-making a \"second-highest priority area\": an unusually pressing global problem ranked slightly below their four highest priority areas.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefhjq832ykn24\"><sup><a href=\"#fnhjq832ykn24\">[4]<\/a><\/sup><\/span><\/p><h2>Further reading<\/h2><p>Whittlestone, Jess (2017) <a href=\"https://80000hours.org/problem-profiles/improving-institutional-decision-making/\">Improving institutional decision-making<\/a>, <i>80,000 Hours<\/i>, September.<\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/all-party-parliamentary-group-for-future-generations\">All-Party Parliamentary Group for Future Generations<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/ballot-initiative\">ballot initiative<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/electoral-reform\">electoral reform<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/forecasting\">forecasting<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/international-relations\">international relations<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/longtermist-institutional-reform\">longtermist institutional reform<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/policy-change\">policy change<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/political-polarization\">political polarization<\/a><\/p><ol class=\"footnotes\" role=\"doc-endnotes\"><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnpg98poczmg\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnrefpg98poczmg\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Clayton, Vicky, Dilhan Perera & ibatra171 (2021) <a 
href=\"https://forum.effectivealtruism.org/posts/FqCSZT3pBvoATkR82/refining-improving-institutional-decision-making-as-a-cause\">Refining improving institutional decision-making as a cause area: results from a scoping surve<\/a><\/p><\/div><\/li><\/ol>... "},"Revision:t2L2RziMDLEuHBWNF_description":{"_id":"t2L2RziMDLEuHBWNF_description","__typename":"Revision","htmlHighlight":"<p><strong>Longtermism<\/strong> is the view that positively influencing the <a href=\"https://forum.effectivealtruism.org/tag/long-term-future\">long-term future<\/a> is a key moral priority of our time.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref2iwddw4crwt\"><sup><a href=\"#fn2iwddw4crwt\">[1]<\/a><\/sup><\/span><span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefz6yx5b2rpim\"><sup><a href=\"#fnz6yx5b2rpim\">[2]<\/a><\/sup><\/span><\/p><p>Longtermism may be seen as following from the conjunction of three core claims:<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref6frik8lwexe\"><sup><a href=\"#fn6frik8lwexe\">[3]<\/a><\/sup><\/span> <\/p><ol><li>Future people matter morally. <\/li><li>If Earth-originating intelligence is not prematurely extinguished, the vast majority of people that will ever exist will exist in the future. <\/li><li>People alive today can predictably influence whether these people exist and how well their lives go.<\/li><\/ol><h2>Types of longtermism<\/h2><h3>Strong vs. weak longtermism<\/h3><p>Strong longtermism holds that positively influencing the long-term future is the key moral priority of our time. 
This form of longtermism was introduced by <a href=\"https://forum.effectivealtruism.org/topics/hilary-greaves\">Hilary Greaves<\/a> and <a href=\"https://forum.effectivealtruism.org/topics/william-macaskill\">Will MacAskill<\/a>,<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref4lqrm1tu6v4\"><sup><a href=\"#fn4lqrm1tu6v4\">[4]<\/a><\/sup><\/span> and has precedents in the work of <a href=\"https://forum.effectivealtruism.org/tag/nick-bostrom\">Nick Bostrom<\/a>,<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefijdcuqtcsja\"><sup><a href=\"#fnijdcuqtcsja\">[5]<\/a><\/sup><\/span><span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefrxubxlbto4l\"><sup><a href=\"#fnrxubxlbto4l\">[6]<\/a><\/sup><\/span> <a href=\"https://forum.effectivealtruism.org/topics/nick-beckstead\">Nick Beckstead<\/a>,<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefohggtozamwg\"><sup><a href=\"#fnohggtozamwg\">[7]<\/a><\/sup><\/span><span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref1j6xqn8ih7f\"><sup><a href=\"#fn1j6xqn8ih7f\">[8]<\/a><\/sup><\/span> and others. The authors do not define or discuss \"weak\" longtermism; the contrast is rather with longtermism as such, which as noted above holds that positively influencing the long-term future is a key priority, but not necessarily the top priority. <\/p><h3>Patient vs. urgent longtermism<\/h3><p>This distinction can be explained in reference to the <a href=\"https://forum.effectivealtruism.org/topics/hinge-of-history\">hinge of history hypothesis<\/a>, i.e., the hypothesis that we are currently living at a time when humanity has unusually high influence over the long-term future. Urgent longtermists find the hypothesis plausible and, accordingly, hold that it makes sense to spend altruistic resources relatively quickly. 
(Altruistic resources include not just financial assets, but other resources that can accumulate and be spent deliberately in the pursuit of altruistic goals, such as credibility, <a href=\"https://forum.effectivealtruism.org/topics/career-capital\">career capital<\/a> and <a href=\"https://forum.effectivealtruism.org/topics/altruistic-coordination\">coordination ability<\/a>.) By contrast, patient longtermists hold that the opportunities for influence are not concentrated in the near term and, in line with this, favour investing these resources so that they can be deployed at some point in the future, when the moments of significant influence arrive.<\/p><h3>Broad vs. targeted longtermism<\/h3><p>This distinction between <a href=\"https://forum.effectivealtruism.org/topics/broad-vs-narrow-interventions\">broad and targeted interventions<\/a> was originally introduced by <a href=\"https://forum.effectivealtruism.org/topics/nick-beckstead\">Nick Beckstead<\/a> in his doctoral dissertation, <i>On the Overwhelming Importance of Shaping the Far Future<\/i>.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref73581e7p8q\"><sup><a href=\"#fn73581e7p8q\">[9]<\/a><\/sup><\/span> Targeted (or narrow) longtermism attempts to positively influence the long-term future by focusing on specific, identifiable scenarios, such as the risks of <a href=\"https://forum.effectivealtruism.org/topics/ai-risk\">misaligned AI<\/a> or an <a href=\"https://forum.effectivealtruism.org/topics/biosecurity\">engineered pandemic<\/a>. By contrast, br... 
<\/p>"},"Tag:t2L2RziMDLEuHBWNF":{"_id":"t2L2RziMDLEuHBWNF","__typename":"Tag","isRead":false,"parentTag":null,"subTags":[],"description":{"__ref":"Revision:t2L2RziMDLEuHBWNF_description"},"canVoteOnRels":null,"userId":"jd3Bs7YAT2KqnLxYD","name":"Longtermism","shortName":null,"slug":"longtermism","core":false,"postCount":720,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-27T09:27:27.708Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Revision:d4bQXgZhDP43eJMwp_description":{"_id":"d4bQXgZhDP43eJMwp_description","__typename":"Revision","htmlHighlight":"<p><strong>Longtermist institutional reform<\/strong> is research on how institutions can better represent the interests of future generations in the political process.<\/p><h2>Further reading<\/h2><p>Baumann, Tobias (2020) <a href=\"https://centerforreducingsuffering.org/representing-future-generations-in-the-political-process/?utm_source=rss&utm_medium=rss&utm_campaign=representing-future-generations-in-the-political-process\">Representing future generations in the political process<\/a>, <i>Center for Reducing Suffering<\/i>, June 25.<\/p><p>González-Ricoy, Iñigo & Axel Gosseries (eds.) (2016) <a href=\"http://doi.org/10.1093/acprof:oso/9780198746959.003.0001\">Designing institutions for future generations<\/a>, in <i>Institutions for Future Generations<\/i>, Oxford: Oxford University Press, pp. 3–23.<\/p><p>Goth, Aidan & Matt Lerner (2022) <a href=\"https://www.founderspledge.com/stories/longtermist-institutional-reform\">Longtermist institutional reform<\/a>, <i>Founders Pledge<\/i>, January 12.<\/p><p>Jacobs, Alan M. (2011) <a href=\"https://en.wikipedia.org/wiki/Special:BookSources/9780521171779\"><i>Governing for the Long Term: Democracy and the Politics of Investment<\/i><\/a>, Cambridge: Cambridge University Press.<\/p><p>Jacobs, Alan M. 
(2016) <a href=\"http://doi.org/10.1146/annurev-polisci-110813-034103\">Policy making for the long term in advanced democracies<\/a>, <i>Annual Review of Political Science<\/i>, vol. 19, pp. 433–454.<\/p><p>John, Tyler (2019) <a href=\"https://forum.effectivealtruism.org/posts/op93xvHkJ5KvCrKaj/institutions-for-future-generations\">Institutions for future generations<\/a>, <i>Effective Altruism Forum<\/i>, November 11.<\/p><p>John, Tyler (2021) <a href=\"https://philpapers.org/rec/JOHEFP\">Empowering future people by empowering the young?<\/a>, in <i>Ageing without Ageism: Conceptual Puzzles and Policy Proposals<\/i>, Oxford: Oxford University Press, forthcoming.<\/p><p>John, William & William MacAskill (2021) <a href=\"https://en.wikipedia.org/wiki/Special:BookSources/978-0-9957281-8-9\">Longtermist institutional reform<\/a>, in Natalie Cargill & Tyler John (eds.) <i>The Long View: Essays on Policy, Philanthropy, and the Long-Term Future<\/i>, London: First, pp. 45–60.<\/p><p>Jones, Natalie, Mark O’Brien & Thomas Ryan (2018) <a href=\"http://doi.org/10.1016/j.futures.2018.01.007\">Representation of future generations in United Kingdom policy-making<\/a>, <i>Futures<\/i>, vol. 102, pp. 153–163.<\/p><p>Krznaric, Roman (2019) <a href=\"https://www.bbc.com/future/article/20190318-can-we-reinvent-democracy-for-the-long-term\">Why we need to reinvent democracy for the long-term<\/a>, <i>BBC Future<\/i>, March 18.<\/p><p>MacAskill, William (2019) <a href=\"https://forum.effectivealtruism.org/posts/b7BrGrswgANP3eRzd/age-weighted-voting\">Age-weighted voting<\/a>, <i>Effective Altruism Forum<\/i>, July 12.<\/p><p>McKinnon, Catriona (2017) <a href=\"http://doi.org/10.1080/00455091.2017.1280381\">Endangering humanity: an international crime?<\/a>, <i>Canadian Journal of Philosophy<\/i>, vol. 47, pp. 
395–415.<\/p><p>Moorhouse, Fin & Luca Righetti (2021) <a href=\"https://a764aa28-8f1b-4abd-ad69-eed71af9e23a.filesusr.com/ugd/b589e0_6cc51397ac4b4d78b2f68d8f489b0847.pdf\">Institutions for the long run: taking future generations seriously in government<\/a>, <i>Cambridge Journal of Law, Politics, and Art<\/i>, vol. 1, pp. 430–437.<\/p><p>Nesbit, Martin & Andrea Illés (2015) <a href=\"http://www.worldfuturecouncil.org/wp-content/uploads/2016/02/IEEP_WFC_2016_Establishing_an_EU_Guardian_for_Future_Generations.pdf\">Establishing an EU “Guardian for future generations”. Report and recommendations for the World Future Council<\/a>, <i>Institute for European Environmental Policy<\/i>, London.<\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/all-party-parliamentary-group-for-future-generations\">All-Party Parliamentary Group for Future Generations<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/electoral-reform\">electoral reform<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/improving-institutional-decision-making\">improving institutional decision-making<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/longtermism\">longtermism<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/policy-change\">policy change<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/research-institute-for-future-design\">Research Institute for Future Design<\/a><\/p>"},"Revision:FdbA8vts5JPKCEou8_description":{"_id":"FdbA8vts5JPKCEou8_description","__typename":"Revision","htmlHighlight":"<p>The <strong>long-term future<\/strong> focuses on possible ways in which the future of humanity may unfold over long timescales.<\/p><h2>Bostrom's typology of possible scenarios<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/nick-bostrom\">Nick Bostrom<\/a> has identified four broad possibilities for the future of humanity.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefms8bxsox85m\"><sup><a 
href=\"#fnms8bxsox85m\">[1]<\/a><\/sup><\/span><\/p><p>First, humans may go prematurely <a href=\"https://forum.effectivealtruism.org/tag/human-extinction\">extinct<\/a>. Since the universe will eventually become inhospitable, extinction is inevitable in the very long run. However, it is also plausible that people will die out far before this deadline.<\/p><p>Second, human civilization may plateau, reaching a level of technological advancement beyond which no further advancement is feasible.<\/p><p>Third, human civilization may experience recurrent <a href=\"https://forum.effectivealtruism.org/tag/civilizational-collapse\">collapse<\/a>, undergoing repeated declines or catastrophes that prevent it from moving beyond a certain level of advancement.<\/p><p>Fourth, human civilization may advance so significantly as to become nearly unrecognizable. Bostrom conceptualizes this scenario as a “posthuman” era where people have developed significantly different cognitive abilities, population sizes, body types, sensory or emotional experiences, or life expectancies.<\/p><h2>Further reading<\/h2><p>Baum, Seth D. <i>et al.<\/i> (2019) <a href=\"http://doi.org/10.1108/FS-04-2018-0037\">Long-term trajectories of human civilization<\/a>, <i>Foresight<\/i>, vol. 21, pp. 53–83.<\/p><p>Bostrom, Nick (2009) <a href=\"http://doi.org/10.1057/9780230227279_10\">The future of humanity<\/a>, in Jan Kyrre Berg Olsen, Evan Selinger & Søren Riis (eds.) <i>New Waves in Philosophy of Technology<\/i>, London: Palgrave Macmillan, pp. 
186–215.<\/p><p>Hanson, Robin (1998) <a href=\"http://mason.gmu.edu/~rhanson/longgrow.pdf\">Long-term growth as a sequence of exponential modes<\/a>, working paper, George Mason University (updated December 2000).<\/p><p>Roodman, David (2020) <a href=\"https://www.openphilanthropy.org/blog/modeling-human-trajectory\">Modeling the human trajectory<\/a>, <i>Open Philanthropy<\/i>, June 15.<\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/longtermism\">longtermism<\/a> | <a href=\"/tag/non-humans-and-the-long-term-future\">non-humans and the long-term future<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/space-colonization\">space colonization<\/a><\/p><ol class=\"footnotes\" role=\"doc-endnotes\"><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnms8bxsox85m\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnrefms8bxsox85m\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Bostrom, Nick (2009) <a href=\"http://doi.org/10.1057/9780230227279_10\">The future of humanity<\/a>, in Jan Kyrre Berg Olsen, Evan Selinger & Søren Riis (eds.) <i>New Waves in Philosophy of Technology<\/i>, London: Palgrave Macmillan, pp. 
186–215.<\/p><\/div><\/li><\/ol>"},"Tag:FdbA8vts5JPKCEou8":{"_id":"FdbA8vts5JPKCEou8","__typename":"Tag","isRead":false,"parentTag":null,"subTags":[],"description":{"__ref":"Revision:FdbA8vts5JPKCEou8_description"},"canVoteOnRels":null,"userId":"jd3Bs7YAT2KqnLxYD","name":"Long-term future","shortName":null,"slug":"long-term-future","core":false,"postCount":214,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-27T09:33:52.701Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Revision:LFsPKPZ36ZTZQXHct_biography":{"_id":"LFsPKPZ36ZTZQXHct_biography","__typename":"Revision","version":null,"updateType":null,"editedAt":"2024-11-28T03:34:30.636Z","userId":null,"html":"<p>Member of Effective Altruism Norway.\nMSc in Computer Science with AI.<\/p>\n","commitMessage":null,"wordCount":null,"htmlHighlight":"<p>Member of Effective Altruism Norway.\nMSc in Computer Science with AI.<\/p>","plaintextDescription":"Member of Effective Altruism Norway. 
MSc in Computer Science with AI."},"User:LFsPKPZ36ZTZQXHct":{"_id":"LFsPKPZ36ZTZQXHct","__typename":"User","biography":{"__ref":"Revision:LFsPKPZ36ZTZQXHct_biography"},"profileImageId":null,"moderationStyle":null,"bannedUserIds":null,"moderatorAssistance":null,"slug":"henrik-oberg-myhre","createdAt":"2021-01-31T09:48:49.184Z","username":"Henrik Øberg Myhre","displayName":"Henrik Øberg Myhre","previousDisplayName":null,"fullName":null,"karma":80,"afKarma":0,"deleted":false,"isAdmin":false,"htmlBio":"<p>Member of Effective Altruism Norway.\nMSc in Computer Science with AI.<\/p>\n","jobTitle":null,"organization":null,"postCount":1,"commentCount":0,"sequenceCount":0,"afPostCount":0,"afCommentCount":0,"spamRiskScore":1,"tagRevisionCount":0,"reviewedByUserId":"2kBP4gThRsNXB3WWX","givingSeason2024DonatedFlair":false,"givingSeason2024VotedFlair":false},"User:ZkxGhRzN5NKcmWCWq":{"_id":"ZkxGhRzN5NKcmWCWq","__typename":"User","slug":"philiphand","createdAt":"2020-09-10T12:25:26.539Z","username":"philiphand","displayName":"Philip Hall Andersen","profileImageId":null,"previousDisplayName":null,"fullName":null,"karma":137,"afKarma":0,"deleted":false,"isAdmin":false,"htmlBio":"","jobTitle":null,"organization":null,"postCount":2,"commentCount":3,"sequenceCount":0,"afPostCount":0,"afCommentCount":0,"spamRiskScore":1,"tagRevisionCount":0,"reviewedByUserId":"jd3Bs7YAT2KqnLxYD","givingSeason2024DonatedFlair":false,"givingSeason2024VotedFlair":false,"biography":{"__ref":"Revision:ZkxGhRzN5NKcmWCWq_biography"},"moderationStyle":null,"bannedUserIds":null,"moderatorAssistance":null},"User:atWycpyDNsFN4ep9z":{"_id":"atWycpyDNsFN4ep9z","__typename":"User","slug":"jakob","createdAt":"2018-07-24T07:20:11.418Z","username":"Jakob","displayName":"Jakob","profileImageId":"Profile/jltpy9wirlqtvpde5a2l","previousDisplayName":null,"fullName":null,"karma":598,"afKarma":0,"deleted":false,"isAdmin":false,"htmlBio":"<p>Research manager at ICFG.eu, board member at Langsikt.no, doing policy research to 
mitigate risks from biotechnology and AI. Ex-SecureBio manager, ex-McKinsey Global Institute fellow and founder of the McKinsey Effective Altruism community. Follow me on Twitter at @jgraabak<\/p>","jobTitle":"Research manager","organization":"International Center for Future Generations | Ex-SecureBio, ex-McKinsey Global Institute","postCount":4,"commentCount":43,"sequenceCount":0,"afPostCount":0,"afCommentCount":0,"spamRiskScore":1,"tagRevisionCount":0,"reviewedByUserId":"9qZsZAzbC2zxsPHzN","givingSeason2024DonatedFlair":false,"givingSeason2024VotedFlair":false},"User:j7H8ri59wkf9zeGPg":{"_id":"j7H8ri59wkf9zeGPg","__typename":"User","slug":"sanna-baug-warholm","createdAt":"2021-01-31T09:49:06.902Z","username":"Sanna Baug Warholm","displayName":"Sanna Baug Warholm","profileImageId":null,"previousDisplayName":null,"fullName":null,"karma":31,"afKarma":0,"deleted":false,"isAdmin":false,"htmlBio":"","jobTitle":null,"organization":null,"postCount":0,"commentCount":0,"sequenceCount":0,"afPostCount":0,"afCommentCount":0,"spamRiskScore":0.8,"tagRevisionCount":0,"reviewedByUserId":null,"givingSeason2024DonatedFlair":false,"givingSeason2024VotedFlair":false},"Revision:nR5dGGjQmGcnQxmGz":{"_id":"nR5dGGjQmGcnQxmGz","__typename":"Revision","htmlHighlight":"<p><i>Estimated reading time: 40-45 minutes<\/i><\/p><p><i>-We would like to thank the following for their excellent feedback and guidance throughout this article, in no particular order: Samuel Hilton, Haydn Belfield, Rumtin Sepasspour, Aidan Goth, Gabriella Overödder, Marcel Sommerfelt, Konrad Seifert, Max Stauffer and Tildy Stokes. <\/i><br> <\/p><h2>1. Introduction <\/h2><p>This article is co-authored by five members of Effective Altruism Norway as a follow-up on our previous forum post: <a href=\"https://forum.effectivealtruism.org/posts/t4Lqh7GHBM9YyEDg8/objectives-of-longtermist-policy-making-1\"><u>Objective of longtermist policy making<\/u><\/a>. 
In the current post, we argue that the EA community should invest in expanding the geographical footprint of EA policy efforts beyond current projects which are mainly focused on the US and UK. We list a range of potentially promising geographies, including emerging countries; multinational organizations; mid-tier geographies; and small, well-governed countries.. Furthermore, we present a case study on the Nordics, covering a range of areas for potential EA-aligned policy impact in the Nordics. This does not reflect a belief that the Nordics are uniquely well suited for EA efforts. We have chosen this region because we know it well, and we fully agree that there are other regions/countries that also are suitable to similar EA policy efforts.<\/p><p>The main hypothesis underpinning this article is that while there is some variance in the EV of policy projects across geographies, there is a relatively broad range of geographies where additional policy efforts could be impactful. Additionally, one can expect a strong \"home turf advantage\" for policy makers and policy advocates - a lot of the impact potential in these careers relate to your local professional network and cultural understanding - therefore, the key question that funders should ask to determine \"where should I invest in additional EA policy projects\" is \"where is there best access to additional EA policy talent?\". This is the question we attempt to answer throughout this article. <\/p><p>The main ideas in this article are based on the following premises:<\/p><ul><li>The EA community’s policy efforts have been mainly concentrated in the UK and US.<\/li><li>Most career advice for political work in the EA community points people towards working in the world’s most influential and populous countries, primarily the US and China.<\/li><li>EA policy efforts could have a bigger global impact by adjusting its portfolio to include a broader set of economies<\/li><\/ul>... 
","plaintextDescription":"Estimated reading time: 40-45 minutes\n\n-We would like to thank the following for their excellent feedback and guidance throughout this article, in no particular order: Samuel Hilton, Haydn Belfield, Rumtin Sepasspour, Aidan Goth, Gabriella Overödder, Marcel Sommerfelt, Konrad Seifert, Max Stauffer and Tildy Stokes. \n \n\n\n1. Introduction \nThis article is co-authored by five members of Effective Altruism Norway as a follow-up on our previous forum post: Objective of longtermist policy making. In the current post, we argue that the EA community should invest in expanding the geographical footprint of EA policy efforts beyond current projects which are mainly focused on the US and UK. We list a range of potentially promising geographies, including emerging countries; multinational organizations; mid-tier geographies; and small, well-governed countries.. Furthermore, we present a case study on the Nordics, covering a range of areas for potential EA-aligned policy impact in the Nordics. This does not reflect a belief that the Nordics are uniquely well suited for EA efforts. We have chosen this region because we know it well, and we fully agree that there are other regions/countries that also are suitable to similar EA policy efforts.\n\nThe main hypothesis underpinning this article is that while there is some variance in the EV of policy projects across geographies, there is a relatively broad range of geographies where additional policy efforts could be impactful. Additionally, one can expect a strong \"home turf advantage\" for policy makers and policy advocates - a lot of the impact potential in these careers relate to your local professional network and cultural understanding - therefore, the key question that funders should ask to determine \"where should I invest in additional EA policy projects\" is \"where is there best access to additional EA policy talent?\". This is the question we attempt to answer throughout this article. 
\n\nThe main ideas in this article are based on ","wordCount":9053,"version":"1.12.1"},"Tag:aFJtBqxDkRX4Dj3m4":{"_id":"aFJtBqxDkRX4Dj3m4","__typename":"Tag","userId":"z9YQK84zJTvjmWRNZ","name":"Community epistemic health","shortName":null,"slug":"community-epistemic-health","core":false,"postCount":147,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-01-17T12:26:21.843Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:WtkxKaLPvMDfcw7ss":{"_id":"WtkxKaLPvMDfcw7ss","__typename":"Tag","userId":"2kBP4gThRsNXB3WWX","name":"Community infrastructure","shortName":null,"slug":"community-infrastructure","core":false,"postCount":322,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-11-24T09:59:54.892Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:A7LTqhZYeXqvQqRMg":{"_id":"A7LTqhZYeXqvQqRMg","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Effective altruism groups","shortName":null,"slug":"effective-altruism-groups","core":false,"postCount":372,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-20T11:52:05.608Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:WuDHK7wyXKCjzwjpA":{"_id":"WuDHK7wyXKCjzwjpA","__typename":"Tag","userId":"LMgZyi4w3XoYz3tM5","name":"Event strategy","shortName":null,"slug":"event-strategy","core":false,"postCount":86,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-01T12:19:51.398Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:8729b26vvCSQE7H5N":{"_id":"8729b26vvCSQE7H5N","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Effective Altruism 
Survey","shortName":null,"slug":"effective-altruism-survey","core":false,"postCount":71,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-12-23T15:23:27.571Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:zJv36ZGjWDSwXDEiT":{"_id":"zJv36ZGjWDSwXDEiT","__typename":"Tag","userId":"Tdmci4hT6ttBZ37kw","name":"Conferences","shortName":null,"slug":"conferences","core":false,"postCount":247,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-11T00:39:50.314Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:33HtuxWcS8ppcihWr":{"_id":"33HtuxWcS8ppcihWr","__typename":"Tag","userId":"L4EQZrYZbKEApJp4n","name":"EAGx","shortName":null,"slug":"eagx","core":false,"postCount":131,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-04-25T08:24:48.225Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:3J3dtqjHD2bmLGGAv":{"_id":"3J3dtqjHD2bmLGGAv","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Effective Altruism Global","shortName":null,"slug":"effective-altruism-global","core":false,"postCount":154,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-12-23T15:19:47.942Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:9tdEph2aXYqwzmmRm":{"_id":"9tdEph2aXYqwzmmRm","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Effective Altruism 
Forum","shortName":null,"slug":"effective-altruism-forum-1","core":false,"postCount":361,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-28T11:01:13.966Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:xCgaGHaEYg69KTTkR":{"_id":"xCgaGHaEYg69KTTkR","__typename":"Tag","userId":"HFFgbJKdbpoqzw6fi","name":"Effective altruism in the media","shortName":null,"slug":"effective-altruism-in-the-media","core":false,"postCount":186,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-25T16:31:44.455Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:Ap3maZNsJw2McNNQy":{"_id":"Ap3maZNsJw2McNNQy","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Effective altruism messaging","shortName":null,"slug":"effective-altruism-messaging","core":false,"postCount":332,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-06-17T09:54:16.496Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:w4Wz6AmL5d4HzXwra":{"_id":"w4Wz6AmL5d4HzXwra","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Effective altruism outreach in schools","shortName":null,"slug":"effective-altruism-outreach-in-schools","core":false,"postCount":59,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-06-03T19:15:29.090Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:FSEcBJTh4HFHyfanu":{"_id":"FSEcBJTh4HFHyfanu","__typename":"Tag","userId":"tKxXWdBF6mbkSpEFx","name":"Effective altruism 
education","shortName":null,"slug":"effective-altruism-education","core":false,"postCount":139,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-05-29T08:48:16.848Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:A7Kgc9sMmRcXkWK7S":{"_id":"A7Kgc9sMmRcXkWK7S","__typename":"Tag","userId":"Tdmci4hT6ttBZ37kw","name":"Field-building","shortName":null,"slug":"field-building","core":false,"postCount":123,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-04-20T05:15:02.607Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:jjNySc23WPumhShAp":{"_id":"jjNySc23WPumhShAp","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Global outreach","shortName":null,"slug":"global-outreach","core":false,"postCount":118,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-08T02:57:40.332Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false,"isRead":false,"parentTag":{"__ref":"Tag:EHLmbEmJ2Qd5WfwTb"},"subTags":[],"description":{"__ref":"Revision:jjNySc23WPumhShAp_description"},"canVoteOnRels":null},"Tag:Y7DckZtXjsddZykpa":{"_id":"Y7DckZtXjsddZykpa","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Movement collapse","shortName":null,"slug":"movement-collapse","core":false,"postCount":14,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-21T13:38:12.180Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:tqPxveBdDvk8g6WLM":{"_id":"tqPxveBdDvk8g6WLM","__typename":"Tag","userId":"Tdmci4hT6ttBZ37kw","name":"Network 
building","shortName":null,"slug":"network-building","core":false,"postCount":44,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-04-20T04:44:34.817Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:HrZ8whku9ov3dKvdX":{"_id":"HrZ8whku9ov3dKvdX","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Scalably using labour","shortName":null,"slug":"scalably-using-labour","core":false,"postCount":54,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-03-26T23:03:43.792Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:FdGbtrAvvnkrfCbEL":{"_id":"FdGbtrAvvnkrfCbEL","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Value drift","shortName":null,"slug":"value-drift","core":false,"postCount":54,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-27T09:48:13.537Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:LFviDDNxpD7Mf3WzD":{"_id":"LFviDDNxpD7Mf3WzD","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Value of movement growth","shortName":null,"slug":"value-of-movement-growth","core":false,"postCount":33,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-04-27T08:46:28.265Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:QvvdayWWSvmeBCtqZ":{"_id":"QvvdayWWSvmeBCtqZ","__typename":"Tag","userId":"Tdmci4hT6ttBZ37kw","name":"Community 
experiences","shortName":null,"slug":"community-experiences","core":false,"postCount":306,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-10T02:54:04.128Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:C4WsLjWE4epWfu493":{"_id":"C4WsLjWE4epWfu493","__typename":"Tag","userId":"Tdmci4hT6ttBZ37kw","name":"Discussion norms","shortName":null,"slug":"discussion-norms","core":false,"postCount":151,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-11-26T15:34:47.377Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:mAq9qa8rAyB6vM5z7":{"_id":"mAq9qa8rAyB6vM5z7","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Diversity and inclusion","shortName":null,"slug":"diversity-and-inclusion","core":false,"postCount":238,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-27T11:29:39.230Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:doAxFNMmB7qMbJsDR":{"_id":"doAxFNMmB7qMbJsDR","__typename":"Tag","userId":"tKxXWdBF6mbkSpEFx","name":"Criticism of the effective altruism community","shortName":null,"slug":"criticism-of-the-effective-altruism-community","core":false,"postCount":203,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-12-09T23:22:15.143Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:sSZ8pCvackwaY9vBx":{"_id":"sSZ8pCvackwaY9vBx","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Criticism of effective 
altruism","shortName":null,"slug":"criticism-of-effective-altruism","core":false,"postCount":463,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-27T09:35:22.911Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:aEfhhqymeFsdhDQLk":{"_id":"aEfhhqymeFsdhDQLk","__typename":"Tag","userId":"SuPnfB9qqKWsucNzm","name":"Criticism and Red Teaming Contest","shortName":null,"slug":"criticism-and-red-teaming-contest","core":false,"postCount":282,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-03-25T11:54:05.415Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:ui3ZZqd23HzA7wNwP":{"_id":"ui3ZZqd23HzA7wNwP","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Criticism of effective altruist causes","shortName":null,"slug":"criticism-of-effective-altruist-causes","core":false,"postCount":157,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-16T10:00:16.782Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:RcuE86WhJQguD2whE":{"_id":"RcuE86WhJQguD2whE","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Communities adjacent to effective altruism","shortName":null,"slug":"communities-adjacent-to-effective-altruism","core":false,"postCount":12,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-08-16T23:49:15.791Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:jRK6K3XRLhcqCWfzn":{"_id":"jRK6K3XRLhcqCWfzn","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Rationality 
community","shortName":null,"slug":"rationality-community","core":false,"postCount":18,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-24T14:09:04.201Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:FtscHgKMASeoT5sPu":{"_id":"FtscHgKMASeoT5sPu","__typename":"Tag","userId":"86kbCNY523Arv4h6k","name":"Moral psychology","shortName":null,"slug":"moral-psychology","core":false,"postCount":91,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-06-15T13:51:57.618Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:vrpunJqGNgp4g5HfJ":{"_id":"vrpunJqGNgp4g5HfJ","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"History of effective altruism","shortName":null,"slug":"history-of-effective-altruism","core":false,"postCount":64,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-06-17T10:07:37.910Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:RNRbmXTqXb2Lc9pih":{"_id":"RNRbmXTqXb2Lc9pih","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"History of philanthropy","shortName":null,"slug":"history-of-philanthropy","core":false,"postCount":26,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-03-15T05:58:04.715Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:is8kynTqvpzQ5RtEh":{"_id":"is8kynTqvpzQ5RtEh","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Constraints on effective 
altruism","shortName":null,"slug":"constraints-on-effective-altruism","core":false,"postCount":50,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-03-15T07:00:49.543Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:nGqPZkKJuiKfAqDcs":{"_id":"nGqPZkKJuiKfAqDcs","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Effective altruism funding","shortName":null,"slug":"effective-altruism-funding","core":false,"postCount":350,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-27T09:52:09.656Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false,"isRead":false,"parentTag":{"__ref":"Tag:EHLmbEmJ2Qd5WfwTb"},"subTags":[],"description":{"__ref":"Revision:nGqPZkKJuiKfAqDcs_description"},"canVoteOnRels":null},"Tag:aoPgP9xHSGAczRQNt":{"_id":"aoPgP9xHSGAczRQNt","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Markets for altruism","shortName":null,"slug":"markets-for-altruism","core":false,"postCount":37,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-11-16T10:36:25.689Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:Wurqm8Gnm9tYqBYbP":{"_id":"Wurqm8Gnm9tYqBYbP","__typename":"Tag","userId":"AtP7r7rtXoRt7yNhf","name":"Hackathons","shortName":null,"slug":"hackathons","core":false,"postCount":23,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-07-06T15:50:13.015Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:69wrutbRBMjiypNmE":{"_id":"69wrutbRBMjiypNmE","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Workshops / Retreats / 
Summits","shortName":null,"slug":"workshops-retreats-summits","core":false,"postCount":74,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-04-08T13:50:31.650Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Revision:EHLmbEmJ2Qd5WfwTb_description":{"_id":"EHLmbEmJ2Qd5WfwTb_description","__typename":"Revision","htmlHighlight":"<p><strong>Building effective altruism<\/strong> refers to the family of interventions aimed at growing, shaping, or otherwise improving <a href=\"https://forum.effectivealtruism.org/tag/effective-altruism\">effective altruism<\/a> as a practical and intellectual community.<\/p><p>Examples of building effective altruism include starting <a href=\"https://forum.effectivealtruism.org/topics/university-groups\">student groups<\/a> and <a href=\"https://forum.effectivealtruism.org/topics/effective-altruism-groups\">local groups<\/a>, writing articles, and organizing social gatherings for people interested in effective altruism.<\/p><p>An influential model in effective altruism community building is the Awareness/Inclination model, developed by Owen Cotton-Barratt.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref7kc2d9bhbrk\"><sup><a href=\"#fn7kc2d9bhbrk\">[1]<\/a><\/sup><\/span> Awareness and inclination are both limiting factors for movement growth: in order to join effective altruism, a potential new member both needs to know what the movement is (awareness) and have a positive impression of it and desire to be involved (inclination). Ideally, EA movement-building work would increase both awareness and inclination, but there is sometimes a trade-off between the two. 
For instance, some social movements draw attention to themselves by generating controversy, which increases awareness while decreases inclination.<br><br>There has been debate about the <a href=\"https://forum.effectivealtruism.org/topics/value-of-movement-growth\">value of growing the effective altruism movement<\/a>. A larger movement means more people trying to do good effectively, but there may also be downsides to a larger movement. For example, it may be more difficult to have nuanced discussions, protect key effective altruism priorities, or coordinate.<\/p><h2>Evaluation<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/80-000-hours\">80,000 Hours<\/a> rates building effective altruism a \"highest priority area\": a problem at the top of their ranking of global issues assessed by <a href=\"https://forum.effectivealtruism.org/tag/itn-framework-1\">importance, tractability and neglectedness<\/a>.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefsrtlc2g8s8h\"><sup><a href=\"#fnsrtlc2g8s8h\">[2]<\/a><\/sup><\/span><\/p><h2>Further reading<\/h2><p>Bachmann, Michael (2018), <a href=\"https://medium.com/@michelbachmann/start-with-who-15b8857ed718\">Start with who<\/a><br><br>Centre for Effective Altruism (2017, updated 2021), <a href=\"https://www.centreforeffectivealtruism.org/blog/the-fidelity-model-of-spreading-ideas\">The fidelity model of spreading ideas<\/a><br><br>Cotton-Barratt, Owen (2015), <a href=\"How valuable is movement growth?\">How valuable is movement growth?<\/a><\/p><p>Duda, Roman (2018) <a href=\"https://80000hours.org/problem-profiles/promoting-effective-altruism/\">Building effective altruism<\/a>, <i>80,000 Hours<\/i>, March (updated July 2020).<\/p><p>Whittlestone, Jess (2017) <a href=\"https://www.effectivealtruism.org/articles/cause-profile-building-an-effective-altruism-community/\">Building an effective altruism community<\/a>, <i>Effective Altruism<\/i>, November 16.<\/p><p> <\/p><h2>Related entries<\/h2><p><a 
href=\"https://forum.effectivealtruism.org/tag/altruistic-coordination\">altruistic coordination<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/coworking-spaces\">coworking spaces<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/effective-altruism-education\">effective altruism education<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/global-outreach\">global outreach<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/moral-trade\">moral trade<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/movement-collapse\">movement collapse<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/network-building\">network building<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/retreats\">retreats<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/social-and-intellectual-movements\">social and intellectual movements<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/value-of-movement-growth\">value of movement growth<\/a><\/p><ol class=\"footnotes\" role=\"doc-endnotes\"><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fn7kc2d9bhbrk\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnref7kc2d9bhbrk\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Cotton-Barratt (2015) <a href=\"https://www.effectivealtruism.org/articles/how-valuable-is-movement-growth-owen-cotton-barratt\">How valuable is movement growth?<\/a>, <i>Effective Altruism.org<\/i><\/p><\/div><\/li><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnsrtlc2g8s8h\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnrefsrtlc2g8s8h\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>80,000 Hours (2021) <a href=\"https://80000hours.org/problem-profiles/\">Our current list of the most important world problems<\/a>, <i>80,000 
Hours<\/i>.<\/p><\/div><\/li><\/ol>"},"Tag:EHLmbEmJ2Qd5WfwTb":{"_id":"EHLmbEmJ2Qd5WfwTb","__typename":"Tag","isRead":false,"parentTag":null,"subTags":[{"__ref":"Tag:aFJtBqxDkRX4Dj3m4"},{"__ref":"Tag:WtkxKaLPvMDfcw7ss"},{"__ref":"Tag:A7LTqhZYeXqvQqRMg"},{"__ref":"Tag:WuDHK7wyXKCjzwjpA"},{"__ref":"Tag:8729b26vvCSQE7H5N"},{"__ref":"Tag:zJv36ZGjWDSwXDEiT"},{"__ref":"Tag:33HtuxWcS8ppcihWr"},{"__ref":"Tag:3J3dtqjHD2bmLGGAv"},{"__ref":"Tag:9tdEph2aXYqwzmmRm"},{"__ref":"Tag:xCgaGHaEYg69KTTkR"},{"__ref":"Tag:Ap3maZNsJw2McNNQy"},{"__ref":"Tag:w4Wz6AmL5d4HzXwra"},{"__ref":"Tag:FSEcBJTh4HFHyfanu"},{"__ref":"Tag:A7Kgc9sMmRcXkWK7S"},{"__ref":"Tag:jjNySc23WPumhShAp"},{"__ref":"Tag:Y7DckZtXjsddZykpa"},{"__ref":"Tag:tqPxveBdDvk8g6WLM"},{"__ref":"Tag:HrZ8whku9ov3dKvdX"},{"__ref":"Tag:FdGbtrAvvnkrfCbEL"},{"__ref":"Tag:LFviDDNxpD7Mf3WzD"},{"__ref":"Tag:QvvdayWWSvmeBCtqZ"},{"__ref":"Tag:C4WsLjWE4epWfu493"},{"__ref":"Tag:mAq9qa8rAyB6vM5z7"},{"__ref":"Tag:doAxFNMmB7qMbJsDR"},{"__ref":"Tag:sSZ8pCvackwaY9vBx"},{"__ref":"Tag:aEfhhqymeFsdhDQLk"},{"__ref":"Tag:ui3ZZqd23HzA7wNwP"},{"__ref":"Tag:RcuE86WhJQguD2whE"},{"__ref":"Tag:jRK6K3XRLhcqCWfzn"},{"__ref":"Tag:FtscHgKMASeoT5sPu"},{"__ref":"Tag:vrpunJqGNgp4g5HfJ"},{"__ref":"Tag:RNRbmXTqXb2Lc9pih"},{"__ref":"Tag:is8kynTqvpzQ5RtEh"},{"__ref":"Tag:nGqPZkKJuiKfAqDcs"},{"__ref":"Tag:aoPgP9xHSGAczRQNt"},{"__ref":"Tag:Wurqm8Gnm9tYqBYbP"},{"__ref":"Tag:69wrutbRBMjiypNmE"}],"description":{"__ref":"Revision:EHLmbEmJ2Qd5WfwTb_description"},"canVoteOnRels":null,"userId":"2kBP4gThRsNXB3WWX","name":"Building effective altruism","shortName":"Building EA","slug":"building-effective-altruism","core":true,"postCount":5535,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-28T13:22:17.182Z","wikiOnly":false,"deleted":false,"isSubforum":true,"noindex":false},"Tag:xXRMBbnoM2P8BPF4k":{"_id":"xXRMBbnoM2P8BPF4k","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Effective 
altruism lifestyle","shortName":null,"slug":"effective-altruism-lifestyle","core":false,"postCount":260,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-27T11:29:29.967Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Revision:ZCihBFp5P64JCvQY6_description":{"_id":"ZCihBFp5P64JCvQY6_description","__typename":"Revision","htmlHighlight":"<p>The <strong>community <\/strong>topic covers posts about the effective altruism community, as well as applying EA in one's personal life. The tag also applies to posts about the Forum itself, since this is a community space. You should use the <strong>community<\/strong> tag if one of the following things is true:<\/p><ul><li>The post is about EA as a cultural phenomenon (as opposed to EA as a project of doing good)<\/li><li>The post is about norms, attitudes or practices you'd like to see more or less of within the EA community<\/li><li>The post would be irrelevant to someone who was interested in doing good effectively, but NOT interested in the effective altruism community <\/li><li>The post concerns an ongoing conversation, scandal or discourse that would not be relevant to someone who doesn't care about the EA community.<\/li><\/ul><p>Posts tagged with <strong>community<\/strong> are displayed in a separate list on the homepage to other posts; this list can be hidden. <\/p><h1><br>About the effective altruism community <\/h1><p>The effective altruism community is partly a professional network of people who are interested in pursuing <a href=\"https://forum.effectivealtruism.org/topics/career-choice\">impactful careers<\/a>. It is also partly a social community of people who are interested in doing the most good possible. 
Although there is a <a href=\"https://forum.effectivealtruism.org/topics/centre-for-effective-altruism-1\">Centre for Effective Altruism<\/a>, effective altruism is decentralised, and various organizations, <a href=\"https://forum.effectivealtruism.org/topics/effective-altruism-groups\">local groups<\/a> and individuals consider themselves as part of the community. In 2020, David Moss estimated that there were 4500-10,000 members of the EA community (90% confidence interval).<span class=\"footnote-reference\" data-footnote-reference=\"\" data-footnote-index=\"1\" data-footnote-id=\"hnj51v8w31g\" role=\"doc-noteref\" id=\"fnrefhnj51v8w31g\"><sup><a href=\"#fnhnj51v8w31g\">[1]<\/a><\/sup><\/span><br> <\/p><h1><br>Further reading <\/h1><p><br><a href=\"https://rethinkpriorities.org/ea-survey\">EA surveys <\/a>produced by Rethink Priorities <br> <\/p><h1><br>Related entries <\/h1><p><a href=\"https://forum.effectivealtruism.org/topics/building-effective-altruism\">building effective altruism<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/effective-altruism-groups\">effective altruism groups<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/centre-for-effective-altruism-1\">Centre for Effective Altruism<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/effective-altruism-survey\">effective altruism survey<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/data-ea-community-1\">data (EA community<\/a>)| <a href=\"https://forum.effectivealtruism.org/topics/community-experiences\">community experiences<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/effective-altruism-lifestyle\">effective altruism lifestyle<\/a><br> <\/p><p> <\/p><ol class=\"footnote-section footnotes\" data-footnote-section=\"\" role=\"doc-endnotes\"><li class=\"footnote-item\" data-footnote-item=\"\" data-footnote-index=\"1\" data-footnote-id=\"hnj51v8w31g\" role=\"doc-endnote\" id=\"fnhnj51v8w31g\"><span class=\"footnote-back-link\" 
data-footnote-back-link=\"\" data-footnote-id=\"hnj51v8w31g\"><sup><strong><a href=\"#fnrefhnj51v8w31g\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\" data-footnote-content=\"\"><p>Moss, David (2020) <a href=\"https://forum.effectivealtruism.org/posts/zQRHAFKGWcXXicYMo/ea-survey-2019-series-how-many-people-are-there-in-the-ea\">EA Survey 2019 Series: How many people are there in the EA community?<\/a> (EA Forum)<\/p><\/div><\/li><\/ol>"},"Tag:ZCihBFp5P64JCvQY6":{"_id":"ZCihBFp5P64JCvQY6","__typename":"Tag","isRead":false,"parentTag":null,"subTags":[{"__ref":"Tag:xXRMBbnoM2P8BPF4k"}],"description":{"__ref":"Revision:ZCihBFp5P64JCvQY6_description"},"canVoteOnRels":["userOwnsOnlyUpvote","admins","sunshineRegiment"],"userId":"9qZsZAzbC2zxsPHzN","name":"Community","shortName":null,"slug":"community","core":true,"postCount":5303,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-08-11T15:28:58.111Z","wikiOnly":false,"deleted":false,"isSubforum":true,"noindex":false},"Tag:xsiR75hLgHBgtosDy":{"_id":"xsiR75hLgHBgtosDy","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Global priorities research","shortName":null,"slug":"global-priorities-research","core":false,"postCount":213,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-12-14T01:46:20.448Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false,"isRead":false,"parentTag":{"__ref":"Tag:psBzwdY8ipfCeExJ7"},"subTags":[],"description":{"__ref":"Revision:xsiR75hLgHBgtosDy_description"},"canVoteOnRels":null},"Tag:aoG4RzP4KCkckH2Gz":{"_id":"aoG4RzP4KCkckH2Gz","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Cause 
neutrality","shortName":null,"slug":"cause-neutrality","core":false,"postCount":7,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-04-12T11:05:56.012Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:69w9jjBQ7QMG8vRA4":{"_id":"69w9jjBQ7QMG8vRA4","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Cause X","shortName":null,"slug":"cause-x","core":false,"postCount":43,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-12-22T14:58:11.187Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:eRwcqwQ6PaQp59Yct":{"_id":"eRwcqwQ6PaQp59Yct","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Less-discussed causes","shortName":null,"slug":"less-discussed-causes","core":false,"postCount":530,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-28T09:18:23.318Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:3MaNTgoRt9iLHEcW3":{"_id":"3MaNTgoRt9iLHEcW3","__typename":"Tag","userId":"TuhLfxEaaX73gp5MC","name":"Local priorities research","shortName":null,"slug":"local-priorities-research-1","core":false,"postCount":31,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-04-24T05:44:01.721Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false,"isRead":false,"parentTag":{"__ref":"Tag:psBzwdY8ipfCeExJ7"},"subTags":[],"description":{"__ref":"Revision:3MaNTgoRt9iLHEcW3_description"},"canVoteOnRels":null},"Tag:eSWSZv3oF6sydzD9L":{"_id":"eSWSZv3oF6sydzD9L","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"ITN 
framework","shortName":null,"slug":"itn-framework","core":false,"postCount":60,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-12-07T05:00:30.094Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false,"isRead":false,"parentTag":{"__ref":"Tag:psBzwdY8ipfCeExJ7"},"subTags":[],"description":{"__ref":"Revision:eSWSZv3oF6sydzD9L_description"},"canVoteOnRels":null},"Tag:f2Y8FabnfC7hNsoms":{"_id":"f2Y8FabnfC7hNsoms","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Importance","shortName":null,"slug":"importance","core":false,"postCount":4,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-18T13:59:50.952Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:LntHSATTnW3cft2rr":{"_id":"LntHSATTnW3cft2rr","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Tractability","shortName":null,"slug":"tractability","core":false,"postCount":10,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-18T14:14:01.270Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:gouqafb3g5bb6fuM7":{"_id":"gouqafb3g5bb6fuM7","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Neglectedness","shortName":null,"slug":"neglectedness","core":false,"postCount":32,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-18T14:17:16.485Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:7fCxZm5PghjWPqknE":{"_id":"7fCxZm5PghjWPqknE","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Systemic 
change","shortName":null,"slug":"systemic-change","core":false,"postCount":79,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-26T15:06:12.104Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:tXxetAAaYGXM8KoyP":{"_id":"tXxetAAaYGXM8KoyP","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Distribution of cost-effectiveness","shortName":null,"slug":"distribution-of-cost-effectiveness","core":false,"postCount":35,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-02-17T18:23:42.950Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:CDEKHTGHAqLcJPw5A":{"_id":"CDEKHTGHAqLcJPw5A","__typename":"Tag","userId":"RvR6DrvuoXK6bSm67","name":"Impact assessment","shortName":null,"slug":"impact-assessment","core":false,"postCount":182,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-09-13T14:36:01.148Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:7MDBDhPJezCqvgMg2":{"_id":"7MDBDhPJezCqvgMg2","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"SPC framework","shortName":null,"slug":"spc-framework","core":false,"postCount":8,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-08-19T17:40:12.858Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:W3Wnemq6zoiXwSNiC":{"_id":"W3Wnemq6zoiXwSNiC","__typename":"Tag","userId":"BkbwT5TzSj4aRxJMN","name":"Thinking at the 
margin","shortName":null,"slug":"thinking-at-the-margin","core":false,"postCount":15,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-01-18T14:19:09.271Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:xPgDmQ8nPZhtva9k8":{"_id":"xPgDmQ8nPZhtva9k8","__typename":"Tag","userId":"jd3Bs7YAT2KqnLxYD","name":"Cause Exploration Prizes","shortName":null,"slug":"cause-exploration-prizes","core":false,"postCount":193,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-05-25T11:15:52.478Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:rdxSaRhkHxwjbNkD7":{"_id":"rdxSaRhkHxwjbNkD7","__typename":"Tag","userId":"LMgZyi4w3XoYz3tM5","name":"Cost-effectiveness analysis","shortName":null,"slug":"cost-effectiveness-analysis","core":false,"postCount":297,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-04-26T15:45:34.826Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Revision:psBzwdY8ipfCeExJ7_description":{"_id":"psBzwdY8ipfCeExJ7_description","__typename":"Revision","htmlHighlight":"<p><strong>Cause prioritization<\/strong> refers to efforts to find the most pressing problems to work on and compare interventions across different cause areas so that we can do as much good as possible with the resources available to us. <\/p><p>Governments and philanthropists spend considerable resources attempting to do good in the world. <i>How<\/i> those resources are used, however, can make an even bigger difference than <i>how many<\/i> resources are available. 
Some work has been done on prioritizing <i>within<\/i> areas, but the question of how to prioritize <i>between<\/i> areas—which is arguably more important<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefemqxxljq1u\"><sup><a href=\"#fnemqxxljq1u\">[1]<\/a><\/sup><\/span>—has received limited attention. <\/p><p><a href=\"https://concepts.effectivealtruism.org/concepts/neutrality-in-focus-area-selection/\">Cause-neutral<\/a> prioritization research seeks to identify new <a href=\"https://concepts.effectivealtruism.org/concepts/promising-areas/\">promising focus areas<\/a> and to compare their relative value (for instance by considering factors like <a href=\"https://forum.effectivealtruism.org/topics/itn-framework\">scope, neglectedness, and tractability<\/a> of the problems). <\/p><p>Cause prioritization research (or <a href=\"https://forum.effectivealtruism.org/topics/global-priorities-research\">global priorities research<\/a>) is often regarded as a highly promising area of work.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefrkj7kykbci\"><sup><a href=\"#fnrkj7kykbci\">[2]<\/a><\/sup><\/span><\/p><h2>Further reading<\/h2><p>80,000 Hours (2016) <a href=\"https://80000hours.org/problem-profiles/global-priorities-research/\">Global priorities research<\/a>, <i>80,000 Hours<\/i>, April (updated July 2018).<\/p><p>Grace, Katja (2014) <a href=\"https://80000hours.org/2014/08/conversation-with-paul-christiano-on-cause-prioritization-research/\">Conversation with Paul Christiano <\/a><a href=\"https://forum.effectivealtruism.org/posts/b6y9zSkRtxvKSdqcc/paul-christiano-on-cause-prioritization\">on<\/a><a href=\"https://web.archive.org/web/20220712175031/https://80000hours.org/2014/08/conversation-with-paul-christiano-on-cause-prioritization-research\"> cause prioritization research<\/a>, <i>80,000 Hours<\/i>, August 20.<\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/topics/cause-x\">Cause X<\/a> | <a 
href=\"https://forum.effectivealtruism.org/topics/cost-effectiveness\">cost-effectiveness<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/distribution-of-cost-effectiveness\">distribution of cost-effectiveness<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/global-priorities-research\">global priorities research<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/impact-assessment\">impact assessment<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/intervention-evaluation\">intervention evaluation<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/itn-framework-1\">ITN framework<\/a><\/p><ol class=\"footnotes\" role=\"doc-endnotes\"><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnemqxxljq1u\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnrefemqxxljq1u\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Todd, Benjamin (2013) <a href=\"https://80000hours.org/2013/12/why-pick-a-cause/\">Why pick a cause?<\/a> <i>80,000 Hours website<\/i>, December 10.<\/p><\/div><\/li><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnrkj7kykbci\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnrefrkj7kykbci\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Stafforini, Pablo (2014) <a href=\"https://forum.effectivealtruism.org/posts/b6y9zSkRtxvKSdqcc/paul-christiano-on-cause-prioritization\">Paul Christiano on cause prioritization research<\/a>, <i>Effective Altruism Forum<\/i>, March 
23.<\/p><\/div><\/li><\/ol>"},"Tag:psBzwdY8ipfCeExJ7":{"_id":"psBzwdY8ipfCeExJ7","__typename":"Tag","isRead":false,"parentTag":null,"subTags":[{"__ref":"Tag:xsiR75hLgHBgtosDy"},{"__ref":"Tag:aoG4RzP4KCkckH2Gz"},{"__ref":"Tag:69w9jjBQ7QMG8vRA4"},{"__ref":"Tag:eRwcqwQ6PaQp59Yct"},{"__ref":"Tag:3MaNTgoRt9iLHEcW3"},{"__ref":"Tag:eSWSZv3oF6sydzD9L"},{"__ref":"Tag:f2Y8FabnfC7hNsoms"},{"__ref":"Tag:LntHSATTnW3cft2rr"},{"__ref":"Tag:gouqafb3g5bb6fuM7"},{"__ref":"Tag:7fCxZm5PghjWPqknE"},{"__ref":"Tag:tXxetAAaYGXM8KoyP"},{"__ref":"Tag:CDEKHTGHAqLcJPw5A"},{"__ref":"Tag:7MDBDhPJezCqvgMg2"},{"__ref":"Tag:W3Wnemq6zoiXwSNiC"},{"__ref":"Tag:xPgDmQ8nPZhtva9k8"},{"__ref":"Tag:rdxSaRhkHxwjbNkD7"}],"description":{"__ref":"Revision:psBzwdY8ipfCeExJ7_description"},"canVoteOnRels":null,"userId":"jd3Bs7YAT2KqnLxYD","name":"Cause prioritization","shortName":null,"slug":"cause-prioritization","core":true,"postCount":2714,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-27T09:38:21.092Z","wikiOnly":false,"deleted":false,"isSubforum":true,"noindex":false},"Revision:jjNySc23WPumhShAp_description":{"_id":"jjNySc23WPumhShAp_description","__typename":"Revision","htmlHighlight":"<p>The <strong>global outreach<\/strong> tag covers posts about developing EA-aligned communities around the world, especially in places where very few exist so far (for example, Latin America or the Middle East).<\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/building-effective-altruism-1\">building effective altruism<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/value-of-movement-growth\">value of movement growth<\/a><\/p>"},"Revision:eSWSZv3oF6sydzD9L_description":{"_id":"eSWSZv3oF6sydzD9L_description","__typename":"Revision","htmlHighlight":"<p>The <strong>importance, tractability and neglectedness framework<\/strong>, or <strong>ITN framework<\/strong> for short, is a framework for 
estimating the value of allocating <a href=\"https://forum.effectivealtruism.org/tag/thinking-at-the-margin\">marginal<\/a> resources to solving a problem based on its <a href=\"https://forum.effectivealtruism.org/tag/importance\">importance<\/a>, <a href=\"https://forum.effectivealtruism.org/tag/tractability\">tractability<\/a>, and <a href=\"https://forum.effectivealtruism.org/tag/neglectedness\">neglectedness<\/a>.<\/p><h2>History<\/h2><p>The ITN framework was first developed by <a href=\"https://forum.effectivealtruism.org/tag/holden-karnofsky\">Holden Karnofsky<\/a> around 2013 as part of his work for GiveWell Labs (which later became <a href=\"https://forum.effectivealtruism.org/tag/open-philanthropy\">Open Philanthropy<\/a>).<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefuc4xj13qrf\"><sup><a href=\"#fnuc4xj13qrf\">[1]<\/a><\/sup><\/span><\/p><p><a href=\"https://forum.effectivealtruism.org/tag/80-000-hours\">80,000 Hours<\/a> later presented its own, quantitative version of the framework.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefgi07mnssz6e\"><sup><a href=\"#fngi07mnssz6e\">[2]<\/a><\/sup><\/span> On this version, developed by Owen Cotton-Barratt in late 2014,<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefij97eh8n5wr\"><sup><a href=\"#fnij97eh8n5wr\">[3]<\/a><\/sup><\/span> the three factors are formally defined as follows:<\/p><ul><li>importance = good done / % of a problem solved<\/li><li>tractability = % of a problem solved / % increase in resources<\/li><li>neglectedness = % increase in resources / extra person or dollar<\/li><\/ul><p>When these terms are multiplied, some of the units cancel out, resulting in a quantity denominated in good done per extra person or dollar.<\/p><p>Other differences between Karnofsky's model and Cotton-Barratt's are the terminology (\"importance, tractability and uncrowdedness\" is replaced by \"scale, solvability and neglectedness\") and the use of 
<i>problems<\/i> rather than <i>causes<\/i> as the main unit of analysis.<\/p><p>More recently, in an article introducing the <a href=\"https://forum.effectivealtruism.org/topics/spc-framework\">SPC framework<\/a>, <a href=\"https://forum.effectivealtruism.org/topics/william-macaskill\">Will MacAskill<\/a>, Teruji Thomas and Aron Vallinder replace neglectedness with <i>leverage<\/i>, a factor that describes how the work already being done on a problem affects the cost-effectiveness of additional work. The resulting framework generalizes to problems with constant or increasing returns to additional work, whereas the ITN framework remains appropriate for problems with diminishing, especially logarithmic, returns.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefj7nsmnubyva\"><sup><a href=\"#fnj7nsmnubyva\">[4]<\/a><\/sup><\/span><span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefig270ghkvg\"><sup><a href=\"#fnig270ghkvg\">[5]<\/a><\/sup><\/span><\/p><h2>Further reading<\/h2><p>80,000 Hours (2016) <a href=\"https://80000hours.org/problem-profiles/\">Our current list of especially pressing world problems<\/a>, <i>80,000 Hours<\/i>, June.<br><i>A set of applications of the ITN framework.<\/i><\/p><p>Dickens, Michael (2016) <a href=\"http://mdickens.me/2016/06/10/evaluation_frameworks_(or-_when_scale-neglectedness-tractability_doesn%27t_apply)\">Evaluation frameworks (or: when importance / neglectedness / tractability doesn't apply)<\/a>, <i>Philosophical Multicore<\/i>, June 10.<br><i>A criticism of the ITN framework.<\/i><\/p><p>MacAskill, William, Teruji Thomas & Aron Vallinder (2022) <a href=\"https://drive.google.com/file/d/1Lapv64IYsvUnaYWDFmZBoDWe_x5_zkr7/view\">The significance, persistence, contingency framework<\/a>, <i>What We Owe the Future: Supplementary Materials<\/i>.<br><i>Section 4 discusses the ITN framework and how it relates to the SPC framework.<\/i><\/p><p>Wiblin, Robert (2016) <a 
href=\"https://80000hours.org/articles/problem-framework/\">One approach to comparing global problems in terms of expected impact<\/a>, <i>80,000 Hours<\/i>, April (updated October 2019).<br><i>80<\/i>... <\/p>"},"Revision:xsiR75hLgHBgtosDy_description":{"_id":"xsiR75hLgHBgtosDy_description","__typename":"Revision","htmlHighlight":"<p><strong>Global priorities research<\/strong> (<strong>GPR<\/strong>) is research into issues that can help decide how to allocate finite resources most cost-effectively.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefnyvbflivwv\"><sup><a href=\"#fnnyvbflivwv\">[1]<\/a><\/sup><\/span> GPR can include finding and <a href=\"https://forum.effectivealtruism.org/tag/cause-prioritization\">prioritising between different causes<\/a> as well as <a href=\"https://forum.effectivealtruism.org/tag/macrostrategy\">macrostrategy<\/a> or \"foundational\" research that would inform cause prioritization in a less direct way (e.g., research into the <a href=\"https://forum.effectivealtruism.org/tag/fermi-paradox\">Fermi paradox<\/a> or the <a href=\"https://forum.effectivealtruism.org/tag/hinge-of-history\">hinge of history hypothesis<\/a>).<\/p><h2>Evaluation<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/80-000-hours\">80,000 Hours<\/a> rates GPR a \"highest priority area\": a problem at the top of their ranking of global issues assessed by <a href=\"https://forum.effectivealtruism.org/tag/itn-framework-1\">importance, tractability and neglectedness<\/a>.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefbr35f2n6g3k\"><sup><a href=\"#fnbr35f2n6g3k\">[2]<\/a><\/sup><\/span><\/p><h2>Further reading<\/h2><p>Duda, Roman (2016) <a href=\"https://80000hours.org/problem-profiles/global-priorities-research/\">Global priorities research<\/a>, <i>80,000 Hours<\/i>, April (updated July 2018).<\/p><p>O’Keeffe-O’Donovan, Rossa (2020) <a 
href=\"https://forum.effectivealtruism.org/posts/MKWujo6nBvjwYAu82/rossa-o-keeffe-o-donovan-an-introduction-to-global\">An introduction to global priorities research<\/a>, <i>Effective Altruism Student Summit 2020<\/i>, October 25.<br><i>An introduction to global priorities research, including a discussion of how it differs from cause prioritization research.<\/i><\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/cause-candidates\">cause candidates<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/cause-prioritization\">cause prioritization<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/cause-x\">Cause X<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/global-priorities-institute\">Global Priorities Institute<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/less-discussed-causes\">less-discussed causes<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/local-priorities-research-1\">local priorities research<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/macrostrategy\">macrostrategy<\/a><\/p><ol class=\"footnotes\" role=\"doc-endnotes\"><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnnyvbflivwv\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnrefnyvbflivwv\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Global Priorities Institute (2019) <a href=\"https://globalprioritiesinstitute.org/about-us/\">About us<\/a>, <i>Global Priorities Institute<\/i>.<\/p><\/div><\/li><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnbr35f2n6g3k\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnrefbr35f2n6g3k\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>80,000 Hours (2021) <a href=\"https://80000hours.org/problem-profiles/\">Our current list of the most important world problems<\/a>, <i>80,000 
Hours<\/i>.<\/p><\/div><\/li><\/ol>"},"Revision:uxoyvActFbMwEcGqF_description":{"_id":"uxoyvActFbMwEcGqF_description","__typename":"Revision","htmlHighlight":"<p><strong>Low- and middle-income countries<\/strong> (<strong>LMICs<\/strong>) are all countries not considered to be high-income. Although there is no universally agreed-upon definition, the World Bank defines high-income countries as those with a gross national income per capita of $12,696 or more in 2020. Countries with a gross national income per capita below that line include upper-middle, lower-middle, and low-income countries, all of which are classified as LMICs.<\/p><h2>External links<\/h2><p><a href=\"https://data.worldbank.org/country/XO\">Low- and middle-income countries<\/a>. List of countries currently classified as low- and middle-income by the World Bank.<\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/topics/building-effective-altruism-1\">Building effective altruism<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/diversity-and-inclusion\">Diversity and inclusion<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/altruistic-coordination\">altruistic coordination<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/global-priorities-research\">global priorities research<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/cause-x\">Cause X<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/distribution-of-cost-effectiveness\">distribution of cost-effectiveness<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/impact-assessment\">impact assessment<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/intervention-evaluation\">intervention evaluation<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/cause-prioritization\">cause prioritization<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/scalably-using-labour\">scalably using labour<\/a> | <a 
href=\"https://forum.effectivealtruism.org/topics/markets-for-altruism\">markets for altruism<\/a> <\/p>"},"Tag:uxoyvActFbMwEcGqF":{"_id":"uxoyvActFbMwEcGqF","__typename":"Tag","isRead":false,"parentTag":null,"subTags":[],"description":{"__ref":"Revision:uxoyvActFbMwEcGqF_description"},"canVoteOnRels":null,"userId":"BkbwT5TzSj4aRxJMN","name":"Low- and middle-income countries","shortName":null,"slug":"low-and-middle-income-countries","core":false,"postCount":62,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-07-12T17:02:15.279Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Revision:3MaNTgoRt9iLHEcW3_description":{"_id":"3MaNTgoRt9iLHEcW3_description","__typename":"Revision","htmlHighlight":"<p><strong>Local priorities research<\/strong> (<strong>LPR<\/strong>) is research aimed at identifying high priority problems within a local context, such as the context of a particular country. <\/p><p>LPR is quite similar to global priorities research (GPR), except that it’s narrowed down to a certain country; the idea is to leverage local opportunities, not to narrow the moral circle. While GPR is about figuring out what are the most important <i>global <\/i>problems to work on, LPR is about figuring out what are the most important problems <i>in a local context<\/i> that can best maximize impact both locally and globally.<\/p><p>The key difference in this geographic difference is in career pathways. Currently, GPR has already narrowed to some very important cause areas for EAs to work on, and organisations like 80,000 Hours have identified potentially very impactful organisations and jobs in those cause areas. However, not all countries have such impactful organisations and jobs available to them. 
Hence, LPR exists to determine the most impactful cause areas, organisations, and jobs in a local context.<\/p><p>For example, even if it is not as impactful as working in a top AI lab in the US, China, or Europe, LPR has helped EA Singapore identify <a href=\"https://forum.effectivealtruism.org/posts/fLroJGMbszAjYBSdE/singapore-s-technical-ai-alignment-research-career-guide-1\"><u>technical AI alignment research<\/u><\/a> as one of the most impactful cause areas to work on in Singapore. We also found a few potentially impactful local organisations and jobs that people can apply to in this area.<\/p><p>Furthermore, LPR includes not just cause areas that can create impact within a local setting (e.g. research into the effects of a certain health intervention in a country may only be generalisable in that location), it also includes ucase areas that can create impact cross border (e.g. research into biological risks can help others in different countries). LPR is also the more applied kind (i.e. figuring out which specific cause areas we should prioritise in a certain country) and less the foundational or philosophical kind (e.g. figuring out the value of the far future).<\/p><p>Finally, LPR is a broad catch-all term for many specific research activities that one can do in your local context. Here is a non-exhaustive list of examples:<\/p><ul><li>Local cause area prioritisation<\/li><li>Local problem profile research<\/li><li>High impact local career pathway research<\/li><li>Risk assessment<\/li><li>Giving and philanthropy landscape research<\/li><li>Charity evaluation<\/li><li>Public policy research<\/li><li>Analysis of the convergence and divergence between EA and local<\/li><\/ul>... 
"},"SocialPreviewType:7SjtFYo6sCe3588Tx":{"_id":"7SjtFYo6sCe3588Tx","__typename":"SocialPreviewType","imageUrl":"https://lh6.googleusercontent.com/rDDFFhr3uNGL4lyOtsSE9tyTn65qFxK5xlzUB5fN5po2DIMznbOOnXisMkReWgFW4DqhAxZ0Sun9ToV7kebbYpKP-brtfeo1JVprvepS-m_fZ_wYG3mnTbTK2bmYegfhEc6h3mPo"},"Revision:ZkxGhRzN5NKcmWCWq_biography":{"_id":"ZkxGhRzN5NKcmWCWq_biography","__typename":"Revision","version":"1.7.0","updateType":"minor","editedAt":"2023-08-21T23:19:16.348Z","userId":"ZkxGhRzN5NKcmWCWq","html":"","commitMessage":"","wordCount":0,"htmlHighlight":"","plaintextDescription":""},"User:ee4HWBeSdeYsRCLG5":{"_id":"ee4HWBeSdeYsRCLG5","__typename":"User","slug":"erik-aunvag-matsen","createdAt":"2021-01-31T09:57:47.333Z","username":"Erik Aunvåg Matsen","displayName":"Erik Aunvåg Matsen","profileImageId":null,"previousDisplayName":null,"fullName":null,"karma":28,"afKarma":0,"deleted":false,"isAdmin":false,"htmlBio":"","jobTitle":null,"organization":null,"postCount":0,"commentCount":0,"sequenceCount":0,"afPostCount":0,"afCommentCount":0,"spamRiskScore":1,"tagRevisionCount":0,"reviewedByUserId":"2kBP4gThRsNXB3WWX","givingSeason2024DonatedFlair":false,"givingSeason2024VotedFlair":false},"Post:7SjtFYo6sCe3588Tx":{"_id":"7SjtFYo6sCe3588Tx","__typename":"Post","deletedDraft":false,"contents":{"__ref":"Revision:nR5dGGjQmGcnQxmGz"},"fmCrosspost":{"isCrosspost":false},"readTimeMinutes":36,"rejectedReason":null,"customHighlight":null,"lastPromotedComment":null,"bestAnswer":null,"tags":[{"__ref":"Tag:of9xBvR3wpbp6qsZC"},{"__ref":"Tag:EHLmbEmJ2Qd5WfwTb"},{"__ref":"Tag:ZCihBFp5P64JCvQY6"},{"__ref":"Tag:psBzwdY8ipfCeExJ7"},{"__ref":"Tag:jjNySc23WPumhShAp"},{"__ref":"Tag:eSWSZv3oF6sydzD9L"},{"__ref":"Tag:xsiR75hLgHBgtosDy"},{"__ref":"Tag:t2L2RziMDLEuHBWNF"},{"__ref":"Tag:d4bQXgZhDP43eJMwp"},{"__ref":"Tag:uxoyvActFbMwEcGqF"},{"__ref":"Tag:3MaNTgoRt9iLHEcW3"},{"__ref":"Tag:uDAGFwZLscHKfoubc"}],"socialPreviewData":{"__ref":"SocialPreviewType:7SjtFYo6sCe3588Tx"},"feedId":null,"totalDialogueRespon
seCount":0,"unreadDebateResponseCount":0,"dialogTooltipPreview":null,"disableSidenotes":false,"url":null,"postedAt":"2021-08-15T17:48:27.751Z","createdAt":null,"sticky":false,"metaSticky":false,"stickyPriority":2,"status":2,"frontpageDate":"2021-08-15T18:10:40.420Z","meta":false,"postCategory":"post","tagRelevance":{"3MaNTgoRt9iLHEcW3":3,"EHLmbEmJ2Qd5WfwTb":2,"ZCihBFp5P64JCvQY6":2,"d4bQXgZhDP43eJMwp":4,"eSWSZv3oF6sydzD9L":5,"jjNySc23WPumhShAp":5,"of9xBvR3wpbp6qsZC":8,"psBzwdY8ipfCeExJ7":1,"t2L2RziMDLEuHBWNF":4,"uDAGFwZLscHKfoubc":1,"uxoyvActFbMwEcGqF":4,"xsiR75hLgHBgtosDy":4},"shareWithUsers":["atWycpyDNsFN4ep9z","LFsPKPZ36ZTZQXHct","q6ZLMygEtBW5mhgGY","j7H8ri59wkf9zeGPg"],"sharingSettings":null,"linkSharingKey":null,"contents_latest":"nR5dGGjQmGcnQxmGz","commentCount":11,"voteCount":48,"baseScore":84,"extendedScore":{},"emojiReactors":{},"unlisted":false,"score":0.025490859523415565,"lastVisitedAt":null,"isFuture":false,"isRead":false,"lastCommentedAt":"2022-12-13T10:23:04.945Z","lastCommentPromotedAt":null,"canonicalCollectionSlug":null,"curatedDate":null,"commentsLocked":null,"commentsLockedToAccountsCreatedAfter":null,"debate":false,"question":false,"hiddenRelatedQuestion":false,"originalPostRelationSourceId":null,"userId":"ZkxGhRzN5NKcmWCWq","location":null,"googleLocation":null,"onlineEvent":false,"globalEvent":false,"startTime":null,"endTime":null,"localStartTime":null,"localEndTime":null,"eventRegistrationLink":null,"joinEventLink":null,"facebookLink":null,"meetupLink":null,"website":null,"contactInfo":null,"isEvent":false,"eventImageId":null,"eventType":null,"types":[],"groupId":null,"reviewedByUserId":"jd3Bs7YAT2KqnLxYD","suggestForCuratedUserIds":null,"suggestForCuratedUsernames":null,"reviewForCuratedUserId":null,"authorIsUnreviewed":false,"afDate":null,"suggestForAlignmentUserIds":null,"reviewForAlignmentUserId":null,"afBaseScore":0,"afExtendedScore":{},"afCommentCount":0,"afLastCommentedAt":"2021-08-15T14:22:06.875Z","afSticky":false,"hideAuthor":false
,"moderationStyle":null,"ignoreRateLimits":null,"submitToFrontpage":true,"shortform":false,"onlyVisibleToLoggedIn":false,"onlyVisibleToEstablishedAccounts":false,"reviewCount":0,"reviewVoteCount":0,"positiveReviewVoteCount":0,"manifoldReviewMarketId":null,"annualReviewMarketProbability":0,"annualReviewMarketIsResolved":false,"annualReviewMarketYear":0,"annualReviewMarketUrl":"0","group":null,"podcastEpisodeId":null,"forceAllowType3Audio":false,"nominationCount2019":0,"reviewCount2019":0,"votingSystem":"eaEmojis","disableRecommendation":false,"user":{"__ref":"User:ZkxGhRzN5NKcmWCWq"},"coauthors":[{"__ref":"User:LFsPKPZ36ZTZQXHct"},{"__ref":"User:q6ZLMygEtBW5mhgGY"},{"__ref":"User:atWycpyDNsFN4ep9z"},{"__ref":"User:j7H8ri59wkf9zeGPg"},{"__ref":"User:ee4HWBeSdeYsRCLG5"}],"slug":"why-scale-is-overrated-the-case-for-increasing-ea-policy","title":"Why scale is overrated: The case for increasing EA policy efforts in smaller countries","draft":null,"hideCommentKarma":false,"af":false,"currentUserReviewVote":null,"coauthorStatuses":[{"userId":"LFsPKPZ36ZTZQXHct","confirmed":true,"requested":false},{"userId":"q6ZLMygEtBW5mhgGY","confirmed":true,"requested":false},{"userId":"atWycpyDNsFN4ep9z","confirmed":true,"requested":false},{"userId":"j7H8ri59wkf9zeGPg","confirmed":true,"requested":false},{"userId":"ee4HWBeSdeYsRCLG5","confirmed":true,"requested":false}],"hasCoauthorPermission":true,"rejected":false,"collabEditorDialogue":false},"Revision:MB3LAbzgSqG4jNBcB":{"_id":"MB3LAbzgSqG4jNBcB","__typename":"Revision","htmlHighlight":"<h3><strong>Upcoming Virtual Events<\/strong><br> <\/h3><ul><li>3rd March - <a href=\"https://www.facebook.com/events/330306941744241/\"><u>Lead Exposure Elimination<\/u><\/a> - A global health career profile with Lucia Coulter, organised by EA Cambridge<\/li><li>9th March - <a href=\"https://www.crowdcast.io/e/tang/register\"><u>Audrey Tang on Taiwan’s digital democracy<\/u><\/a>, an event by the Centre for the Governance of AI<\/li><li>11th March - 
<a href=\"https://www.facebook.com/events/1995084430639045/\"><u>Tech EA London Social - Icebreakers<\/u><\/a><\/li><li>14th March - <a href=\"https://us02web.zoom.us/meeting/register/tZUkf-mvqTgtE9HaI2pNY7s4nN-lL22fDDB9\"><u>Giving What We Can Meetup<\/u><\/a><\/li><li>17-19th March - <a href=\"https://globalprioritiesinstitute.org/6th-oxford-workshop-on-global-priorities-research/\"><u>Oxford Workshop on Global Priorities Research<\/u><\/a> - applications close 28th February<\/li><li>20-21st March - <a href=\"https://www.eaglobal.org/events/reconnect/\"><u>EA Global: Reconnect<\/u><\/a> - Virtual conference<\/li><li>27th March - <a href=\"https://www.facebook.com/events/431166334727066\"><u>LSE Future of Humanity Summit<\/u><\/a><\/li><li>26-28th March - <a href=\"https://www.eaglobal.org/events/ea-fellowship-weekend/\"><u>EA Fellowship Weekend<\/u><\/a> - For people who are newer to EA<\/li><li>29-31st October - <a href=\"https://www.eaglobal.org/events/london2021/\"><u>EA Global: London 2021<\/u><\/a><\/li><\/ul><p> <\/p><h2><br><strong>Latest Research and Updates<\/strong><br> <\/h2><h3><strong>Meta<\/strong><\/h3><ul><li>The Giving What We Can <a href=\"https://www.givingwhatwecan.org/post/2021/02/welcome-to-our-978-new-pledges/\"><u>pledge campaign has brought in 978 new members with over 500 having taken the GWWC Pledge<\/u><\/a><\/li><li>Global Priorities Institute have released their <a href=\"https://globalprioritiesinstitute.org/global-priorities-institute-annual-report-2019-20/\"><u>annual report for 2019-20<\/u><\/a><\/li><li>There is an <a href=\"https://forum.effectivealtruism.org/posts/ZQPig66wteqwbNHGh/ea-focusmate-group-announcement\"><u>EA Focusmate group<\/u><\/a> for people who want to increase their productivity and also meet others interested in EA<\/li><li>Luke Freeman is looking for feedback on <a href=\"https://forum.effectivealtruism.org/posts/4RwTW5zBPaG3viGiA/would-an-interactive-table-help-you-to-make-donation\"><u>an interactive 
donation tracking table to help map the EA charity space<\/u><\/a><\/li><li>Let's Fund <a href=\"https://forum.effectivealtruism.org/posts/xhDxSPx8ez37BcTWY/let-s-fund-living-review-2020-update\"><u>2020 update<\/u><\/a><\/li><li>One for the World - <a href=\"https://forum.effectivealtruism.org/posts/8xoCoxhQyZSzjCEF4/one-for-the-world-january-2021-update\"><u>2021 update<\/u><\/a><\/li><li>80,000 Hours <a href=\"https://forum.effectivealtruism.org/posts/R7rLHPhdwPajQtyGv/80-000-hours-one-on-one-team-plans-plus-projects-we-d-like\"><u>one-on-one team plans, plus projects they’d like to see<\/u><\/a><\/li><li>Ben West with research into <a href=\"https://forum.effectivealtruism.org/posts/7xEcaFMBzCF9bqoAy/retention-in-ea-part-i-survey-data\"><u>retention within effective altruism<\/u><\/a><\/li><li>An <a href=\"https://forum.effectivealtruism.org/posts/59egqFgZBrfPqXWTr/ama-we-work-in-operations-at-ea-aligned-organizations-ask-us\"><u>ask me anything with people who work in operations at EA related organisations<\/u><\/a><\/li><li>Founders Pledge on <a href=\"https://founderspledge.com/stories/why-we-say-funding-opportunity-instead-of-charity\"><u>why they say ‘funding opportunity’ instead of ‘charity’<\/u><\/a><\/li><li>Haydn Belfield with <a href=\"https://forum.effectivealtruism.org/posts/ST8vFfPropD9AYqkX/alternatives-to-donor-lotteries\"><u>several alternatives to donor lotteries<\/u><\/a><\/li><\/ul><p> <\/p><h3> <\/h3><h3><strong>Grants<\/strong><\/h3><ul><li>Open Phil have made <a href=\"https://www.openphilanthropy.org/giving/grants\"><u>5 grants<\/u><\/a> recently with a total value of $2,461,000<ul><li>$1,306,000 - Farm animal welfare<\/li><li>$1,005,000 - Scientific research<\/li><li>$150,000 - Potential Risks from Advanced AI<\/li><\/ul><\/li><li><a href=\"https://forum.effectivealtruism.org/posts/NfkdSooNiHcdCBSJs/apply-to-ea-funds-now-1\"><u>The Animal Welfare Fund, the Long-Term Future Fund, and the EA Infrastructure Fund<\/u><\/a> are calling for 
applications, closing on the 7th of March<\/li><li><a href=\"https://animalcharityevaluators.org/for-charities/apply-for-funding/movement-grants-application/\"><u>Animal Charity Evaluator Movement Grants<\/u><\/a> are accepting applications until the 12th of March<\/li><\/ul><p> <\/p><h3> <\/h3><h3><strong>Global Development<\/strong><\/h3><ul><li>Max Roser on <a href=\"https://ourworldindata.org/poverty-growth-needed\"><u>why economic growth should be a key focus if we want global poverty to decline<\/u><\/a><\/li><li>GiveWell on <a href=\"https://www.givewell.org/charities/top-charities/2020/open-philanthropy-recommendation\"><u>where they recommend Open Philanthropy to donate $70,000,000 to<\/u><\/a> in 2020<\/li><li>An article looking at <a href=\"https://undark.org/2020/12/28/malaria-prevention-pushes-forward-in-africa-despite-pandemic/\"><u>how Covid-19 has impacted the fight against malaria<\/u><\/a><\/li><li><a href=\"https://podcasts.apple.com/us/podcast/id1172218725?i=1000506752012\"><u>Azeem Azhar in conversation with Regina Dugan<\/u><\/a>, CEO of new biomedical non-profit Wellcome Leap and former director of DARPA, discussing approaches to delivering breakthrough technologies in global health<\/li><li>K<\/li><\/ul>... 
","plaintextDescription":"Upcoming Virtual Events\n \n * 3rd March - Lead Exposure Elimination - A global health career profile with Lucia Coulter, organised by EA Cambridge\n * 9th March - Audrey Tang on Taiwan’s digital democracy, an event by the Centre for the Governance of AI\n * 11th March - Tech EA London Social - Icebreakers\n * 14th March - Giving What We Can Meetup\n * 17-19th March - Oxford Workshop on Global Priorities Research - applications close 28th February\n * 20-21st March - EA Global: Reconnect - Virtual conference\n * 27th March - LSE Future of Humanity Summit\n * 26-28th March - EA Fellowship Weekend - For people who are newer to EA\n * 29-31st October - EA Global: London 2021\n\n \n\n\n\nLatest Research and Updates\n \n\n\nMeta\n * The Giving What We Can pledge campaign has brought in 978 new members with over 500 having taken the GWWC Pledge\n * Global Priorities Institute have released their annual report for 2019-20\n * There is an EA Focusmate group for people who want to increase their productivity and also meet others interested in EA\n * Luke Freeman is looking for feedback on an interactive donation tracking table to help map the EA charity space\n * Let's Fund 2020 update\n * One for the World - 2021 update\n * 80,000 Hours one-on-one team plans, plus projects they’d like to see\n * Ben West with research into retention within effective altruism\n * An ask me anything with people who work in operations at EA related organisations\n * Founders Pledge on why they say ‘funding opportunity’ instead of ‘charity’\n * Haydn Belfield with several alternatives to donor lotteries\n\n \n\n\n \n\n\nGrants\n * Open Phil have made 5 grants recently with a total value of $2,461,000\n * $1,306,000 - Farm animal welfare\n * $1,005,000 - Scientific research\n * $150,000 - Potential Risks from Advanced AI\n * The Animal Welfare Fund, the Long-Term Future Fund, and the EA Infrastructure Fund are calling for applications, closing on the 7th of March\n * 
Animal Charity Evaluator Movement Grants are accepting applications","wordCount":1123,"version":"1.0.0"},"Revision:hvSAuXockimYDiAx2_description":{"_id":"hvSAuXockimYDiAx2_description","__typename":"Revision","htmlHighlight":"<p><strong>Monthly Overload of Effective Altruism<\/strong> (previously <strong>EA Updates<\/strong>) is a monthly <a href=\"https://forum.effectivealtruism.org/topics/newsletters\">newsletter<\/a> about research and events in the <a href=\"https://forum.effectivealtruism.org/topics/effective-altruism\">effective altruism<\/a> community, published by David Nash from <a href=\"https://forum.effectivealtruism.org/tag/effective-altruism-london\">Effective Altruism London<\/a>.<\/p><h2>External links<\/h2><p><a href=\"https://moea.substack.com\">Monthly Overload of Effective Altruism<\/a>. Substack website.<\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/ea-organization-updates-monthly-series\">EA Organization Updates<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/effective-altruism-london\">Effective Altruism London<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/effective-altruism-newsletter\">Effective Altruism Newsletter<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/newsletters\">newsletters<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/organization-updates\">organization updates<\/a><\/p>"},"Tag:hvSAuXockimYDiAx2":{"_id":"hvSAuXockimYDiAx2","__typename":"Tag","isRead":false,"parentTag":null,"subTags":[],"description":{"__ref":"Revision:hvSAuXockimYDiAx2_description"},"canVoteOnRels":null,"userId":"BkbwT5TzSj4aRxJMN","name":"Monthly Overload of Effective 
Altruism","shortName":null,"slug":"monthly-overload-of-effective-altruism","core":false,"postCount":49,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2021-11-29T14:27:42.460Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Revision:4FDyuFrgeFEtvnhqh_description":{"_id":"4FDyuFrgeFEtvnhqh_description","__typename":"Revision","htmlHighlight":"<p>The <strong>Centre for the Governance of AI<\/strong> (<strong>GovAI<\/strong>) is an <a href=\"https://forum.effectivealtruism.org/topics/ai-governance\">AI governance<\/a> research center.<\/p><h2>History<\/h2><p>GovAI was founded in 2018, as part of the <a href=\"https://forum.effectivealtruism.org/tag/future-of-humanity-institute\">Future of Humanity Institute<\/a>. In June 2021, it became an independent organization.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefq391t0l6ufm\"><sup><a href=\"#fnq391t0l6ufm\">[1]<\/a><\/sup><\/span><\/p><h2>Funding<\/h2><p>As of July 2022, GovAI has received nearly $3 million in funding from <a href=\"https://forum.effectivealtruism.org/tag/open-philanthropy\">Open Philanthropy<\/a><span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnreft14nlg7nqw8\"><sup><a href=\"#fnt14nlg7nqw8\">[2]<\/a><\/sup><\/span>, over $600,000 from the <a href=\"https://forum.effectivealtruism.org/topics/survival-and-flourishing\">Survival and Flourishing<\/a> Fund, <span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefp8o46htwmss\"><sup><a href=\"#fnp8o46htwmss\">[3]<\/a><\/sup><\/span><span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefnzrd0bloxt\"><sup><a href=\"#fnnzrd0bloxt\">[4]<\/a><\/sup><\/span> and over $170,000 from <a href=\"https://forum.effectivealtruism.org/topics/effective-altruism-funds\">Effective Altruism Funds<\/a>.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnref1816esk6hik\"><sup><a 
href=\"#fn1816esk6hik\">[5]<\/a><\/sup><\/span><\/p><h2>Further reading<\/h2><p>Dafoe, Allan (2018) <a href=\"https://www.fhi.ox.ac.uk/wp-content/uploads/GovAI-Agenda.pdf\">AI governance: a research agenda<\/a>, Centre for the Governance of AI, Future of Humanity Institute, University of Oxford.<\/p><p>Monrad, Joshua & Mojmír Stehlík (2019) <a href=\"https://thepolitic.org/an-interview-with-ben-garfinkel-governance-of-ai-program-researcher/\">An Interview with Ben Garfinkel, Governance of AI Program Researcher<\/a>, <i>The Politic<\/i>, June 20.<\/p><p>Perry, Lucas (2019) <a href=\"https://futureoflife.org/2019/07/22/on-the-governance-of-ai-with-jade-leung/\">AI Alignment Podcast: On the governance of AI with Jade Leung<\/a>, <i>Future of Life Institute<\/i>, July 22.<\/p><p>Wiblin, Robert & Keiran Harris (2018) <a href=\"https://80000hours.org/podcast/episodes/allan-dafoe-politics-of-ai/\">Prof Allan Dafoe on trying to prepare the world for the possibility that ai will destabilise global politics<\/a>, <i>80,000 Hours<\/i>, May 18.<\/p><h2>External links<\/h2><p><a href=\"https://governance.ai/\">Centre for the Governance of AI<\/a>. 
Official website.<\/p><p><a href=\"https://www.governance.ai/opportunities/open-positions\">Apply for a job<\/a>.<\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/topics/ai-governance\">AI governance<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/ai-safety\">AI safety<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/artificial-intelligence\">artificial intelligence<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/windfall-clause\">Windfall Clause<\/a><\/p><ol class=\"footnotes\" role=\"doc-endnotes\"><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnq391t0l6ufm\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnrefq391t0l6ufm\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Centre for the Governance of AI (2021) <a href=\"https://governance.ai\">Centre for the Governance of AI<\/a>, <i>Centre for the Governance of AI<\/i>, June.<\/p><\/div><\/li><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnt14nlg7nqw8\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnreft14nlg7nqw8\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Open Philanthropy (2022) <a href=\"https://www.openphilanthropy.org/grants/?q=&organization-name=centre-for-the-governance-of-ai\">Grants database: Centre for the Governance of AI<\/a>, <i>Open Philanthropy.<\/i><\/p><\/div><\/li><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnp8o46htwmss\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnrefp8o46htwmss\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Survival and Flourishing Fund (2020a) <a href=\"https://survivalandflourishing.fund/sff-2021-h1-recommendations\">SFF-2021-H1 S-process recommendations announcement<\/a>, <i>Survival and Flourishing Fund<\/i>. 
<\/p><\/div><\/li><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnnzrd0bloxt\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnrefnzrd0bloxt\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Survival and Flourishing Fund (2020b) <a href=\"https://survivalandflourishing.fund/sff-2021-h2-recommendations\">SFF-2021-H2 S-process recommendations announcement<\/a>, <i>Survival and Flourishing Fund<\/i>.<\/p><\/div><\/li><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fn1816esk6hik\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnref1816esk6hik\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Long-Term Future Fund (2021) <a href=\"https://funds.effectivealtruism.org/funds/payouts/july-2021-long-term-future-fund-grants\">July 2021: Long-Term Future Fund grants<\/a>, <i>Effective Altruism Funds<\/i>, July.<\/p><\/div><\/li><\/ol>"},"Tag:4FDyuFrgeFEtvnhqh":{"_id":"4FDyuFrgeFEtvnhqh","__typename":"Tag","isRead":false,"parentTag":null,"subTags":[],"description":{"__ref":"Revision:4FDyuFrgeFEtvnhqh_description"},"canVoteOnRels":null,"userId":"BkbwT5TzSj4aRxJMN","name":"Centre for the Governance of AI","shortName":null,"slug":"centre-for-the-governance-of-ai","core":false,"postCount":20,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-12-22T15:23:38.910Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Revision:nGqPZkKJuiKfAqDcs_description":{"_id":"nGqPZkKJuiKfAqDcs_description","__typename":"Revision","htmlHighlight":"<p>Posts with the <strong>effective altruism funding<\/strong> tag discuss EA funding networks, the availability of funds for different cause areas, grantmaking strategy, etc.<\/p>\n<p>Not meant for posts that discuss specific funding decisions (for that, use the \"<a href=\"https://forum.effectivealtruism.org/tag/donation-writeup\">Donation Writeup<\/a>\" tag).<\/p>\n<p>Examples 
of EA funders includes: <a href=\"https://forum.effectivealtruism.org/tag/effective-altruism-funds\">EA Funds<\/a>, <a href=\"https://forum.effectivealtruism.org/tag/open-philanthropy\">Open Philanthropy Project<\/a>, <a href=\"https://forum.effectivealtruism.org/tag/survival-and-flourishing-fund\">Survival & Flourishing Fund<\/a>, <a href=\"https://forum.effectivealtruism.org/tag/founders-pledge\">Founders Pledge Funds<\/a><\/p>\n<h2>Related entries<\/h2>\n<p><a href=\"https://forum.effectivealtruism.org/tag/certificate-of-impact\">certificate of impact<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/diminishing-returns\">diminishing returns<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/grantmaking\">grantmaking<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/philanthropic-coordination\">philanthropic coordination<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/philanthropic-diversification\">philanthropic diversification<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/room-for-more-funding\">room for more funding<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/funding-opportunities\">funding opportunities<\/a><\/p>"},"Revision:EdwSzDq9bTH9yqQX9_description":{"_id":"EdwSzDq9bTH9yqQX9_description","__typename":"Revision","htmlHighlight":"<p><strong>Effective Altruism UK<\/strong> is an EA meta organisation based in London working on <a href=\"https://forum.effectivealtruism.org/topics/building-effective-altruism\">building effective altruism<\/a> in the UK. <\/p><h2>History<\/h2><p>The organisations was initially launched as EA London in March 2012 by <a href=\"https://forum.effectivealtruism.org/users/weeatquince_duplicate0-37104097316182916?mention=user\">@weeatquince<\/a>. The group evolved from casual monthly meetups to more structured activities including cause/career groups and 1-1 support. 
<span class=\"footnote-reference\" data-footnote-reference=\"\" data-footnote-index=\"1\" data-footnote-id=\"wnsk0illjum\" role=\"doc-noteref\" id=\"fnrefwnsk0illjum\"><sup><a href=\"#fnwnsk0illjum\">[1]<\/a><\/sup><\/span><\/p><p>In 2016, <a href=\"https://forum.effectivealtruism.org/users/weeatquince_duplicate0-37104097316182916?mention=user\">@weeatquince<\/a> took a year out to work as a full-time community organiser and to set up EA London as a registered charity with the goal of inspiring many people to give more effectively and to choose higher impact careers. <\/p><p>In 2017, enough money was raised to fund Holly Morgan and <a href=\"https://forum.effectivealtruism.org/users/davidnash?mention=user\">@DavidNash<\/a> working part time as Sam went back into the civil service.<\/p><p>In 2019, <a href=\"https://forum.effectivealtruism.org/users/davidnash?mention=user\">@DavidNash<\/a> began working on community building full time funded by a grant from the <a href=\"https://forum.effectivealtruism.org/users/centre-for-effective-altruism?from=search_autocomplete\">Centre for Effective Altruism<\/a> .<\/p><p>In 2022, Lynn Tan as co-director and EA London rebranded to Effective Altruism UK.<span class=\"footnote-reference\" data-footnote-reference=\"\" data-footnote-index=\"2\" data-footnote-id=\"d11km5pyxym\" role=\"doc-noteref\" id=\"fnrefd11km5pyxym\"><sup><a href=\"#fnd11km5pyxym\">[2]<\/a><\/sup><\/span><\/p><h2>Further reading<\/h2><p>Effective Altruism London (2019) <a href=\"https://docs.google.com/document/d/1OlqrxnpHSEhstuexv4Se9MWDbiYVq-lk41zw4-vLbMw/edit?usp=sharing&usp=embed_facebook\">Effective Altruism London strategy<\/a>, <i>Effective Altruism London<\/i>.<\/p><p>Nash, David (2019) <a href=\"https://forum.effectivealtruism.org/posts/P4yXkPTkBgQCcSxD3/effective-altruism-london-landscape\">Effective Altruism London landscape<\/a>, <i>Effective Altruism Forum<\/i>, May 17 (updated 2021).<\/p><h2>External links<\/h2><p><a 
href=\"https://www.ealondon.com/\">Effective Altruism London<\/a>. Official website.<\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/effective-altruism-groups\">effective altruism groups<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/monthly-overload-of-effective-altruism\">Monthly Overload of Effective Altruism<\/a><\/p><ol class=\"footnote-section footnotes\" data-footnote-section=\"\" role=\"doc-endnotes\"><li class=\"footnote-item\" data-footnote-item=\"\" data-footnote-index=\"1\" data-footnote-id=\"wnsk0illjum\" role=\"doc-endnote\" id=\"fnwnsk0illjum\"><span class=\"footnote-back-link\" data-footnote-back-link=\"\" data-footnote-id=\"wnsk0illjum\"><sup><strong><a href=\"#fnrefwnsk0illjum\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\" data-footnote-content=\"\"><p><a href=\"https://www.effectivealtruism.uk/about/#ealondon\">About | Effective Altruism UK — Effective Altruism UK<\/a><\/p><\/div><\/li><li class=\"footnote-item\" data-footnote-item=\"\" data-footnote-index=\"2\" data-footnote-id=\"d11km5pyxym\" role=\"doc-endnote\" id=\"fnd11km5pyxym\"><span class=\"footnote-back-link\" data-footnote-back-link=\"\" data-footnote-id=\"d11km5pyxym\"><sup><strong><a href=\"#fnrefd11km5pyxym\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\" data-footnote-content=\"\"><p>https://forum.effectivealtruism.org/posts/dDTdviDpm8dAFssqe/ea-london-rebranding-to-ea-uk<\/p><\/div><\/li><\/ol>"},"Tag:EdwSzDq9bTH9yqQX9":{"_id":"EdwSzDq9bTH9yqQX9","__typename":"Tag","isRead":false,"parentTag":null,"subTags":[],"description":{"__ref":"Revision:EdwSzDq9bTH9yqQX9_description"},"canVoteOnRels":null,"userId":"LMgZyi4w3XoYz3tM5","name":"Effective Altruism UK (formerly EA 
London)","shortName":null,"slug":"effective-altruism-uk-formerly-ea-london","core":false,"postCount":41,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-03T15:05:43.569Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Revision:gxgimN3k2z5BLmgDa_description":{"_id":"gxgimN3k2z5BLmgDa_description","__typename":"Revision","htmlHighlight":"<p><strong>Giving What We Can<\/strong> (<strong>GWWC<\/strong>) is an organisation dedicated to inspiring and supporting people to give more, and give more effectively. It is a project of the <a href=\"https://forum.effectivealtruism.org/tag/centre-for-effective-altruism-cea\">Centre for Effective Altruism<\/a>.<\/p><p>Giving What We Can promotes three <a href=\"https://forum.effectivealtruism.org/tag/donation-pledge\">donation pledges<\/a>, the most prominent of which is a commitment to donate at least 10% of one's income each year in the way one thinks will achieve the most good. 
As of June 2022, this pledge has been signed by more than 7,500 people.<\/p><p>Giving What We Can also aims to support its members, to help them connect with and support each other, and to <a href=\"https://forum.effectivealtruism.org/tag/building-effective-altruism-1\">build effective altruism<\/a>, <a href=\"https://forum.effectivealtruism.org/tag/effective-giving-1\">effective giving<\/a>, and various related cause areas.<\/p><h2>History<\/h2><p>Giving What We Can was conceived of by <a href=\"https://forum.effectivealtruism.org/tag/toby-ord\">Toby Ord<\/a> (who was inspired by the ideas of ethicists such as <a href=\"https://forum.effectivealtruism.org/tag/peter-singer\">Peter Singer<\/a>) to commit to donating a large proportion of his income to effective charities.<\/p><p>In November 2009, Ord and <a href=\"https://forum.effectivealtruism.org/tag/william-macaskill\">Will MacAskill<\/a> launched Giving What We Can as an international community of people who were committed to giving more, and giving more effectively. 
The Centre for Effective Altruism was incorporated in 2011 as registered charity and an umbrella organisation for Giving What We Can and the then newly founded <a href=\"https://forum.effectivealtruism.org/tag/80-000-hours\">80,000 Hours<\/a>.<\/p><p>Giving What We Can was one of the first in a growing network of like-minded organisations focused on <a href=\"https://forum.effectivealtruism.org/tag/effective-altruism\">effective altruism<\/a>, the project of using evidence and reason to figure out how to best contribute to helping others, and taking action on that basis.<\/p><h2>Members<\/h2><p>Notable Giving What We Can members include <a href=\"https://forum.effectivealtruism.org/tag/nick-beckstead\">Nick Beckstead<\/a>, <a href=\"https://forum.effectivealtruism.org/tag/liv-boeree\">Liv Boeree<\/a>, Rachel Glennerster, Jim Greenbaum, <a href=\"https://forum.effectivealtruism.org/tag/michael-kremer\">Michael Kremer<\/a>, <a href=\"https://forum.effectivealtruism.org/tag/william-macaskill\">William MacAskill<\/a>, <a href=\"https://forum.effectivealtruism.org/tag/dylan-matthews\">Dylan Matthews<\/a>, <a href=\"https://forum.effectivealtruism.org/tag/derek-parfit\">Derek Parfit<\/a>, <a href=\"https://forum.effectivealtruism.org/tag/kelsey-piper\">Kelsey Piper<\/a>, Janet Radcliffe Richards, <a href=\"https://forum.effectivealtruism.org/tag/peter-singer\">Peter Singer<\/a>, and Eva Vivalt.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefjhcsnfxxmx\"><sup><a href=\"#fnjhcsnfxxmx\">[1]<\/a><\/sup><\/span><\/p><h2>Funding<\/h2><p>As of July 2022, Giving What We Can has received $700,000 in funding from the <a href=\"https://forum.effectivealtruism.org/topics/future-fund\">Future Fund<\/a>.<span class=\"footnote-reference\" role=\"doc-noteref\" id=\"fnrefaqwsnsj4f8e\"><sup><a href=\"#fnaqwsnsj4f8e\">[2]<\/a><\/sup><\/span><\/p><h2>Further reading<\/h2><p>Righetti, Luca & Fin Moorhouse (2020) <a href=\"https://hearthisidea.com/episodes/luke\">Luke 
Freeman on Giving What We Can and community building<\/a>, <i>Hear This Idea<\/i>, November 29.<\/p><h2>External links<\/h2><p><a href=\"https://www.givingwhatwecan.org/\">Giving What We Can<\/a>. Official website.<\/p><p><a href=\"https://www.givingwhatwecan.org/get-involved/careers\">Apply for a job<\/a>.<\/p><p><a href=\"https://www.givingwhatwecan.org/pledge/\">Take the Giving What We Can pledge<\/a>.<\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/centre-for-effective-altruism-1\">Centre for Effective Altruism<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/donation-pledge\">donation pledge<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/effective-giving-1\">effective giving<\/a> | <a href=\"https://forum.effectivealtruism.org/topics/giving-what-we-can-newsletter?sortedBy=new\">Giving What We Can Newsletter<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/toby-ord\">Toby Ord<\/a><\/p><ol class=\"footnotes\" role=\"doc-endnotes\"><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnjhcsnfxxmx\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnrefjhcsnfxxmx\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Giving What We Can (2022) <a href=\"https://www.givingwhatwecan.org/about-us/members/\">Our members<\/a>, <i>Giving What We Can<\/i>.<\/p><\/div><\/li><li class=\"footnote-item\" role=\"doc-endnote\" id=\"fnaqwsnsj4f8e\"><span class=\"footnote-back-link\"><sup><strong><a href=\"#fnrefaqwsnsj4f8e\">^<\/a><\/strong><\/sup><\/span><div class=\"footnote-content\"><p>Future Fund (2022) <a href=\"https://ftxfuturefund.org/all-grants/?_organization_name=giving-what-we-can\">Our grants and investments: Giving What We Can<\/a>, <i>Future 
Fund<\/i>.<\/p><\/div><\/li><\/ol>"},"Tag:gxgimN3k2z5BLmgDa":{"_id":"gxgimN3k2z5BLmgDa","__typename":"Tag","isRead":false,"parentTag":null,"subTags":[],"description":{"__ref":"Revision:gxgimN3k2z5BLmgDa_description"},"canVoteOnRels":null,"userId":"m6eoK6qNpZjsLQPWN","name":"Giving What We Can","shortName":null,"slug":"giving-what-we-can","core":false,"postCount":250,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2020-12-08T22:39:43.535Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Tag:bY6qtCihTzzSCw9gc":{"_id":"bY6qtCihTzzSCw9gc","__typename":"Tag","userId":"D5tAFjN5axTcp9mGL","name":"Announcements and updates","shortName":null,"slug":"announcements-and-updates","core":false,"postCount":901,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2023-07-31T14:11:48.645Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"Revision:bBS5GQJgFcobNBLki_description":{"_id":"bBS5GQJgFcobNBLki_description","__typename":"Revision","htmlHighlight":"<p>Use the <strong>organization updates<\/strong> topic for posts where EA-aligned organizations share their recent progress and other news.<\/p><p>Not to be confused with <a href=\"https://forum.effectivealtruism.org/topics/ea-organization-updates-monthly-series\">EA Organization Updates<\/a>, a monthly post series containing updates about organizations within the <a href=\"https://forum.effectivealtruism.org/topics/effective-altruism\">effective altruism<\/a> community.<\/p><h1>List of EA-related organizations<\/h1><p><br>The definition of 'EA-related' and the original list come from Jamie Gittins.<span class=\"footnote-reference\" data-footnote-reference=\"\" data-footnote-index=\"1\" data-footnote-id=\"nz0fwx5dh8h\" role=\"doc-noteref\" id=\"fnrefnz0fwx5dh8h\"><sup><a 
href=\"#fnnz0fwx5dh8h\">[1]<\/a><\/sup><\/span><\/p><h2>Infrastructure<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/80-000-hours\"><strong>80,000 Hours<\/strong><\/a> — Does research into how people can have greater impact with their careers. Also maintains a high impact <a href=\"https://jobs.80000hours.org/\">jobs board<\/a> and produces a <a href=\"https://80000hours.org/podcast/\">podcast<\/a>.<\/p><p><a href=\"https://forum.effectivealtruism.org/tag/animal-advocacy-careers\"><strong>Animal Advocacy Careers<\/strong><\/a> — Seeks to address the career and talent bottlenecks in the animal advocacy movement, especially the farmed animal movement, by providing career services and advice. Incubated by <a href=\"https://forum.effectivealtruism.org/topics/charity-entrepreneurship\">Charity Entrepreneurship<\/a>.<\/p><p><a href=\"https://forum.effectivealtruism.org/tag/animal-charity-evaluators\"><strong>Animal Charity Evaluators<\/strong><\/a> — Evaluates and recommends the most effective animal charities.<\/p><p><a href=\"https://forum.effectivealtruism.org/tag/ayuda-efectiva\"><strong>Ayuda Efectiva<\/strong><\/a> — Promotes <a href=\"https://forum.effectivealtruism.org/topics/effective-giving\">effective giving<\/a> in Spain. Their Global Health Fund routes donations to a selection of <a href=\"https://forum.effectivealtruism.org/topics/givewell\">GiveWell<\/a>'s recommended charities, providing tax deductibility for Spanish donors. 
They plan to launch similar funds for other cause areas in the near future.<\/p><p><a href=\"https://forum.effectivealtruism.org/tag/centre-for-effective-altruism-1\"><strong>Centre for Effective Altruism<\/strong><\/a> — Helps to grow and support the EA community.<\/p><p><a href=\"https://forum.effectivealtruism.org/tag/charity-entrepreneurship\"><strong>Charity Entrepreneurship<\/strong><\/a> — Does research into the most effective interventions and incubates charities to implement these interventions.<\/p><p><a href=\"https://forum.effectivealtruism.org/tag/doebem\"><strong>Doebem<\/strong><\/a> — A Brazilian-based donation platform which recommends effective charities according to EA principles.<\/p><p><a href=\"https://forum.effectivealtruism.org/tag/donational\"><strong>Donational<\/strong><\/a> — A donation platform which recommends effective charities to users, and helps them to pledge and allocate a proportion of their income to those charities.<\/p><p><a href=\"https://forum.effectivealtruism.org/tag/effective-altruism-foundation\"><strong>Effective Altruism Foundation<\/strong><\/a> — Implements projects aimed at doing the most good in terms of reducing suffering. Once initiated, projects are carried forward by EAF with differing degrees of independence and in some cases become autonomous organisations. 
Projects have included <a href=\"https://forum.effectivealtruism.org/topics/raising-for-effective-giving\">Raising for Effective Giving<\/a> (REG) and the <a href=\"https://forum.effectivealtruism.org/topics/center-on-long-term-risk\">Center on Long-Term Risk<\/a> (CLR).<\/p><p><a href=\"https://forum.effectivealtruism.org/tag/effective-giving-2\"><strong>Effective Giving<\/strong><\/a> — Helps major donors to explore the best giving strategies backed by evidence and research in order to maximize the impact of their donations.<\/p><p><a href=\"https://forum.effectivealtruism.org/tag/effektiv-spenden-org\"><strong>Effektiv-Spenden.org<\/strong><\/a> — The name roughly translates to 'effective giving' (verbatim, 'to donate effectively') in German. The organisation is a dedicated donation regranting platform for Germany that was founded in coordination with the Effective Altruism Foundation. Its... <\/p>"},"Tag:bBS5GQJgFcobNBLki":{"_id":"bBS5GQJgFcobNBLki","__typename":"Tag","isRead":false,"parentTag":{"__ref":"Tag:bY6qtCihTzzSCw9gc"},"subTags":[],"description":{"__ref":"Revision:bBS5GQJgFcobNBLki_description"},"canVoteOnRels":null,"userId":"jd3Bs7YAT2KqnLxYD","name":"Organization updates","shortName":null,"slug":"organization-updates","core":false,"postCount":911,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":null,"descriptionTruncationCount":null,"createdAt":"2020-07-27T20:35:28.784Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"SocialPreviewType:yEKQQQoN2W3Jn2Mue":{"_id":"yEKQQQoN2W3Jn2Mue","__typename":"SocialPreviewType","imageUrl":""},"Revision:xn3DTnCvwddKTX9Sm_biography":{"_id":"xn3DTnCvwddKTX9Sm_biography","__typename":"Revision","version":"1.4.0","updateType":"minor","editedAt":"2023-12-08T11:02:04.805Z","userId":"xn3DTnCvwddKTX9Sm","html":"<p>Community Organiser for <a href=\"https://www.effectivealtruism.uk\">EA UK<\/a><\/p><p>Organiser for <a href=\"https://www.eafinance.org/\">EA 
Finance<\/a><\/p><p><a href=\"https://moea.substack.com/\">Newsletter - Monthly Overload of EA<\/a><\/p><p><a href=\"https://gdea.substack.com/\">Newsletter - Global Development & EA<\/a><\/p>","commitMessage":null,"wordCount":21,"htmlHighlight":"<p>Community Organiser for <a href=\"https://www.effectivealtruism.uk\">EA UK<\/a><\/p><p>Organiser for <a href=\"https://www.eafinance.org/\">EA Finance<\/a><\/p><p><a href=\"https://moea.substack.com/\">Newsletter - Monthly Overload of EA<\/a><\/p><p><a href=\"https://gdea.substack.com/\">Newsletter - Global Development & EA<\/a><\/p>","plaintextDescription":"Community Organiser for EA UK\n\nOrganiser for EA Finance\n\nNewsletter - Monthly Overload of EA\n\nNewsletter - Global Development & EA"},"User:xn3DTnCvwddKTX9Sm":{"_id":"xn3DTnCvwddKTX9Sm","__typename":"User","biography":{"__ref":"Revision:xn3DTnCvwddKTX9Sm_biography"},"profileImageId":null,"moderationStyle":null,"bannedUserIds":null,"moderatorAssistance":null,"slug":"davidnash","createdAt":"2014-12-19T12:45:07.177Z","username":"DavidNash","displayName":"DavidNash","previousDisplayName":null,"fullName":null,"karma":4566,"afKarma":0,"deleted":false,"isAdmin":false,"htmlBio":"<p>Community Organiser for <a href=\"https://www.effectivealtruism.uk\">EA UK<\/a><\/p><p>Organiser for <a href=\"https://www.eafinance.org/\">EA Finance<\/a><\/p><p><a href=\"https://moea.substack.com/\">Newsletter - Monthly Overload of EA<\/a><\/p><p><a href=\"https://gdea.substack.com/\">Newsletter - Global Development & EA<\/a><\/p>","jobTitle":"Co-Director","organization":"Effective Altruism 
UK","postCount":75,"commentCount":304,"sequenceCount":0,"afPostCount":0,"afCommentCount":0,"spamRiskScore":1,"tagRevisionCount":1,"reviewedByUserId":"PSBFYGLmnNYkxe7Lx","givingSeason2024DonatedFlair":false,"givingSeason2024VotedFlair":false},"Post:yEKQQQoN2W3Jn2Mue":{"_id":"yEKQQQoN2W3Jn2Mue","__typename":"Post","deletedDraft":false,"contents":{"__ref":"Revision:MB3LAbzgSqG4jNBcB"},"fmCrosspost":{"isCrosspost":false},"readTimeMinutes":4,"rejectedReason":null,"customHighlight":null,"lastPromotedComment":null,"bestAnswer":null,"tags":[{"__ref":"Tag:EHLmbEmJ2Qd5WfwTb"},{"__ref":"Tag:ZCihBFp5P64JCvQY6"},{"__ref":"Tag:hvSAuXockimYDiAx2"},{"__ref":"Tag:4FDyuFrgeFEtvnhqh"},{"__ref":"Tag:nGqPZkKJuiKfAqDcs"},{"__ref":"Tag:EdwSzDq9bTH9yqQX9"},{"__ref":"Tag:gxgimN3k2z5BLmgDa"},{"__ref":"Tag:bBS5GQJgFcobNBLki"}],"socialPreviewData":{"__ref":"SocialPreviewType:yEKQQQoN2W3Jn2Mue"},"feedId":null,"totalDialogueResponseCount":0,"unreadDebateResponseCount":0,"dialogTooltipPreview":null,"disableSidenotes":false,"url":null,"postedAt":"2021-02-26T11:35:31.739Z","createdAt":null,"sticky":false,"metaSticky":false,"stickyPriority":2,"status":2,"frontpageDate":"2021-02-26T15:36:39.667Z","meta":false,"postCategory":"post","tagRelevance":{"4FDyuFrgeFEtvnhqh":1,"EHLmbEmJ2Qd5WfwTb":1,"EdwSzDq9bTH9yqQX9":1,"ZCihBFp5P64JCvQY6":1,"bBS5GQJgFcobNBLki":1,"gxgimN3k2z5BLmgDa":1,"hvSAuXockimYDiAx2":2,"nGqPZkKJuiKfAqDcs":1},"shareWithUsers":[],"sharingSettings":null,"linkSharingKey":null,"contents_latest":"MB3LAbzgSqG4jNBcB","commentCount":1,"voteCount":14,"baseScore":37,"extendedScore":null,"emojiReactors":{},"unlisted":false,"score":0.0115133011713624,"lastVisitedAt":null,"isFuture":false,"isRead":false,"lastCommentedAt":"2021-02-26T19:06:00.941Z","lastCommentPromotedAt":null,"canonicalCollectionSlug":null,"curatedDate":null,"commentsLocked":null,"commentsLockedToAccountsCreatedAfter":null,"debate":false,"question":false,"hiddenRelatedQuestion":false,"originalPostRelationSourceId":null,"userId":"xn3DTn
CvwddKTX9Sm","location":null,"googleLocation":null,"onlineEvent":false,"globalEvent":false,"startTime":null,"endTime":null,"localStartTime":null,"localEndTime":null,"eventRegistrationLink":null,"joinEventLink":null,"facebookLink":null,"meetupLink":null,"website":null,"contactInfo":null,"isEvent":false,"eventImageId":null,"eventType":null,"types":[],"groupId":null,"reviewedByUserId":"2kBP4gThRsNXB3WWX","suggestForCuratedUserIds":null,"suggestForCuratedUsernames":null,"reviewForCuratedUserId":null,"authorIsUnreviewed":false,"afDate":null,"suggestForAlignmentUserIds":null,"reviewForAlignmentUserId":null,"afBaseScore":0,"afExtendedScore":null,"afCommentCount":0,"afLastCommentedAt":"2021-02-26T11:35:31.743Z","afSticky":false,"hideAuthor":false,"moderationStyle":null,"ignoreRateLimits":null,"submitToFrontpage":true,"shortform":false,"onlyVisibleToLoggedIn":false,"onlyVisibleToEstablishedAccounts":false,"reviewCount":0,"reviewVoteCount":0,"positiveReviewVoteCount":0,"manifoldReviewMarketId":null,"annualReviewMarketProbability":0,"annualReviewMarketIsResolved":false,"annualReviewMarketYear":0,"annualReviewMarketUrl":"0","group":null,"podcastEpisodeId":null,"forceAllowType3Audio":false,"nominationCount2019":0,"reviewCount2019":0,"votingSystem":"eaEmojis","disableRecommendation":false,"user":{"__ref":"User:xn3DTnCvwddKTX9Sm"},"coauthors":[],"slug":"ea-updates-for-march-2021","title":"EA Updates for March 2021","draft":null,"hideCommentKarma":false,"af":false,"currentUserReviewVote":null,"coauthorStatuses":null,"hasCoauthorPermission":true,"rejected":false,"collabEditorDialogue":false},"Revision:4CT88cfPjQYzMpn5Q":{"_id":"4CT88cfPjQYzMpn5Q","__typename":"Revision","htmlHighlight":"<p>Welcome to our March newsletter!<\/p><p>This edition contains information about:<\/p><ul><li>Our new study<\/li><li>Member motivations<\/li><li>Upcoming events<\/li><li>Updates from our community<\/li><li>News about our top priority cause areas<\/li><li>Ways you can help advocate for causes you 
care about<\/li><\/ul><h2>Study: What makes our members unique?<\/h2><p>In an effort to better understand our members, we are collaborating with researchers from Yale University's Mind and Development Lab. <strong>If you have taken one of our giving pledges<\/strong>, we would really appreciate you answering a ~15-minute survey about your beliefs and attitudes. Your responses may help us to recruit new members and raise more for effective charities.<\/p><p><a href=\"https://yalesurvey.ca1.qualtrics.com/jfe/form/SV_3frfTXkagXBh6qW\">Begin Study<\/a> (for GWWC members)<\/p><p>Have questions about the study? Contact the study lead, Matti Wilks, at matti.wilks@yale.edu.<\/p><h2>Why do people pledge to give effectively?<\/h2><blockquote><p>\"I didn't know that giving can actually work. Once I learned about that, the realisation fell into place that there was nothing more useful I could possibly be doing with that money.\" - <strong>Rachel B, Germany<\/strong><\/p><\/blockquote><blockquote><p>\"I'm usually motivated by the idea that there is someone out there who has been spared from having something terrible happen to them because an action that I took. Or an animal has been spared from having to come into existence and live a life full of suffering. Or I have taken some kind of action to help build an amazing future for sentient life.\" - <strong>Rupert M, The Netherlands<\/strong><\/p><\/blockquote><blockquote><p>\"I live a pretty great life with plenty of food and medicine and friends and I'd like more people to have that too. Pretty crazy that we live in a world where people are dying of hunger and diseases that we collectively have the wealth to fix. Hope my money helps get us a little closer to a world where everyone is having a great time. 
Happy to be helping!\" - <strong>Ted Sanders, USA<\/strong><\/p><\/blockquote><p>Thanks to those who replied to our last newsletter to tell us why you're motivated to use a portion of your income to help others.<\/p><h2>Attend an Online Event<\/h2><p>Come along and meet other people who are committed to effectively using their resources to improve the world:<\/p><ul><li><strong>Mar 31: <\/strong><a href=\"https://us02web.zoom.us/webinar/register/2316167379690/WN_moKY-x1JRYq2ig1rfj94rA\"><strong>Law + Effective Altruism: How can we do good better<\/strong><\/a><\/li><li><strong>Apr 1: <\/strong><a href=\"https://us02web.zoom.us/webinar/register/4416153545389/WN_2zQU8W3hQm-ra62jC8uGEA\"><strong>What does it take to be a charity founder?<\/strong><\/a><\/li><li><strong>Apr 3: <\/strong><a href=\"https://us02web.zoom.us/meeting/register/tZEufuGorz4vE9yNQtx-27JLGnwe6xUmRaOM\"><strong>Online Meetup (Europe/Americas)<\/strong><\/a><\/li><li><strong>April 15: <\/strong><a href=\"https://us02web.zoom.us/meeting/register/tZYqd-qrqjgsEtKoGVXdR7oiS9iltC8i8kpg\"><strong>Open Forum (Europe/Asia)<\/strong><\/a><\/li><li><strong>April 26: <\/strong><a href=\"https://us02web.zoom.us/webinar/register/6016167383642/WN_RtPaPiUDR9OcUtm2v97h7A\"><strong>World Malaria Day: How can we eradicate malaria?<\/strong><\/a><\/li><\/ul><p>If you have any question... <\/p>","plaintextDescription":"Welcome to our March newsletter!\n\nThis edition contains information about:\n\n * Our new study\n * Member motivations\n * Upcoming events\n * Updates from our community\n * News about our top priority cause areas\n * Ways you can help advocate for causes you care about\n\n\nStudy: What makes our members unique?\nIn an effort to better understand our members, we are collaborating with researchers from Yale University's Mind and Development Lab. If you have taken one of our giving pledges, we would really appreciate you answering a ~15-minute survey about your beliefs and attitudes. 
Your responses may help us to recruit new members and raise more for effective charities.\n\nBegin Study (for GWWC members)\n\nHave questions about the study? Contact the study lead, Matti Wilks, at matti.wilks@yale.edu.\n\n\nWhy do people pledge to give effectively?\n> \"I didn't know that giving can actually work. Once I learned about that, the realisation fell into place that there was nothing more useful I could possibly be doing with that money.\" - Rachel B, Germany\n\n> \"I'm usually motivated by the idea that there is someone out there who has been spared from having something terrible happen to them because an action that I took. Or an animal has been spared from having to come into existence and live a life full of suffering. Or I have taken some kind of action to help build an amazing future for sentient life.\" - Rupert M, The Netherlands\n\n> \"I live a pretty great life with plenty of food and medicine and friends and I'd like more people to have that too. Pretty crazy that we live in a world where people are dying of hunger and diseases that we collectively have the wealth to fix. Hope my money helps get us a little closer to a world where everyone is having a great time. Happy to be helping!\" - Ted Sanders, USA\n\nThanks to those who replied to our last newsletter to tell us why you're motivated to use a portion of your income to help others.\n\n\nAttend an Online Event\nCome along and meet other people who","wordCount":1580,"version":"1.0.0"},"Revision:Wj5wb2JnAwBJj2zj4_description":{"_id":"Wj5wb2JnAwBJj2zj4_description","__typename":"Revision","htmlHighlight":"<p>The <strong>Giving What We Can Newsletter<\/strong> is a monthly <a href=\"https://forum.effectivealtruism.org/tag/newsletters\">newsletter<\/a> by <a href=\"https://forum.effectivealtruism.org/tag/giving-what-we-can\">Giving What We Can<\/a>.<\/p><h2>External links<\/h2><p><a href=\"https://www.givingwhatwecan.org/newsletter/\">Giving What We Can Newsletter<\/a>. 
Official website.<\/p><h2>Related entries<\/h2><p><a href=\"https://forum.effectivealtruism.org/tag/giving-what-we-can\">Giving What We Can<\/a> | <a href=\"https://forum.effectivealtruism.org/tag/newsletters\">newsletters<\/a><\/p>"},"Tag:Wj5wb2JnAwBJj2zj4":{"_id":"Wj5wb2JnAwBJj2zj4","__typename":"Tag","isRead":false,"parentTag":null,"subTags":[],"description":{"__ref":"Revision:Wj5wb2JnAwBJj2zj4_description"},"canVoteOnRels":null,"userId":"T9vXEgCbvr4wff3P5","name":"Giving What We Can Newsletter","shortName":null,"slug":"giving-what-we-can-newsletter","core":false,"postCount":18,"adminOnly":false,"canEditUserIds":null,"suggestedAsFilter":false,"needsReview":false,"descriptionTruncationCount":0,"createdAt":"2022-05-27T03:05:05.673Z","wikiOnly":false,"deleted":false,"isSubforum":false,"noindex":false},"SocialPreviewType:Cct4uvs7frmpKx8Nb":{"_id":"Cct4uvs7frmpKx8Nb","__typename":"SocialPreviewType","imageUrl":""},"Revision:m6eoK6qNpZjsLQPWN_biography":{"_id":"m6eoK6qNpZjsLQPWN_biography","__typename":"Revision","version":"1.4.0","updateType":"minor","editedAt":"2024-10-03T02:13:47.621Z","userId":"m6eoK6qNpZjsLQPWN","html":"<p>From 2020-2024 I led the team at <a href=\"https://www.givingwhatwecan.org/\">Giving What We Can<\/a> (GWWC), a global community on a mission to make giving effectively and significantly a cultural norm.<\/p><p>Prior to GWWC my career later was mostly in tech start-ups, focusing on marketing and growth. 
I was on the initial team at <a href=\"https://www.sendle.com/\">Sendle<\/a>, Australia’s first technology B-Corp, and co-founded <a href=\"https://www.positly.com/\">Positly<\/a>.<\/p><p>Beyond my work, I’m a devoted member of <a href=\"https://www.givingwhatwecan.org/\">Giving What We Can<\/a> (🔸10% Pledger #1560) and <a href=\"https://founderspledge.com/\">Founders Pledge<\/a>, pledging to donate a significant portion of my income to effective charities.<\/p><p>I enjoy sharing my thoughts and experiences on effective giving and have had the pleasure to do so on platforms such as <a href=\"https://www.bbc.co.uk/programmes/m000j2zl\">BBC Radio 4<\/a>, <a href=\"https://www.aussiefirebug.com/afi-019-effective-altruism/\">Aussie Firebug<\/a>, <a href=\"https://www.youtube.com/watch?v=H4IhGBcTR_k\">DW News<\/a>, and hosting <a href=\"https://www.givingwhatwecan.org/podcast/\">The Giving What We Can Podcast<\/a>. <\/p>","commitMessage":"","wordCount":125,"htmlHighlight":"<p>From 2020-2024 I led the team at <a href=\"https://www.givingwhatwecan.org/\">Giving What We Can<\/a> (GWWC), a global community on a mission to make giving effectively and significantly a cultural norm.<\/p><p>Prior to GWWC my career later was mostly in tech start-ups, focusing on marketing and growth. 
I was on the initial team at <a href=\"https://www.sendle.com/\">Sendle<\/a>, Australia’s first technology B-Corp, and co-founded <a href=\"https://www.positly.com/\">Positly<\/a>.<\/p><p>Beyond my work, I’m a devoted member of <a href=\"https://www.givingwhatwecan.org/\">Giving What We Can<\/a> (🔸10% Pledger #1560) and <a href=\"https://founderspledge.com/\">Founders Pledge<\/a>, pledging to donate a significant portion of my income to effective charities.<\/p><p>I enjoy sharing my thoughts and experiences on effective giving and have had the pleasure to do so on platforms such as <a href=\"https://www.bbc.co.uk/programmes/m000j2zl\">BBC Radio 4<\/a>, <a href=\"https://www.aussiefirebug.com/afi-019-effective-altruism/\">Aussie Firebug<\/a>, <a href=\"https://www.youtube.com/watch?v=H4IhGBcTR_k\">DW News<\/a>, and hosting <a href=\"https://www.givingwhatwecan.org/podcast/\">The Giving What We Can Podcast<\/a>. <\/p>","plaintextDescription":"From 2020-2024 I led the team at Giving What We Can (GWWC), a global community on a mission to make giving effectively and significantly a cultural norm.\n\nPrior to GWWC my career later was mostly in tech start-ups, focusing on marketing and growth. I was on the initial team at Sendle, Australia’s first technology B-Corp, and co-founded Positly.\n\nBeyond my work, I’m a devoted member of Giving What We Can (🔸10% Pledger #1560) and Founders Pledge, pledging to donate a significant portion of my income to effective charities.\n\nI enjoy sharing my thoughts and experiences on effective giving and have had the pleasure to do so on platforms such as BBC Radio 4, Aussie Firebug, DW News, and hosting The Giving What We Can Podcast. 
"},"User:m6eoK6qNpZjsLQPWN":{"_id":"m6eoK6qNpZjsLQPWN","__typename":"User","biography":{"__ref":"Revision:m6eoK6qNpZjsLQPWN_biography"},"profileImageId":"Profile/imilqz8sxv11hwnrhcet","moderationStyle":null,"bannedUserIds":null,"moderatorAssistance":null,"slug":"luke-freeman","createdAt":"2021-08-20T01:31:11.838Z","username":"Luke Freeman","displayName":"Luke Freeman 🔸","previousDisplayName":null,"fullName":null,"karma":6263,"afKarma":0,"deleted":false,"isAdmin":false,"htmlBio":"<p>From 2020-2024 I led the team at <a href=\"https://www.givingwhatwecan.org/\">Giving What We Can<\/a> (GWWC), a global community on a mission to make giving effectively and significantly a cultural norm.<\/p><p>Prior to GWWC my career later was mostly in tech start-ups, focusing on marketing and growth. I was on the initial team at <a href=\"https://www.sendle.com/\">Sendle<\/a>, Australia’s first technology B-Corp, and co-founded <a href=\"https://www.positly.com/\">Positly<\/a>.<\/p><p>Beyond my work, I’m a devoted member of <a href=\"https://www.givingwhatwecan.org/\">Giving What We Can<\/a> (🔸10% Pledger #1560) and <a href=\"https://founderspledge.com/\">Founders Pledge<\/a>, pledging to donate a significant portion of my income to effective charities.<\/p><p>I enjoy sharing my thoughts and experiences on effective giving and have had the pleasure to do so on platforms such as <a href=\"https://www.bbc.co.uk/programmes/m000j2zl\">BBC Radio 4<\/a>, <a href=\"https://www.aussiefirebug.com/afi-019-effective-altruism/\">Aussie Firebug<\/a>, <a href=\"https://www.youtube.com/watch?v=H4IhGBcTR_k\">DW News<\/a>, and hosting <a href=\"https://www.givingwhatwecan.org/podcast/\">The Giving What We Can Podcast<\/a>. 
<\/p>","jobTitle":"Former CEO","organization":"Giving What We Can","postCount":90,"commentCount":294,"sequenceCount":0,"afPostCount":0,"afCommentCount":0,"spamRiskScore":1,"tagRevisionCount":0,"reviewedByUserId":"PSBFYGLmnNYkxe7Lx","givingSeason2024DonatedFlair":false,"givingSeason2024VotedFlair":false},"Post:Cct4uvs7frmpKx8Nb":{"_id":"Cct4uvs7frmpKx8Nb","__typename":"Post","deletedDraft":false,"contents":{"__ref":"Revision:4CT88cfPjQYzMpn5Q"},"fmCrosspost":{"isCrosspost":false},"readTimeMinutes":6,"rejectedReason":null,"customHighlight":null,"lastPromotedComment":null,"bestAnswer":null,"tags":[{"__ref":"Tag:ZCihBFp5P64JCvQY6"},{"__ref":"Tag:gxgimN3k2z5BLmgDa"},{"__ref":"Tag:Wj5wb2JnAwBJj2zj4"}],"socialPreviewData":{"__ref":"SocialPreviewType:Cct4uvs7frmpKx8Nb"},"feedId":null,"totalDialogueResponseCount":0,"unreadDebateResponseCount":0,"dialogTooltipPreview":null,"disableSidenotes":false,"url":"https://www.givingwhatwecan.org/post/2021/03/march-2021-newsletter/","postedAt":"2021-03-30T03:49:17.112Z","createdAt":null,"sticky":false,"metaSticky":false,"stickyPriority":2,"status":2,"frontpageDate":"2021-03-30T05:08:46.422Z","meta":false,"postCategory":"linkpost","tagRelevance":{"Wj5wb2JnAwBJj2zj4":1,"ZCihBFp5P64JCvQY6":2,"gxgimN3k2z5BLmgDa":7},"shareWithUsers":[],"sharingSettings":null,"linkSharingKey":null,"contents_latest":"4CT88cfPjQYzMpn5Q","commentCount":0,"voteCount":5,"baseScore":10,"extendedScore":null,"emojiReactors":{},"unlisted":false,"score":0.0049985735677182674,"lastVisitedAt":null,"isFuture":false,"isRead":false,"lastCommentedAt":"2021-03-30T03:49:17.112Z","lastCommentPromotedAt":null,"canonicalCollectionSlug":null,"curatedDate":null,"commentsLocked":null,"commentsLockedToAccountsCreatedAfter":null,"debate":false,"question":false,"hiddenRelatedQuestion":false,"originalPostRelationSourceId":null,"userId":"m6eoK6qNpZjsLQPWN","location":null,"googleLocation":null,"onlineEvent":false,"globalEvent":false,"startTime":null,"endTime":null,"localStartTime":null,"
localEndTime":null,"eventRegistrationLink":null,"joinEventLink":null,"facebookLink":null,"meetupLink":null,"website":null,"contactInfo":null,"isEvent":false,"eventImageId":null,"eventType":null,"types":[],"groupId":null,"reviewedByUserId":"2kBP4gThRsNXB3WWX","suggestForCuratedUserIds":null,"suggestForCuratedUsernames":null,"reviewForCuratedUserId":null,"authorIsUnreviewed":false,"afDate":null,"suggestForAlignmentUserIds":null,"reviewForAlignmentUserId":null,"afBaseScore":0,"afExtendedScore":null,"afCommentCount":0,"afLastCommentedAt":"2021-03-30T03:49:17.117Z","afSticky":false,"hideAuthor":false,"moderationStyle":null,"ignoreRateLimits":null,"submitToFrontpage":true,"shortform":false,"onlyVisibleToLoggedIn":false,"onlyVisibleToEstablishedAccounts":false,"reviewCount":0,"reviewVoteCount":0,"positiveReviewVoteCount":0,"manifoldReviewMarketId":null,"annualReviewMarketProbability":0,"annualReviewMarketIsResolved":false,"annualReviewMarketYear":0,"annualReviewMarketUrl":"0","group":null,"podcastEpisodeId":null,"forceAllowType3Audio":false,"nominationCount2019":0,"reviewCount2019":0,"votingSystem":"eaEmojis","disableRecommendation":false,"user":{"__ref":"User:m6eoK6qNpZjsLQPWN"},"coauthors":[],"slug":"gwwc-march-2021-newsletter","title":"GWWC March 2021 Newsletter","draft":null,"hideCommentKarma":false,"af":false,"currentUserReviewVote":null,"coauthorStatuses":null,"hasCoauthorPermission":true,"rejected":false,"collabEditorDialogue":false}}</script> <script>window.__APOLLO_FOREIGN_STATE__ = {}</script> </html>