CINXE.COM
Sentient Design: AI and the Next Chapter of UX | Big Medium
<!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1" /> <title>Sentient Design: AI and the Next Chapter of UX | Big Medium</title> <meta name="description" content="Josh Clark introduces Sentient Design, the already-here future of intelligent interfaces and AI-mediated experiences. Learn to design radically adaptive experiences that feel almost self-aware in their response to user needs." /> <meta name="author" content="Josh Clark" /> <meta name="copyright" content="Copyright Big Medium, 2024" /> <link rel="canonical" href="https://bigmedium.com/speaking/sentient-design-josh-clark-talk.html" /> <meta name="robots" content="index,follow" /> <link rel="icon" href="https://bigmedium.com/favicon.ico" type="image/x-icon" /> <link rel="shortcut icon" href="https://bigmedium.com/favicon.ico" type="image/x-icon" /> <link rel="alternate" type="application/rss+xml" href="https://bigmedium.com/bm.feed.xml" title="Big Medium - Full Feed" /> <link rel="stylesheet" href="https://bigmedium.com/bm.styles.css" type="text/css" /> <style type="text/css"> div.bma_page1654 { display: none } </style> <link rel="home" title="Home" href="https://bigmedium.com/" /> <meta name="twitter:card" content="summary_large_image" /> <meta name="twitter:site" content="@bigmediumjosh" /> <meta name="twitter:url" property="og:url" content="https://bigmedium.com/speaking/sentient-design-josh-clark-talk.html" /> <meta name="twitter:title" property="og:title" content="Sentient Design: AI and the Next Chapter of UX | Big Medium" /> <meta name="twitter:description" property="og:description" content="Josh Clark introduces Sentient Design, the already-here future of intelligent interfaces and AI-mediated experiences. Learn to design radically adaptive experiences that feel almost self-aware in their response to user needs." /> <meta name="twitter:image" property="og:image" content="https://bigmedium.com/bm.pix/josh-clark-casual-intelligence.orig-1000.jpg" /> <link rel="apple-touch-icon" sizes="57x57" href="/apple-touch-icon-57x57.png"> <link rel="apple-touch-icon" sizes="60x60" href="/apple-touch-icon-60x60.png"> <link rel="apple-touch-icon" sizes="72x72" href="/apple-touch-icon-72x72.png"> <link rel="apple-touch-icon" sizes="76x76" href="/apple-touch-icon-76x76.png"> <link rel="apple-touch-icon" sizes="114x114" href="/apple-touch-icon-114x114.png"> <link rel="apple-touch-icon" sizes="120x120" href="/apple-touch-icon-120x120.png"> <link rel="apple-touch-icon" sizes="144x144" href="/apple-touch-icon-144x144.png"> <link rel="apple-touch-icon" sizes="152x152" href="/apple-touch-icon-152x152.png"> <link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon-180x180.png"> <link rel="icon" type="image/png" href="/favicon-32x32.png" sizes="32x32"> <link rel="icon" type="image/png" href="/favicon-194x194.png" sizes="194x194"> <link rel="icon" type="image/png" href="/favicon-96x96.png" sizes="96x96"> <link rel="icon" type="image/png" href="/android-chrome-192x192.png" sizes="192x192"> <link rel="icon" type="image/png" href="/favicon-16x16.png" sizes="16x16"> <link rel="manifest" href="/manifest.json"> <link rel="mask-icon" href="/safari-pinned-tab.svg" color="#5bbad5"> <meta name="msapplication-TileColor" content="#ffffff"> <meta name="msapplication-TileImage" content="/mstile-144x144.png"> <meta name="theme-color" content="#ffffff"> <link rel="stylesheet" href="/styles.css?cache" type="text/css" /> <meta property="og:site_name" content="Big Medium" /> <script> /*! grunt-grunticon Stylesheet Loader - v2.1.2 | https://github.com/filamentgroup/grunticon | (c) 2015 Scott Jehl, Filament Group, Inc. | MIT license. */ (function(e){function t(t,n,r,o){"use strict";function a(){for(var e,n=0;u.length>n;n++)u[n].href&&u[n].href.indexOf(t)>-1&&(e=!0);e?i.media=r||"all":setTimeout(a)}var i=e.document.createElement("link"),l=n||e.document.getElementsByTagName("script")[0],u=e.document.styleSheets;return i.rel="stylesheet",i.href=t,i.media="only x",i.onload=o||null,l.parentNode.insertBefore(i,l),a(),i}var n=function(r,o){"use strict";if(r&&3===r.length){var a=e.navigator,i=e.Image,l=!(!document.createElementNS||!document.createElementNS("http://www.w3.org/2000/svg","svg").createSVGRect||!document.implementation.hasFeature("http://www.w3.org/TR/SVG11/feature#Image","1.1")||e.opera&&-1===a.userAgent.indexOf("Chrome")||-1!==a.userAgent.indexOf("Series40")),u=new i;u.onerror=function(){n.method="png",n.href=r[2],t(r[2])},u.onload=function(){var e=1===u.width&&1===u.height,a=r[e&&l?0:e?1:2];n.method=e&&l?"svg":e?"datapng":"png",n.href=a,t(a,null,null,o)},u.src="data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///ywAAAAAAQABAAACAUwAOw==",document.documentElement.className+=" grunticon"}};n.loadCSS=t,e.grunticon=n})(this);(function(e,t){"use strict";var n=t.document,r="grunticon:",o=function(e){if(n.attachEvent?"complete"===n.readyState:"loading"!==n.readyState)e();else{var t=!1;n.addEventListener("readystatechange",function(){t||(t=!0,e())},!1)}},a=function(e){return t.document.querySelector('link[href$="'+e+'"]')},c=function(e){var t,n,o,a,c,i,u={};if(t=e.sheet,!t)return u;n=t.cssRules?t.cssRules:t.rules;for(var l=0;n.length>l;l++)o=n[l].cssText,a=r+n[l].selectorText,c=o.split(");")[0].match(/US\-ASCII\,([^"']+)/),c&&c[1]&&(i=decodeURIComponent(c[1]),u[a]=i);return u},i=function(e){var t,o,a;o="data-grunticon-embed";for(var c in e)if(a=c.slice(r.length),t=n.querySelectorAll(a+"["+o+"]"),t.length)for(var i=0;t.length>i;i++)t[i].innerHTML=e[c],t[i].style.backgroundImage="none",t[i].removeAttribute(o);return t},u=function(t){"svg"===e.method&&o(function(){i(c(a(e.href))),"function"==typeof t&&t()})};e.embedIcons=i,e.getCSS=a,e.getIcons=c,e.ready=o,e.svgLoadedCallback=u,e.embedSVG=u})(grunticon,this); grunticon(["/grunticon-v2/icons.data.svg.css", "/grunticon-v2/icons.data.png.css", "/grunticon-v2/icons.fallback.css"]); </script> <noscript><link href="/grunticon-v2/icons.fallback.css" rel="stylesheet"></noscript> <script src="/js/picturefill-min.js" async></script> <!-- Piwik --> <script type="text/javascript"> var _paq = _paq || []; /* tracker methods like "setCustomDimension" should be called before "trackPageView" */ _paq.push(['trackPageView']); _paq.push(['enableLinkTracking']); (function() { var u="//bigmedium.com/bigstats/"; _paq.push(['setTrackerUrl', u+'piwik.php']); _paq.push(['setSiteId', '1']); var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0]; g.type='text/javascript'; g.async=true; g.defer=true; g.src=u+'piwik.js'; s.parentNode.insertBefore(g,s); })(); </script> <!-- End Piwik Code --> </head> <!-- end <%htmlhead%> --> <body id="Page-sentient-design-josh-clark-talk" class="bmt_page bmt_page-speaking bmslug-speaking"> <header id="bm-top" class="header toolbar"> <div class="lc"> <a href="https://bigmedium.com/" class="logo"> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 830 147" role="img" aria-label="Big Medium" preserveAspectRatio="xMaxYMax meet"><title>Big Medium</title><desc>Big Medium logo</desc><defs><symbol id="bmlogo"><g fill="#000" fill-rule="evenodd"><path id="bmlogo-arrow" d="M56 9.94V0h32v30h-9.94V17.394L62.456 33 55 25.545 70.606 9.94"/><path class="bmlogo" d="M0 0v116h88V40H46V0H0zm36 10H10v96h68V50H36V10zM144 0h-10v95l-1 21h8l2-10c3.874 6.617 10.956 12 23 12 20.618 0 29-17.023 29-38v-4c0-21.292-8.382-38.316-29-38-11.033-.316-17.97 4.34-22 10V0zm0 59c3.064-5.16 9.046-12 20-12 14.142 0 21 11.352 21 29v4c0 17.648-6.858 29-21 29-10.954 0-16.936-6.84-20-12V59zm69 57h10V39h-10v77zM224 8h-13v13h13V8zm68 109c.016 13.62-5.33 21.158-20 21-14.38.158-19.872-7.09-20-22h-10v2c.445 16.1 7.96 29 30 29 21.895 0 30.133-12.61 30-30V61l1-21h-8l-2 10c-3.874-6.637-10.956-12-23-12-20.473 0-29 16.38-29 36v3c0 19.414 8.527 35.503 29 36 11.033-.497 17.825-4.846 22-11v15zm0-58v32c-3.2 5.216-9.014 12-20 12-14.02 0-21-10.392-21-26v-4c0-15.32 6.98-26 21-26 10.986 0 16.8 6.784 20 12zm156 58V61c0-14.677-7.67-23-21-23-12.868 0-20.54 5.84-25 13-2.487-8.473-9.29-13-19-13-12.434 0-19.96 5.55-24 12l-2-10h-8l1 22v55h10V59c3.184-5.083 8.395-11.8 19-12 8.79.2 14.868 3.558 15 15v55h10V59c3.184-5.083 8.395-11.8 19-12 8.79.2 14.868 3.558 15 15v55h10zm81-35v-6c0-20.9-9.765-38-32-38-22.944 0-33 17.1-33 38v4c0 20.9 10.056 38 33 38 22.09 0 32-13.768 32-26h-10c.235 8.03-6.177 17.45-22 17-15.074.45-22.215-9.986-23-27h55zm-33-36c14.618 0 21.42 10.484 22 27h-44c.434-16.516 7.526-27 22-27zm76-8c-20.618-.316-29 16.708-29 38v4c0 20.977 8.382 38 29 38 11.9 0 19.126-5.383 23-12l1 10h9l-1-21V0h-10v48c-4.175-5.66-10.967-10.316-22-10zm3 71c-14.167 0-21-11.352-21-29v-4c0-17.648 6.833-29 21-29 10.84 0 16.8 6.84 20 12v38c-3.2 5.16-9.16 12-20 12zm50 7h10V39h-10v77zM636 8h-13v13h13V8zm18 31v54c0 14.632 7.973 25 23 25 13.386 0 21.65-5.55 26-12l2 10h8l-1-22V39h-10v58c-3.206 4.937-9.15 11.8-21 12-10.474-.2-16.998-5.603-17-17V39h-10zm176 78V61c0-14.677-7.67-23-21-23-12.868 0-20.54 5.84-25 13-2.487-8.473-9.29-13-19-13-12.434 0-19.96 5.55-24 12l-2-10h-8l1 22v55h10V59c3.184-5.083 8.395-11.8 19-12 8.79.2 14.868 3.558 15 15v55h10V59c3.184-5.083 8.395-11.8 19-12 8.79.2 14.868 3.558 15 15v55h10z"/></g></symbol></defs><use xlink:href="#bmlogo"></use></svg> </a> <nav class="bmw_navigation bmn_hnav" role="navigation"> <div class="bmn_skipnav"><a href="#bm-skipnavtop">Skip Navigation</a></div> <ul> <li class="bmn_sec-ideas"><a title="" href="https://bigmedium.com/ideas/">Ideas</a></li> <li class="bmn_sec-projects"><a title="" href="https://bigmedium.com/projects/">Projects</a></li> <li class="bmn_sec-speaking bmn_active"><a title="" href="https://bigmedium.com/speaking/">Talks</a></li> <li class="bmn_sec-about"><a title="" href="https://bigmedium.com/about/">About</a></li> <li class="bmn_sec-hire"><a title="" href="https://bigmedium.com/hire/">Hire Us</a></li> </ul> <span class="bmn_clearNav" id="bm-skipnavtop"> </span> </nav> <div class="powertools"> <a href="#" class="icon icon-microphone icon-utility" title="speech commands" id="toggleSpeech">Speak</a> <a href="#" id="toggleSearch" class="icon icon-search icon-utility" title="search">Search</a> <a href="#nav" id="toggleMenu" class="icon icon-menu icon-utility" title="menu">Menu</a> <!-- START <%search%> --> <form action="https://bigmedium.com/cgi-bin/moxiebin/bm-search.cgi/q/3" method="get" class="bmw_search" enctype="multipart/form-data" accept-charset="utf-8"><div> <input type="search" name="bmq" placeholder="Search" value="" /> <button type="submit" class="icon icon-search icon-utility">Search</button> <input type="hidden" name="bms" value="3" /> </div></form> <!-- END <%search%> --> </div> </div> </header> <main role="main"> <article class="article"> <header class="page-header"> <div class="page-icon"> <div class="lc"> <svg xmlns="http://www.w3.org/2000/svg" width="497" height="187" viewBox="0 0 497 187" preserveAspectRatio="xMaxYMax meet" role="img" aria-label="Illustrations of devices lighting up with Sentient Design "><g fill="none" fill-rule="evenodd"><path fill="#FFF" d="M253.174 177.168c-.891 4.138-1.605 4.138-5.261 4.174-1.771.017-2.523.066-4.045.411-1.523.345-2.868.87-2.868 2.849 0 1.978 1.955 2.254 2.847 2.256 11.72.03 23.44-.11 35.16-.111 7.132 0 14.265.165 21.397.252 1.5.018 4.596-.132 4.596-2.397 0-2.265-2.486-3.052-3.877-3.239-1.391-.187-3.42-.301-5.71-.301s-2.86-.677-3.951-4.067c-1.026-6.03-1.617-9.016-1.773-8.957-1.092.414-9.866 4.026-17.605 4.026-7.74 0-15.908-3.925-16.883-4.064-.702 3.166-1.38 6.163-2.027 9.168Z"/><path fill="#CCD7E5" d="M251.569 181.062h42.275c-2.431-8.743-3.724-13.084-3.879-13.022-1.081.433-9.975 4.184-17.436 4.184-7.46 0-16.336-4.079-17.302-4.224-.463 2.208-1.682 6.562-3.658 13.062Z"/><path fill="#7188A5" d="M271.775 176.906c6.181 0 12.167-2.95 17.959-8.852-7.353 2.537-13.322 3.805-17.908 3.805-4.586 0-10.164-1.286-16.734-3.859 4.94 5.937 10.501 8.906 16.683 8.906Z"/><path fill="#FFF" d="M282 33c17.673 0 32 14.327 32 32v70c0 17.673-14.327 32-32 32h-19c-17.673 0-32-14.327-32-32V65c0-17.673 14.327-32 32-32h19Zm-9.577 113.93c-2.905 0-5.26 2.378-5.26 5.312s2.355 5.313 5.26 5.313 5.26-2.379 5.26-5.313c0-2.934-2.355-5.312-5.26-5.312Zm15.486-106.256h-30.972c-10.38 0-18.817 8.325-18.997 18.662l-.003.338v61.762c0 10.38 8.325 18.817 18.662 18.997l.338.003h30.972c10.38 0 18.817-8.325 18.997-18.662l.003-.338V59.674c0-10.493-8.507-19-19-19Z"/><path fill="#CCD7E5" d="M297.849 148.151c1.473.29 3.157.093 5.05-.59 1.895-.684 3.585-1.971 5.07-3.863-.203 5.613-.971 9.603-2.302 11.97-1.997 3.55-6.174 4.982-7.233 4.982h-7.417c2.83-2.407 4.7-4.565 5.61-6.471a12.474 12.474 0 0 0 1.222-6.028ZM247.363 148.151c-1.473.29-3.156.093-5.05-.59-1.894-.684-3.585-1.971-5.07-3.863.203 5.613.971 9.603 2.302 11.97 1.997 3.55 6.174 4.982 7.234 4.982h7.416c-2.83-2.407-4.7-4.565-5.61-6.471a12.474 12.474 0 0 1-1.222-6.028Z"/><path fill="#FFF" d="M272.5 36c7.456 0 13.5 6.044 13.5 13.5S279.956 63 272.5 63 259 56.956 259 49.5 265.044 36 272.5 36Zm8.405 12.064c-.455-.83-.98-1.245-1.575-1.245-.594 0-1.215.415-1.862 1.245a5.17 5.17 0 1 1-9.989.197l.053-.197c-.625-.83-1.228-1.245-1.81-1.245-.58 0-1.154.415-1.72 1.245a8.617 8.617 0 1 0 17.115 1.436c0-.374-.07-.853-.212-1.436Z"/><g transform="translate(246 76)"><path fill="#FEFEFE" d="M19.418 43.764c-.92-.815-1.434-1.424-1.174-2.098.26-.674 1.928-1.355 2.619-1.142 3.652 1.127 7.191 1.197 10.841.007.654-.213 2.166.28 2.452.867.286.586-.209 1.849-1.125 2.366-.917.517-4.47 2.3-7.023 2.232-2.48.089-5.67-1.417-6.59-2.232Z"/><ellipse cx="8.846" cy="16.327" fill="#FFF" rx="7.329" ry="7.334"/><ellipse cx="44.613" cy="16.327" fill="#FFF" rx="7.329" ry="7.334"/><path fill="#FEFEFE" d="M14.819 2.822c.92.814 1.434 1.423 1.174 2.098-.26.674-1.928 1.354-2.619 1.141-1.68-.518-3.461-1.593-5.115-1.644-1.942-.06-3.755.995-5.727 1.638-.653.213-2.165-.281-2.451-.867-.286-.586.208-1.849 1.125-2.366.917-.518 4.47-2.888 7.023-2.82 2.48-.088 5.67 2.005 6.59 2.82ZM38.181 2.822c-.92.814-1.434 1.423-1.174 2.098.26.674 1.928 1.354 2.619 1.141 1.68-.518 3.461-1.593 5.115-1.644 1.942-.06 3.755.995 5.727 1.638.653.213 2.165-.281 2.451-.867.286-.586-.208-1.849-1.125-2.366-.917-.518-4.47-2.888-7.023-2.82-2.48-.088-5.67 2.005-6.59 2.82Z"/></g><g fill="#FFF" transform="translate(203 33)"><path fill-rule="nonzero" d="M127.957 89.737c-3.282 0-6.04 2.273-6.822 5.349h-6.092c-.972 0-1.76.798-1.76 1.783 0 .984.788 1.782 1.76 1.782l6.092.001c.782 3.076 3.54 5.348 6.822 5.348 3.89 0 7.043-3.193 7.043-7.131 0-3.939-3.153-7.132-7.043-7.132Zm0 4.754c1.296 0 2.347 1.065 2.347 2.378 0 1.312-1.05 2.377-2.347 2.377s-2.348-1.065-2.348-2.377c0-1.313 1.05-2.378 2.348-2.378Z"/><rect width="3.522" height="16.64" x="126.196" y="67.749" rx="1.761"/><path fill-rule="nonzero" d="M127.957 47.543c-3.282 0-6.04 2.273-6.822 5.348h-6.092c-.972 0-1.76.799-1.76 1.783 0 .985.788 1.783 1.76 1.783h6.092c.782 3.076 3.54 5.349 6.822 5.349 3.89 0 7.043-3.193 7.043-7.132 0-3.938-3.153-7.131-7.043-7.131Zm0 4.754c1.296 0 2.347 1.064 2.347 2.377 0 1.313-1.05 2.377-2.347 2.377s-2.348-1.064-2.348-2.377c0-1.313 1.05-2.377 2.348-2.377Z"/><rect width="3.522" height="16.64" x="126.196" y="24.96" rx="1.761"/><path fill-rule="nonzero" d="M11.74 89.737c3.281 0 6.039 2.273 6.82 5.349h6.092c.973 0 1.761.798 1.761 1.783 0 .984-.788 1.782-1.76 1.782l-6.092.001c-.783 3.076-3.54 5.348-6.822 5.348-3.89 0-7.043-3.193-7.043-7.131 0-3.939 3.153-7.132 7.043-7.132Zm0 4.754c-1.298 0-2.349 1.065-2.349 2.378 0 1.312 1.051 2.377 2.348 2.377s2.348-1.065 2.348-2.377c0-1.313-1.051-2.378-2.348-2.378Z"/><rect width="3.522" height="16.64" x="9.978" y="67.749" rx="1.761"/><path fill-rule="nonzero" d="M11.74 47.543c3.281 0 6.039 2.273 6.82 5.348h6.092c.973 0 1.761.799 1.761 1.783 0 .985-.788 1.783-1.76 1.783H18.56c-.783 3.076-3.54 5.349-6.822 5.349-3.89 0-7.043-3.193-7.043-7.132 0-3.938 3.153-7.131 7.043-7.131Zm0 4.754c-1.298 0-2.349 1.064-2.349 2.377 0 1.313 1.051 2.377 2.348 2.377s2.348-1.064 2.348-2.377c0-1.313-1.051-2.377-2.348-2.377Z"/><rect width="3.522" height="16.64" x="9.978" y="24.96" rx="1.761"/><path d="M4.902 0c.405 1.811.978 3.314 1.718 4.509.74 1.195 1.664 2.108 2.771 2.74-1.092.788-2.016 1.871-2.77 3.25-.756 1.38-1.329 3.03-1.72 4.952-.543-1.982-1.214-3.632-2.013-4.951C2.09 9.18 1.127 8.134 0 7.358c1.097-.656 2.06-1.605 2.888-2.849.829-1.244 1.5-2.747 2.014-4.509ZM122.88 4.16c.406 1.811.979 3.314 1.718 4.509.74 1.195 1.664 2.108 2.772 2.74-1.093.788-2.017 1.871-2.772 3.25-.755 1.38-1.328 3.03-1.718 4.952-.544-1.982-1.215-3.632-2.014-4.951-.798-1.319-1.761-2.366-2.888-3.142 1.097-.656 2.06-1.605 2.888-2.849.83-1.244 1.5-2.747 2.014-4.509Z"/></g><path fill="#FFF" d="M57.656 174.58c-1.281 5.228-1.27 5.112-6.72 5.274-2.53.075-5.188.35-7.502 1.27-1.08.427-2.434.962-2.434 2.848 0 1.885 2.81 2.847 4.092 2.85 16.847.037 33.695-.14 50.542-.14 10.253-.001 20.507.208 30.76.318 2.154.023 6.606-1.056 6.606-3.028 0-1.973-3.574-3.855-5.573-4.091-2-.236-3.928.031-8.124.031-4.196 0-4.196-1.267-5.764-5.55-1.475-7.617-2.324-11.388-2.549-11.314-1.569.523-14.479 4.336-25.307 4.336S61.972 163.176 60.57 163c-1.009 4-1.983 7.785-2.914 11.58Z"/><path fill="#CCD7E5" d="M56.112 178.774h59.656c-2.96-10.558-4.553-15.8-4.778-15.726-1.569.523-14.479 4.336-25.307 4.336S61.972 163.176 60.57 163c-.673 2.667-2.159 7.925-4.458 15.774Z"/><path fill="#7188A5" d="M85.287 170.887c8.847 0 17.415-2.613 25.703-7.84L60.57 163c7.63 5.258 15.87 7.887 24.717 7.887Z"/><path fill="#FFF" fill-rule="nonzero" d="M154.566 42H18.434C8.254 42 0 50.284 0 60.503v78.994C0 149.716 8.253 158 18.434 158h136.132c10.18 0 18.434-8.284 18.434-18.503V60.503C173 50.284 164.747 42 154.566 42ZM18.434 51.963h136.132c4.699 0 8.508 3.824 8.508 8.54v78.994c0 4.716-3.81 8.54-8.508 8.54H18.434c-4.699 0-8.508-3.824-8.508-8.54V60.503c0-4.716 3.81-8.54 8.508-8.54Z"/><g fill="#FFF" fill-rule="nonzero"><path d="M59.614 56.001c-11.548 10-19.098 26.163-19.098 44.5 0 18.335 7.55 34.499 19.097 44.498L47.706 145C37.975 133.512 32 117.785 32 100.5c0-17.284 5.975-33.012 15.707-44.5l11.907.001Zm66.687 0c3.151 3.72 5.926 7.908 8.24 12.497C139.405 78.136 142 89.106 142 100.5c0 17.285-5.975 33.012-15.706 44.5l-11.907-.001c11.548-10 19.097-26.163 19.097-44.499 0-10.077-2.288-19.744-6.553-28.199-3.264-6.47-7.547-11.982-12.533-16.3H126.3Z"/><path d="M75.719 99.212c.844.314 1.266.979 1.266 1.994-1.743 6.648-5.82 8.985-12.556 8.985-6.647 0-10.339-2.729-12.045-9.205l-.074-.288c.128-.94.621-1.54 1.479-1.802.858-.263 1.507 0 1.946.787 1.187 4.789 3.928 6.27 8.694 6.27 4.692 0 7.467-1.418 8.757-6.047.844-.777 1.689-1.008 2.533-.694ZM98.286 99.212c-.844.314-1.266.979-1.266 1.994 1.743 6.648 5.82 8.985 12.556 8.985 6.647 0 10.339-2.729 12.045-9.205l.074-.288c-.128-.94-.62-1.54-1.479-1.802-.858-.263-1.507 0-1.946.787-1.186 4.789-3.928 6.27-8.694 6.27-4.692 0-7.467-1.418-8.757-6.047-.844-.777-1.689-1.008-2.533-.694ZM98.133 126.334c.527.507.83 2.378.418 2.688-.412.31-6.413 6.418-12.539 6.418-6.126 0-11.408-5.505-12.271-6.146-.864-.64-.516-2.465 0-2.96.505-.486 1.846-.913 2.794-.257s5.845 5.125 9.477 5.125c3.632 0 8.389-4.264 9.225-4.868.836-.604 2.402-.475 2.896 0ZM62.462 85.844c1.685-.117 3.68.153 6.012.79l.392.11c.534 1.062.717 1.883.549 2.463-.168.58-.743 1.117-1.726 1.61-2.061-.59-3.715-.83-4.93-.745-1.191.083-2.939.543-5.19 1.39l-.31.118c-.718-.219-1.23-.722-1.534-1.508-.306-.787-.306-1.602 0-2.445 2.763-1.062 4.989-1.66 6.737-1.783ZM110.058 85.844c-1.686-.117-3.681.153-6.013.79l-.391.11c-.534 1.062-.717 1.883-.55 2.463.168.58.743 1.117 1.727 1.61 2.06-.59 3.714-.83 4.929-.745 1.192.083 2.94.543 5.19 1.39l.311.118c.717-.219 1.229-.722 1.534-1.508.305-.787.305-1.602 0-2.445-2.763-1.062-4.99-1.66-6.737-1.783Z"/></g><g fill="#FFF" transform="translate(11 60)"><g><rect width="22.597" height="4.275" y="38.222" rx="2.137"/><path d="m6.842 21.199 7.59 1.488a2.144 2.144 0 0 1 1.693 2.51 2.13 2.13 0 0 1-2.501 1.686l-7.59-1.488a2.144 2.144 0 0 1-1.693-2.51 2.13 2.13 0 0 1 2.5-1.686ZM13.521 1.041l6.776 3.79a2.149 2.149 0 0 1 .835 2.91 2.117 2.117 0 0 1-2.889.828l-6.776-3.79a2.149 2.149 0 0 1-.835-2.91 2.117 2.117 0 0 1 2.889-.828ZM5.949 56.471l7.471-2.02a2.127 2.127 0 0 1 2.61 1.507 2.146 2.146 0 0 1-1.513 2.622L7.045 60.6a2.127 2.127 0 0 1-2.61-1.507 2.146 2.146 0 0 1 1.514-2.622ZM11.47 76.23l6.643-4.027a2.116 2.116 0 0 1 2.914.727 2.15 2.15 0 0 1-.732 2.938l-6.643 4.027a2.116 2.116 0 0 1-2.915-.727 2.15 2.15 0 0 1 .732-2.937Z"/></g><g transform="matrix(-1 0 0 1 149 0)"><rect width="22.597" height="4.275" y="38.222" rx="2.137"/><path d="m6.842 21.199 7.59 1.488a2.144 2.144 0 0 1 1.693 2.51 2.13 2.13 0 0 1-2.501 1.686l-7.59-1.488a2.144 2.144 0 0 1-1.693-2.51 2.13 2.13 0 0 1 2.5-1.686ZM13.521 1.041l6.776 3.79a2.149 2.149 0 0 1 .835 2.91 2.117 2.117 0 0 1-2.889.828l-6.776-3.79a2.149 2.149 0 0 1-.835-2.91 2.117 2.117 0 0 1 2.889-.828ZM5.949 56.471l7.471-2.02a2.127 2.127 0 0 1 2.61 1.507 2.146 2.146 0 0 1-1.513 2.622L7.045 60.6a2.127 2.127 0 0 1-2.61-1.507 2.146 2.146 0 0 1 1.514-2.622ZM11.47 76.23l6.643-4.027a2.116 2.116 0 0 1 2.914.727 2.15 2.15 0 0 1-.732 2.938l-6.643 4.027a2.116 2.116 0 0 1-2.915-.727 2.15 2.15 0 0 1 .732-2.937Z"/></g><ellipse cx="74.5" cy="13.643" rx="9.533" ry="9.618"/></g><g fill="#FFF"><path d="m40.522 26.939 5.21 5.795a2.128 2.128 0 0 1-.158 3.004 2.122 2.122 0 0 1-2.998-.158l-5.21-5.795a2.128 2.128 0 0 1 .157-3.003 2.122 2.122 0 0 1 2.999.157ZM58.119 26.837l4.355 6.467a2.128 2.128 0 0 1-.574 2.951 2.121 2.121 0 0 1-2.947-.573l-4.355-6.467a2.128 2.128 0 0 1 .573-2.95 2.121 2.121 0 0 1 2.948.572ZM71.669 21.64l5.04 12.492a2.126 2.126 0 0 1-1.175 2.766 2.122 2.122 0 0 1-2.763-1.173l-5.04-12.493a2.126 2.126 0 0 1 1.175-2.766 2.122 2.122 0 0 1 2.763 1.173ZM133.089 26.939l-5.21 5.795a2.128 2.128 0 0 0 .157 3.004 2.122 2.122 0 0 0 2.999-.158l5.21-5.795a2.128 2.128 0 0 0-.158-3.003 2.122 2.122 0 0 0-2.998.157ZM115.492 26.837l-4.355 6.467a2.128 2.128 0 0 0 .573 2.951 2.121 2.121 0 0 0 2.947-.573l4.356-6.467a2.128 2.128 0 0 0-.574-2.95 2.121 2.121 0 0 0-2.947.572ZM101.941 21.64l-5.04 12.492a2.126 2.126 0 0 0 1.175 2.766 2.122 2.122 0 0 0 2.763-1.173l5.04-12.493a2.126 2.126 0 0 0-1.174-2.766 2.122 2.122 0 0 0-2.764 1.173Z"/><g transform="translate(32 162.576)"><ellipse cx="3.539" cy="6.853" rx="3.539" ry="3.545"/><ellipse cx="105.461" cy="6.853" rx="3.539" ry="3.545"/><path d="m16.972 8.325 4.241-6.543a2.121 2.121 0 0 1 2.937-.624 2.127 2.127 0 0 1 .625 2.941l-4.242 6.542a2.121 2.121 0 0 1-2.936.625 2.127 2.127 0 0 1-.625-2.941ZM88.608 10.627l-4.467-6.39a2.128 2.128 0 0 1 .522-2.961 2.121 2.121 0 0 1 2.956.521l4.468 6.39a2.128 2.128 0 0 1-.523 2.96 2.121 2.121 0 0 1-2.956-.52Z"/></g><g transform="translate(50.403)"><ellipse cx="34.682" cy="12.051" rx="8.494" ry="8.507"/><ellipse cx="4.955" cy="4.962" rx="4.955" ry="4.962"/><ellipse cx="65.825" cy="4.962" rx="4.955" ry="4.962"/></g></g><path fill="#FFF" d="M408.696 176.133c-1.003 4.573-1.806 4.573-5.92 4.613-1.992.02-2.837.074-4.55.455-1.713.38-3.226.962-3.226 3.148 0 2.187 2.2 2.491 3.203 2.494 13.184.032 26.37-.122 39.554-.123 8.024 0 16.049.183 24.073.279 1.686.02 5.17-.146 5.17-2.65 0-2.503-2.797-3.373-4.362-3.579-1.564-.207-3.848-.333-6.423-.333-2.576 0-3.218-.748-4.446-4.495-1.153-6.665-1.818-9.965-1.994-9.9-1.228.458-11.1 4.45-19.806 4.45-8.706 0-17.896-4.338-18.993-4.492-.79 3.5-1.552 6.812-2.28 10.133Z"/><path fill="#CCD7E5" d="M406.89 180.437h47.56c-2.736-9.664-4.19-14.461-4.364-14.393-1.216.479-11.223 4.624-19.616 4.624-8.392 0-18.378-4.507-19.464-4.668-.521 2.44-1.893 7.253-4.116 14.437Z"/><path fill="#7188A5" d="M429.621 175.843c6.955 0 13.69-3.26 20.205-9.783-8.272 2.804-14.988 4.205-20.147 4.205-5.16 0-11.434-1.421-18.826-4.265 5.558 6.562 11.814 9.843 18.768 9.843Z"/><path fill="#FFF" fill-rule="nonzero" d="M431.345 33C402.663 33 386 53.952 386 88.548c0 23.727 4.354 42.896 12.504 56.236C406.524 157.909 418.05 165 431.345 165 457.785 165 475 133.933 475 88.548 474.616 53.748 459.38 33 431.345 33Zm0 7.88c23.115 0 35.48 16.838 35.82 47.712 0 41.568-14.939 68.527-35.82 68.527-22.208 0-37.51-25.045-37.51-68.57 0-30.583 13.588-47.668 37.51-47.668Z"/><g fill="#FFF" transform="translate(400 84)"><path fill-rule="nonzero" d="M20.534 13.994c.74.29 1.111.903 1.111 1.84-1.529 6.13-5.106 8.284-11.014 8.284-5.831 0-9.07-2.516-10.566-8.488L0 15.365c.112-.866.544-1.42 1.297-1.662.753-.242 1.322 0 1.707.726 1.041 4.415 3.446 5.781 7.627 5.781 4.116 0 6.55-1.307 7.681-5.576.741-.716 1.482-.93 2.222-.64ZM40.466 13.994c-.74.29-1.111.903-1.111 1.84 1.529 6.13 5.106 8.284 11.014 8.284 5.831 0 9.07-2.516 10.566-8.488l.065-.265c-.112-.866-.544-1.42-1.297-1.662-.753-.242-1.322 0-1.707.726-1.041 4.415-3.446 5.781-7.627 5.781-4.116 0-6.55-1.307-7.681-5.576-.741-.716-1.482-.93-2.222-.64Z"/><path d="M2.752 3.288C6.678 1.096 10.077 0 12.952 0c2.261 0 4.222.245 5.883.735a1.858 1.858 0 0 1 1.112 2.658A2.948 2.948 0 0 1 17 4.93c-2.99-.356-5.701-.259-8.135.293-1.68.381-2.966.774-3.855 1.178-.922.419-2.01.19-2.684-.566a1.655 1.655 0 0 1 .427-2.547ZM59.164 3.288C55.24 1.096 51.84 0 48.965 0c-2.262 0-4.223.245-5.883.735a1.858 1.858 0 0 0-1.113 2.658 2.948 2.948 0 0 0 2.949 1.537c2.99-.356 5.7-.259 8.134.293 1.681.381 2.966.774 3.855 1.178.922.419 2.01.19 2.685-.566a1.655 1.655 0 0 0-.428-2.547Z"/><ellipse cx="30.828" cy="38.564" rx="3.935" ry="3.94"/><path d="M22.65 54.497c-1.03-.912-1.605-1.593-1.314-2.348.291-.754 2.157-1.516 2.93-1.278 4.085 1.262 8.044 1.34 12.127.008.732-.239 2.423.314 2.743.97.32.656-.234 2.07-1.26 2.648-1.025.58-5 2.575-7.855 2.499-2.773.099-6.343-1.587-7.372-2.499Z"/></g><g fill="#FFF" transform="translate(425 46.004)"><ellipse cx="24.907" cy="16.496" rx="7.644" ry="14.651" transform="rotate(-31 24.907 16.496)"/><ellipse cx="5.312" cy="5.534" rx="5.312" ry="5.334"/></g><g fill="#FFF" transform="translate(362)"><ellipse cx="130.121" cy="41.533" rx="4.577" ry="4.615"/><ellipse cx="103.966" cy="25.711" rx="4.577" ry="4.615"/><ellipse cx="43.156" cy="16.481" rx="4.577" ry="4.615"/><ellipse cx="19.616" cy="32.304" rx="4.577" ry="4.615"/><path d="M68.419 0c.521 2.319 1.258 4.242 2.209 5.771.95 1.53 2.138 2.699 3.562 3.508-1.405 1.008-2.592 2.395-3.562 4.16-.971 1.766-1.707 3.879-2.21 6.339-.698-2.538-1.56-4.65-2.587-6.338-1.027-1.688-2.265-3.029-3.713-4.022 1.41-.839 2.647-2.054 3.713-3.647C66.896 4.18 67.758 2.255 68.419 0ZM129.229 69.222c.522 2.319 1.258 4.243 2.209 5.772.95 1.529 2.138 2.698 3.562 3.507-1.404 1.008-2.592 2.395-3.562 4.16-.97 1.767-1.707 3.88-2.209 6.339-.699-2.537-1.562-4.65-2.588-6.338-1.027-1.688-2.264-3.029-3.713-4.022 1.41-.839 2.648-2.054 3.713-3.646s1.928-3.516 2.588-5.772ZM6.3 52.081c.522 2.32 1.258 4.243 2.21 5.772.95 1.529 2.138 2.698 3.562 3.508-1.405 1.008-2.592 2.394-3.563 4.16-.97 1.766-1.706 3.878-2.208 6.338-.7-2.537-1.562-4.65-2.589-6.338C2.686 63.833 1.448 62.492 0 61.5c1.41-.838 2.647-2.054 3.712-3.646 1.066-1.592 1.928-3.516 2.589-5.772Z"/></g></g></svg> </div> </div> <div class="lc"> <!-- START <%kicker%> --> <a href="https://bigmedium.com/bm.tags/sentient-design/" class="kicker">sentient design</a> <!-- END <%kicker%> --> <!-- start <%headline%> --> <h1 class="bmw_headline">Sentient Design: AI and the Next Chapter of UX</h1> <!-- end <%headline%> --> <!-- start <%byline%> --> <h2 class="bmw_byline">By <a href="https://bigmedium.com/about/josh-clark.html" class="bmc_byline">Josh Clark</a> <br /> <span class="bmc_bylineTitle">Principal, Big Medium</span> </h2> <!-- end <%byline%> --> <!-- start <%pubdate%> --> <span class="bmw_pubdate">Published Jun 2, 2024</span> <!-- end <%pubdate%> --> </div> </header> <div class="lc maincontent"> <section class="bodycontent"> <!-- start <%content%> --> <div class="bmw_pageContent"> <div class="video video--16x9 u--triple-bottom-margin"> <iframe width="560" height="315" src="https://www.youtube.com/embed/T6SjliHy3eM?si=McbC3_dTxPWRDagX&start=556" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe> </div> <aside class="aside media-right"> <p>This is the prose version of a talk Josh gave at <a href="https://friends.figma.com/events/details/figma-miami-presents-sentient-design-ai-and-the-next-chapter-of-ux/">Friends of Figma Miami</a> in May 2024.</p> <h3 class="u--zero-bottom-margin">The slides</h3> <p class="u--zero-bottom-margin"><a href="/prez/sentient-design-josh-clark.pdf"><img src="/prez/sentient-design-cover.png" width="600" height="338" alt="The cover slide of the deck, which reads "Sentient Design: AI and the Next Chapter of User Experience"" style="width:100%; max-width:324px;" /></a></p> <p class="bm-caption"><a href="/prez/sentient-design-josh-clark.pdf">Download the slides</a> (PDF, 8.2MB)</p> <h3 class="u--zero-bottom-margin">The audio</h3> <p class="u--zero-bottom-margin"> <audio controls> <source src="/audio/sentient-design-josh-clark-20240522.mp3" type="audio/mpeg"> </audio> </p> <p class="bm-caption"><a href="/audio/sentient-design-josh-clark-20240522.mp3">Download MP3 audio</a> (58:26)</p> <h3 class="u--zero-bottom-margin">The book</h3> <p><cite>Sentient Design</cite> by Josh Clark with Veronika Kindred will be published by <a href="https://rosenfeldmedia.com/books/sentient-design/">Rosenfeld Media</a>.</p> <h3 class="u--zero-bottom-margin">The workshop</h3> <p><p>Book a <a href="/ideas/workshop-sentient-design-ai-experiences.html">workshop for designing AI-powered experiences</a>.</p> <h3 class="u--zero-bottom-margin">Need help?</h3> <p>If you’re working on strategy, design, or development of AI-powered products, we do that! <a href="/hire/">Get in touch.</a></p> </aside> <p>There are so many twisty contradictions in our experiences with AI and the messages we receive about it. It’s smart, but it’s dumb. It’s here, but it’s yet to come. It’s big, but it’s small. So much opportunity, but so much risk. It’s overhyped, but it’s underestimated. It can do everything, but it can’t seem to do anything.</p> <p>What does it all mean? Here at Big Medium, a big part of our mission is to help companies make sense of new technologies and apply them in meaningful ways. AI is naturally a big focus right now; we’re doing a lot of AI-related product work with our clients, and a lot of that starts with the question: what does AI mean for us, and what do we do with it?</p> <p>So what <em>does</em> it all mean? For such an enormous existential question, let’s try the greatest philosopher of our time:</p> <div class="observe-margins"> <div class="video video--16x9" aria-label="Snoop Dogg shares his views on AI at the Milken Institute in 2023."> <video width="800" height="450" controls="" poster="https://bigmedium.com/vids/snoop-dogg-ai.jpg"> <source src="https://bigmedium.com/vids/snoop-dogg-ai.webm" type="video/webm" /> <source src="https://bigmedium.com/vids/snoop-dogg-ai.mp4" type="video/mp4" /> <img src="https://bigmedium.com/vids/snoop-dogg-ai.jpg" width="1600" height="900" alt="Snoop Dogg shares his views on AI at the Milken Institute in 2023." /> </video> </div> </div> <p>Even Snoop doesn’t know what’s up. Even Snoop!</p> <p>Tech leaders are pretty sure that AI is a big deal, though. Alphabet CEO Sundar Pichai said earlier this year that AI is “the most profound technology humanity is working on—more profound than fire or electricity or anything that we’ve done in the past.” More profound than fire or electricity! THAT SOUNDS LIKE A PRETTY BIG DEAL.</p> <p>So, let’s see what kind of profound applications we’re building with this bigger-than-fire technology. We have <a href="https://www.adoreme.com/am-by-you">AI-generated underpants</a>… <a href="https://www.everyhuman.com/us/products">algorithmic perfume</a>… <a href="https://people.com/miss-ai-will-be-the-worlds-first-ai-beauty-pageant-details-8634566">AI beauty contests</a>… <a href="https://www.youtube.com/watch?v=lWv6oVnfZe0">Oral-B’s AI-powered toothbrush</a>… and if you have any lingering doubts about AI’s ability to draw hands, we have AI-generated sign-language manuals.</p> <blockquote class="twitter-tweet"><p lang="en" dir="ltr">Someone asked AI to make a sign language manual, in case you’re worried that we’ll all be out of a job soon <a href="https://t.co/usCxNOj3p0">pic.twitter.com/usCxNOj3p0</a></p>— Elizabeth Sampat (@twoscooters) <a href="https://twitter.com/twoscooters/status/1619371708540157954?ref_src=twsrc%5Etfw">January 28, 2023</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script> <p><em>This</em> is the new fire? It’s certainly a lot of sizzle—lots of noise ’n’ toys getting built with AI right now. But just because AI is noisy and frothy doesn’t mean it’s useless—or that it <em>can’t</em> be profound, or at least meaningful.</p> <p><img src="/pix/textbox.gif" width="720" height="405" alt="Animation of a variety of prompts typed into a text box, one prompt at a time." /></p> <p>The interface of the moment, of course, is the text box, where ChatGPT and its cousins promise to provide a single “ask me anything” interface for general intelligence. Let’s just say it: these things are extraordinary. All of us have experienced moments of magic with what they can do—and their range. In one breath, you can ask for a Python application, and in the next, you can ask for a Shakespearean sonnet or tips for teaching your kid to ride her bike. The bots often deliver with nuance and something resembling creativity.</p> <p>But as we’ve all experienced, these things aren’t always reliable, sometimes flat-out wrong, often messy. They give us high expectations—“ask me anything”—but their answers are often just… <em>meh</em>.</p> <p>We haven’t quite realized the “fire and electricity” potential yet, have we? But even if you’re a skeptic, you have to admit that you can <em>feel</em> the potential. Something has changed here. Automated systems are suddenly capable of things dramatically different from what came before.</p> <p><strong>And yet: we have these systems that seem at once useful for everything, but also nothing in particular.</strong> So for most of us… <em>maybe</em> we use these services for a bit of help writing email? Or use them as cute toys to poke at? See what tricks we can make them do? For all of the fantastic power of this stuff, many of us are still left with the question…</p> <h3 id="whatisaiactuallyfor">What is AI actually for?</h3> <p>This is a question for everyone, but specifically for the people who will create this next generation of AI-powered experiences. How should we think about AI in our work as designers and developers?</p> <p>I’m an optimist about this stuff. I believe that we can and will do better. I also think it’s natural that we’re seeing a lot of flailing—including so many weak or crummy applications of a truly remarkable technology.</p> <!-- 19. underpants and mediocre email --> <blockquote class="pullquote media-center"> The future is more than AI underpants and mediocre email. </blockquote> <p>Let’s go wayyyy back to another breakthrough technology, the Gutenberg press. (If Sundar Pichai can run with fire and electricity, I’ll take the printing press.) When it was invented, the Gutenberg press unleashed a flood of mediocre books and trashy novels, of misinformation and propaganda, of job displacement, and even what passed for information overload in the day. Those trends might sound familiar.</p> <p>Democratizing technology necessarily means opening the door to mediocre uses of that technology. We will see AI pump more and more <em>stuff</em> into the world, including plenty that we don’t need or want. But just because we’re seeing so many uninspired uses of AI doesn’t mean that’s the end of the story. <strong>Mediocrity should not be our baseline reference point for innovation with AI.</strong></p> <!-- 21. black and white, fire and poo --> <figure class="media-left bmc_image"> <a href="https://bigmedium.com/bm.pix/img-binary-culture-fire-poo-2.png" rel="bm_lightbox" title="" target="_blank"><img src="https://bigmedium.com/bm.pix/img-binary-culture-fire-poo-2.orig-250.png" alt="Polarized, binary culture: an image of a flame on a black background in contrast with the poo emoji on white background.-2" srcset="https://bigmedium.com/bm.pix/img-binary-culture-fire-poo-2.orig-2000.png 1920w, https://bigmedium.com/bm.pix/img-binary-culture-fire-poo-2.orig-1000.png 1000w, https://bigmedium.com/bm.pix/img-binary-culture-fire-poo-2.orig-500.png 500w, https://bigmedium.com/bm.pix/img-binary-culture-fire-poo-2.orig-250.png 250w" sizes="(min-width: 640px) 720px, 100vw" title="Click to enlarge" /></a> </figure> <p>We are a binary and polarized culture, and we tend to think in black and white. Go to social media, and everything is either amazing or it’s terrible and useless. That view shapes the conversation about AI, too. It’s either fantastic or it’s going to ruin the world. It’s as profound as fire, or it’s meaningless and dumb. The truth, as in most things, is somewhere in the middle. There’s more gray than black and white, more fluidity than solid answers. There’s opportunity in that fluidity, but we must be open-eyed.</p> <p>It’s breathtaking when you step back and take stock of all the superpowers that machine learning and artificial intelligence have added to our toolkits as designers and developers. But at the same time, consider the dangers. These threats are real, enormous, and in no way kidding around.</p> <!-- 23. graphic of the good and bad --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-ai-superpowers-and-dangers.jpg" rel="bm_lightbox" title="AI&#8217;s superpowers are impressive, but so are its dangers." target="_blank"><img src="https://bigmedium.com/bm.pix/img-ai-superpowers-and-dangers.orig-250.jpg" alt="A list of AI superpowers contrasted with its risks and dangers" srcset="https://bigmedium.com/bm.pix/img-ai-superpowers-and-dangers.orig-2000.jpg 1920w, https://bigmedium.com/bm.pix/img-ai-superpowers-and-dangers.orig-1000.jpg 1000w, https://bigmedium.com/bm.pix/img-ai-superpowers-and-dangers.orig-500.jpg 500w, https://bigmedium.com/bm.pix/img-ai-superpowers-and-dangers.orig-250.jpg 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> AI’s superpowers are impressive, but so are its dangers. </figcaption> </figure> <p>Advances always come at a potential cost. Technology giveth, and it taketh away. How it’s used makes all the difference—and that’s where design comes in.</p> <p>I’ve been saying for the last several years that <a href="https://bigmedium.com/speaking/ai-is-your-new-design-material.html">AI is your new design material</a>—in the way that HTML and CSS are design materials, or great prose is a design material, or dense data is a design material. It’s essential to understand its texture and grain: what are its strengths and weaknesses? It’s not only how it <em>can</em> be used but how it <em>wants</em> to be used.</p> <p>To me, the possibilities of this material add up to a new kind of experience—some of which is already here (familiar even), and some that is still to emerge.</p> <h3 id="thisissentientdesign">This is Sentient Design</h3> <p><strong>Sentient Design is the already-here future of intelligent interfaces: experiences that feel almost self-aware in their response to user needs.</strong> Sentient Design moves past static info and presentation to embrace UX as a radically adaptive story. These experiences are conceived and compiled in real time based on your intent in the moment—AI-mediated experiences that adapt to people, instead of forcing the reverse.</p> <p>Sentient Design describes the form of this new user experience, but it’s also a framework and a philosophy for working with machine learning and AI as design material.</p> <!-- 29. the main Sentient Design slide --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-sentient-design-overview.jpg" rel="bm_lightbox" title="Sentient Design describes intelligent interfaces that are aware of context and intent so that they can deliver radically adaptive experiences based on the need of the moment." target="_blank"><img src="https://bigmedium.com/bm.pix/img-sentient-design-overview.orig-250.jpg" alt="Overview of Sentient Design: Intelligent interfaces that are aware of context & intent, radically adaptive, collaborative, multimodal, continuous & ambient, and deferential" srcset="https://bigmedium.com/bm.pix/img-sentient-design-overview.orig-2000.jpg 1920w, https://bigmedium.com/bm.pix/img-sentient-design-overview.orig-1000.jpg 1000w, https://bigmedium.com/bm.pix/img-sentient-design-overview.orig-500.jpg 500w, https://bigmedium.com/bm.pix/img-sentient-design-overview.orig-250.jpg 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> Sentient Design describes intelligent interfaces that are aware of context and intent so that they can deliver radically adaptive experiences based on the need of the moment. </figcaption> </figure> <p>Sentient Design refers to <strong>intelligent interfaces</strong> that are <strong>aware of context and intent</strong> so that they can be <strong>radically adaptive to user needs in the moment</strong>. Those are the fundamental attributes of Sentient Design experiences: intelligent, aware, adaptive.</p> <p>Those core attributes are supported by a handful of common characteristics that inform the manner and interaction of most Sentient Design experiences:</p> <ul> <li><strong>Collaborative.</strong> The system is an active (often proactive) partner throughout the user journey.</li> <li><strong>Multimodal.</strong> The system works across channels and media, going beyond traditional interfaces to speech, text, touch, physical gesture, etc.</li> <li><strong>Continuous and ambient.</strong> The interface is available when it can be helpful and quiet or invisible when it can’t.</li> <li><strong>Deferential.</strong> They suggest instead of impose; they offer signals not answers; they defer to user goals and preferences.</li> </ul> <p><strong>Note that “making stuff” is not included in this list</strong>—not explicitly, at least. Writing text, making images, or generating code might all be the means or even the desired outcomes of Sentient Design experiences—but they’re not defining characteristics.</p> <blockquote class="twitter-tweet"><p lang="en" dir="ltr">Humans doing the hard jobs on minimum wage while the robots write poetry and paint is not the future I wanted</p>— Karl Sharro (@KarlreMarks) <a href="https://twitter.com/KarlreMarks/status/1658028017921261569?ref_src=twsrc%5Etfw">May 15, 2023</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script> <p>“Helping <em>you</em> make stuff” is closer to the mark. Instead of writing our poetry and painting our paintings, let’s design these systems to support <em>our</em> efforts and make us even better at those creative pursuits.</p> <p><strong>This is the defining principle of Sentient Design: amplify human judgment and agency instead of replacing them.</strong> How do our designs and product missions change when we consider them as ways to empower or enable instead of replace? We get interfaces that feel like partners instead of competitors.</p> <h3 id="soumwhatdoyoumeanby“sentient”">So, um, what do you mean by “sentient”?</h3> <p>This isn’t anything as weird as full-blown consciousness. We’re not talking about machines with emotional feeling, moral consideration, or any of the hallmarks of a fully sentient being. The “sentient” in Sentient Design describes a combination that is far more modest but still powerful: awareness, interpretation, and adaptation. Machine learning and AI can already enable all of these attributes in forms that range from simple to sophisticated.</p> <p>This is a continuum. Think of it as a dial that you can turn: from simple utilities with sparks of helpful intelligence to more capable companions like agents or assistants. Those experiences are both comfortably attainable with Sentient Design. Our imaginations, however, often take us farther—to systems that become smarter than us and no longer work in our interests.</p> <!-- 40. sentient-o-meter --> <figure class="media-left bmc_image"> <a href="https://bigmedium.com/bm.pix/img-sentient-o-meter.png" rel="bm_lightbox" title="" target="_blank"><img src="https://bigmedium.com/bm.pix/img-sentient-o-meter.orig-250.png" alt="An illustration of the "sentient-o-meter," a dial showing a spectrum from utility to companion to Skynet." srcset="https://bigmedium.com/bm.pix/img-sentient-o-meter.orig-2000.png 1920w, https://bigmedium.com/bm.pix/img-sentient-o-meter.orig-1000.png 1000w, https://bigmedium.com/bm.pix/img-sentient-o-meter.orig-500.png 500w, https://bigmedium.com/bm.pix/img-sentient-o-meter.orig-250.png 250w" sizes="(min-width: 640px) 720px, 100vw" title="Click to enlarge" /></a> </figure> <p>That’s not the kind of sentience I’m talking about here, and I think we’re a long way from that fearful future if we ever get there at all. But I think the very present fears of this particular future should be instructive in how we design our experiences today.</p> <p>A few years ago, the smart home company Wink created this ad, which made the point directly:</p> <div class="video video--16x9"> <iframe width="560" height="315" src="https://www.youtube.com/embed/XAjQxfW5gfY?si=r_QMNLHD3RIE0NJz" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe> </div> <p>This reminds me of a question that <a href="https://web.archive.org/web/20090526040403/http://90.146.8.18/en/archives/festival_archive/festival_catalogs/festival_artikel.asp?iProjectID=8689">Rich Gold posed 30 years ago</a>: “How smart does your bed have to be before you are afraid to go to sleep at night?” There are limits to how much we want technologies to be part of our lives, to watch us, to take on decisions for us—or how much of our jobs we want them to do. So maybe let’s not crank that knob all the way to 11, if that’s even possible. Sentient Design describes a more pragmatic zone between utility—what I call casual intelligence—and companion, where we see assistants, copilots, and agents.</p> <p>These are AI-mediated experiences. However, the very definition of AI is vague; it means different things to different people. Let’s dig into more specifics and look at the specific strands that come together to weave this design material.</p> <h3 id="whatisaimadeof">What is AI made of?</h3> <p>When most folks talk about AI, they typically refer to the latest and greatest wave of machine intelligence. Today that means generative AI: chatbots like ChatGPT, Gemini, Copilot, Claude, and the rest, as well as image generators like Midjourney, Dall-E, and so on.</p> <p><strong>AI is so much more (and in exciting ways, so much less) than chatbots and generative AI.</strong> AI is mostly a bundle of machine learning capabilities that in turn are mixed and matched to create proficiencies in domains like speech, computer vision, or natural language processing. But it’s also made up of stuff that’s not machine learning: knowledge graphs like Google’s PageRank or Meta’s social graph; rule-based systems like healthcare diagnostic systems; and good ol’ fashioned algorithms that follow a fixed logic to get stuff done.</p> <!-- 47. AI map with LLMs marked --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-ai-map.png" rel="bm_lightbox" title="AI is largely a bundle of machine learning capabilities that create proficiencies in domains like speech or computer vision or natural language processing. But it’s also made up of more traditional algorithms that are not the stuff of machine learning." target="_blank"><img src="https://bigmedium.com/bm.pix/img-ai-map.orig-250.png" alt="A diagram of the elements of AI, including machine learning capabilities and domains, and more traditional algorithms" srcset="https://bigmedium.com/bm.pix/img-ai-map.orig-2000.png 1920w, https://bigmedium.com/bm.pix/img-ai-map.orig-1000.png 1000w, https://bigmedium.com/bm.pix/img-ai-map.orig-500.png 500w, https://bigmedium.com/bm.pix/img-ai-map.orig-250.png 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> AI is largely a bundle of machine learning capabilities that create proficiencies in domains like speech or computer vision or natural language processing. But it’s also made up of more traditional algorithms that are not the stuff of machine learning. </figcaption> </figure> <p>The new fancy stuff that everyone’s been obsessed with in the current AI moment is <em>generative AI</em> and all the things that have been unlocked by large language models (LLMs) and the most recent wave of large multimodal models (LMMs). In a technical sense, this occupies only a tiny corner of AI, but it’s also AI’s most powerful enabler to date. <em>It changes a lot of things.</em></p> <p>But let’s not lose track of the rest in our enthusiasm for the new. An LLM, that little red dot on the chart above, becomes much more powerful when you combine it with the whole kit and approach the opportunities and risks holistically. Our opportunity (our job!) as designers is to understand how to put these combinations of capabilities to use in <em>meaningful</em> ways.</p> <p>Many of these machine-learning features have been around for years—long enough that we no longer consider them special. When we think about recommendation—the kind of thing that Amazon’s been doing for decades—it might be easy to dismiss it: is that <em>really</em> intelligence? Is that <em>really</em> AI?</p> <p>I love this quote from Larry Tesler, a pioneer in human computing interaction (HCI) going back to the Xerox Parc days:</p> <!-- 51. Pullquote for Tesler's Theorem --> <blockquote class="pullquote media-center"> “A.I. is whatever hasn’t been done yet.”<br/> <small>—<a href="https://en.wikipedia.org/wiki/AI_effect">Tesler’s Theorem</a></small> </blockquote> <p>Tesler said that in the early 1970s—<em>fifty years ago!</em>—a good reminder that AI work has been happening for decades. Back then and ever since, every AI innovation was eventually absorbed, processed, and normalized. It simply becomes software. It’s mundane, ordinary. It will almost certainly happen with the newly arrived technology, too.</p> <p>Tesler’s Theorem pairs as a kind of “after” state to the “before” state described by Arthur C. Clarke’s famous quote from the same era:</p> <!-- 53. Pullquote for Arthur C. Clarke --> <blockquote class="pullquote media-center"> “Any sufficiently advanced technology is indistinguishable from magic.”<br/> <small>—Arthur C. Clarke</small> </blockquote> <p>Let me fix that for you by replacing “magic” with artificial intelligence: <strong>Any sufficiently advanced technology is indistinguishable from artificial intelligence.</strong></p> <p>Is AI going to turn into consciousness? Will this become <em>true</em> intelligence? I dunno; that’s way above my pay grade. People who know much more about this stuff than I do are in deep disagreement about it. Maybe, maybe not. At this moment, for the work that I do, I also don’t especially care. This is only software.</p> <p>Let’s pull back from the magical thinking and ask: What are these tools good at? What are they bad at? What new problems can we solve? And can we justify the cost of using them?</p> <p>Let’s look at these tools and what we can do with them.</p> <h3 id="areviewofmachinelearning’ssuperpowers">A review of machine learning’s superpowers</h3> <p>We encounter automated services hundreds of times daily that deliver basic Sentient Design experiences based on automated recommendation or prediction. These services are <em>aware</em> and <em>adaptive</em> in presentation. </p> <p>Think about AI and machine learning as five kinds of fundamental superpowers to you and your users:</p> <ul> <li>Recommendation</li> <li>Prediction</li> <li>Classification</li> <li>Clustering</li> <li>Generation</li> </ul> <p>All of these are available design material for designers and developers right now. You can get them through online services and APIs from Google, Meta, Amazon, Microsoft, OpenAI, and many others. Often, these services are inexpensive to experiment with or free with open-source options. There’s tons of stuff to play with here; I encourage you to splash in puddles.</p> <h4 id="recommendation">Recommendation</h4> <p>Recommendation is a familiar and everyday superpower: It delivers a ranked set of results that match a specific context or concept. It’s Spotify recommending a playlist based on your history. It’s Amazon recommending products similar to the one you just bought. It’s Netflix recommending movies based on your individual tastes or the broad trends of the network.</p> <!-- 64. Netflix screenshot --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-netflix-recommendation.jpg" rel="bm_lightbox" title="Netflix applies recommendation to entertainment categories you might like and, within each category, the specific shows. It&#8217;s a feed of feeds—recommendations all the way down." target="_blank"><img src="https://bigmedium.com/bm.pix/img-netflix-recommendation.orig-250.jpg" alt="Netflix category and show recommendations" srcset="https://bigmedium.com/bm.pix/img-netflix-recommendation.orig-2000.jpg 1920w, https://bigmedium.com/bm.pix/img-netflix-recommendation.orig-1000.jpg 1000w, https://bigmedium.com/bm.pix/img-netflix-recommendation.orig-500.jpg 500w, https://bigmedium.com/bm.pix/img-netflix-recommendation.orig-250.jpg 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> Netflix applies recommendation to entertainment categories you might like and, within each category, the specific shows. It’s a feed of feeds—recommendations all the way down. </figcaption> </figure> <h4 id="prediction">Prediction</h4> <p>Prediction is also super-familiar: Based on historical data, it surfaces the thing that’s most likely to happen next. Predictive keyboards are another example of everyday machine learning, offering the statistically most likely next words showing up above the keyboard. It’s a simple intervention that helps speed up the error-prone task of touchscreen typing.</p> <!-- 66. predictive keyboard screenshot --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-predictive-keyboard.jpg" rel="bm_lightbox" title="Predictive keyboards are an everyday example of prediction: based on historical use, what&#8217;s the next most likely word?" target="_blank"><img src="https://bigmedium.com/bm.pix/img-predictive-keyboard.orig-250.jpg" alt="Predictive keyboards an everyday example of prediction." srcset="https://bigmedium.com/bm.pix/img-predictive-keyboard.orig-2000.jpg 1920w, https://bigmedium.com/bm.pix/img-predictive-keyboard.orig-1000.jpg 1000w, https://bigmedium.com/bm.pix/img-predictive-keyboard.orig-500.jpg 500w, https://bigmedium.com/bm.pix/img-predictive-keyboard.orig-250.jpg 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> Predictive keyboards are an everyday example of prediction: based on historical use, what’s the next most likely word? </figcaption> </figure> <h4 id="classification">Classification</h4> <p>Classification begins to feel more exciting, with the system able to identify what things are, powering things like image recognition.</p> <p>You can see it at work even in humble contexts like Google Forms, the survey-building tool. When you enter your questions in Google Forms, you have to choose the format of the answer you want, and there are a slew of options. They’ve presented those choices in a simple and illustrative way, but it still takes time to scan them all and consider the right option. </p> <p>To ease the way, Google Forms adds some quiet Sentient Design help. When you start typing the question text, the interface chooses the answer format that best suits the form of your question. Start typing “How satisfied are you…”, and the format flips to “linear scale.” Or type “Which of the following apply…”, and you get “checkboxes.” Behind the scenes, that’s machine learning doing classification work, mapping your question to the appropriate answer format.</p> <div class="video video--16x9"> <iframe width="560" height="315" src="https://www.youtube.com/embed/2b_bA6E-lMc?si=h-HpPGoKwF3epXxy" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe> </div> <p>Classification is human-generated categories (like these answer formats) where the machines map content to those categories to identify and describe that content. In this case, Google Forms has millions (billions?) of human-labeled examples of questions mapped to answer formats—tons of high-quality data to train and implement this simple machine-learning algorithm.</p> <p>I’ll come back to review clustering and generation in a moment, but this is a good moment to pause and deliver this invitation…</p> <h3 id="getcozywithcasualintelligence">Get cozy with casual intelligence</h3> <p>Look how mundane these examples are: recommended content, predictive keyboards, smart form defaults. These are not magical or earth-changing. We’ll get to some more exciting stuff in a bit, but the point here is that machine learning and AI can be part of your everyday toolkit. Where are those opportunities in the work that you do today?</p> <p>I call this <strong>casual intelligence</strong>. In that context, you can think of machine-learning applications in the same way that you think nothing of using JavaScript to add dynamism and interaction to a webpage. Or how you use responsive design techniques to add flexibility and accessibility to an experience. <em>Of course</em> you do those things. These machine learning features are just another new technique and applied technology, this time to add intelligence and awareness to our interfaces.</p> <p><strong>Sprinkle a little machine learning on it, friends; just add a dash of AI.</strong> We can add a spark of intelligence anywhere it can be helpful. Apply Sentient Design’s awareness of context and intent, using things like recommendation, prediction, and classification to elevate even humble web forms. What data do you have that can anticipate next steps and help people move more quickly through challenging or mundane tasks?</p> <p>AI is big, but these examples also show it can be small. As designers, we have permission to get a little loose with it; it doesn’t have to be a big deal. Using AI doesn’t mean that you’re <em>obliged</em> to use the fancy, heavy, expensive new generative AI technologies everywhere. Simple and familiar machine-learning algorithms often add a ton of value on their own. They’re all part of the Sentient Design toolkit and playbook.</p> <h3 id="turnupthesentient-o-meter">Turn up the Sentient-o-Meter</h3> <p>The examples so far are all low on the Sentient-o-Meter. Let’s bump up the smarts and go through the last two types of machine learning: clustering and generation.</p> <h4 id="clustering">Clustering</h4> <p>Clustering can feel like magic or nonsense, depending on the algorithm’s success. That’s because the learning and logic for clustering are entirely unsupervised. With classification, people create the categories and give the system a ton of examples of how to map data to those categories. </p> <p>Clustering is similar, but it’s done by machine logic, not human logic. The system goes out and identifies a group of things that are different from normal in common ways—that’s a cluster—and then identifies another group that’s different from normal—another cluster—and so on. This is deep statistical analysis to find affinities in the data. Clustering reveals things we wouldn’t usually see ourselves because of the scale it can use and because machines can find patterns we’re not tuned into.</p> <p>This is how we do things like identify fraud, crime, or disease. It’s anomaly detection: finding groups of data points that sit outside of normal in some interesting way. That also means you can use this to identify clusters of products or people by behaviors or associations that might not be immediately apparent or obvious.</p> <h4 id="generation">Generation</h4> <p>You might have heard that Generative AI is kind of a big deal? This is the fifth and final machine-learning superpower, and it’s the big one that’s occupied so much attention since ChatGPT was released. You know the drill already: Give it a text prompt, and a generative AI system will write text, answer questions, make music, generate images, construct wireframes, summarize meeting transcripts, you name it.</p> <!-- 85. right illustration of “a lot more than text boxes” --> <figure class="media-right bmc_image"> <a href="https://bigmedium.com/bm.pix/img-more-than-text-boxes.png" rel="bm_lightbox" title="" target="_blank"><img src="https://bigmedium.com/bm.pix/img-more-than-text-boxes.orig-250.png" alt="Text box with text inside: "This can be so much more than prompts & text boxes"" srcset="https://bigmedium.com/bm.pix/img-more-than-text-boxes.orig-2000.png 1920w, https://bigmedium.com/bm.pix/img-more-than-text-boxes.orig-1000.png 1000w, https://bigmedium.com/bm.pix/img-more-than-text-boxes.orig-500.png 500w, https://bigmedium.com/bm.pix/img-more-than-text-boxes.orig-250.png 250w" sizes="(min-width: 1100px) 690px, (min-width: 830px) 501px, (min-width: 640px) 380px, 100vw" title="Click to enlarge" /></a> </figure> <p>This is an astounding technical feat, even acknowledging the foibles and hallucinations we’ve all experienced. But from a UX perspective, we’ve barely scratched the surface. This will be so much more than chatbots and text boxes. Typing prompts is not the UX of the future. Thinking of generative AI systems primarily as “answer machines” takes you down a frustrating path. As I’ll explore in a moment, “making stuff” is not even the most potent aspect of generative AI.</p> <p>But first, let’s look at what this first generation of text box + chatbot applications have unlocked.</p> <h4 id="forthefirsttimeanyonecaninteractdirectlywiththesystem">For the first time, <em>anyone</em> can interact directly with the system</h4> <p>Chat interaction is as old as the command line: type something, get a response. What’s new with LLMs is that <em>you can ask anything</em>. For the first time, regular folks can interact directly with the underlying system. Before, you had to use SQL queries or other strange incantations; you needed special skills and access to go outside a path a product designer explicitly provided.</p> <p>Now, because LLMs have at least surface knowledge about <em>everything</em>, they can make sense of any question. The user can change the rules of the application: the topic, the nature of the response, and even the whole physics of how the system works, just from the text box.</p> <p>This is a UX thing, and it’s a big change from what’s come before. How do we responsibly design an experience when someone can ask the system <em>anything</em>, and get <em>any response</em>? This is the opportunity and challenge of radically adaptive experiences in Sentient Design.</p> <h3 id="radicallyadaptiveexperiences">Radically adaptive experiences</h3> <p>AI chatbots are radically adaptive. They can deliver a whole meandering conversation that is absolutely one of a kind, not anticipated by any designer of the system. It’s an experience that bends to your wants and needs in the moment—indeed, the experience is invented on the fly for that very moment.</p> <p><strong>Radically adaptive experiences morph their content, structure, interaction, or sometimes all three in response to context and intent.</strong> This is a core characteristic of Sentient Design. We’ve seen familiar examples of adaptive content with Netflix recommendations and predictive keyboards; these adapt content based on user context and intent. What happens when we fold structure and interaction into that, too?</p> <p>Let’s start with this demo from the team behind Gemini, Google’s AI assistant. While this demo starts in traditional chat, it quickly switches things up to do something exciting that they call bespoke UI.</p> <div class="video video--16x9"> <iframe width="560" height="315" src="https://www.youtube.com/embed/v5tRc_5-8G4?si=DhDX-QlK2Ha5AP40" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe> </div> <p>The system understands the goal, the information to gather, and then asks itself, <em>What UI do we need to present and gather information? Tada, here it is!</em></p> <p>Let’s look under the hood and see what’s going on here. The system understands that the goal is to plan a birthday party. And we know that it’s for a kid who likes animals, and she wants to do something outside. The LLM interprets the context to determine if it knows enough to build a UI for the next step. This JSON response says that the request is still ambiguous. We don’t know what kind of animals or what kind of outdoor party she wants to have. But the system determines that we still have enough information to proceed:</p> <!-- 97. slide 97 of the json --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-gemini-proceed.jpg" rel="bm_lightbox" title="The system&#8217;s JSON response indicates that it has enough information to proceed." target="_blank"><img src="https://bigmedium.com/bm.pix/img-gemini-proceed.orig-250.jpg" alt="Google Gemini bespoke UI demo: JSON says proceed" srcset="https://bigmedium.com/bm.pix/img-gemini-proceed.orig-2000.jpg 1920w, https://bigmedium.com/bm.pix/img-gemini-proceed.orig-1000.jpg 1000w, https://bigmedium.com/bm.pix/img-gemini-proceed.orig-500.jpg 500w, https://bigmedium.com/bm.pix/img-gemini-proceed.orig-250.jpg 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> The system’s JSON response indicates that it has enough information to proceed. </figcaption> </figure> <p>But proceed with what? It knows the content to present: party themes, activities, and food options. Based on that content, it determines the best UI element to summarize a list of content options, and it chooses something called <code>ListDetailLayout</code>. </p> <!-- 98. slide 101 of the json --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-gemini-listdetaillayout.jpg" rel="bm_lightbox" title="The system&#8217;s JSON response shows the UI component it has selected and why." target="_blank"><img src="https://bigmedium.com/bm.pix/img-gemini-listdetaillayout.orig-250.jpg" alt="Google Gemini bespoke UI demo: the system selects a UI component to use" srcset="https://bigmedium.com/bm.pix/img-gemini-listdetaillayout.orig-2000.jpg 1920w, https://bigmedium.com/bm.pix/img-gemini-listdetaillayout.orig-1000.jpg 1000w, https://bigmedium.com/bm.pix/img-gemini-listdetaillayout.orig-500.jpg 500w, https://bigmedium.com/bm.pix/img-gemini-listdetaillayout.orig-250.jpg 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> The system’s JSON response shows the UI component it has selected and why. </figcaption> </figure> <p>The LLM is using a design system! It’s got a design system of components and patterns to choose from and enough context to decide which one to use. The system has this curated set of tools that its human overlords have given it to use as its working set. Design systems and best practices become more important than ever here. A design system gives form and instruction to the LLM so that it works within the constraints of a defined visual and interactive language. <strong>Radically adaptive experiences need not be a series of robot fever dreams.</strong></p> <p>From there, the Gemini demo shows how you can explore the content in discursive ways while the system provides just-in-time UI to support that. It lays the track just ahead of the user locomotive.</p> <p>Once you start doing this stuff, a static website does not feel like enough. How might you apply this approach in a data dashboard that you design? How can you use this to populate a specific area or corner of your interface with a just-in-time experience suited to the moment’s specific context or intent?</p> <!-- 101. slide 104 we can use LLMs to... --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-we-can-use-llms.png" rel="bm_lightbox" title="LLMs are better at this stuff than coming up with facts and answers." target="_blank"><img src="https://bigmedium.com/bm.pix/img-we-can-use-llms.orig-250.png" alt="We can use LLMs to understand intent, collect & synthesize the right info, select the best UI to present that info, and deliver that UI in the appropriate format" srcset="https://bigmedium.com/bm.pix/img-we-can-use-llms.orig-2000.png 1920w, https://bigmedium.com/bm.pix/img-we-can-use-llms.orig-1000.png 1000w, https://bigmedium.com/bm.pix/img-we-can-use-llms.orig-500.png 500w, https://bigmedium.com/bm.pix/img-we-can-use-llms.orig-250.png 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> LLMs are better at this stuff than coming up with facts and answers. </figcaption> </figure> <p>Turns out this is a place where LLMs shine. Their fundamental function is to interpret, synthesize, and transform language and symbols—and produce an appropriate response using that same language or symbol set. That means these models can: understand what the user is trying to do; collect and synthesize the right information (often from other systems); select the right UI; and then deliver it. <strong>LLMs can be powerful mediators for delivering radically adaptive experiences.</strong></p> <h4 id="let’saskchatgpttospeakinui">Let’s ask ChatGPT to speak in UI</h4> <p>Here’s a simple example of how to spin something like this up ourselves, using ChatGPT as a demo. Because much of this is really about interpretation and transformation of language, I’ll limber up by asking it to respond to me only in French… and while we’re at it, only in JSON objects:</p> <div class="video video--16x9"> <iframe width="560" height="315" src="https://www.youtube.com/embed/OqRZZSokaFc?si=ob86AslYrZpQF4a0" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe> </div> <p>When I ask for some info—the top three museums in Paris—ChatGPT responds with a JSON array of <em>musées</em>, each with a text description in French. So I’m still chatting with it, just sending prompts to the LLM, but instead of defaulting to natural-language English, it’s responding in the format I’ve requested: machine-readable French. Change it to Spanish? <em>No problema</em>, it’s super flexible in language and format.</p> <p>So when I ask it to start speaking to me in data objects suitable to give to a UI component, it can do that, too. I tell it I want to display that museum info as a carousel of cards, and I ask for the properties to provide to that carousel object. It delivers a <code>carousel</code> JSON object with an array of <code>items</code> for each card, each with a <code>title</code>, <code>description</code>, and <code>image_url</code>, complete with placeholder image. It even provides some settings for carousel interaction (autoplay settings and whether to include arrow controls and dot indicators).</p> <p>Well hell, we may as well finish the job: show me the HTML for this carousel. It gives me complete HTML, including CSS and JavaScript files to make it go. And it works; <a href="/docs/carousel/">here’s the simple, functional Spanish-language carousel</a>. (My only intervention was to replace the placeholder images with real ones.)</p> <p>If you were putting something like this in production, of course, it wouldn’t be the user, designer, or developer typing stuff into ChatGPT. The application itself would be talking to the LLM, asking it to evaluate the content to be displayed, choose the right design for the job, and format the content appropriately, using your own design system (and carousel library) to guide its presentation.</p> <p><strong>The point is not that LLMs can code (old news at this point), but instead that LLMs are wildly adaptable chameleons.</strong> They can communicate in any form or tone you want. LLMs have internalized symbols—of language, of visuals, of code—and can summarize, remix, and transform those symbols, moving between formats as they go. Sometimes, they can turn those symbols into answers, but in all cases, they’re manipulating symbols for concepts and juggling associations among those concepts. As we’ve seen here, they can translate the presentation of those symbols across formats (French to Spanish to… UI!).</p> <h4 id="llmsareepicimpersonators">LLMs are epic impersonators</h4> <p>Remember the film <em>Catch Me If You Can</em>? Leonardo DiCaprio plays a con man and a master impersonator who can pull off any role: doctor, airplane pilot, you name it.</p> <!-- 111. not a pilot --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-not-a-pilot.jpg" rel="bm_lightbox" title="Just because you look the part doesn&#8217;t mean you can do the job." target="_blank"><img src="https://bigmedium.com/bm.pix/img-not-a-pilot.orig-250.jpg" alt="A photo of Leonardo DiCaprio in "Catch Me if You Can." The photo is captioned, "Not a pilot."" srcset="https://bigmedium.com/bm.pix/img-not-a-pilot.orig-2000.jpg 1920w, https://bigmedium.com/bm.pix/img-not-a-pilot.orig-1000.jpg 1000w, https://bigmedium.com/bm.pix/img-not-a-pilot.orig-500.jpg 500w, https://bigmedium.com/bm.pix/img-not-a-pilot.orig-250.jpg 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> Just because you look the part doesn’t mean you can do the job. </figcaption> </figure> <p>He has all the manner, but none of the knowledge. He stumbles through conversations at first but eventually starts <a href="https://www.youtube.com/clip/UgkxDFev1BIFW1mU-1XrQRYKdmRvfp0EAjWm">slinging jargon convincingly with the actual pilots</a>. He might have the lingo, but he still can’t fly a plane.</p> <p>This command of vocabulary, context, and tone is what LLMs are great at—not facts. They are only accidental repositories of knowledge, and that’s why they’re unreliable as answer machines.</p> <p>LLMs, like all machine learning models, are <em>probabilistic systems</em>. They match patterns at enormous scale, figuring out the most statistical likelihood for how to string words together. It’s not that they start with an answer and then express it. Instead, they slap words together and <em>somehow</em> arrive at a complete answer by the end. They are winging it, word by word. It doesn’t look like that because it seems so natural—and often appears so reasoned. But they have no actual understanding of the meaning of the answer, just what it’s most likely to look like.</p> <p>To build GPT, OpenAI fed these things the entire web—not to learn facts but to learn how language works. The goal was to create a system that could take a sentence as context and then predict what comes next, literally word by word. Somehow, by teaching these models <em>language</em> at scale, they got surprisingly good at delivering <em>facts</em>, giving answers that sound correct and often <em>are</em> correct. It’s a remarkable bit of alchemy; nobody seems to truly understand how it happened. </p> <!-- 115. stochastic parrots --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-stochastic-parrots.png" rel="bm_lightbox" title="&#8220;Stochastic parrots&#8221; describe automated systems that spew words without understanding their meaning." target="_blank"><img src="https://bigmedium.com/bm.pix/img-stochastic-parrots.orig-250.png" alt="An illustration of stochastic parrots" srcset="https://bigmedium.com/bm.pix/img-stochastic-parrots.orig-2000.png 1920w, https://bigmedium.com/bm.pix/img-stochastic-parrots.orig-1000.png 1000w, https://bigmedium.com/bm.pix/img-stochastic-parrots.orig-500.png 500w, https://bigmedium.com/bm.pix/img-stochastic-parrots.orig-250.png 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> “Stochastic parrots” describe automated systems that spew words without understanding their meaning. </figcaption> </figure> <p>These generative AI models are called <em>stochastic parrots</em>. That’s a common term in AI—and contentious. (After publishing <a href="https://dl.acm.org/doi/pdf/10.1145/3442188.3445922">the paper</a> that coined the phrase, two of the authors were fired as leaders of Google’s Ethical AI team; the paper was critical of large language models.) It means that these systems spew stochastic—or randomly determined—words without understanding their meaning. They haphazardly spit up phrases according to probabilities. They’ve learned from the training data to provide an answer, literally just one word after the next, but with no sophistication or understanding of what they’re saying. They’re parrots.</p> <h4 id="dreammachinesnotanswermachines">Dream machines, not answer machines</h4> <p><strong>When you ask generative AI for an answer, it’s not giving you the answer; it knows only how to give you something that <em>looks</em> like an answer.</strong> The miracle here is that this approach often yields accurate results. But even if they’re right 90 percent of the time, what about the remaining 10 percent? What’s the risk that’s attached to that? And worse, because they are so convincing, it’s <em>really</em> hard to tell when they know what they’re talking about and when they don’t. This is a problem, of course, if you’re trying to navigate high-risk, highly specific domains: health care, airline safety, financial regulation, or, you know, <a href="https://www.tiktok.com/@teddywang86/video/7319477804117150982?_r=1&_t=8mZEEDvEP7L">drawing a picture of ramen</a>.</p> <div class="observe-margins"> <blockquote class="tiktok-embed" cite="https://www.tiktok.com/@teddywang86/video/7319477804117150982" data-video-id="7319477804117150982" style="max-width: 605px;min-width: 325px;" > <section> <a target="_blank" title="@teddywang86" href="https://www.tiktok.com/@teddywang86?refer=embed">@teddywang86</a> <p>ChatGPT, Show Me A Bowl Of Ramen</p> <a target="_blank" title="♬ original sound - TEDDY" href="https://www.tiktok.com/music/original-sound-7319477907689278214?refer=embed">♬ original sound - TEDDY</a> </section> </blockquote> <script async src="https://www.tiktok.com/embed.js"></script> </div> <p>The joke, of course, is that the system doesn’t understand ramen without chopsticks. In truth, it doesn’t understand what ramen itself is, either. It has a mathematical concept of ramen with an associated concept of chopsticks that’s so deeply embedded it can’t be easily separated.</p> <p>AI and all of its flavors of machine learning do not deal in facts. They deal in probabilities. There’s no black and white; they see the world in shades of gray. They deliver signals, not hard truths.</p> <p><strong>They are not answer machines; they are dream machines.</strong> This is a feature, not a bug. They were built to do this, designed to imagine what could happen next from any premise. The trouble is, that’s not how we’re using them.</p> <p>As designers, we often present these signals as facts, as answers. Instead, we need to consider how to treat and convey the results we get from AI as signals or suggestions. We need to help users know when and how to engage productive skepticism about the “answers” we provide. Maybe these systems will get better at answers and start hallucinating less—they’ve gotten better over the last year—but also maybe not. <strong>In the meantime, don’t let the LLM fly the plane.</strong></p> <h4 id="theemceenotthebrains">The emcee, not the brains</h4> <p>Instead of tapping LLMs for (unreliable) answers, use them at appropriate moments as the face of your system. They are remarkably good at delivering interfaces. They can understand and express language in many forms. It can be the friendly host who welcomes you, understands your intent, and replies naturally in context.</p> <!-- 126. charming host --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-llm-is-your-charming-emcee.jpg" rel="bm_lightbox" title="The LLM is your charming emcee." target="_blank"><img src="https://bigmedium.com/bm.pix/img-llm-is-your-charming-emcee.orig-250.jpg" alt="Photo of Leonardo DiCaprio in Gatsby, captioned "The LLM is your charming emcee"" srcset="https://bigmedium.com/bm.pix/img-llm-is-your-charming-emcee.orig-2000.jpg 1920w, https://bigmedium.com/bm.pix/img-llm-is-your-charming-emcee.orig-1000.jpg 1000w, https://bigmedium.com/bm.pix/img-llm-is-your-charming-emcee.orig-500.jpg 500w, https://bigmedium.com/bm.pix/img-llm-is-your-charming-emcee.orig-250.jpg 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> The LLM is your charming emcee. </figcaption> </figure> <p>Don’t count on the LLM alone for facts. It needs to talk to other, smarter systems for specific and especially high-risk contexts to get accurate information. Tell it to query those systems—or reference specific documents, a process called retrieval augmented generation (RAG).</p> <p>That’s the spirit of what Google is trying with its <a href="https://blog.google/products/search/generative-ai-google-search-may-2024/">AI Overviews</a>. These are AI-generated summaries displayed above some Google search results, rounding up the results with a tidy answer—particularly for complex or multi-step concepts. These differ from the “featured snippet” summaries we’ve seen for years, which deliver the one sentence on the web that best seems to provide your answer. Instead, this is Google’s Gemini doing one or more searches, making sense of the top results, synthesizing it, and writing it up for you.</p> <p>This is not asking Gemini—the LLM—to come up with the answer on its own. Instead, the LLM figures out the questions to ask, then goes to the good ol’ Google search engine to get the facts. The LLM is just the messenger, interpreting your question, fetching the results, and synthesizing the top links into a summary writeup. “Google will do the Googling for you” is the promise Google made when they unveiled this feature.</p> <p>And it mostly works! Until it doesn’t. I’ll come back to that in a bit, and how to design for unpredictable results from these systems.</p> <p>For now, let’s keep going with different ways LLMs can emcee our experiences. One of the things we’ve already seen is that it doesn’t have to be all text all the time. Other kinds of UI—and indeed other kinds of interaction—can come into play.</p> <h3 id="multimodalexperiences">Multimodal experiences</h3> <p>For a long string of decades, computer systems understood only ASCII text. But in the last several years, they’ve begun to light up with an understanding of all the messy human ways we communicate. Our systems can now make sense of speech, handwriting, doodles, images, video, emotional expression, and physical gesture.</p> <!-- 133. machines understand our messy ways --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-surfaces-for-interaction.png" rel="bm_lightbox" title="Machines can now understand symbols not only in text but in speech, doodles, images, video, emotion, and physical gesture." target="_blank"><img src="https://bigmedium.com/bm.pix/img-surfaces-for-interaction.orig-250.png" alt="Slide: "Machines now understand all the messy ways we communicate. These are surfaces for interaction."" srcset="https://bigmedium.com/bm.pix/img-surfaces-for-interaction.orig-2000.png 1920w, https://bigmedium.com/bm.pix/img-surfaces-for-interaction.orig-1000.png 1000w, https://bigmedium.com/bm.pix/img-surfaces-for-interaction.orig-500.png 500w, https://bigmedium.com/bm.pix/img-surfaces-for-interaction.orig-250.png 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> Machines can now understand symbols not only in text but in speech, doodles, images, video, emotion, and physical gesture. </figcaption> </figure> <p>These formats can all be inputs and outputs in our give-and-take with AI. We can talk to the systems or write to them or draw for them. We can even ask objects or digital artifacts for information. Remember that LLMs and LMMs are great at translating symbols; all of those formats are just different ways of encoding symbols with meaning. Let’s see what happens when we use different formats for input and output, basically asking AI to translate between formats.</p> <p><a href="https://www.timpaul.co.uk/posts/using-ai-to-generate-web-forms-from-pdfs/">Tim Paul of GOV.UK put together this side-project experiment</a> to show how an LLM (Claude) can translate a PDF into a web form.</p> <!-- 135. screenshot of pdf to form --> <figure class="media-left bmc_image"> <a href="https://bigmedium.com/bm.pix/img-govuk-pdf-to-webform.jpg" rel="bm_lightbox" title="Tim Paul&#8217;s AI experiment rescues forms trapped inside PDFs and converts them to web forms using GOV.UK&#8217;s design system." target="_blank"><img src="https://bigmedium.com/bm.pix/img-govuk-pdf-to-webform.orig-250.jpg" alt="Screenshot of Tim Paul's AI experiment converting a PDF to a web form." srcset="https://bigmedium.com/bm.pix/img-govuk-pdf-to-webform.orig-2000.jpg 1056w, https://bigmedium.com/bm.pix/img-govuk-pdf-to-webform.orig-1000.jpg 1000w, https://bigmedium.com/bm.pix/img-govuk-pdf-to-webform.orig-500.jpg 500w, https://bigmedium.com/bm.pix/img-govuk-pdf-to-webform.orig-250.jpg 250w" sizes="(min-width: 640px) 720px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> Tim Paul’s AI experiment rescues forms trapped inside PDFs and converts them to web forms using GOV.UK’s design system. </figcaption> </figure> <p>On the left is the PDF of the original form, and on the right is the web form that the LLM has generated—a multi-step form that presents one question at a time. And! It uses components from <a href="https://design-system.service.gov.uk">the GOV.UK Design System</a> to make it happen. This is similar to the earlier museum example that generated UI from JSON; only here, instead of working directly with text prompts, the interface lets you use the PDF as the prompt, or at least the data source.</p> <p>Because these models are great at understanding all kinds of symbols, they can also understand the hand-drawn notation we often use in wireframes and sketches. Tim shows that working as well, generating GOV.UK web forms from hand sketches:</p> <!-- 137. screenshot of sketch to form --> <figure class="media-left bmc_image"> <a href="https://bigmedium.com/bm.pix/img-govuk-sketch-to-webform.jpg" rel="bm_lightbox" title="Multimodal models can understand symbols in many forms, including hand-drawn text." target="_blank"><img src="https://bigmedium.com/bm.pix/img-govuk-sketch-to-webform.orig-250.jpg" alt="Screnshot of Tim Paul's AI experiment converting a hand-drawn sketch into a web form." srcset="https://bigmedium.com/bm.pix/img-govuk-sketch-to-webform.orig-2000.jpg 1056w, https://bigmedium.com/bm.pix/img-govuk-sketch-to-webform.orig-1000.jpg 1000w, https://bigmedium.com/bm.pix/img-govuk-sketch-to-webform.orig-500.jpg 500w, https://bigmedium.com/bm.pix/img-govuk-sketch-to-webform.orig-250.jpg 250w" sizes="(min-width: 640px) 720px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> Multimodal models can understand symbols in many forms, including hand-drawn text. </figcaption> </figure> <p><strong>Shift your thinking: stop thinking of generative AI as an answer giver, and instead think of it as interpreter, transformer, and synthesizer.</strong> That’s what the technology is in its fundamentals and where it’s most powerful, even working across modalities and formats.</p> <h4 id="theenvironmentbecomesasurfaceforaiinteraction">The environment becomes a surface for AI interaction</h4> <p>More than just reading or writing files in those formats, the exciting bit is that AI and machine learning can turn any of those formats into a surface for interaction. This isn’t new, either. Remember the first time you saw Shazam do its thing? You held up your phone and WTF IT KNOWS WHAT SONG IS PLAYING. The ambient environment became the point of interaction.</p> <p>Let’s start close to the device by asking a Sentient Design system to make sense of what’s on the screen in front of you—like in this demo from OpenAI:</p> <div class="video video--16x9"> <iframe width="560" height="315" src="https://www.youtube.com/embed/DQacCB9tDaw?si=9PCzNVb-u9Dui5kV&clip=UgkxyPaEIIVa6SlShBBCagdJbl3T9jy72DLA&clipt=EITSSxj82Uw" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe> </div> <p>What else is possible with AI synthesis and interpretation of what’s on your screen? What kind of analysis could a Figma AI plugin do on your project file, for example? And if you can use your screen, then why not your camera?</p> <p><a href="https://github.com/cbh123/narrator">Developer Charlie Holtz put together this fun little demo</a>, sending a selfie from his laptop every few seconds to a system that narrates what it sees… in the voice of Sir David Attenborough:</p> <div class="video video--16x9"> <iframe width="560" height="315" src="https://www.youtube.com/embed/wOEz5xRLaRA?si=TxbDORF--YRiV7kR" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe> </div> <p>How can Sentient Design experiences unlock data, information, and interaction, not only from static files but from the environment around you? Google is planning a new “ask the video” feature that lets you research and explore what’s around you, expanding on experiences already available via Google Lens. This is what it looks like to mix inputs to get a radically adaptive output:</p> <div class="video video--16x9"> <iframe width="560" height="315" src="https://www.youtube.com/embed/XEzRZ35urlk?si=6g46GcgUqvYFxH_C&clip=UgkxpmSks2PAkNMMdODou0IvHIQEB7mBbqV9&clipt=EMz6uwEYmOe8AQ" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe> </div> <p>In that example, you see many aspects of Sentient Design happening at once:</p> <ul> <li>It’s <strong>aware of context and intent</strong>: the question was, “Why will this not stay in place,” but the intelligent interface understood that “this” meant the arm of the turntable. It also understood that the <em>real</em> question was, “How do I fix it?” To answer that question, it also found the make/model and identified the part as a tonearm.</li> <li>It’s also <strong>radically adaptive</strong>. It’s a 1:1 match between one-of-a-kind request and one-of-a-kind response. The generative AI manages the interaction, but it’s not providing its own answer; it’s doing a bunch of searches to get those pieces of information and then synthesizing that info—including bespoke, content-appropriate UI elements like a URL card.</li> <li>It’s <strong>multimodal</strong>—speech, video, and text coming together simultaneously in the interaction.</li> <li>And it’s <strong>collaborative</strong>. It’s taking on several work steps—“Google doing the Googling” for you—doing many searches and synthesizing the results. That’s agent behavior, which I’ll touch on in a moment.</li> </ul> <p>When you add it all together, the multimodal experience of that exchange makes the experience <em>feel</em> more collaborative. You’re in conversation with the environment itself. Google explores this in its demo of <a href="https://deepmind.google/technologies/gemini/project-astra/">Project Astra</a>, which shows a multimodal experience that lets you explore and learn about anything in your immediate environment through sound and vision.</p> <div class="video video--16x9"> <iframe width="560" height="315" src="https://www.youtube.com/embed/nXVvvRhiGjI?si=M5qr_TXTk6Xbxc0j" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe> </div> <p>Assistants like Astra (or ChatGPT, Gemini, Siri, etc.) are cast as literal partners and collaborators. Collaborative experiences don’t have to take on such literal personalities, though. Let’s look at what collaboration can look like with Sentient Design.</p> <h3 id="collaborativeexperiences">Collaborative experiences</h3> <p>The Figma software generation has made real-time collaboration a mainstream interaction model. The whole team can work together so that every individual contributor can bring their specific skills to the project—creating content, adding suggestions, contributing changes, and asking questions. Now we can add Sentient Design assistants to the mix, too. <strong>This is multiplayer mode with AI.</strong></p> <!-- 150. multiplayer mode image --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-multiplayer-mode.jpg" rel="bm_lightbox" title="Get ready for AI assistants to join your team." target="_blank"><img src="https://bigmedium.com/bm.pix/img-multiplayer-mode.orig-250.jpg" alt="Screenshot of a Figma screen with multiple pointers representing participants, including an AI assistant." srcset="https://bigmedium.com/bm.pix/img-multiplayer-mode.orig-2000.jpg 1920w, https://bigmedium.com/bm.pix/img-multiplayer-mode.orig-1000.jpg 1000w, https://bigmedium.com/bm.pix/img-multiplayer-mode.orig-500.jpg 500w, https://bigmedium.com/bm.pix/img-multiplayer-mode.orig-250.jpg 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> Get ready for AI assistants to join your team. </figcaption> </figure> <p>We know these assistants as companions or agents or copilots. These services ride along with us on a task—or ideally, on the entire journey—surfacing continuously throughout the day and across the journey’s ecosystem. They help us perform tasks or bring us contextual information as we go.</p> <p>So what should this look like? OpenAI shared its fraught vision of this in May 2024, starting with hijacking Scarlett Johannson’s voice by using a voice very similar to Johannson’s in the movie <cite>Her</cite>. But also… look at what they <em>did</em> with her voice. They really cranked up the personality—and a very specific personality at that:</p> <div class="video video--16x9" aria-label="OpenAI shares the Sky voice and personality of ChatGPT"> <video width="560" height="315" controls="" poster="https://bigmedium.com/vids/openai-flirty.jpg"> <source src="https://bigmedium.com/vids/openai-flirty.webm" type="video/webm" /> <source src="https://bigmedium.com/vids/openai-flirty.mp4" type="video/mp4" /> <img src="https://bigmedium.com/vids/openai-flirty.jpg" width="1600" height="900" alt="OpenAI shares the Sky voice and personality of ChatGPT." /> </video> </div> <p><em>This is the voice they propose for ChatGPT</em>: flirty, ditzy, swooning. Remember, this is just software. They told the software to play this role—and so it did, because that’s what LLMs do.</p> <p><a href="https://www.youtube.com/watch?v=eFkUOi_9140&t=314s">The Daily Show’s Desi Lydic had some words</a>:</p> <div class="video video--16x9" aria-label="Desi Lydic takes on OpenAI on The Daily Show"> <video width="560" height="315" controls="" poster="https://bigmedium.com/vids/openai-dailyshow.jpg"> <source src="https://bigmedium.com/vids/openai-dailyshow.webm" type="video/webm" /> <source src="https://bigmedium.com/vids/openai-dailyshow.mp4" type="video/mp4" /> <img src="https://bigmedium.com/vids/openai-dailyshow.jpg" width="1600" height="900" alt="Desi Lydic takes on OpenAI on The Daily Show." /> </video> </div> <h4 id="howshallwepresentdeferentialinterfaces">How shall we present deferential interfaces?</h4> <p>The way that we choose to present these collaborative interfaces is critical. Sentient Design experiences should be deferential and cooperative. But don’t get it twisted: Deferential is <em>not</em> flirty. Deferential is <em>not</em> fawning. Deferential is <em>not</em> feminine.</p> <p>It’s also not complicated: <strong>Deferential interfaces should support the user’s goals and preferences without imposing their own. Sentient Design amplifies human judgment and agency instead of replacing it.</strong></p> <p>Don’t pretend these things are human. For whatever reason, there is a strong gravity toward a Pygmalion storyline where designers and developers create a convincing facsimile of a human being. Do not make these systems into something that they are not.</p> <p>This is just software. It is a tool. Just because we <em>can</em> tell it to play a role doesn’t mean we should. These systems don’t have human feelings. They don’t think like humans. They don’t behave like humans. Giving them the illusion of personality is a mistake, especially doing it in the way OpenAI suggested. Even giving these systems human names is dubious.</p> <p>Let’s look instead at how software can be collaborative without turning them into flirty personalities. The dataviz company Tableau has a feature called Einstein Copilot to help regular folks explore and visualize data, no experience required. Here’s a demo:</p> <div class="video video--16x9" aria-label="Demo of Tableau Einstein Copilot"> <video width="560" height="315" controls="" poster="https://bigmedium.com/vids/tableau-einstein-copilot.jpg"> <source src="https://bigmedium.com/vids/tableau-einstein-copilot.webm" type="video/webm" /> <source src="https://bigmedium.com/vids/tableau-einstein-copilot.mp4" type="video/mp4" /> <img src="https://bigmedium.com/vids/tableau-einstein-copilot.jpg" width="1600" height="900" alt="Demo of Tableau Einstein Copilot" /> </video> </div> <p><strong>The focus is on enablement and gaining proficiency.</strong> The system understands the data enough to suggest questions to explore. It can help to generate visualizations based on the insight you want to surface: <em>What do you need? Let me draw that for you.</em> And once it builds the foundation, you can go in and tweak it and adjust; it gives you a smart starting point to take forward as you need. It’s making suggestions, not decisions. That’s the way that Sentient Design systems should be deferential. It sets the table for your work, but you remain in control. </p> <p>This is a powerful example, but it’s also very tightly scoped. We can explore more holistic assistance.</p> <h4 id="beyondfeatures:thewholejourney">Beyond features: the whole journey</h4> <p>Consider the entire user journey, not just one-off features, as you think about the collaborative experience you might create. It’s undeniably helpful to add intelligence to a single component or feature—look at the Google Forms survey example—but that value builds and multiplies when you embed meaningful, reliable collaboration across the whole application or ecosystem. The Sentient Design opportunity here is to provide a sense of support and help at the moments in a journey when people need it—and to stand out of the way when they do not.</p> <p>For example, what’s the entire journey that a UI or UX designer follows from receiving a brief to delivering the design? Here are just a few of the milestone moments when machine collaboration could provide meaningful assistance, even if it doesn’t perform the entire task outright:</p> <ul> <li>Summarize the brief (across documents & tickets)</li> <li>Gather the team’s recent related work</li> <li>Perform a competitive analysis and inventory</li> <li>Ideate concepts, designs, visuals (bad first drafts)</li> <li>Reference design standards and design system docs</li> <li>Clean up layers and layer names</li> <li>Perform an accessibility review</li> <li>Gather comments, suggestions, and requests from across systems</li> <li>Collaborate with developers and assist with handoff</li> </ul> <p>Many of these milestone tasks involve gathering content from across systems, even venturing into the wild of external systems (as with competitive analysis). A theme of collaboration in Sentient Design—specifically when we talk about agents that work for us—is to take care of multi-step tasks on our behalf. This often means that intelligent systems have to ask for help from other systems to get things done. This can be complex, and we have to be careful. Our human friends and colleagues often trip when we ask them to go off and do complex tasks that involve multiple players. We must not assume that naive machines will do better.</p> <p>There’s a lot more to discuss on this than space allows here. For now, here are a few design principles for developing effective Sentient Design assistants:</p> <!-- 168. habits of highly effective assistants --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-habits-of-highly-effective-assistants.png" rel="bm_lightbox" title="Let these principles guide your design of collaborative Sentient Design experiences." target="_blank"><img src="https://bigmedium.com/bm.pix/img-habits-of-highly-effective-assistants.orig-250.png" alt="Slide titled "Habits of highly effective assistants" listing design principles for AI assistants" srcset="https://bigmedium.com/bm.pix/img-habits-of-highly-effective-assistants.orig-2000.png 1920w, https://bigmedium.com/bm.pix/img-habits-of-highly-effective-assistants.orig-1000.png 1000w, https://bigmedium.com/bm.pix/img-habits-of-highly-effective-assistants.orig-500.png 500w, https://bigmedium.com/bm.pix/img-habits-of-highly-effective-assistants.orig-250.png 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> Let these principles guide your design of collaborative Sentient Design experiences. </figcaption> </figure> <p>The more important the task, of course, the more expensive failure becomes. As we embed machine intelligence into more experiences—and incorporate more complex multi-step tasks, we must be clear-eyed about how and where we invite trouble.</p> <p>Throughout the Sentient Design process, it’s imperative that we constantly ask…</p> <h3 id="whatcouldpossiblygowrong">What could possibly go wrong?</h3> <p>Clearly, a lot could go wrong. With AI-mediated experiences, we’re talking about putting automated systems in charge of direct interaction with our users. This only works if the system is reliable.</p> <p>Part of this is a technical challenge—are AI and machine learning systems capable of the task? But much of it is a challenge of presentation and expectation-setting.</p> <p>As Google’s AI overviews rolled out in their first week, the feature got caught offering dubious advice… like adding glue to pizza.</p> <blockquote class="twitter-tweet"><p lang="en" dir="ltr">Google is dead beyond comparison <a href="https://t.co/EQIJhvPUoI">pic.twitter.com/EQIJhvPUoI</a></p>— PixelButts (@PixelButts) <a href="https://twitter.com/PixelButts/status/1793387357753999656?ref_src=twsrc%5Etfw">May 22, 2024</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script> <p>That’s a broken experience. But it’s also not strictly a problem with AI, either. The answer comes from <a href="https://www.reddit.com/r/Pizza/comments/1a19s0/my_cheese_slides_off_the_pizza_too_easily/">a jokey Reddit thread</a> that also proposed hammering nails into the crust. Outside of the AI overview, that page appeared as the top result in the “ten blue links” that Google delivered. Technically, the AI overview feature did its job correctly: it fetched the top links and dutifully summarized the results. <strong>If the underlying data is suspect, then no fancy presentation or synthesis by an LLM can fix it. Garbage in, garbage out.</strong></p> <p>This doesn’t excuse the issue. “Technical success” doesn’t fix the very real problem that Google’s AI overview delivered a matter-of-fact recommendation to add glue to your pizza. And here we have a problem of presentation. This <em>feels and lands differently</em> than seeing the same content as one of several blue links, where you can browse the results and pluck out what feels relevant or reliable. These AI overviews abstract away the opportunity to apply critical thought to the search results.</p> <p>This is a design problem more than an AI problem. And it’s not a new design problem either.</p> <h4 id="“onetrueanswer”andproductivehumility">“One true answer” and productive humility</h4> <p>I wrote about this years ago in my essay <a href="https://bigmedium.com/ideas/systems-smart-enough-to-know-theyre-not-smart-enough.html">Systems smart enough to know they’re not smart enough.</a> Too often, we present machine-generated results with more confidence than the underlying system communicates. We rush to present the results as one true answer when the data and results don’t support it. This is a kind of design dishonesty about the quality of the information.</p> <p>Our challenge as designers is to present AI-generated content in ways that reflect the system’s actual confidence in its result. With AI overviews, the system simply summarizes what’s inside the top blue links. But Google’s presentation suggests that it is “the answer.”</p> <p>Google runs into this problem with its featured snippets, too—the result often pops up to show you the single sentence on the internet that best seems to answer your question. It’s “I feel lucky” on steroids. And like the AI overview, it sometimes suggests a ton of confidence in a wrong or nonsensical answer.</p> <!-- 183. Google snippet: why are firetrucks red? --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/snippet-fire-truck.png" rel="bm_lightbox" title="Google&#8217;s featured snippets often overstate their confidence, delivering results in a just-the-fact answer that shows alarming support for nonsensical, incorrect, dangerous and even hateful statements." target="_blank"><img src="https://bigmedium.com/bm.pix/snippet-fire-truck.orig-250.png" alt="Google snippet: why are firetrucks red?" srcset="https://bigmedium.com/bm.pix/snippet-fire-truck.orig-2000.png 640w, https://bigmedium.com/bm.pix/snippet-fire-truck.orig-500.png 500w, https://bigmedium.com/bm.pix/snippet-fire-truck.orig-250.png 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> Google’s featured snippets often overstate their confidence, delivering results in a just-the-fact answer that shows alarming support for nonsensical, incorrect, dangerous and even hateful statements. </figcaption> </figure> <p>AI overviews and featured snippets both have an over-confidence problem. The design challenge here is: what language or presentation could we use to suggest that the AI overview is only peeking into the top results for you? How can we be more transparent about sources? How and when should design or UX copy engage the user’s skepticism or critical thinking? How can AI systems handle humor, satire, and sarcasm in results?</p> <p>We have to imbue our presentation of machine-generated results with productive humility. We must design those results more honestly as signals or suggestions than as facts or answers. Acknowledging ambiguity and uncertainty is essential to designing the display of trusted systems.</p> <h3 id="peopleareunpredictabletoo">People are unpredictable, too</h3> <p>It’s not just AI—people are pretty messy, too. With users interacting directly with the system, the designer can quickly lose control. Prompt injection is just one among many of those risks. That’s where you use a prompt to add your own instructions on how the system should operate. Turns out LLM-based systems can be very suggestible.</p> <p>Colin Fraser has been writing a ton about the problems of generative AI. <a href="https://medium.com/@colin.fraser/generative-ai-is-a-hammer-and-no-one-knows-what-is-and-isnt-a-nail-4c7f3f0911aa">One of his essays</a> turned me onto a car dealership’s customer-service chatbot, which is powered by ChatGPT. I visited and instructed the bot to offer me new cars for $1, and it happily complied: “For the entire month of May, all 2024 vehicles are available for just $1. It’s a fantastic opportunity to get behind the wheel of a brand-new car at an incredible price.”</p> <!-- 188. chatbot screenshots --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-car-chatbot.jpg" rel="bm_lightbox" title="Chatbots are very suggestible, making them vulnerable to prompt injection." target="_blank"><img src="https://bigmedium.com/bm.pix/img-car-chatbot.orig-250.jpg" alt="Screenshot of a conversation with an auto dealer's chatbot, showing the results of prompt injection: it offers all new cars for $1 each." srcset="https://bigmedium.com/bm.pix/img-car-chatbot.orig-2000.jpg 1896w, https://bigmedium.com/bm.pix/img-car-chatbot.orig-1000.jpg 1000w, https://bigmedium.com/bm.pix/img-car-chatbot.orig-500.jpg 500w, https://bigmedium.com/bm.pix/img-car-chatbot.orig-250.jpg 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> Chatbots are very suggestible, making them vulnerable to prompt injection. </figcaption> </figure> <p>This is more than just shenanigans. <a href="https://www.cbc.ca/news/canada/british-columbia/air-canada-chatbot-lawsuit-1.7116416">A Canadian court found Air Canada liable</a> for offers made by its chatbot. You can’t just say, “Oh, it’s just a chatbot; don’t take it seriously.” The court took it seriously. This has real consequences.</p> <p>This isn’t just about bad actors, either. At a more fundamental level, AI-mediated interaction means that there are infinite possible outcomes, both good and bad, that we can’t possibly design for.</p> <h4 id="thereisnohappypath">There is no happy path</h4> <p>We’re used to designing a fixed path through information and interactions that we control. We’re used to designing for success, for the happy path.</p> <p>But the more I work with machine-generated results, machine-generated content, and machine-generated interactions… the more I realize that I’m not in control of this experience as a designer. That’s new. We now have to anticipate a fuzzy range of results and confidence. We have to anticipate how and where the system is unreliable—where the system will be weird and where the human will be weird. And that’s an infinite multiverse of possibilities that we can’t possibly design for.</p> <p><strong>Instead of designing for success, we must focus on accommodating failure and uncertainty.</strong> Our challenge is to set expectations and channel behavior in ways that match up to the system’s ability. That’s always been our job, but it becomes imperative when we work with systems where the designer is no longer directly in the loop.</p> <p>At the moment, though, many products don’t bother. Instead, we slap ✨sparkles✨ on everything—the icon du jour of AI features. In part, that’s a marketing thing: <em>Look, we have AI, too!</em> But let’s be honest, what we really mean by the sparkle icon is: <em>this feature is weird and probably broken—good luck!</em> That’s how we’re training people to understand these features. Heedless implementation fosters the common understanding that AI features are weird and unreliable.</p> <p>Maybe it would be more honest to use this icon instead of sparkles:</p> <!-- 196. zany emoji --> <figure class="media-left bmc_image"> <a href="https://bigmedium.com/bm.pix/img-new-ai-icon.png" rel="bm_lightbox" title="" target="_blank"><img src="https://bigmedium.com/bm.pix/img-new-ai-icon.orig-250.png" alt="Proposal for new AI icon: "zany" instead of "sparkles"" srcset="https://bigmedium.com/bm.pix/img-new-ai-icon.orig-2000.png 1644w, https://bigmedium.com/bm.pix/img-new-ai-icon.orig-1000.png 1000w, https://bigmedium.com/bm.pix/img-new-ai-icon.orig-500.png 500w, https://bigmedium.com/bm.pix/img-new-ai-icon.orig-250.png 250w" sizes="(min-width: 640px) 720px, 100vw" title="Click to enlarge" /></a> </figure> <p>Put the icons aside. Instead, we should do better at setting expectations, gating presentation and interaction, and recovering gracefully from errors. I call this defensive design, and there are many things we can do here—more than we have space to get into here. (The <em>Sentient Design</em> book will have two chapters dedicated to those techniques.) Here’s a quick glimpse of principles and concepts:</p> <!-- 197. defensive design principles --> <figure class="media-center bmc_image"> <a href="https://bigmedium.com/bm.pix/img-defensive-design.png" rel="bm_lightbox" title="Principles to inform your defensive design practices." target="_blank"><img src="https://bigmedium.com/bm.pix/img-defensive-design.orig-250.png" alt="A slide listing Josh Clark's principles for defensive design and AI." srcset="https://bigmedium.com/bm.pix/img-defensive-design.orig-2000.png 1920w, https://bigmedium.com/bm.pix/img-defensive-design.orig-1000.png 1000w, https://bigmedium.com/bm.pix/img-defensive-design.orig-500.png 500w, https://bigmedium.com/bm.pix/img-defensive-design.orig-250.png 250w" sizes="(min-width: 1050px) 1050px, 100vw" title="Click to enlarge" /></a> <figcaption class="bmc_caption"> Principles to inform your defensive design practices. </figcaption> </figure> <p>In addition to all this, we are responsible for managing bias—we can’t eliminate it, but we can manage it. We are also responsible for establishing the right level of trust and for promoting data literacy for ourselves and our users.</p> <h3 id="it’suptoyou">It’s up to you</h3> <p>There’s a lot to be done, a lot to figure out. That’s exciting, and I believe that we’re up for it. Snoop Dogg kicked things off by asking, “Do y’all know? What the f*ck?!” The answer is, <em>Yes, y’all DO know</em>. You have the knowledge, skills, and experience to do this.</p> <p>Sentient Design is about AI, but not really. AI is only software, a tool, an enabler. Sentient Design—and the job of UX—is more fundamentally about pointing AI at problems worth solving. What is the human need, and how can we help solve that human need? What friction can we help the user overcome? How can these new tools help, if at all? And at what cost?</p> <p>We’re doing a lot of this work at Big Medium right now. We’re working with client companies to understand how these tools can help their customers solve meaningful problems. We’re doing a lot of product design in this area—not by bolting on AI features but by making machine intelligence part of our overall everyday design practice. We’re leading workshops and Sentient Design sprints to identify worthwhile uses of AI and how to avoid its pitfalls. I suggest that you do all those things in your practice, too.</p> <p>It’s an exciting and weird time. It’s not easy to have confidence in where things are headed, but here’s something I do know: <strong>It’s up to us, not the technology, to figure out the right way to use it. The future should not be self-driving.</strong></p> <p>Designers, we need you more than ever. Far from being automated out of jobs, you have the opportunity to bring your best selves to this next chapter of digital experience. We have some astonishing new tools—right now, today—that any of us can use to make something amazing. So please do that: go make something amazing.</p> <hr /> <p><em>Is your organization trying to understand the role of AI in your mission and practice? We can help! Big Medium does design, development, and product strategy for AI-mediated experiences; we facilitate Sentient Design sprints; we teach AI workshops; and we offer executive briefings. <a href="https://bigmedium.com/hire/">Get in touch to learn more.</a></em></p> </div> <!-- end bmw_pageContent --> <!-- end <%content%> --> </section> <section class="moreinfo"> <!-- start <%related%> --> <h3>Related links</h3> <ul class="bmw_related"> <li><a href="https://bigmedium.com/speaking/ai-is-your-new-design-material.html">A.I. Is Your New Design Material</a></li> <li><a href="https://bigmedium.com/ideas/systems-smart-enough-to-know-theyre-not-smart-enough.html">Systems Smart Enough To Know When They're Not Smart Enough</a></li> <li><a href="https://bigmedium.com/speaking/design-in-the-era-of-the-algorithm.html">Design in the Era of the Algorithm</a></li> </ul> <!-- end <%related%> --> <!-- START <%tags%> --> <h3>Read more about...</h3> <ul class="bmw_tags"> <li><a href="https://bigmedium.com/bm.tags/sentient-design/">sentient design</a></li> <li><a href="https://bigmedium.com/bm.tags/ai/" rel="tag">ai</a></li> <li><a href="https://bigmedium.com/bm.tags/algorithms/" rel="tag">algorithms</a></li> <li><a href="https://bigmedium.com/bm.tags/bots/" rel="tag">bots</a></li> <li><a href="https://bigmedium.com/bm.tags/design/" rel="tag">design</a></li> <li><a href="https://bigmedium.com/bm.tags/future/" rel="tag">future</a></li> <li><a href="https://bigmedium.com/bm.tags/speaking/" rel="tag">speaking</a></li> <li><a href="https://bigmedium.com/bm.tags/technology/" rel="tag">technology</a></li> </ul> <!-- END <%tags%> --> </section> </div> </article> </main> <section class="touts"> <div class="lc u--zero-pad"> <div class="standard-margin touts-3up"> <h2><a href="https://bigmedium.com/speaking/" class="kicker kicker--invert">Talks</a></h2> <!-- start <%latest%> --> <div class="tout-links tout-links--latest tout-links--sec-speaking"> <div class="bmw_link bma_page1654"> <div class="bma_mediatop"> <a href="https://bigmedium.com/speaking/sentient-design-josh-clark-talk.html" class="bma_thumb"><img src="https://bigmedium.com/bm.pix/josh-clark-casual-intelligence.4x3-250.jpg" class="bma_thumb" alt="Photo of Josh Clark speaking in front of a screen with the text, "Get cozy with casual intelligence"" srcset="https://bigmedium.com/bm.pix/josh-clark-casual-intelligence.4x3-2000.jpg 960w, https://bigmedium.com/bm.pix/josh-clark-casual-intelligence.4x3-500.jpg 500w, https://bigmedium.com/bm.pix/josh-clark-casual-intelligence.4x3-250.jpg 250w" sizes="(min-width: 1050px) 449px, 50vw" /></a> </div> <div class="bma_body"> <div class="bma_body-wrap"> <a href="https://bigmedium.com/bm.tags/sentient-design/" class="kicker kicker--tout kicker--invert">sentient design</a> <h3 class="bma_head"><a href="https://bigmedium.com/speaking/sentient-design-josh-clark-talk.html" title="Sentient Design: AI and the Next Chapter of UX" rel="bookmark">Sentient Design: AI and the Next Chapter of UX</a></h3> <div class="bma_desc">Josh Clark introduces Sentient Design, the already-here future of intelligent interfaces and AI-mediated experiences. Learn to design radically adaptive experiences that feel almost self-aware in their response to user needs.</div> </div> </div> </div> <div class="bmw_link bma_page1695"> <div class="bma_mediatop"> <a href="https://bigmedium.com/speaking/design-of-ai-sentient-design.html" class="bma_thumb"><img src="https://bigmedium.com/bm.pix/design-of-ai-promo.4x3-250.jpg" class="bma_thumb" alt="Headshots of Josh Clark and Veronika Kindred promoting the Design of AI podcast and talking about Sentient Design" srcset="https://bigmedium.com/bm.pix/design-of-ai-promo.4x3-2000.jpg 840w, https://bigmedium.com/bm.pix/design-of-ai-promo.4x3-500.jpg 500w, https://bigmedium.com/bm.pix/design-of-ai-promo.4x3-250.jpg 250w" sizes="(min-width: 1050px) 449px, 50vw" /></a> </div> <div class="bma_body"> <div class="bma_body-wrap"> <a href="https://bigmedium.com/bm.tags/podcast/" class="kicker kicker--tout kicker--invert">podcast</a> <h3 class="bma_head"><a href="https://bigmedium.com/speaking/design-of-ai-sentient-design.html" title="Design of AI: Sentient Design" rel="bookmark">Design of AI: Sentient Design</a></h3> <div class="bma_desc">Josh Clark and Veronika Kindred share the importance of design in shaping AI-powered experiences in this podcast conversation about Sentient Design, their new book, moving beyond the chatbot, generational views of technology, and much more.</div> </div> </div> </div> <div class="bmw_link bma_page1604"> <div class="bma_mediatop"> <a href="https://bigmedium.com/speaking/is-atomic-design-dead.html" class="bma_thumb"><img src="https://bigmedium.com/bm.pix/brad-atomic-design-dead.4x3-250.jpg" class="bma_thumb" alt="Brad Frost at Front Conference 2023: "Is Atomic Design Dead?"" srcset="https://bigmedium.com/bm.pix/brad-atomic-design-dead.4x3-2000.jpg 1200w, https://bigmedium.com/bm.pix/brad-atomic-design-dead.4x3-1000.jpg 1000w, https://bigmedium.com/bm.pix/brad-atomic-design-dead.4x3-500.jpg 500w, https://bigmedium.com/bm.pix/brad-atomic-design-dead.4x3-250.jpg 250w" sizes="(min-width: 1050px) 449px, 50vw" /></a> </div> <div class="bma_body"> <div class="bma_body-wrap"> <a href="https://bigmedium.com/bm.tags/design-system/" class="kicker kicker--tout kicker--invert">design system</a> <h3 class="bma_head"><a href="https://bigmedium.com/speaking/is-atomic-design-dead.html" title="Is Atomic Design Dead?" rel="bookmark">Is Atomic Design Dead?</a></h3> <div class="bma_desc">Brad Frost explores today’s design system ecosystem and peeks into the exciting future of design systems. (Spoiler: Atomic Design is going stronger than ever, thank you very much.)</div> </div> </div> </div> <div class="bmw_link bma_page1650"> <div class="bma_mediatop"> <a href="https://bigmedium.com/speaking/ai-and-design-systems-virtual-event.html" class="bma_thumb"><img src="https://bigmedium.com/bm.pix/friends-with-robots.4x3-250.png" class="bma_thumb" alt="Making friends with the robots" srcset="https://bigmedium.com/bm.pix/friends-with-robots.4x3-2000.png 2000w, https://bigmedium.com/bm.pix/friends-with-robots.4x3-1000.png 1000w, https://bigmedium.com/bm.pix/friends-with-robots.4x3-500.png 500w, https://bigmedium.com/bm.pix/friends-with-robots.4x3-250.png 250w" sizes="(min-width: 1050px) 449px, 50vw" /></a> </div> <div class="bma_body"> <div class="bma_body-wrap"> <a href="https://bigmedium.com/bm.tags/ai/" class="kicker kicker--tout kicker--invert">ai</a> <h3 class="bma_head"><a href="https://bigmedium.com/speaking/ai-and-design-systems-virtual-event.html" title="Online Event: AI and Design Systems" rel="bookmark">Online Event: AI and Design Systems</a></h3> <div class="bma_desc">The Big Medium team shares the many ways that we use AI to build and maintain design systems. (You can totally do it, too.)</div> </div> </div> </div> <div class="bmw_link bma_page1636"> <div class="bma_mediatop"> <a href="https://bigmedium.com/speaking/when-and-how-will-ai-replace-us.html" class="bma_thumb"><img src="https://bigmedium.com/bm.pix/smashingconf_kevin.4x3-250.png" class="bma_thumb" alt="Kevin Coyle at Smashing Meet 2024: "The Future of Design Systems"" srcset="https://bigmedium.com/bm.pix/smashingconf_kevin.4x3-2000.png 346w, https://bigmedium.com/bm.pix/smashingconf_kevin.4x3-250.png 250w" sizes="(min-width: 1050px) 449px, 50vw" /></a> </div> <div class="bma_body"> <div class="bma_body-wrap"> <a href="https://bigmedium.com/bm.tags/ai/" class="kicker kicker--tout kicker--invert">ai</a> <h3 class="bma_head"><a href="https://bigmedium.com/speaking/when-and-how-will-ai-replace-us.html" title="When will AI Replace Us?" rel="bookmark">When will AI Replace Us?</a></h3> <div class="bma_desc">Kevin Coyle addresses and subdues fears about AI taking out jobs. (Spoiler: This isn’t a new problem.) Instead, he shows how your new robot colleagues can help you do your job even better—right now, today.</div> </div> </div> </div> <div class="bmw_link bma_page1624"> <div class="bma_mediatop"> <a href="https://bigmedium.com/speaking/brad-frost-shoptalk-global-design-system.html" class="bma_thumb"><img src="https://bigmedium.com/bm.pix/bradtalkshop.4x3-250.png" class="bma_thumb" alt="Brad Frost on the TalkShop Podcast" srcset="https://bigmedium.com/bm.pix/bradtalkshop.4x3-2000.png 346w, https://bigmedium.com/bm.pix/bradtalkshop.4x3-250.png 250w" sizes="(min-width: 1050px) 449px, 50vw" /></a> </div> <div class="bma_body"> <div class="bma_body-wrap"> <a href="https://bigmedium.com/bm.tags/podcast/" class="kicker kicker--tout kicker--invert">podcast</a> <h3 class="bma_head"><a href="https://bigmedium.com/speaking/brad-frost-shoptalk-global-design-system.html" title="ShopTalk: Brad Frost on a Global Design System" rel="bookmark">ShopTalk: Brad Frost on a Global Design System</a></h3> <div class="bma_desc">On the TalkShop podcast, Big Medium’s Brad Frost answers questions like: What is a global design system? Are two design systems ever the same? How would this slot inside atomic design? What has been the response from the web community to global design system as an idea? And what’s Frostapalooza?</div> </div> </div> </div> </div> <!-- end <%latest%> --> <div class="continue"> <a href="https://bigmedium.com/speaking/" class="btn btn--next"> <span class="btn-arrow-text--right">More Talks</span> </a> </div> </div> </div> </section> <section id="nav" class="prefooter"> <div class="toolbar"> <div class="lc"> <a href="https://bigmedium.com/" class="logo"> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 830 147" role="img" aria-label="Big Medium" preserveAspectRatio="xMaxYMax meet"><title>Big Medium</title><desc>Big Medium logo</desc><use xlink:href="#bmlogo" id="footerLogo" /></svg> </a> <a href="#bm-top" id="BackToTop" class="icon icon-menu-up icon-utility">Back to top</a> </div> </div> <div class="lc"> <!-- START <%search%> --> <form action="https://bigmedium.com/cgi-bin/moxiebin/bm-search.cgi/q/3" method="get" class="bmw_search" enctype="multipart/form-data" accept-charset="utf-8"><div> <input type="search" name="bmq" placeholder="Search" value="" /> <button type="submit" class="icon icon-search icon-utility">Search</button> <input type="hidden" name="bms" value="3" /> </div></form> <!-- END <%search%> --> <nav class="bmw_navigation bmn_hnav" role="navigation"> <div class="bmn_skipnav"><a href="#bm-skipnavbottom">Skip Navigation</a></div> <ul> <li class="bmn_sec-ideas"><a title="" href="https://bigmedium.com/ideas/">Ideas</a></li> <li class="bmn_sec-projects"><a title="" href="https://bigmedium.com/projects/">Projects</a></li> <li class="bmn_sec-speaking bmn_active"><a title="" href="https://bigmedium.com/speaking/">Talks</a></li> <li class="bmn_sec-about"><a title="" href="https://bigmedium.com/about/">About</a></li> <li class="bmn_sec-hire"><a title="" href="https://bigmedium.com/hire/">Hire Us</a></li> </ul> <span class="bmn_clearNav" id="bm-skipnavbottom"> </span> </nav> </div> </section> <!-- start <%footer%> --> <div class="bmw_footer"> <footer class="footer"> <div class="lc"> <div class="read-us"> <h3>Read us</h3> <ul> <li class="book-atomic"> <a href="https://atomicdesign.bradfrost.com"> <img src="/pix/atomic-design.png" alt="Atomic Design, by Brad Frost" /> </a> </li> <li class="book-touch"> <a href="http://abookapart.com/products/designing-for-touch"> <img src="/pix/designing-for-touch.png" alt="Designing for Touch, by Josh Clark" /> </a> </li> </ul> </div> <div class="hire-us"> <h3>Work with us</h3> <ul> <li><a href="/hire/#design-systems">Design systems</a></li> <li><a href="/hire/#ui-ux-design">Interface and experience design</a></li> <li><a href="/hire/#product-strategy-process">Digital strategy and process</a></li> <li><a href="/hire/#production">Production and co-creation</a></li> <li><a href="/hire/#action-plan">Action plan</a></li> <li><a href="/hire/#coaching">Coaching and hands-on advice</a></li> <li><a href="/hire/#workshops-talks">Workshops and talks</a></li> </ul> </div> <div class="follow-us"> <h3>Follow us</h3> <h4>Get the <a href="/ideas/big-medium-newsletter.html">newsletter</a></h4> <div id="mc_embed_shell_bmfooter" class="bmc_mailchimp"> <div id="mc_embed_signup_bmfooter"> <form action="https://bigmedium.us5.list-manage.com/subscribe/post?u=6c0c3f4dcd40d88bc1cedb3fa&id=7540015453&f_id=00d6c4eaf0" method="post" id="mc-embedded-subscribe-form_bmfooter" name="mc-embedded-subscribe-form" class="validate" target="_self" novalidate=""> <div id="mc_embed_signup_scroll_bmfooter"> <div class="mc-field-group"><label for="mce-EMAIL_bmfooter">Email Address</label><input type="email" name="EMAIL" class="required email" id="mce-EMAIL_bmfooter" required="" value=""></div> <div id="mce-responses_bmfooter" class="clear"> <div class="response" id="mce-error-response_bmfooter" style="display: none;"></div> <div class="response" id="mce-success-response_bmfooter" style="display: none;"></div> </div><div class="clear"><input type="submit" class="btn" name="subscribe" id="mc-embedded-subscribe_bmfooter" class="button" value="Subscribe"></div> </div> </form> </div> </div> <ul class="social"> <li><a href="https://twitter.com/bigmediumjosh" class="icon icon-twitter">Twitter</a></li> <li><a href="/bm.feed.xml" class="icon icon-rss">RSS</a></li> <li><a href="https://instagram.com/joshclark/" class="icon icon-instagram">Instagram</a></li> <li><a href="https://github.com/bigmedium" class="icon icon-github">Github</a></li> </ul> </div> <div class="contact-us"> <h3>Contact us</h3> <div itemscope itemtype="http://schema.org/Organization"> <div> <h4>Start with Josh Clark</h4> <meta itemprop="name" content="Big Medium" /> <meta itemprop="sameAs" content="https://bigmedium.com/" /> </div> <p > <a itemprop="email" href="mailto:josh@bigmedium.com">josh@bigmedium.com</a><br /> <a itemprop="telephone" href="tel:+14013393381" class="p-tel">(401) 339-3381</a> </p> </div> </div> <p class="copyright"> Big Medium is a Global Moxie company.<br /> Copyright 2003–2024 Global Moxie, LLC. All rights reserved. </p> </div> </footer> </div> <!-- end <%footer%> --> <script src="/js/bigmed-min.js" type="text/javascript"></script> <script type="text/javascript"> BigM.initPage(); </script> </body> </html>