Search | arXiv e-print repository

<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;29 of 29 results for author: <span class="mathjax">Heer, J</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Heer%2C+J">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Heer, J"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Heer%2C+J&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Heer, J"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.03137">arXiv:2411.03137</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.03137">pdf</a>, <a href="https://arxiv.org/format/2411.03137">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> From Pen to Prompt: How Creative Writers Integrate AI into their Writing Practice </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Guo%2C+A">Alicia Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Sathyanarayanan%2C+S">Shreya Sathyanarayanan</a>, <a href="/search/cs?searchtype=author&amp;query=Wang%2C+L">Leijie Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+A">Amy Zhang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.03137v2-abstract-short" style="display: inline;"> Creative writing is a deeply human craft, yet AI systems using large language models (LLMs) offer the automation of significant parts of the writing process. So why do some creative writers choose to use AI? Through interviews and observed writing sessions with 18 creative writers who already use AI regularly in their writing practice, we find that creative writers are intentional about how they i&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03137v2-abstract-full').style.display = 'inline'; document.getElementById('2411.03137v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.03137v2-abstract-full" style="display: none;"> Creative writing is a deeply human craft, yet AI systems using large language models (LLMs) offer the automation of significant parts of the writing process. So why do some creative writers choose to use AI? Through interviews and observed writing sessions with 18 creative writers who already use AI regularly in their writing practice, we find that creative writers are intentional about how they incorporate AI, making many deliberate decisions about when and how to engage AI based on their core values, such as authenticity and craftsmanship. We characterize the interplay between writers&#39; values, their fluid relationships with AI, and specific integration strategies -- ultimately enabling writers to create new AI workflows without compromising their creative values. 
We provide insight for writing communities, AI developers and future researchers on the importance of supporting transparency of these emerging writing processes and rethinking what AI features can best serve writers. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.03137v2-abstract-full').style.display = 'none'; document.getElementById('2411.03137v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.09667">arXiv:2408.09667</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.09667">pdf</a>, <a href="https://arxiv.org/format/2408.09667">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> BLADE: Benchmarking Language Model Agents for Data-Driven Science </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gu%2C+K">Ken Gu</a>, <a href="/search/cs?searchtype=author&amp;query=Shang%2C+R">Ruoxi Shang</a>, <a href="/search/cs?searchtype=author&amp;query=Jiang%2C+R">Ruien Jiang</a>, <a href="/search/cs?searchtype=author&amp;query=Kuang%2C+K">Keying Kuang</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+R">Richard-John Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Lyu%2C+D">Donghe Lyu</a>, <a href="/search/cs?searchtype=author&amp;query=Mao%2C+Y">Yue Mao</a>, <a href="/search/cs?searchtype=author&amp;query=Pan%2C+Y">Youran Pan</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+T">Teng Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+J">Jiaqian Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+Y">Yikun Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+T+M">Tianmai M. Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+L">Lanyi Zhu</a>, <a href="/search/cs?searchtype=author&amp;query=Merrill%2C+M+A">Mike A. Merrill</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a>, <a href="/search/cs?searchtype=author&amp;query=Althoff%2C+T">Tim Althoff</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.09667v2-abstract-short" style="display: inline;"> Data-driven scientific discovery requires the iterative integration of scientific domain knowledge, statistical expertise, and an understanding of data semantics to make nuanced analytical decisions, e.g., about which variables, transformations, and statistical models to consider. 
LM-based agents equipped with planning, memory, and code execution capabilities have the potential to support data-dri&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.09667v2-abstract-full').style.display = 'inline'; document.getElementById('2408.09667v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.09667v2-abstract-full" style="display: none;"> Data-driven scientific discovery requires the iterative integration of scientific domain knowledge, statistical expertise, and an understanding of data semantics to make nuanced analytical decisions, e.g., about which variables, transformations, and statistical models to consider. LM-based agents equipped with planning, memory, and code execution capabilities have the potential to support data-driven science. However, evaluating agents on such open-ended tasks is challenging due to multiple valid approaches, partially correct steps, and different ways to express the same decisions. To address these challenges, we present BLADE, a benchmark to automatically evaluate agents&#39; multifaceted approaches to open-ended research questions. BLADE consists of 12 datasets and research questions drawn from existing scientific literature, with ground truth collected from independent analyses by expert data scientists and researchers. To automatically evaluate agent responses, we developed corresponding computational methods to match different representations of analyses to this ground truth. Though language models possess considerable world knowledge, our evaluation shows that they are often limited to basic analyses. However, agents capable of interacting with the underlying data demonstrate improved, but still non-optimal, diversity in their analytical decision making. Our work enables the evaluation of agents for data-driven science and provides researchers deeper insights into agents&#39; analysis approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.09667v2-abstract-full').style.display = 'none'; document.getElementById('2408.09667v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.06845">arXiv:2408.06845</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.06845">pdf</a>, <a href="https://arxiv.org/format/2408.06845">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> DracoGPT: Extracting Visualization Design Preferences from Large Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wang%2C+H+W">Huichen Will Wang</a>, <a href="/search/cs?searchtype=author&amp;query=Gordon%2C+M">Mitchell Gordon</a>, <a href="/search/cs?searchtype=author&amp;query=Battle%2C+L">Leilani Battle</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.06845v2-abstract-short" style="display: inline;"> Trained on vast corpora, Large Language Models (LLMs) have the potential to encode visualization design knowledge and best practices. However, if they fail to do so, they might provide unreliable visualization recommendations. What visualization design preferences, then, have LLMs learned? We contribute DracoGPT, a method for extracting, modeling, and assessing visualization design preferences fro&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.06845v2-abstract-full').style.display = 'inline'; document.getElementById('2408.06845v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.06845v2-abstract-full" style="display: none;"> Trained on vast corpora, Large Language Models (LLMs) have the potential to encode visualization design knowledge and best practices. However, if they fail to do so, they might provide unreliable visualization recommendations. What visualization design preferences, then, have LLMs learned? We contribute DracoGPT, a method for extracting, modeling, and assessing visualization design preferences from LLMs. To assess varied tasks, we develop two pipelines--DracoGPT-Rank and DracoGPT-Recommend--to model LLMs prompted to either rank or recommend visual encoding specifications. We use Draco as a shared knowledge base in which to represent LLM design preferences and compare them to best practices from empirical research. We demonstrate that DracoGPT can accurately model the preferences expressed by LLMs, enabling analysis in terms of Draco design constraints. Across a suite of backing LLMs, we find that DracoGPT-Rank and DracoGPT-Recommend moderately agree with each other, but both substantially diverge from guidelines drawn from human subjects experiments. Future work can build on our approach to expand Draco&#39;s knowledge base to model a richer set of preferences and to provide a robust and cost-effective stand-in for LLMs. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.06845v2-abstract-full').style.display = 'none'; document.getElementById('2408.06845v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">IEEE Transactions on Visualization and Computer Graphics (Proc. VIS 2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.21285">arXiv:2407.21285</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.21285">pdf</a>, <a href="https://arxiv.org/format/2407.21285">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Mixing Linters with GUIs: A Color Palette Design Probe </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=McNutt%2C+A">Andrew McNutt</a>, <a href="/search/cs?searchtype=author&amp;query=Stone%2C+M+C">Maureen C. Stone</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.21285v1-abstract-short" style="display: inline;"> Visualization linters are end-user facing evaluators that automatically identify potential chart issues. These spell-checker like systems offer a blend of interpretability and customization that is not found in other forms of automated assistance. However, existing linters do not model context and have primarily targeted users who do not need assistance, resulting in obvious -- even annoying -- ad&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.21285v1-abstract-full').style.display = 'inline'; document.getElementById('2407.21285v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.21285v1-abstract-full" style="display: none;"> Visualization linters are end-user facing evaluators that automatically identify potential chart issues. These spell-checker like systems offer a blend of interpretability and customization that is not found in other forms of automated assistance. However, existing linters do not model context and have primarily targeted users who do not need assistance, resulting in obvious -- even annoying -- advice. We investigate these issues within the domain of color palette design, which serves as a microcosm of visualization design concerns. We contribute a GUI-based color palette linter as a design probe that covers perception, accessibility, context, and other design criteria, and use it to explore visual explanations, integrated fixes, and user defined linting rules. 
Through a formative interview study and theory-driven analysis, we find that linters can be meaningfully integrated into graphical contexts thereby addressing many of their core issues. We discuss implications for integrating linters into visualization tools, developing improved assertion languages, and supporting end-user tunable advice -- all laying the groundwork for more effective visualization linters in any context. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.21285v1-abstract-full').style.display = 'none'; document.getElementById('2407.21285v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear at VIS2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.13853">arXiv:2406.13853</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.13853">pdf</a>, <a href="https://arxiv.org/format/2406.13853">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/VIS55277.2024.00020">10.1109/VIS55277.2024.00020 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> AltGeoViz: Facilitating Accessible Geovisualization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Li%2C+C">Chu Li</a>, <a href="/search/cs?searchtype=author&amp;query=Pang%2C+R+Y">Rock Yuren Pang</a>, <a href="/search/cs?searchtype=author&amp;query=Sharif%2C+A">Ather Sharif</a>, <a href="/search/cs?searchtype=author&amp;query=Chheda-Kothary%2C+A">Arnavi Chheda-Kothary</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a>, <a href="/search/cs?searchtype=author&amp;query=Froehlich%2C+J+E">Jon E. Froehlich</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.13853v3-abstract-short" style="display: inline;"> Geovisualizations are powerful tools for exploratory spatial analysis, enabling sighted users to discern patterns, trends, and relationships within geographic data. However, these visual tools have remained largely inaccessible to screen-reader users. We present AltGeoViz, a new system we designed to facilitate geovisualization exploration for these users. 
AltGeoViz dynamically generates alt-text&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.13853v3-abstract-full').style.display = 'inline'; document.getElementById('2406.13853v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.13853v3-abstract-full" style="display: none;"> Geovisualizations are powerful tools for exploratory spatial analysis, enabling sighted users to discern patterns, trends, and relationships within geographic data. However, these visual tools have remained largely inaccessible to screen-reader users. We present AltGeoViz, a new system we designed to facilitate geovisualization exploration for these users. AltGeoViz dynamically generates alt-text descriptions based on the user&#39;s current map view, providing summaries of spatial patterns and descriptive statistics. In a study of five screen-reader users, we found that AltGeoViz enabled them to interact with geovisualizations in previously infeasible ways. Participants demonstrated a clear understanding of data summaries and their location context, and they could synthesize spatial understandings of their explorations. Moreover, we identified key areas for improvement, such as the addition of intuitive spatial navigation controls and comparative analysis features. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.13853v3-abstract-full').style.display = 'none'; document.getElementById('2406.13853v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">IEEE VIS 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.12259">arXiv:2404.12259</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.12259">pdf</a>, <a href="https://arxiv.org/format/2404.12259">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3613904.3642830">10.1145/3613904.3642830 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Concept Induction: Analyzing Unstructured Text with High-Level Concepts Using LLooM </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lam%2C+M+S">Michelle S. 
Lam</a>, <a href="/search/cs?searchtype=author&amp;query=Teoh%2C+J">Janice Teoh</a>, <a href="/search/cs?searchtype=author&amp;query=Landay%2C+J">James Landay</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a>, <a href="/search/cs?searchtype=author&amp;query=Bernstein%2C+M+S">Michael S. Bernstein</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.12259v1-abstract-short" style="display: inline;"> Data analysts have long sought to turn unstructured text data into meaningful concepts. Though common, topic modeling and clustering focus on lower-level keywords and require significant interpretative work. We introduce concept induction, a computational process that instead produces high-level concepts, defined by explicit inclusion criteria, from unstructured text. For a dataset of toxic online&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.12259v1-abstract-full').style.display = 'inline'; document.getElementById('2404.12259v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.12259v1-abstract-full" style="display: none;"> Data analysts have long sought to turn unstructured text data into meaningful concepts. Though common, topic modeling and clustering focus on lower-level keywords and require significant interpretative work. We introduce concept induction, a computational process that instead produces high-level concepts, defined by explicit inclusion criteria, from unstructured text. For a dataset of toxic online comments, where a state-of-the-art BERTopic model outputs &#34;women, power, female,&#34; concept induction produces high-level concepts such as &#34;Criticism of traditional gender roles&#34; and &#34;Dismissal of women&#39;s concerns.&#34; We present LLooM, a concept induction algorithm that leverages large language models to iteratively synthesize sampled text and propose human-interpretable concepts of increasing generality. We then instantiate LLooM in a mixed-initiative text analysis tool, enabling analysts to shift their attention from interpreting topics to engaging in theory-driven analysis. Through technical evaluations and four analysis scenarios ranging from literature review to content moderation, we find that LLooM&#39;s concepts improve upon the prior art of topic models in terms of quality and data coverage. In expert case studies, LLooM helped researchers to uncover new insights even from familiar datasets, for example by suggesting a previously unnoticed concept of attacks on out-party stances in a political social media dataset. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.12259v1-abstract-full').style.display = 'none'; document.getElementById('2404.12259v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. 
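The LLooM abstract above describes an iterative synthesize-then-propose loop over sampled text. As a rough, hypothetical illustration only (this is not code from the LLooM paper or tool), a loop of that general shape might look like the sketch below, where `llm` stands in for any prompt-to-text model call and the prompt wording is invented:

```python
import random
from typing import Callable, List

def induce_concepts(documents: List[str],
                    llm: Callable[[str], str],
                    rounds: int = 3,
                    sample_size: int = 20) -> List[str]:
    """Illustrative concept-induction loop: sample text, synthesize summaries,
    then propose higher-level concepts with explicit inclusion criteria."""
    concepts = []
    for _ in range(rounds):
        sample = random.sample(documents, min(sample_size, len(documents)))
        # Synthesize: compress the sampled documents into short bullet points.
        bullets = llm("Summarize the key ideas of these texts as short bullets:\n"
                      + "\n".join(sample))
        # Propose: ask for named concepts, each with an explicit inclusion criterion.
        concepts.append(llm("From these bullets, propose high-level concepts, each "
                            "with a name and an explicit inclusion criterion:\n" + bullets))
    return concepts
```

The actual algorithm also scores concepts and raises their level of generality across rounds; this sketch only conveys the sample-synthesize-propose structure described in the abstract.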
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear at CHI 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.11602">arXiv:2404.11602</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.11602">pdf</a>, <a href="https://arxiv.org/format/2404.11602">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Interaction Techniques for Exploratory Data Visualization on Mobile Devices </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Snyder%2C+L+S">Luke S. Snyder</a>, <a href="/search/cs?searchtype=author&amp;query=Rossi%2C+R+A">Ryan A. Rossi</a>, <a href="/search/cs?searchtype=author&amp;query=Koh%2C+E">Eunyee Koh</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a>, <a href="/search/cs?searchtype=author&amp;query=Hoffswell%2C+J">Jane Hoffswell</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.11602v1-abstract-short" style="display: inline;"> The ubiquity and on-the-go availability of mobile devices makes them central to many tasks such as interpersonal communication and media consumption. However, despite the potential of mobile devices for on-demand exploratory data visualization, existing mobile interactions are difficult, often using highly custom interactions, complex gestures, or multi-modal input. We synthesize limitations from&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.11602v1-abstract-full').style.display = 'inline'; document.getElementById('2404.11602v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.11602v1-abstract-full" style="display: none;"> The ubiquity and on-the-go availability of mobile devices makes them central to many tasks such as interpersonal communication and media consumption. However, despite the potential of mobile devices for on-demand exploratory data visualization, existing mobile interactions are difficult, often using highly custom interactions, complex gestures, or multi-modal input. We synthesize limitations from the literature and outline four motivating principles for improved mobile interaction: leverage ubiquitous modalities, prioritize discoverability, enable rapid in-context data exploration, and promote graceful recovery. We then contribute thirteen interaction candidates and conduct a formative study with twelve participants who experienced our interactions in a testbed prototype. Based on these interviews, we discuss design considerations and tradeoffs from four main themes: precise and rapid inspection, focused navigation, single-touch and fixed orientation interaction, and judicious use of motion. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.11602v1-abstract-full').style.display = 'none'; document.getElementById('2404.11602v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4 pages, 1 figure, 1 table, EuroVis 2024 Short Papers</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.11681">arXiv:2312.11681</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.11681">pdf</a>, <a href="https://arxiv.org/format/2312.11681">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Designing LLM Chains by Adapting Techniques from Crowdsourcing Workflows </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Grunde-McLaughlin%2C+M">Madeleine Grunde-McLaughlin</a>, <a href="/search/cs?searchtype=author&amp;query=Lam%2C+M+S">Michelle S. Lam</a>, <a href="/search/cs?searchtype=author&amp;query=Krishna%2C+R">Ranjay Krishna</a>, <a href="/search/cs?searchtype=author&amp;query=Weld%2C+D+S">Daniel S. Weld</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.11681v4-abstract-short" style="display: inline;"> LLM chains enable complex tasks by decomposing work into a sequence of subtasks. Similarly, the more established techniques of crowdsourcing workflows decompose complex tasks into smaller tasks for human crowdworkers. Chains address LLM errors analogously to the way crowdsourcing workflows address human error. To characterize opportunities for LLM chaining, we survey 107 papers across the crowdsou&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.11681v4-abstract-full').style.display = 'inline'; document.getElementById('2312.11681v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.11681v4-abstract-full" style="display: none;"> LLM chains enable complex tasks by decomposing work into a sequence of subtasks. Similarly, the more established techniques of crowdsourcing workflows decompose complex tasks into smaller tasks for human crowdworkers. Chains address LLM errors analogously to the way crowdsourcing workflows address human error. To characterize opportunities for LLM chaining, we survey 107 papers across the crowdsourcing and chaining literature to construct a design space for chain development. The design space covers a designer&#39;s objectives and the tactics used to build workflows. 
We then surface strategies that mediate how workflows use tactics to achieve objectives. To explore how techniques from crowdsourcing may apply to chaining, we adapt crowdsourcing workflows to implement LLM chains across three case studies: creating a taxonomy, shortening text, and writing a short story. From the design space and our case studies, we identify takeaways for effective chain design and raise implications for future research and development. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.11681v4-abstract-full').style.display = 'none'; document.getElementById('2312.11681v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.17814">arXiv:2310.17814</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.17814">pdf</a>, <a href="https://arxiv.org/format/2310.17814">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> DIVI: Dynamically Interactive Visualization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Snyder%2C+L+S">Luke S. Snyder</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.17814v2-abstract-short" style="display: inline;"> Dynamically Interactive Visualization (DIVI) is a novel approach for orchestrating interactions within and across static visualizations. DIVI deconstructs Scalable Vector Graphics charts at runtime to infer content and coordinate user input, decoupling interaction from specification logic. This decoupling allows interactions to extend and compose freely across different tools, chart types, and ana&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.17814v2-abstract-full').style.display = 'inline'; document.getElementById('2310.17814v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.17814v2-abstract-full" style="display: none;"> Dynamically Interactive Visualization (DIVI) is a novel approach for orchestrating interactions within and across static visualizations. DIVI deconstructs Scalable Vector Graphics charts at runtime to infer content and coordinate user input, decoupling interaction from specification logic. This decoupling allows interactions to extend and compose freely across different tools, chart types, and analysis goals. DIVI exploits positional relations of marks to detect chart components such as axes and legends, reconstruct scales and view encodings, and infer data fields. DIVI then enumerates candidate transformations across inferred data to perform linking between views. 
To support dynamic interaction without prior specification, we introduce a taxonomy that formalizes the space of standard interactions by chart element, interaction type, and input event. We demonstrate DIVI&#39;s usefulness for rapid data exploration and analysis through a usability study with 13 participants and a diverse gallery of dynamically interactive visualizations, including single chart, multi-view, and cross-tool configurations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.17814v2-abstract-full').style.display = 'none'; document.getElementById('2310.17814v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 2 pages supplementary material, 10 figures, IEEE TVCG 2024 (Proc. VIS 2023)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.16262">arXiv:2310.16262</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.16262">pdf</a>, <a href="https://arxiv.org/format/2310.16262">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Programming Languages">cs.PL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation">stat.CO</span> </div> </div> <p class="title is-5 mathjax"> rTisane: Externalizing conceptual models for data analysis increases engagement with domain knowledge and improves statistical model quality </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jun%2C+E">Eunice Jun</a>, <a href="/search/cs?searchtype=author&amp;query=Misback%2C+E">Edward Misback</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a>, <a href="/search/cs?searchtype=author&amp;query=Just%2C+R">Ren茅 Just</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.16262v1-abstract-short" style="display: inline;"> Statistical models should accurately reflect analysts&#39; domain knowledge about variables and their relationships. While recent tools let analysts express these assumptions and use them to produce a resulting statistical model, it remains unclear what analysts want to express and how externalization impacts statistical model quality. This paper addresses these gaps. 
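The DIVI abstract above hinges on inferring chart structure from SVG geometry. The snippet below is a hypothetical, heavily simplified Python illustration of that general idea, not DIVI's implementation (DIVI is a separate JavaScript system): it groups SVG text labels that share a y position, a crude positional cue of the kind an axis detector might start from.

```python
import xml.etree.ElementTree as ET
from collections import defaultdict

SVG_NS = "{http://www.w3.org/2000/svg}"

def label_rows(svg_source: str, tolerance: float = 2.0):
    """Group <text> elements whose y coordinates agree within `tolerance`.
    A long run of labels at a shared y is a crude hint of an x-axis."""
    root = ET.fromstring(svg_source)
    rows = defaultdict(list)
    for text in root.iter(f"{SVG_NS}text"):
        try:
            x, y = float(text.get("x", "0")), float(text.get("y", "0"))
        except ValueError:
            continue  # skip labels positioned only via transforms
        rows[round(y / tolerance)].append((x, (text.text or "").strip()))
    # Return candidate rows of labels, longest first.
    return sorted((sorted(r) for r in rows.values()), key=len, reverse=True)
```

A real system must also handle transforms, nested groups, legends, scales, and the marks themselves, which is exactly the harder inference problem the paper addresses.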
We first conduct an exploratory s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.16262v1-abstract-full').style.display = 'inline'; document.getElementById('2310.16262v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.16262v1-abstract-full" style="display: none;"> Statistical models should accurately reflect analysts&#39; domain knowledge about variables and their relationships. While recent tools let analysts express these assumptions and use them to produce a resulting statistical model, it remains unclear what analysts want to express and how externalization impacts statistical model quality. This paper addresses these gaps. We first conduct an exploratory study of analysts using a domain-specific language (DSL) to express conceptual models. We observe a preference for detailing how variables relate and a desire to allow, and then later resolve, ambiguity in their conceptual models. We leverage these findings to develop rTisane, a DSL for expressing conceptual models augmented with an interactive disambiguation process. In a controlled evaluation, we find that rTisane&#39;s DSL helps analysts engage more deeply with and accurately externalize their assumptions. rTisane also leads to statistical models that match analysts&#39; assumptions, maintain analysis intent, and better fit the data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.16262v1-abstract-full').style.display = 'none'; document.getElementById('2310.16262v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> H.5.2; D.2.2; H.1.2; D.3.2 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.10108">arXiv:2309.10108</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.10108">pdf</a>, <a href="https://arxiv.org/format/2309.10108">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> How Do Data Analysts Respond to AI Assistance? A Wizard-of-Oz Study </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gu%2C+K">Ken Gu</a>, <a href="/search/cs?searchtype=author&amp;query=Grunde-McLaughlin%2C+M">Madeleine Grunde-McLaughlin</a>, <a href="/search/cs?searchtype=author&amp;query=McNutt%2C+A+M">Andrew M. McNutt</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a>, <a href="/search/cs?searchtype=author&amp;query=Althoff%2C+T">Tim Althoff</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.10108v2-abstract-short" style="display: inline;"> Data analysis is challenging as analysts must navigate nuanced decisions that may yield divergent conclusions. AI assistants have the potential to support analysts in planning their analyses, enabling more robust decision making. 
Though AI-based assistants that target code execution (e.g., Github Copilot) have received significant attention, limited research addresses assistance for both analysis&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.10108v2-abstract-full').style.display = 'inline'; document.getElementById('2309.10108v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.10108v2-abstract-full" style="display: none;"> Data analysis is challenging as analysts must navigate nuanced decisions that may yield divergent conclusions. AI assistants have the potential to support analysts in planning their analyses, enabling more robust decision making. Though AI-based assistants that target code execution (e.g., Github Copilot) have received significant attention, limited research addresses assistance for both analysis execution and planning. In this work, we characterize helpful planning suggestions and their impacts on analysts&#39; workflows. We first review the analysis planning literature and crowd-sourced analysis studies to categorize suggestion content. We then conduct a Wizard-of-Oz study (n=13) to observe analysts&#39; preferences and reactions to planning assistance in a realistic scenario. Our findings highlight subtleties in contextual factors that impact suggestion helpfulness, emphasizing design implications for supporting different abstractions of assistance, forms of initiative, increased engagement, and alignment of goals between analysts and assistants. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.10108v2-abstract-full').style.display = 'none'; document.getElementById('2309.10108v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to CHI 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.14241">arXiv:2308.14241</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2308.14241">pdf</a>, <a href="https://arxiv.org/format/2308.14241">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TVCG.2023.3326527">10.1109/TVCG.2023.3326527 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Too Many Cooks: Exploring How Graphical Perception Studies Influence Visualization Recommendations in Draco </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zeng%2C+Z">Zehua Zeng</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+J">Junran Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Moritz%2C+D">Dominik Moritz</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a>, <a href="/search/cs?searchtype=author&amp;query=Battle%2C+L">Leilani Battle</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.14241v1-abstract-short" style="display: inline;"> Findings from graphical perception can guide visualization recommendation algorithms in identifying effective visualization designs. However, existing algorithms use knowledge from, at best, a few studies, limiting our understanding of how complementary (or contradictory) graphical perception results influence generated recommendations. In this paper, we present a pipeline of applying a large body&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.14241v1-abstract-full').style.display = 'inline'; document.getElementById('2308.14241v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.14241v1-abstract-full" style="display: none;"> Findings from graphical perception can guide visualization recommendation algorithms in identifying effective visualization designs. However, existing algorithms use knowledge from, at best, a few studies, limiting our understanding of how complementary (or contradictory) graphical perception results influence generated recommendations. In this paper, we present a pipeline of applying a large body of graphical perception results to develop new visualization recommendation algorithms and conduct an exploratory study to investigate how results from graphical perception can alter the behavior of downstream algorithms. Specifically, we model graphical perception results from 30 papers in Draco -- a framework to model visualization knowledge -- to develop new recommendation algorithms. 
arXiv:2308.13024 [pdf, other] cs.HC
EVM: Incorporating Model Checking into Exploratory Visual Analysis
Authors: Alex Kale, Ziyang Guo, Xiao Li Qiao, Jeffrey Heer, Jessica Hullman
Abstract: Visual analytics (VA) tools support data exploration by helping analysts quickly and iteratively generate views of data which reveal interesting patterns. However, these tools seldom enable explicit checks of the resulting interpretations of data -- e.g., whether patterns can be accounted for by a model that implies a particular structure in the relationships between variables. We present EVM, a data exploration tool that enables users to express and check provisional interpretations of data in the form of statistical models. EVM integrates support for visualization-based model checks by rendering distributions of model predictions alongside user-generated views of data. In a user study with data scientists practicing in the private and public sector, we evaluate how model checks influence analysts' thinking during data exploration. Our analysis characterizes how participants use model checks to scrutinize expectations about data generating process and surfaces further opportunities to scaffold model exploration in VA tools.
Submitted 24 August, 2023; originally announced August 2023.

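The visualization-based model checks described here work like predictive checks: fit the provisional model, simulate data from it, and render the simulations next to the observed view so discrepancies stand out. A rough sketch of that workflow (not EVM's implementation), using a deliberately misspecified linear model:

```python
# Predictive-check sketch: overlay draws from a fitted model on observed data.
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
x = rng.uniform(0, 10, 200)
y = 2.0 * x + 3.0 * np.sin(x) + rng.normal(0, 1, x.size)   # nonlinear ground truth

# Provisional interpretation: "y is linear in x"
slope, intercept = np.polyfit(x, y, deg=1)
sigma = np.std(y - (slope * x + intercept))

fig, ax = plt.subplots()
for _ in range(20):                                         # simulated datasets
    y_sim = slope * x + intercept + rng.normal(0, sigma, x.size)
    ax.scatter(x, y_sim, s=4, color="lightgray", alpha=0.4)
ax.scatter(x, y, s=8, color="crimson", label="observed")
ax.set(xlabel="x", ylabel="y", title="Check: linear model vs. observed data")
ax.legend()
plt.show()   # the sinusoidal structure the linear model misses stands out against the gray band
```
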
arXiv:2305.08323 [pdf, other] cs.HC
Approximation and Progressive Display of Multiverse Analyses
Authors: Yang Liu, Tim Althoff, Jeffrey Heer
Abstract: A multiverse analysis evaluates all combinations of "reasonable" analytic decisions to promote robustness and transparency, but can lead to a combinatorial explosion of analyses to compute. Long delays before assessing results prevent users from diagnosing errors and iterating early. We contribute (1) approximation algorithms for estimating multiverse sensitivity and (2) monitoring visualizations for assessing progress and controlling execution on the fly. We evaluate how quickly three sampling-based algorithms converge to accurately rank sensitive decisions in both synthetic and real multiverse analyses. Compared to uniform random sampling, round robin and sketching approaches are 2 times faster in the best case, while on average estimating sensitivity accurately using 20% of the full multiverse. To enable analysts to stop early to fix errors or decide when results are "good enough" to move forward, we visualize both effect size and decision sensitivity estimates with confidence intervals, and surface potential issues including runtime warnings and model quality metrics.
Submitted 14 May, 2023; originally announced May 2023.

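The sampling idea can be pictured as running universes in an order that covers every decision option early, then updating per-decision sensitivity estimates as results stream in. The sketch below is a loose interpretation of "round robin" sampling, not the paper's algorithms; run_universe() is a made-up stand-in for executing one analysis path.

```python
import itertools, random, statistics

decisions = {                        # hypothetical analytic decisions
    "outlier_rule": ["none", "iqr", "z>3"],
    "transform":    ["raw", "log"],
    "model":        ["ols", "robust"],
}
universes = [dict(zip(decisions, combo))
             for combo in itertools.product(*decisions.values())]

def run_universe(u):                 # placeholder: pretend each path yields an effect size
    random.seed(str(sorted(u.items())))
    return random.gauss(0.5 if u["transform"] == "log" else 0.2, 0.05)

def round_robin(universes, decisions):
    """Yield universes so every option of every decision gets covered early."""
    remaining = list(universes)
    cycle = itertools.cycle([(d, o) for d in decisions for o in decisions[d]])
    while remaining:
        d, o = next(cycle)
        match = next((u for u in remaining if u[d] == o), None)
        if match is not None:
            remaining.remove(match)
            yield match

results = []
for i, u in enumerate(round_robin(universes, decisions), 1):
    results.append((u, run_universe(u)))
    if i % 4 == 0:                   # progressive sensitivity estimate
        sensitivity = {}
        for d, opts in decisions.items():
            means = [statistics.mean(e for uu, e in results if uu[d] == o)
                     for o in opts if any(uu[d] == o for uu, _ in results)]
            sensitivity[d] = round(max(means) - min(means), 3) if len(means) > 1 else 0.0
        print(f"after {i} universes: {sensitivity}")
```

Here sensitivity is approximated as the spread of mean effects across a decision's options; in this toy setup the "transform" decision surfaces as the sensitive one well before all twelve universes finish.
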
arXiv:2303.06777 [pdf] cs.PL cs.HC cs.SE
Live, Rich, and Composable: Qualities for Programming Beyond Static Text
Authors: Joshua Horowitz, Jeffrey Heer
Abstract: Efforts to push programming beyond static textual code have sought to imbue programming with multiple distinct qualities. One long-acknowledged quality is liveness: providing programmers with in-depth feedback about a program's dynamic behavior as the program is edited. A second quality, long-explored but lacking a shared term of art, is richness: allowing programmers to edit programs through domain-specific representations and interactions rather than solely through text. In this paper, we map the relationship between these two qualities and survey past work that exemplifies them. We observe that systems combining liveness and richness often do so at the cost of an essential quality of traditional programming: composability. We argue that, by combining liveness, richness, and composability, programming systems can better capture the full potential of interactive computation without leaving behind the expressivity of traditional code.
Submitted 12 March, 2023; originally announced March 2023.
Comments: To appear in the proceedings of PLATEAU 2023

arXiv:2302.07346 [pdf, other] cs.HC cs.CL doi:10.1145/3581641.3584059
ScatterShot: Interactive In-context Example Curation for Text Transformation
Authors: Tongshuang Wu, Hua Shen, Daniel S. Weld, Jeffrey Heer, Marco Tulio Ribeiro
Abstract: The in-context learning capabilities of LLMs like GPT-3 allow annotators to customize an LLM to their specific tasks with a small number of examples. However, users tend to include only the most obvious patterns when crafting examples, resulting in underspecified in-context functions that fall short on unseen cases. Further, it is hard to know when "enough" examples have been included even for known patterns. In this work, we present ScatterShot, an interactive system for building high-quality demonstration sets for in-context learning. ScatterShot iteratively slices unlabeled data into task-specific patterns, samples informative inputs from underexplored or not-yet-saturated slices in an active learning manner, and helps users label more efficiently with the help of an LLM and the current example set. In simulation studies on two text perturbation scenarios, ScatterShot sampling improves the resulting few-shot functions by 4-5 percentage points over random sampling, with less variance as more examples are added. In a user study, ScatterShot greatly helps users in covering different patterns in the input space and labeling in-context examples more efficiently, resulting in better in-context learning and less user effort.
Submitted 14 February, 2023; originally announced February 2023.
Comments: IUI 2023: 28th International Conference on Intelligent User Interfaces

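The core loop can be pictured as: bucket the unlabeled pool into task-specific slices, then pull the next example to label from whichever slice the current demonstration set covers least. A toy sketch with a hypothetical slicing heuristic for a date-normalization task (not ScatterShot's implementation):

```python
from collections import Counter

def slice_of(text: str) -> str:
    """Hypothetical slicing heuristic: group inputs by surface pattern."""
    if any(m in text.lower() for m in ("jan", "feb", "mar", "apr")):
        return "month_name"
    if "/" in text:
        return "slash_format"
    return "other"

def next_to_label(unlabeled, demonstrations):
    """Pick the unlabeled input whose slice has the fewest demonstrations."""
    covered = Counter(slice_of(x) for x, _ in demonstrations)
    return min(unlabeled, key=lambda x: covered[slice_of(x)])

demos = [("03/14/2022", "2022-03-14")]                    # (input, label) pairs so far
pool = ["12/01/1999", "Jan 5, 2021", "yesterday", "Feb 2 2020"]
print(next_to_label(pool, demos))                         # -> "Jan 5, 2021" (uncovered slice)
```

In the real system the slicing, sampling, and LLM-assisted labeling all update as the user accepts or corrects examples; the heuristic above only illustrates the coverage-driven selection step.
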
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">IUI 2023: 28th International Conference on Intelligent User Interfaces</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2301.03109">arXiv:2301.03109</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2301.03109">pdf</a>, <a href="https://arxiv.org/format/2301.03109">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Cinematic Techniques in Narrative Visualization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Conlen%2C+M">Matthew Conlen</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a>, <a href="/search/cs?searchtype=author&amp;query=Mushkin%2C+H">Hillary Mushkin</a>, <a href="/search/cs?searchtype=author&amp;query=Davidoff%2C+S">Scott Davidoff</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2301.03109v1-abstract-short" style="display: inline;"> The many genres of narrative visualization (e.g. data comics, data videos) each offer a unique set of affordances and constraints. To better understand a genre that we call cinematic visualizations-3D visualizations that make highly deliberate use of a camera to convey a narrative-we gathered 50 examples and analyzed their traditional cinematic aspects to identify the benefits and limitations of t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.03109v1-abstract-full').style.display = 'inline'; document.getElementById('2301.03109v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2301.03109v1-abstract-full" style="display: none;"> The many genres of narrative visualization (e.g. data comics, data videos) each offer a unique set of affordances and constraints. To better understand a genre that we call cinematic visualizations-3D visualizations that make highly deliberate use of a camera to convey a narrative-we gathered 50 examples and analyzed their traditional cinematic aspects to identify the benefits and limitations of the form. While the cinematic visualization approach can violate traditional rules of visualization, we find that through careful control of the camera, cinematic visualizations enable immersion in data-driven, anthropocentric environments, and can naturally incorporate in-situ narrators, concrete scales, and visual analogies. Our analysis guides our design of a series of cinematic visualizations, created for NASA&#39;s Earth Science Communications team. We present one as a case study to convey design guidelines covering cinematography, lighting, set design, and sound, and discuss challenges in creating cinematic visualizations. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.03109v1-abstract-full').style.display = 'none'; document.getElementById('2301.03109v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.09858">arXiv:2205.09858</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2205.09858">pdf</a>, <a href="https://arxiv.org/format/2205.09858">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Fidyll: A Compiler for Cross-Format Data Stories &amp; Explorable Explanations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Conlen%2C+M">Matthew Conlen</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.09858v1-abstract-short" style="display: inline;"> Narrative visualization is a powerful communicative tool that can take on various formats such as interactive articles, slideshows, and data videos. These formats each have their strengths and weaknesses, but existing authoring tools only support one output target. We conducted a series of formative interviews with seven domain experts to understand needs and practices around cross-format data sto&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.09858v1-abstract-full').style.display = 'inline'; document.getElementById('2205.09858v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.09858v1-abstract-full" style="display: none;"> Narrative visualization is a powerful communicative tool that can take on various formats such as interactive articles, slideshows, and data videos. These formats each have their strengths and weaknesses, but existing authoring tools only support one output target. We conducted a series of formative interviews with seven domain experts to understand needs and practices around cross-format data stories, and developed Fidyll, a cross-format compiler for authoring interactive data stories and explorable explanations. Our open-source tool can be used to rapidly create formats including static articles, low-motion articles, interactive articles, slideshows, and videos. We evaluate our system through a series of real-world usage scenarios, showing how it benefits authors in the domains of data journalism, scientific publishing, and nonprofit advocacy. We show how Fidyll, provides expressive leverage by reducing the amount of non-narrative markup that authors need to write by 80-90% compared to Idyll, an existing markup language for authoring interactive articles. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.09858v1-abstract-full').style.display = 'none'; document.getElementById('2205.09858v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 6 figures, for associated examples see https://idyll-lang.github.io/fidyll-examples/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.02705">arXiv:2201.02705</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2201.02705">pdf</a>, <a href="https://arxiv.org/format/2201.02705">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Programming Languages">cs.PL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation">stat.CO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Other Statistics">stat.OT</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3491102.3501888">10.1145/3491102.3501888 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Tisane: Authoring Statistical Models via Formal Reasoning from Conceptual and Data Relationships </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jun%2C+E">Eunice Jun</a>, <a href="/search/cs?searchtype=author&amp;query=Seo%2C+A">Audrey Seo</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a>, <a href="/search/cs?searchtype=author&amp;query=Just%2C+R">Ren茅 Just</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.02705v1-abstract-short" style="display: inline;"> Proper statistical modeling incorporates domain theory about how concepts relate and details of how data were measured. However, data analysts currently lack tool support for recording and reasoning about domain assumptions, data collection, and modeling choices in an integrated manner, leading to mistakes that can compromise scientific validity. For instance, generalized linear mixed-effects mode&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.02705v1-abstract-full').style.display = 'inline'; document.getElementById('2201.02705v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.02705v1-abstract-full" style="display: none;"> Proper statistical modeling incorporates domain theory about how concepts relate and details of how data were measured. 
arXiv:2108.04385 [pdf, other] cs.HC
Gemini2: Generating Keyframe-Oriented Animated Transitions Between Statistical Graphics
Authors: Younghoon Kim, Jeffrey Heer
Abstract: Complex animated transitions may be easier to understand when divided into separate, consecutive stages. However, effective staging requires careful attention to both animation semantics and timing parameters. We present Gemini2, a system for creating staged animations from a sequence of chart keyframes. Given only a start state and an end state, Gemini2 can automatically recommend intermediate keyframes for designers to consider. The Gemini2 recommendation engine leverages Gemini, our prior work, and GraphScape to itemize the given complex change into semantic edit operations and to recombine operations into stages with a guided order for clearly conveying the semantics. To evaluate Gemini2's recommendations, we conducted a human-subject study in which participants ranked recommended animations from both Gemini2 and Gemini. We find that Gemini2's animation recommendation ranking is well aligned with subjects' preferences, and Gemini2 can recommend favorable animations that Gemini cannot support.
Submitted 9 August, 2021; originally announced August 2021.

arXiv:2104.02712 [pdf, other] cs.OH cs.HC cs.SE
Hypothesis Formalization: Empirical Findings, Software Limitations, and Design Implications
Authors: Eunice Jun, Melissa Birchfield, Nicole de Moura, Jeffrey Heer, Rene Just
Abstract: Data analysis requires translating higher level questions and hypotheses into computable statistical models. We present a mixed-methods study aimed at identifying the steps, considerations, and challenges involved in operationalizing hypotheses into statistical models, a process we refer to as hypothesis formalization. In a formative content analysis of research papers, we find that researchers highlight decomposing a hypothesis into sub-hypotheses, selecting proxy variables, and formulating statistical models based on data collection design as key steps. In a lab study, we find that analysts fixated on implementation and shaped their analysis to fit familiar approaches, even if sub-optimal. In an analysis of software tools, we find that tools provide inconsistent, low-level abstractions that may limit the statistical models analysts use to formalize hypotheses. Based on these observations, we characterize hypothesis formalization as a dual-search process balancing conceptual and statistical considerations constrained by data and computation, and discuss implications for future tools.
Submitted 6 April, 2021; originally announced April 2021.

arXiv:2101.00288 [pdf, other] cs.CL
Polyjuice: Generating Counterfactuals for Explaining, Evaluating, and Improving Models
Authors: Tongshuang Wu, Marco Tulio Ribeiro, Jeffrey Heer, Daniel S. Weld
Abstract: While counterfactual examples are useful for analysis and training of NLP models, current generation methods either rely on manual labor to create very few counterfactuals, or only instantiate limited types of perturbations such as paraphrases or word substitutions. We present Polyjuice, a general-purpose counterfactual generator that allows for control over perturbation types and locations, trained by finetuning GPT-2 on multiple datasets of paired sentences. We show that Polyjuice produces diverse sets of realistic counterfactuals, which in turn are useful in various distinct applications: improving training and evaluation on three different tasks (with around 70% less annotation effort than manual generation), augmenting state-of-the-art explanation techniques, and supporting systematic counterfactual error analysis by revealing behaviors easily missed by human experts.
Submitted 1 June, 2021; v1 submitted 1 January, 2021; originally announced January 2021.
Comments: ACL 2021, main conference, long paper

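The generation recipe amounts to conditioning a finetuned language model on the original sentence plus a code naming the desired perturbation type. A sketch of that interface using the Hugging Face text-generation pipeline; the prompt format and control code are hypothetical, and plain gpt2 merely stands in for a properly finetuned checkpoint, so these outputs would not be meaningful counterfactuals:

```python
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")   # stand-in for a finetuned model

def counterfactuals(sentence: str, control: str, n: int = 3):
    """Generate n perturbations of `sentence`, conditioned on a control code."""
    prompt = f"original: {sentence} [{control}] perturbed:"   # hypothetical format
    outputs = generator(prompt, max_new_tokens=20, do_sample=True,
                        temperature=0.8, num_return_sequences=n)
    return [o["generated_text"][len(prompt):].strip() for o in outputs]

for text in counterfactuals("The movie was great.", control="negation"):
    print(text)
```
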
arXiv:2009.01429 [pdf, other] cs.HC
Gemini: A Grammar and Recommender System for Animated Transitions in Statistical Graphics
Authors: Younghoon Kim, Jeffrey Heer
Abstract: Animated transitions help viewers follow changes between related visualizations. Specifying effective animations demands significant effort: authors must select the elements and properties to animate, provide transition parameters, and coordinate the timing of stages. To facilitate this process, we present Gemini, a declarative grammar and recommendation system for animated transitions between single-view statistical graphics. Gemini specifications define transition "steps" in terms of high-level visual components (marks, axes, legends) and composition rules to synchronize and concatenate steps. With this grammar, Gemini can recommend animation designs to augment and accelerate designers' work. Gemini enumerates staged animation designs for given start and end states, and ranks those designs using a cost function informed by prior perceptual studies. To evaluate Gemini, we conduct both a formative study on Mechanical Turk to assess and tune our ranking function, and a summative study in which 8 experienced visualization developers implement animations in D3 that we then compare to Gemini's suggestions. We find that most designs (9/11) are exactly replicable in Gemini, with many (8/11) achievable via edits to suggestions, and that Gemini suggestions avoid multiple participant errors.
Submitted 2 September, 2020; originally announced September 2020.

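The enumerate-then-rank recommendation loop can be sketched independently of the grammar: treat the difference between the start and end charts as a set of semantic edit operations, enumerate candidate stagings, and keep the lowest-cost design. The operations, pairing penalties, and cost function below are hypothetical, not Gemini's cost model:

```python
from itertools import permutations

edit_ops = ["rescale_y_axis", "remove_marks", "add_marks"]   # diff between start and end chart

PAIR_PENALTY = {   # hypothetical cost of animating two ops in the same stage
    frozenset(["remove_marks", "add_marks"]): 3.0,           # confusing to do both at once
    frozenset(["rescale_y_axis", "add_marks"]): 1.0,
}

def stagings(ops):
    """Candidate designs: everything at once, or one op per stage in any order."""
    yield [list(ops)]
    for order in permutations(ops):
        yield [[op] for op in order]

def cost(design):
    total = 0.5 * len(design)                # more stages -> longer animation
    for stage in design:
        for i, a in enumerate(stage):
            for b in stage[i + 1:]:
                total += PAIR_PENALTY.get(frozenset([a, b]), 0.0)
    return total

print(min(stagings(edit_ops), key=cost))     # three single-op stages beat the all-at-once design
```
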
arXiv:2008.12828 [pdf, other] cs.LG cs.DL stat.ML
CORAL: COde RepresentAtion Learning with Weakly-Supervised Transformers for Analyzing Data Analysis
Authors: Ge Zhang, Mike A. Merrill, Yang Liu, Jeffrey Heer, Tim Althoff
Abstract: Large scale analysis of source code, and in particular scientific source code, holds the promise of better understanding the data science process, identifying analytical best practices, and providing insights to the builders of scientific toolkits. However, large corpora have remained unanalyzed in depth, as descriptive labels are absent and require expert domain knowledge to generate. We propose a novel weakly supervised transformer-based architecture for computing joint representations of code from both abstract syntax trees and surrounding natural language comments. We then evaluate the model on a new classification task for labeling computational notebook cells as stages in the data analysis process from data import to wrangling, exploration, modeling, and evaluation. We show that our model, leveraging only easily-available weak supervision, achieves a 38% increase in accuracy over expert-supplied heuristics and outperforms a suite of baselines. Our model enables us to examine a set of 118,000 Jupyter Notebooks to uncover common data analysis patterns. Focusing on notebooks with relationships to academic articles, we conduct the largest ever study of scientific code and find that notebook composition correlates with the citation count of corresponding papers.
Submitted 28 August, 2020; originally announced August 2020.

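The model's inputs and its weak supervision are easy to picture: syntax tokens from a cell's abstract syntax tree, the cell's natural language comments, and a keyword heuristic standing in for an expert label. A small sketch of that preprocessing (the keyword lists are hypothetical heuristics; this is not CORAL's architecture):

```python
import ast

STAGE_KEYWORDS = {                    # hypothetical weak-supervision rules
    "import":  ["read_csv", "read_sql", "load"],
    "wrangle": ["merge", "dropna", "fillna", "groupby"],
    "explore": ["plot", "hist", "describe"],
    "model":   ["fit", "predict", "LogisticRegression"],
}

def cell_features(source: str):
    """Extract AST node-type tokens and comment text from a notebook cell."""
    syntax_tokens = [type(node).__name__ for node in ast.walk(ast.parse(source))]
    comments = [line.lstrip("# ").rstrip()
                for line in source.splitlines() if line.lstrip().startswith("#")]
    return syntax_tokens, comments

def weak_label(source: str) -> str:
    for stage, keywords in STAGE_KEYWORDS.items():
        if any(kw in source for kw in keywords):
            return stage
    return "other"

cell = "# load the survey data\nimport pandas as pd\ndf = pd.read_csv('survey.csv')"
tokens, comments = cell_features(cell)
print(tokens[:4], comments, weak_label(cell))
```

A transformer can then consume the syntax and comment tokens jointly, with heuristic labels like these serving only as noisy training targets.
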
arXiv:2007.05551 [pdf, other] cs.HC doi:10.1109/TVCG.2020.3028985
Boba: Authoring and Visualizing Multiverse Analyses
Authors: Yang Liu, Alex Kale, Tim Althoff, Jeffrey Heer
Abstract: Multiverse analysis is an approach to data analysis in which all "reasonable" analytic decisions are evaluated in parallel and interpreted collectively, in order to foster robustness and transparency. However, specifying a multiverse is demanding because analysts must manage myriad variants from a cross-product of analytic decisions, and the results require nuanced interpretation. We contribute Boba: an integrated domain-specific language (DSL) and visual analysis system for authoring and reviewing multiverse analyses. With the Boba DSL, analysts write the shared portion of analysis code only once, alongside local variations defining alternative decisions, from which the compiler generates a multiplex of scripts representing all possible analysis paths. The Boba Visualizer provides linked views of model results and the multiverse decision space to enable rapid, systematic assessment of consequential decisions and robustness, including sampling uncertainty and model fit. We demonstrate Boba's utility through two data analysis case studies, and reflect on challenges and design opportunities for multiverse analysis software.
Submitted 30 July, 2020; v1 submitted 10 July, 2020; originally announced July 2020.
Comments: submitted to IEEE Transactions on Visualization and Computer Graphics (Proc. VAST)

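The compile step boils down to substituting each decision's options into shared template code and emitting one script per point in the cross product. A bare-bones sketch with a made-up {{placeholder}} syntax, not the actual Boba DSL:

```python
from itertools import product

template = """\
import pandas as pd
df = pd.read_csv("data.csv")
df = df[{{exclusion}}]
print(df["outcome"].{{aggregate}}())
"""

decisions = {   # hypothetical analytic decisions and their options
    "exclusion": ['df["age"] >= 18', 'df["age"] >= 21'],
    "aggregate": ["mean", "median"],
}

scripts = {}
for i, combo in enumerate(product(*decisions.values())):
    code = template
    for name, choice in zip(decisions, combo):
        code = code.replace("{{" + name + "}}", choice)
    scripts[f"universe_{i}.py"] = code

print(f"generated {len(scripts)} analysis scripts")   # 2 x 2 = 4 universes
print(scripts["universe_0.py"])
```
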
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.05551v2-abstract-full').style.display = 'none'; document.getElementById('2007.05551v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">submitted to IEEE Transactions on Visualization and Computer Graphics (Proc. VAST)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1911.00568">arXiv:1911.00568</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1911.00568">pdf</a>, <a href="https://arxiv.org/format/1911.00568">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Goals, Process, and Challenges of Exploratory Data Analysis: An Interview Study </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wongsuphasawat%2C+K">Kanit Wongsuphasawat</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1911.00568v1-abstract-short" style="display: inline;"> How do analysis goals and context affect exploratory data analysis (EDA)? To investigate this question, we conducted semi-structured interviews with 18 data analysts. We characterize common exploration goals: profiling (assessing data quality) and discovery (gaining new insights). Though the EDA literature primarily emphasizes discovery, we observe that discovery only reliably occurs in the contex&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.00568v1-abstract-full').style.display = 'inline'; document.getElementById('1911.00568v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1911.00568v1-abstract-full" style="display: none;"> How do analysis goals and context affect exploratory data analysis (EDA)? To investigate this question, we conducted semi-structured interviews with 18 data analysts. We characterize common exploration goals: profiling (assessing data quality) and discovery (gaining new insights). Though the EDA literature primarily emphasizes discovery, we observe that discovery only reliably occurs in the context of open-ended analyses, whereas all participants engage in profiling across all of their analyses. We describe the process and challenges of EDA highlighted by our interviews. We find that analysts must perform repetitive tasks (e.g., examine numerous variables), yet they may have limited time or lack domain knowledge to explore data. 
arXiv:1910.13602 [pdf, other] cs.HC doi:10.1145/3313831.3376533
Paths Explored, Paths Omitted, Paths Obscured: Decision Points & Selective Reporting in End-to-End Data Analysis
Authors: Yang Liu, Tim Althoff, Jeffrey Heer
Abstract: Drawing reliable inferences from data involves many, sometimes arbitrary, decisions across phases of data collection, wrangling, and modeling. As different choices can lead to diverging conclusions, understanding how researchers make analytic decisions is important for supporting robust and replicable analysis. In this study, we pore over nine published research studies and conduct semi-structured interviews with their authors. We observe that researchers often base their decisions on methodological or theoretical concerns, but subject to constraints arising from the data, expertise, or perceived interpretability. We confirm that researchers may experiment with choices in search of desirable results, but also identify other reasons why researchers explore alternatives yet omit findings. In concert with our interviews, we also contribute visualizations for communicating decision processes throughout an analysis. Based on our results, we identify design opportunities for strengthening end-to-end analysis, for instance via tracking and meta-analysis of multiple decision paths.
Submitted 8 January, 2020; v1 submitted 29 October, 2019; originally announced October 2019.

arXiv:1907.13568 [pdf, other] cs.HC doi:10.1109/TVCG.2019.2934281
Critical Reflections on Visualization Authoring Systems
Authors: Arvind Satyanarayan, Bongshin Lee, Donghao Ren, Jeffrey Heer, John Stasko, John Thompson, Matthew Brehmer, Zhicheng Liu
Abstract: An emerging generation of visualization authoring systems support expressive information visualization without textual programming. As they vary in their visualization models, system architectures, and user interfaces, it is challenging to directly compare these systems using traditional evaluative methods. Recognizing the value of contextualizing our decisions in the broader design space, we present critical reflections on three systems we developed -- Lyra, Data Illustrator, and Charticulator. This paper surfaces knowledge that would have been daunting within the constituent papers of these three systems. We compare and contrast their (previously unmentioned) limitations and trade-offs between expressivity and learnability. We also reflect on common assumptions that we made during the development of our systems, thereby informing future research directions in visualization authoring systems.
Submitted 31 July, 2019; originally announced July 2019.

Recognizing the value of contextualizing our decisions in the broader design space, we pres&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.13568v1-abstract-full').style.display = 'inline'; document.getElementById('1907.13568v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1907.13568v1-abstract-full" style="display: none;"> An emerging generation of visualization authoring systems support expressive information visualization without textual programming. As they vary in their visualization models, system architectures, and user interfaces, it is challenging to directly compare these systems using traditional evaluative methods. Recognizing the value of contextualizing our decisions in the broader design space, we present critical reflections on three systems we developed -- Lyra, Data Illustrator, and Charticulator. This paper surfaces knowledge that would have been daunting within the constituent papers of these three systems. We compare and contrast their (previously unmentioned) limitations and trade-offs between expressivity and learnability. We also reflect on common assumptions that we made during the development of our systems, thereby informing future research directions in visualization authoring systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.13568v1-abstract-full').style.display = 'none'; document.getElementById('1907.13568v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2019. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1807.06641">arXiv:1807.06641</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1807.06641">pdf</a>, <a href="https://arxiv.org/format/1807.06641">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Beyond Heuristics: Learning Visualization Design </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Saket%2C+B">Bahador Saket</a>, <a href="/search/cs?searchtype=author&amp;query=Moritz%2C+D">Dominik Moritz</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+H">Halden Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Dibia%2C+V">Victor Dibia</a>, <a href="/search/cs?searchtype=author&amp;query=Demiralp%2C+C">Cagatay Demiralp</a>, <a href="/search/cs?searchtype=author&amp;query=Heer%2C+J">Jeffrey Heer</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1807.06641v2-abstract-short" style="display: inline;"> In this paper, we describe a research agenda for deriving design principles directly from data. We argue that it is time to go beyond manually curated and applied visualization design guidelines. We propose learning models of visualization design from data collected using graphical perception studies and build tools powered by the learned models. 
To achieve this vision, we need to 1) develop scala&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1807.06641v2-abstract-full').style.display = 'inline'; document.getElementById('1807.06641v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1807.06641v2-abstract-full" style="display: none;"> In this paper, we describe a research agenda for deriving design principles directly from data. We argue that it is time to go beyond manually curated and applied visualization design guidelines. We propose learning models of visualization design from data collected using graphical perception studies and build tools powered by the learned models. To achieve this vision, we need to 1) develop scalable methods for collecting training data, 2) collect different forms of training data, 3) advance interpretability of machine learning models, and 4) develop adaptive models that evolve as more data becomes available. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1807.06641v2-abstract-full').style.display = 'none'; document.getElementById('1807.06641v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 August, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 July, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2018. </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div 
class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>
