Search | arXiv e-print repository
Showing 1–22 of 22 results for author: Ofek, E

Searching in archive cs.

1. arXiv:2502.06233 [pdf, other]  cs.CL cs.AI
   Confidence Improves Self-Consistency in LLMs
   Authors: Amir Taubenfeld, Tom Sheffer, Eran Ofek, Amir Feder, Ariel Goldstein, Zorik Gekhman, Gal Yona
   Abstract: Self-consistency decoding enhances LLMs' performance on reasoning tasks by sampling diverse reasoning paths and selecting the most frequent answer. However, it is computationally expensive, as sampling many of these (lengthy) paths is required to increase the chances that the correct answer emerges as the most frequent one. To address this, we introduce Confidence-Informed Self-Consistency (CISC). CISC performs a weighted majority vote based on confidence scores obtained directly from the model. By prioritizing high-confidence paths, it can identify the correct answer with a significantly smaller sample size.
   When tested on nine models and four datasets, CISC outperforms self-consistency in nearly all configurations, reducing the required number of reasoning paths by over 40% on average. In addition, we introduce the notion of within-question confidence evaluation, after showing that standard evaluation methods are poor predictors of success in distinguishing correct and incorrect answers to the same question. In fact, the most calibrated confidence method proved to be the least effective for CISC. Lastly, beyond these practical implications, our results and analyses show that LLMs can effectively judge the correctness of their own outputs, contributing to the ongoing debate on this topic.
   Submitted 10 February, 2025; originally announced February 2025.
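The voting rule this abstract describes (a confidence-weighted majority vote over sampled reasoning paths) can be illustrated with a minimal Python sketch. This is not the paper's implementation: the function name `cisc_vote`, the `temperature` parameter, and the softmax normalization of the raw confidences are illustrative assumptions.

```python
from collections import defaultdict
import math

def cisc_vote(samples: list[tuple[str, float]], temperature: float = 1.0) -> str:
    """Confidence-weighted majority vote over sampled reasoning paths.

    samples: (final_answer, raw_confidence) pairs, one per path.
    """
    # Softmax-normalize the raw confidences across paths (an assumed
    # normalization; the paper's exact scheme may differ).
    exps = [math.exp(c / temperature) for _, c in samples]
    total = sum(exps)

    # Accumulate the normalized weight of each distinct answer.
    tally: dict[str, float] = defaultdict(float)
    for (answer, _), e in zip(samples, exps):
        tally[answer] += e / total

    # The answer with the largest total weight wins.
    return max(tally, key=tally.get)

# Example: one high-confidence path outvotes two low-confidence ones,
# so "42" wins here even though "41" is the more frequent answer.
print(cisc_vote([("42", 2.0), ("41", 0.5), ("41", 0.4)]))
```

Plain self-consistency is the equal-weights special case: with identical confidence scores, the function reduces to a simple majority vote.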
2. arXiv:2312.11805 [pdf, other]  cs.CL cs.AI cs.CV
   Gemini: A Family of Highly Capable Multimodal Models
   Authors: Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M. Dai, Anja Hauth, Katie Millican, David Silver, Melvin Johnson, Ioannis Antonoglou, Julian Schrittwieser, Amelia Glaese, Jilin Chen, Emily Pitler, Timothy Lillicrap, Angeliki Lazaridou, Orhan Firat, James Molloy, Michael Isard, Paul R. Barham, Tom Hennigan, Benjamin Lee, et al. (1325 additional authors not shown)
   Abstract: This report introduces a new family of multimodal models, Gemini, that exhibit remarkable capabilities across image, audio, video, and text understanding. The Gemini family consists of Ultra, Pro, and Nano sizes, suitable for applications ranging from complex reasoning tasks to on-device memory-constrained use-cases. Evaluation on a broad range of benchmarks shows that our most-capable Gemini Ultra model advances the state of the art in 30 of 32 of these benchmarks - notably being the first model to achieve human-expert performance on the well-studied exam benchmark MMLU, and improving the state of the art in every one of the 20 multimodal benchmarks we examined. We believe that the new capabilities of the Gemini family in cross-modal reasoning and language understanding will enable a wide variety of use cases. We discuss our approach toward post-training and deploying Gemini models responsibly to users through services including Gemini, Gemini Advanced, Google AI Studio, and Cloud Vertex AI.
   Submitted 17 June, 2024; v1 submitted 18 December, 2023; originally announced December 2023.

3. arXiv:2305.10400 [pdf, other]  cs.CL cs.CV
   What You See is What You Read? Improving Text-Image Alignment Evaluation
   Authors: Michal Yarom, Yonatan Bitton, Soravit Changpinyo, Roee Aharoni, Jonathan Herzig, Oran Lang, Eran Ofek, Idan Szpektor
   Abstract: Automatically determining whether a text and a corresponding image are semantically aligned is a significant challenge for vision-language models, with applications in generative text-to-image and image-to-text tasks. In this work, we study methods for automatic text-image alignment evaluation. We first introduce SeeTRUE: a comprehensive evaluation set, spanning multiple datasets from both text-to-image and image-to-text generation tasks, with human judgements for whether a given text-image pair is semantically aligned.
   We then describe two automatic methods to determine alignment: the first involving a pipeline based on question generation and visual question answering models, and the second employing an end-to-end classification approach by finetuning multimodal pretrained models. Both methods surpass prior approaches in various text-image alignment tasks, with significant improvements in challenging cases that involve complex composition or unnatural images. Finally, we demonstrate how our approaches can localize specific misalignments between an image and a given text, and how they can be used to automatically re-rank candidates in text-to-image generation.
   Submitted 26 December, 2023; v1 submitted 17 May, 2023; originally announced May 2023.
   Comments: Accepted to NeurIPS 2023. Website: https://wysiwyr-itm.github.io/
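The first of the two methods (question generation plus visual question answering) lends itself to a short sketch: generate verification questions from the text, score each against the image with a VQA model, and aggregate. The injected callables `generate_questions` and `vqa_yes_probability` are hypothetical stand-ins for real QG and VQA models, and the mean aggregation is an assumption, not the paper's exact formulation.

```python
from typing import Callable, List

def alignment_score(
    text: str,
    image_path: str,
    generate_questions: Callable[[str], List[str]],
    vqa_yes_probability: Callable[[str, str], float],
) -> float:
    """Score text-image alignment in [0, 1] via question generation + VQA."""
    # 1) Decompose the text into yes/no verification questions, e.g.
    #    "a red car on a bridge" -> ["Is there a car?", "Is the car red?"]
    questions = generate_questions(text)
    if not questions:
        return 0.0
    # 2) Ask each question against the image; record the probability the
    #    VQA model assigns to the answer "yes".
    probs = [vqa_yes_probability(image_path, q) for q in questions]
    # 3) Aggregate the per-question probabilities into one score
    #    (the mean is an assumed aggregation).
    return sum(probs) / len(probs)
```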
4. arXiv:2301.02336 [pdf, other]  cs.RO cs.HC  doi:10.1145/3568162.3578630
   Exploring Levels of Control for a Navigation Assistant for Blind Travelers
   Authors: Vinitha Ranganeni, Mike Sinclair, Eyal Ofek, Amos Miller, Jonathan Campbell, Andrey Kolobov, Edward Cutrell
   Abstract: Only a small percentage of blind and low-vision people use traditional mobility aids such as a cane or a guide dog. Various assistive technologies have been proposed to address the limitations of traditional mobility aids. These devices often give either the user or the device the majority of the control. In this work, we explore how varying levels of control affect the users' sense of agency, trust in the device, confidence, and successful navigation. We present Glide, a novel mobility aid with two modes for control: Glide-directed and User-directed. We employ Glide in a study (N=9) in which blind or low-vision participants used both modes to navigate through an indoor environment. Overall, participants found that Glide was easy to use and learn. Most participants trusted Glide despite its current limitations, and their confidence and performance increased as they continued to use Glide. Users' control mode preference varied in different situations; no single mode "won" in all situations.
   Submitted 5 January, 2023; originally announced January 2023.
   Comments: 9 pages, 6 figures, Human-Robot Interaction 2023

5. arXiv:2206.09038 [pdf, other]  cs.CV cs.LG  doi:10.1145/1463434.1463464
   Validation of Vector Data using Oblique Images
   Authors: Pragyana Mishra, Eyal Ofek, Gur Kimchi
   Abstract: Oblique images are aerial photographs taken at oblique angles to the earth's surface. Projections of vector and other geospatial data in these images depend on camera parameters, positions of the geospatial entities, surface terrain, occlusions, and visibility. This paper presents a robust and scalable algorithm to detect inconsistencies in vector data using oblique images. The algorithm uses image descriptors to encode the local appearance of a geospatial entity in images. These image descriptors combine color, pixel-intensity gradients, texture, and steerable filter responses. A Support Vector Machine classifier is trained to detect image descriptors that are not consistent with underlying vector data, digital elevation maps, building models, and camera parameters. In this paper, we train the classifier on visible road segments and non-road data. Thereafter, the trained classifier detects inconsistencies in vectors, which include both occluded and misaligned road segments.
   The consistent road segments validate our vector, DEM, and 3-D model data for those areas while inconsistent segments point out errors. We further show that a search for descriptors that are consistent with visible road segments in the neighborhood of a misaligned road yields the desired road alignment that is consistent with pixels in the image.
   Submitted 17 June, 2022; originally announced June 2022.
   Comments: In Proceedings of the 16th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems (ACM GIS '08)
   Journal ref: Proceedings of the 16th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems (ACM GIS '08), pp. 1-10, 2008
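The classification step the abstract describes can be sketched with scikit-learn. Descriptor extraction (color, gradients, texture, steerable-filter responses) is abstracted into precomputed feature arrays; the RBF kernel, the probability threshold, and the function names are assumptions rather than the paper's implementation.

```python
import numpy as np
from sklearn.svm import SVC

def train_road_classifier(road_feats: np.ndarray, nonroad_feats: np.ndarray) -> SVC:
    """Fit an SVM separating road-appearance descriptors from non-road ones.

    Both inputs are (n_samples, n_features) arrays of precomputed image
    descriptors sampled on visible road segments and on non-road data.
    """
    X = np.vstack([road_feats, nonroad_feats])
    y = np.concatenate([np.ones(len(road_feats)), np.zeros(len(nonroad_feats))])
    return SVC(kernel="rbf", probability=True).fit(X, y)

def flag_inconsistent_segments(
    clf: SVC, segment_feats: list, threshold: float = 0.5
) -> list:
    """Return indices of projected road segments whose descriptors look
    non-road-like, i.e. candidate occlusions or misalignments."""
    flagged = []
    for i, feats in enumerate(segment_feats):
        # Mean road probability over the descriptors sampled along a segment.
        p_road = clf.predict_proba(feats)[:, 1].mean()
        if p_road < threshold:
            flagged.append(i)
    return flagged
```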
6. arXiv:2206.03189 [pdf, other]  cs.HC
   Quantifying the Effects of Working in VR for One Week
   Authors: Verena Biener, Snehanjali Kalamkar, Negar Nouri, Eyal Ofek, Michel Pahud, John J. Dudley, Jinghui Hu, Per Ola Kristensson, Maheshya Weerasinghe, Klen Čopič Pucihar, Matjaž Kljun, Stephan Streuber, Jens Grubert
   Abstract: Virtual Reality (VR) provides new possibilities for modern knowledge work. However, the potential advantages of virtual work environments can only be used if it is feasible to work in them for an extended period of time. Until now, there have been only limited studies of the long-term effects of working in VR. This paper addresses the need for understanding such long-term effects. Specifically, we report on a comparative study (n=16), in which participants were working in VR for an entire week -- for five days, eight hours each day -- as well as in a baseline physical desktop environment. This study aims to quantify the effects of exchanging a desktop-based work environment with a VR-based environment. Hence, during this study, we do not present the participants with the best possible VR system but rather a setup delivering a comparable experience to working in the physical desktop environment. The study reveals that, as expected, VR results in significantly worse ratings across most measures. Among other results, we found concerning levels of simulator sickness, below-average usability ratings, and two participants dropped out on the first day using VR due to migraine, nausea, and anxiety. Nevertheless, there is some indication that participants gradually overcame negative first impressions and initial discomfort. Overall, this study helps lay the groundwork for subsequent research by clearly highlighting current shortcomings and identifying opportunities for improving the experience of working in VR.
   Submitted 8 June, 2022; v1 submitted 7 June, 2022; originally announced June 2022.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">11 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.3.7 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.06337">arXiv:2201.06337</a> <span> [<a href="https://arxiv.org/pdf/2201.06337">pdf</a>, <a href="https://arxiv.org/format/2201.06337">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> PoVRPoint: Authoring Presentations in Mobile Virtual Reality </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Biener%2C+V">Verena Biener</a>, <a href="/search/cs?searchtype=author&query=Gesslein%2C+T">Travis Gesslein</a>, <a href="/search/cs?searchtype=author&query=Schneider%2C+D">Daniel Schneider</a>, <a href="/search/cs?searchtype=author&query=Kawala%2C+F">Felix Kawala</a>, <a href="/search/cs?searchtype=author&query=Otte%2C+A">Alexander Otte</a>, <a href="/search/cs?searchtype=author&query=Kristensson%2C+P+O">Per Ola Kristensson</a>, <a href="/search/cs?searchtype=author&query=Pahud%2C+M">Michel Pahud</a>, <a href="/search/cs?searchtype=author&query=Ofek%2C+E">Eyal Ofek</a>, <a href="/search/cs?searchtype=author&query=Campos%2C+C">Cuauhtli Campos</a>, <a href="/search/cs?searchtype=author&query=Kljun%2C+M">Matja啪 Kljun</a>, <a href="/search/cs?searchtype=author&query=Pucihar%2C+K+%C4%8C">Klen 膶opi膷 Pucihar</a>, <a href="/search/cs?searchtype=author&query=Grubert%2C+J">Jens Grubert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.06337v1-abstract-short" style="display: inline;"> Virtual Reality (VR) has the potential to support mobile knowledge workers by complementing traditional input devices with a large three-dimensional output space and spatial input. Previous research on supporting VR knowledge work explored domains such as text entry using physical keyboards and spreadsheet interaction using combined pen and touch input. Inspired by such work, this paper probes the… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.06337v1-abstract-full').style.display = 'inline'; document.getElementById('2201.06337v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.06337v1-abstract-full" style="display: none;"> Virtual Reality (VR) has the potential to support mobile knowledge workers by complementing traditional input devices with a large three-dimensional output space and spatial input. Previous research on supporting VR knowledge work explored domains such as text entry using physical keyboards and spreadsheet interaction using combined pen and touch input. Inspired by such work, this paper probes the VR design space for authoring presentations in mobile settings. We propose PoVRPoint -- a set of tools coupling pen- and touch-based editing of presentations on mobile devices, such as tablets, with the interaction capabilities afforded by VR. 
   We study the utility of extended display space to, for example, assist users in identifying target slides, supporting spatial manipulation of objects on a slide, creating animations, and facilitating arrangements of multiple, possibly occluded, shapes. Among other things, our results indicate that 1) the wide field of view afforded by VR results in significantly faster target slide identification times compared to a tablet-only interface for visually salient targets; and 2) the three-dimensional view in VR enables significantly faster object reordering in the presence of occlusion compared to two baseline interfaces. A user study further confirmed that the interaction techniques were found to be usable and enjoyable.
   Submitted 17 January, 2022; originally announced January 2022.
   Comments: IEEE VR 2022; to appear in IEEE Transactions on Visualization and Computer Graphics, 2022
   ACM Class: I.3.7
   Journal ref: IEEE Transactions on Visualization and Computer Graphics, 2022

8. arXiv:2111.03942 [pdf, other]  cs.HC
   Extended Reality for Knowledge Work in Everyday Environments
   Authors: Verena Biener, Eyal Ofek, Michel Pahud, Per Ola Kristensson, Jens Grubert
   Abstract: Virtual and Augmented Reality have the potential to change information work. The ability to modify the worker's senses can transform everyday environments into a productive office, using portable head-mounted displays combined with conventional interaction devices, such as keyboards and tablets.
   While a stream of better, cheaper, and lighter HMDs has been introduced for consumers in recent years, there are still many challenges to be addressed to allow this vision to become reality. This chapter summarizes the state of the art in the field of extended reality for knowledge work in everyday environments and proposes steps to address the open challenges.
   Submitted 6 November, 2021; originally announced November 2021.
   ACM Class: H.5.2

9. arXiv:2109.10607 [pdf, other]  cs.HC
   Accuracy Evaluation of Touch Tasks in Commodity Virtual and Augmented Reality Head-Mounted Displays
   Authors: Daniel Schneider, Verena Biener, Alexander Otte, Travis Gesslein, Philipp Gagel, Cuauhtli Campos, Klen Čopič Pucihar, Matjaž Kljun, Eyal Ofek, Michel Pahud, Per Ola Kristensson, Jens Grubert
   Abstract: An increasing number of consumer-oriented head-mounted displays (HMD) for augmented and virtual reality (AR/VR) are capable of finger and hand tracking. We report on the accuracy of off-the-shelf VR and AR HMDs when used for touch-based tasks such as pointing or drawing. Specifically, we report on the finger tracking accuracy of the VR head-mounted displays Oculus Quest, Vive Pro, and the Leap Motion controller, when attached to a VR HMD, as well as the finger tracking accuracy of the AR head-mounted displays Microsoft HoloLens 2 and Magic Leap. We present the results of two experiments in which we compare the accuracy for absolute and relative pointing tasks using both human participants and a robot. The results suggest that HTC Vive has a lower spatial accuracy than the Oculus Quest and Leap Motion and that the Microsoft HoloLens 2 provides higher spatial accuracy than Magic Leap One. These findings can serve as decision support for researchers and practitioners in choosing which systems to use in the future.
   Submitted 22 September, 2021; originally announced September 2021.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in SUI 2021, November 09-10, Virtual Conference</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.3.7 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.12390">arXiv:2108.12390</a> <span> [<a href="https://arxiv.org/pdf/2108.12390">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3510463">10.1145/3510463 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Two-In-One: A Design Space for Mapping Unimanual Input into Bimanual Interactions in VR for Users with Limited Movement </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yamagami%2C+M">Momona Yamagami</a>, <a href="/search/cs?searchtype=author&query=Junuzovic%2C+S">Sasa Junuzovic</a>, <a href="/search/cs?searchtype=author&query=Gonzalez-Franco%2C+M">Mar Gonzalez-Franco</a>, <a href="/search/cs?searchtype=author&query=Ofek%2C+E">Eyal Ofek</a>, <a href="/search/cs?searchtype=author&query=Cutrell%2C+E">Edward Cutrell</a>, <a href="/search/cs?searchtype=author&query=Porter%2C+J+R">John R. Porter</a>, <a href="/search/cs?searchtype=author&query=Wilson%2C+A+D">Andrew D. Wilson</a>, <a href="/search/cs?searchtype=author&query=Mott%2C+M+E">Martez E. Mott</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2108.12390v3-abstract-short" style="display: inline;"> Virtual Reality (VR) applications often require users to perform actions with two hands when performing tasks and interacting with objects in virtual environments. Although bimanual interactions in VR can resemble real-world interactions -- thus increasing realism and improving immersion -- they can also pose significant accessibility challenges to people with limited mobility, such as for people… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.12390v3-abstract-full').style.display = 'inline'; document.getElementById('2108.12390v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2108.12390v3-abstract-full" style="display: none;"> Virtual Reality (VR) applications often require users to perform actions with two hands when performing tasks and interacting with objects in virtual environments. Although bimanual interactions in VR can resemble real-world interactions -- thus increasing realism and improving immersion -- they can also pose significant accessibility challenges to people with limited mobility, such as for people who have full use of only one hand. An opportunity exists to create accessible techniques that take advantage of users' abilities, but designers currently lack structured tools to consider alternative approaches. 
    To begin filling this gap, we propose Two-in-One, a design space that facilitates the creation of accessible methods for bimanual interactions in VR from unimanual input. Our design space comprises two dimensions, bimanual interactions and computer assistance, and we provide a detailed examination of issues to consider when creating new unimanual input techniques that map to bimanual interactions in VR. We used our design space to create three interaction techniques that we subsequently implemented for a subset of bimanual interactions and received user feedback through a video elicitation study with 17 people with limited mobility. Our findings explore complex tradeoffs associated with autonomy and agency and highlight the need for additional settings and methods to make VR accessible to people with limited mobility.
    Submitted 20 April, 2024; v1 submitted 27 August, 2021; originally announced August 2021.
    Comments: 26 pages, 3 figures, 6 tables

11. arXiv:2108.10829 [pdf, other]  cs.RO cs.HC  doi:10.1145/3472749.3474821
    HapticBots: Distributed Encountered-type Haptics for VR with Multiple Shape-changing Mobile Robots
    Authors: Ryo Suzuki, Eyal Ofek, Mike Sinclair, Daniel Leithinger, Mar Gonzalez-Franco
    Abstract: HapticBots introduces a novel encountered-type haptic approach for Virtual Reality (VR) based on multiple tabletop-size shape-changing robots. These robots move on a tabletop and change their height and orientation to haptically render various surfaces and objects on-demand.
arXiv:2108.10829 (https://arxiv.org/abs/2108.10829) [pdf, other]
cs.RO (Robotics); cs.HC (Human-Computer Interaction); DOI: 10.1145/3472749.3474821

HapticBots: Distributed Encountered-type Haptics for VR with Multiple Shape-changing Mobile Robots
Authors: Ryo Suzuki, Eyal Ofek, Mike Sinclair, Daniel Leithinger, Mar Gonzalez-Franco
Abstract: HapticBots introduces a novel encountered-type haptic approach for Virtual Reality (VR) based on multiple tabletop-size shape-changing robots. These robots move on a tabletop and change their height and orientation to haptically render various surfaces and objects on demand. Compared to previous encountered-type haptic approaches such as shape displays or robotic arms, the proposed approach has advantages in deployability, scalability, and generalizability: the robots can be easily deployed thanks to their compact form factor, and their distributed nature lets them support multiple concurrent touch points over a large area. We propose and evaluate a novel set of interactions enabled by these robots: 1) rendering haptics for VR objects by providing just-in-time touch points on the user's hand, 2) simulating continuous surfaces with concurrent height and position changes, and 3) enabling the user to pick up and move VR objects through graspable proxy objects. Finally, we demonstrate HapticBots with various applications, including remote collaboration, education and training, design and 3D modeling, and gaming and entertainment.
Submitted 24 August, 2021; originally announced August 2021.
Comments: UIST 2021
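The abstract says what the robots render but not how they are matched to touch points. A minimal, hypothetical dispatch sketch (not from the paper) that sends the nearest idle robot to a predicted contact and commands its height and tilt:

import math

def dispatch(robots, contact):
    """Assign the nearest idle robot to render one predicted contact point.

    robots: list of dicts with 'pos' (x, y on the table) and 'busy'.
    contact: dict with 'pos', 'height' and 'tilt' of the virtual surface
    sampled under the user's approaching hand.
    """
    idle = [r for r in robots if not r["busy"]]
    if not idle:
        return None  # every robot is already committed this frame
    robot = min(idle, key=lambda r: math.dist(r["pos"], contact["pos"]))
    robot.update(busy=True, target=contact["pos"],
                 height=contact["height"], tilt=contact["tilt"])
    return robot

Because the robots act independently, running something like this once per predicted contact is what would yield the multiple concurrent touch points mentioned above.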
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">UIST 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.02947">arXiv:2009.02947</a> <span> [<a href="https://arxiv.org/pdf/2009.02947">pdf</a>, <a href="https://arxiv.org/format/2009.02947">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Towards a Practical Virtual Office for Mobile Knowledge Workers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ofek%2C+E">Eyal Ofek</a>, <a href="/search/cs?searchtype=author&query=Grubert%2C+J">Jens Grubert</a>, <a href="/search/cs?searchtype=author&query=Pahud%2C+M">Michel Pahud</a>, <a href="/search/cs?searchtype=author&query=Phillips%2C+M">Mark Phillips</a>, <a href="/search/cs?searchtype=author&query=Kristensson%2C+P+O">Per Ola Kristensson</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.02947v1-abstract-short" style="display: inline;"> As more people work from home or during travel, new opportunities and challenges arise around mobile office work. On one hand, people may work at flexible hours, independent of traffic limitations, but on the other hand, they may need to work at makeshift spaces, with less than optimal working conditions and decoupled from co-workers. Virtual Reality (VR) has the potential to change the way inform… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.02947v1-abstract-full').style.display = 'inline'; document.getElementById('2009.02947v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.02947v1-abstract-full" style="display: none;"> As more people work from home or during travel, new opportunities and challenges arise around mobile office work. On one hand, people may work at flexible hours, independent of traffic limitations, but on the other hand, they may need to work at makeshift spaces, with less than optimal working conditions and decoupled from co-workers. Virtual Reality (VR) has the potential to change the way information workers work: it enables personal bespoke working environments even on the go and allows new collaboration approaches that can help mitigate the effects of physical distance. In this paper, we investigate opportunities and challenges for realizing a mobile VR offices environments and discuss implications from recent findings of mixing standard off-the-shelf equipment, such as tablets, laptops or desktops, with VR to enable effective, efficient, ergonomic, and rewarding mobile knowledge work. Further, we investigate the role of conceptual and physical spaces in a mobile VR office. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.02947v1-abstract-full').style.display = 'none'; document.getElementById('2009.02947v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">https://www.microsoft.com/en-us/research/event/new-future-of-work/#!publications</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> H.5.2 </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Microsoft New Future of Work 2020 Symposium </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.02927">arXiv:2009.02927</a> <span> [<a href="https://arxiv.org/pdf/2009.02927">pdf</a>, <a href="https://arxiv.org/format/2009.02927">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Back to the Future: Revisiting Mouse and Keyboard Interaction for HMD-based Immersive Analytics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Grubert%2C+J">Jens Grubert</a>, <a href="/search/cs?searchtype=author&query=Ofek%2C+E">Eyal Ofek</a>, <a href="/search/cs?searchtype=author&query=Pahud%2C+M">Michel Pahud</a>, <a href="/search/cs?searchtype=author&query=Kristensson%2C+P+O">Per Ola Kristensson</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.02927v1-abstract-short" style="display: inline;"> With the rise of natural user interfaces, immersive analytics applications often focus on novel forms of interaction modalities such as mid-air gestures, gaze or tangible interaction utilizing input devices such as depth-sensors, touch screens and eye-trackers. At the same time, traditional input devices such as the physical keyboard and mouse are used to a lesser extent. We argue, that for certai… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.02927v1-abstract-full').style.display = 'inline'; document.getElementById('2009.02927v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.02927v1-abstract-full" style="display: none;"> With the rise of natural user interfaces, immersive analytics applications often focus on novel forms of interaction modalities such as mid-air gestures, gaze or tangible interaction utilizing input devices such as depth-sensors, touch screens and eye-trackers. At the same time, traditional input devices such as the physical keyboard and mouse are used to a lesser extent. 
We argue that, for certain work scenarios such as conducting analytic tasks in stationary desktop settings, it can be valuable to combine the benefits of novel and established input devices and modalities to create productive immersive analytics environments.
Submitted 7 September, 2020; originally announced September 2020.
ACM Class: H.5.2
Journal ref: ACM CHI 2020 4th Workshop on Immersive Analytics: Envisioning Future Productivity for Immersive Analytics

arXiv:2008.04559 (https://arxiv.org/abs/2008.04559) [pdf, other]
cs.HC (Human-Computer Interaction)

Breaking the Screen: Interaction Across Touchscreen Boundaries in Virtual Reality for Mobile Knowledge Workers
Authors: Verena Biener, Daniel Schneider, Travis Gesslein, Alexander Otte, Bastian Kuth, Per Ola Kristensson, Eyal Ofek, Michel Pahud, Jens Grubert
Abstract: Virtual Reality (VR) has the potential to transform knowledge work.
One advantage of VR knowledge work is that it allows extending 2D displays into the third dimension, enabling new operations such as selecting overlapping objects or displaying additional layers of information. On the other hand, mobile knowledge workers often work on established mobile devices, such as tablets, limiting interaction with those devices to a small input space. This challenge of a constrained input space is intensified in situations where VR knowledge work takes place in cramped environments, such as airplanes and touchdown spaces. In this paper, we investigate the feasibility of interacting jointly between an immersive VR head-mounted display and a tablet within the context of knowledge work. Specifically, we 1) design, implement and study how to interact with information that reaches beyond a single physical touchscreen in VR; 2) design and evaluate a set of interaction concepts; and 3) build example applications and gather user feedback on those applications.
Submitted 11 August, 2020; originally announced August 2020.
Comments: 10 pages, 8 figures, ISMAR 2020
ACM Class: I.3.7
Journal ref: IEEE Transactions on Visualization and Computer Graphics, 2020

arXiv:2008.04543 (https://arxiv.org/abs/2008.04543) [pdf, other]
cs.HC (Human-Computer Interaction)

Pen-based Interaction with Spreadsheets in Mobile Virtual Reality
Authors: Travis Gesslein, Verena Biener, Philipp Gagel, Daniel Schneider, Per Ola Kristensson, Eyal Ofek, Michel Pahud, Jens Grubert
Abstract: Virtual Reality (VR) can enhance the display and interaction of mobile knowledge work and, in particular, spreadsheet applications.
While spreadsheets are widely used, they are challenging to interact with, especially on mobile devices, and their use in VR has not been explored in depth. A distinctive feature of this domain is the contrast between the large, immersive display space afforded by VR and the very limited interaction space available to an information worker on the go, such as an airplane seat or a small workspace. To close this gap, we present a tool-set for enhancing spreadsheet interaction on tablets using immersive VR headsets and pen-based input. This combination opens up many possibilities for enhancing productivity in spreadsheet interaction. We propose to use the space around and in front of the tablet for enhanced visualization of spreadsheet data and metadata: for example, extending the sheet display beyond the bounds of the physical screen, or easing debugging by uncovering hidden dependencies between a sheet's cells. Combining the precise on-screen input of a pen with spatial sensing around the tablet, we propose tools for the efficient creation and editing of spreadsheet functions, such as off-the-screen layered menus, visualization of sheet dependencies, and gaze-and-touch-based switching between spreadsheet tabs. We study the feasibility of the proposed tool-set using a video-based online survey and an expert-based assessment of indicative human performance potential.
Submitted 11 August, 2020; originally announced August 2020.
Comments: 10 pages, 11 figures, ISMAR 2020
ACM Class: I.3.7
Journal ref: 2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)
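The dependency visualization mentioned above presupposes the sheet's formula reference graph. That graph is easy to extract; the simplified sketch below (plain A1-style references only, no ranges or cross-sheet references, and not the paper's implementation) shows the underlying data structure:

import re

CELL_REF = re.compile(r"\b([A-Z]+[0-9]+)\b")

def dependency_graph(sheet):
    """Map each formula cell to the set of cells it reads.

    sheet: dict such as {'A1': '3', 'A2': '4', 'B1': '=A1+A2'}.
    """
    return {cell: set(CELL_REF.findall(formula))
            for cell, formula in sheet.items()
            if formula.startswith("=")}

# dependency_graph({'A1': '3', 'A2': '4', 'B1': '=A1+A2'})
# -> {'B1': {'A1', 'A2'}}

Rendering these edges in the space around the tablet is one way the otherwise hidden structure could be surfaced.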
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 11 figures, ISMAR 2020</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.3.7 </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> In 2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR) 2020 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1907.08153">arXiv:1907.08153</a> <span> [<a href="https://arxiv.org/pdf/1907.08153">pdf</a>, <a href="https://arxiv.org/format/1907.08153">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> ReconViguRation: Reconfiguring Physical Keyboards in Virtual Reality </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Schneider%2C+D">Daniel Schneider</a>, <a href="/search/cs?searchtype=author&query=Otte%2C+A">Alexander Otte</a>, <a href="/search/cs?searchtype=author&query=Gesslein%2C+T">Travis Gesslein</a>, <a href="/search/cs?searchtype=author&query=Gagel%2C+P">Philipp Gagel</a>, <a href="/search/cs?searchtype=author&query=Kuth%2C+B">Bastian Kuth</a>, <a href="/search/cs?searchtype=author&query=Damlakhi%2C+M+S">Mohamad Shahm Damlakhi</a>, <a href="/search/cs?searchtype=author&query=Dietz%2C+O">Oliver Dietz</a>, <a href="/search/cs?searchtype=author&query=Ofek%2C+E">Eyal Ofek</a>, <a href="/search/cs?searchtype=author&query=Pahud%2C+M">Michel Pahud</a>, <a href="/search/cs?searchtype=author&query=Kristensson%2C+P+O">Per Ola Kristensson</a>, <a href="/search/cs?searchtype=author&query=M%C3%BCller%2C+J">J枚rg M眉ller</a>, <a href="/search/cs?searchtype=author&query=Grubert%2C+J">Jens Grubert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1907.08153v1-abstract-short" style="display: inline;"> Physical keyboards are common peripherals for personal computers and are efficient standard text entry devices. Recent research has investigated how physical keyboards can be used in immersive head-mounted display-based Virtual Reality (VR). So far, the physical layout of keyboards has typically been transplanted into VR for replicating typing experiences in a standard desktop environment. In th… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1907.08153v1-abstract-full').style.display = 'inline'; document.getElementById('1907.08153v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1907.08153v1-abstract-full" style="display: none;"> Physical keyboards are common peripherals for personal computers and are efficient standard text entry devices. Recent research has investigated how physical keyboards can be used in immersive head-mounted display-based Virtual Reality (VR). So far, the physical layout of keyboards has typically been transplanted into VR for replicating typing experiences in a standard desktop environment. 
In this paper, we explore how to fully leverage the immersiveness of VR to change the input and output characteristics of physical keyboard interaction within a VR environment. This allows individual physical keys to be reconfigured to the same or different actions, and visual output to be distributed in various ways across the VR representation of the keyboard. We explore a set of input and output mappings for reconfiguring the virtual presentation of physical keyboards and probe the resulting design space by designing, implementing and evaluating nine VR-relevant applications: emojis, languages and special characters, application shortcuts, virtual text-processing macros, a window manager, a photo browser, a whack-a-mole game, secure password entry and a virtual touch bar. We investigate the feasibility of the applications in a user study with 20 participants and find that, among other things, they are usable in VR. We discuss the limitations and possibilities of remapping the input and output characteristics of physical keyboards in VR based on empirical findings and analysis, and we suggest future research directions in this area.
Submitted 18 July, 2019; originally announced July 2019.
Comments: to appear
ACM Class: H.5.2
Journal ref: IEEE Transactions on Visualization and Computer Graphics (TVCG), 2019
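At its core, the remapping described above reduces to a per-application lookup from a physical key to an action plus the label drawn on the corresponding VR keycap. A hypothetical minimal sketch (not the study software):

# One remap layer, e.g. for the emoji application: physical key ->
# (action kind, payload). The payload label is rendered on the VR keycap.
EMOJI_LAYER = {
    "KeyA": ("insert_text", "😀"),
    "KeyS": ("insert_text", "🎉"),
    "KeyD": ("run_macro", "signature"),  # same hardware, different action kind
}

def on_key_down(key, layer, default=("insert_text", None)):
    """Resolve a physical key press against the active remap layer."""
    return layer.get(key, default)

# on_key_down("KeyA", EMOJI_LAYER) -> ("insert_text", "😀");
# unmapped keys fall through to their ordinary desktop behavior.

Swapping the layer dict per application would be one way to realize the nine applications' different input mappings; distributing the visual output across the virtual keyboard is then a rendering question.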
arXiv:1812.02197 (https://arxiv.org/abs/1812.02197) [pdf]
cs.HC (Human-Computer Interaction); DOI: 10.1109/MCG.2018.2875609

The Office of the Future: Virtual, Portable and Global
Authors: Jens Grubert, Eyal Ofek, Michel Pahud, Per Ola Kristensson
Abstract: Virtual Reality has the potential to change the way we work. We envision the future office worker being able to work productively everywhere, solely using portable standard input devices and immersive head-mounted displays. Virtual Reality can enable this by allowing users to create working environments of their choice and by relieving them of physical-world limitations such as constrained space or noisy environments. In this article, we investigate opportunities and challenges for realizing this vision and discuss implications from recent findings on text entry in virtual reality as a core office task.
Submitted 5 December, 2018; originally announced December 2018.

arXiv:1804.03211 (https://arxiv.org/abs/1804.03211) [pdf]
cs.HC (Human-Computer Interaction)

Mobiles as Portals for Interacting with Virtual Data Visualizations
Authors: Michel Pahud, Eyal Ofek, Nathalie Henry Riche, Christophe Hurter, Jens Grubert
Abstract: We propose a set of techniques leveraging mobile devices as lenses to explore, interact with and annotate n-dimensional data visualizations. The democratization of mobile devices, with their arrays of integrated sensors, opens up opportunities to create experiences in which anyone can explore and interact with large information spaces anywhere.
In this paper, we propose to revisit ideas behind the Chameleon prototype of Fitzmaurice et al., envisioned in the 90s for navigation, before spatially-aware devices became mainstream. We also take advantage of other input modalities, such as pen and touch, not only to navigate the space using the mobile as a lens, but also to interact with it and annotate it by adding toolglasses.
Submitted 9 April, 2018; originally announced April 2018.

arXiv:1802.00626 (https://arxiv.org/abs/1802.00626) [pdf, other]
cs.HC (Human-Computer Interaction)

Text Entry in Immersive Head-Mounted Display-based Virtual Reality using Standard Keyboards
Authors: Jens Grubert, Lukas Witzani, Eyal Ofek, Michel Pahud, Matthias Kranz, Per Ola Kristensson
Abstract: We study the performance and user experience of two popular mainstream text entry devices, desktop keyboards and touchscreen keyboards, for use in Virtual Reality (VR) applications. We discuss the limitations arising from limited visual feedback and examine the efficiency of different strategies of use.
We analyze a total of 24 hours of typing data in VR from 24 participants and find that novice users are able to retain about 60% of their typing speed on a desktop keyboard and about 40-45% of their typing speed on a touchscreen keyboard. We also find no significant learning effects, indicating that users can quickly transfer their typing skills into VR. Besides investigating baseline performance, we study the position in which keyboards and hands are rendered in space. We find that this does not adversely affect performance for desktop keyboard typing and results in a performance trade-off for touchscreen keyboard typing.
Submitted 2 February, 2018; originally announced February 2018.
Comments: IEEE VR 2018.
arXiv admin note: text overlap with arXiv:1802.00613
ACM Class: H.5.2

arXiv:1802.00613 (https://arxiv.org/abs/1802.00613) [pdf, other]
cs.HC (Human-Computer Interaction)

Effects of Hand Representations for Typing in Virtual Reality
Authors: Jens Grubert, Lukas Witzani, Eyal Ofek, Michel Pahud, Matthias Kranz, Per Ola Kristensson
Abstract: Alphanumeric text entry is a challenge for Virtual Reality (VR) applications. VR enables new capabilities that are impossible in the real world, such as an unobstructed view of the keyboard, without occlusion by the user's physical hands. Several hand representations have been proposed for typing in VR on standard physical keyboards. However, to date, these hand representations have not been compared regarding their performance and their effects on presence for VR text entry. Our work addresses this gap by comparing existing hand representations with minimalistic fingertip visualization. We study the effects of four hand representations (no hand representation, inverse kinematic model, fingertip visualization using spheres, and video inlay) on typing in VR using a standard physical keyboard with 24 participants. We found that fingertip visualization and video inlay both resulted in statistically significantly lower text entry error rates compared to no hand representation or the inverse kinematic model. We found no statistical differences in text entry speed.
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1802.00613v1-abstract-full').style.display = 'none'; document.getElementById('1802.00613v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 February, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">IEEE VR 2018 publication</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> H.5.2 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1701.03963">arXiv:1701.03963</a> <span> [<a href="https://arxiv.org/pdf/1701.03963">pdf</a>, <a href="https://arxiv.org/format/1701.03963">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Towards Interaction Around Unmodified Camera-equipped Mobile Devices </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Grubert%2C+J">Jens Grubert</a>, <a href="/search/cs?searchtype=author&query=Ofek%2C+E">Eyal Ofek</a>, <a href="/search/cs?searchtype=author&query=Pahud%2C+M">Michel Pahud</a>, <a href="/search/cs?searchtype=author&query=Kranz%2C+M">Matthias Kranz</a>, <a href="/search/cs?searchtype=author&query=Schmalstieg%2C+D">Dieter Schmalstieg</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1701.03963v1-abstract-short" style="display: inline;"> Around-device interaction promises to extend the input space of mobile and wearable devices beyond the common but restricted touchscreen. So far, most around-device interaction approaches rely on instrumenting the device or the environment with additional sensors. We believe, that the full potential of ordinary cameras, specifically user-facing cameras, which are integrated in most mobile devices… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1701.03963v1-abstract-full').style.display = 'inline'; document.getElementById('1701.03963v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1701.03963v1-abstract-full" style="display: none;"> Around-device interaction promises to extend the input space of mobile and wearable devices beyond the common but restricted touchscreen. So far, most around-device interaction approaches rely on instrumenting the device or the environment with additional sensors. We believe, that the full potential of ordinary cameras, specifically user-facing cameras, which are integrated in most mobile devices today, are not used to their full potential, yet. We To this end, we present a novel approach for extending the input space around unmodified mobile devices using built-in front-facing cameras of unmodified handheld devices. Our approach estimates hand poses and gestures through reflections in sunglasses, ski goggles or visors. 
Thereby, GlassHands creates an enlarged input space, rivaling input reach on large touch displays. We discuss the idea, its limitations and future work.
Submitted 14 January, 2017; originally announced January 2017.
ACM Class: H.5.2

arXiv:cs/0603084 (https://arxiv.org/abs/cs/0603084) [pdf, ps, other]
cs.CC (Computational Complexity); cs.DS (Data Structures and Algorithms); cs.LO (Logic in Computer Science)

Random 3CNF formulas elude the Lovasz theta function
Authors: Uriel Feige, Eran Ofek
Abstract: Let $\varphi$ be a 3CNF formula with $n$ variables and $m$ clauses. A simple nonconstructive argument shows that when $m$ is sufficiently large compared to $n$, most 3CNF formulas are not satisfiable. It is an open question whether there is an efficient refutation algorithm that, for most such formulas, proves that they are not satisfiable. A possible approach to refute a formula $\varphi$ is: first, translate it into a graph $G_\varphi$ using a generic reduction from 3-SAT to max-IS, then bound the maximum independent set of $G_\varphi$ using the Lovász $\vartheta$ function. If the $\vartheta$ function returns a value $< m$, this is a certificate for the unsatisfiability of $\varphi$. We show that for random formulas with $m < n^{3/2-o(1)}$ clauses the above approach fails, i.e. the $\vartheta$ function is likely to return a value of $m$.
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('cs/0603084v1-abstract-full').style.display = 'none'; document.getElementById('cs/0603084v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 March, 2006; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2006. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" 
target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>