<!-- CINXE.COM (scraper artifact — stray text before the doctype is an HTML parse error, so preserved as a comment) -->
<!-- Search | arXiv e-print repository (scraper artifact duplicating the <title> below; preserved as a comment) -->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='https://static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> /* NOTE(review): 'radio' is not an HTML element, so this selector matches nothing — likely meant input#cf-customfield_11400; confirm the intended honeypot field */ radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–50 of 108 results for author: <span class="mathjax">Shukla, A</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&query=Shukla%2C+A">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Shukla, A"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Shukla%2C+A&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Shukla, A"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&query=Shukla%2C+A&start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&query=Shukla%2C+A&start=0" class="pagination-link is-current" aria-label="Page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&query=Shukla%2C+A&start=50" class="pagination-link " aria-label="Page 2">2 </a> </li> <li> <a href="/search/?searchtype=author&query=Shukla%2C+A&start=100" class="pagination-link " aria-label="Page 3">3 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.06718">arXiv:2410.06718</a> <span> [<a href="https://arxiv.org/pdf/2410.06718">pdf</a>, <a href="https://arxiv.org/format/2410.06718">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" 
data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> MatMamba: A Matryoshka State Space Model </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Abhinav Shukla</a>, <a href="/search/cs?searchtype=author&query=Vemprala%2C+S">Sai Vemprala</a>, <a href="/search/cs?searchtype=author&query=Kusupati%2C+A">Aditya Kusupati</a>, <a href="/search/cs?searchtype=author&query=Kapoor%2C+A">Ashish Kapoor</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.06718v1-abstract-short" style="display: inline;"> State Space Models (SSMs) like Mamba2 are a promising alternative to Transformers, with faster theoretical training and inference times -- especially for long context lengths. Recent work on Matryoshka Representation Learning -- and its application to Transformer backbones in works like MatFormer -- showed how to introduce nested granularities of smaller submodels in one universal elastic model. I… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.06718v1-abstract-full').style.display = 'inline'; document.getElementById('2410.06718v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.06718v1-abstract-full" style="display: none;"> State Space Models (SSMs) like Mamba2 are a promising alternative to Transformers, with faster theoretical training and inference times -- especially for long context lengths. Recent work on Matryoshka Representation Learning -- and its application to Transformer backbones in works like MatFormer -- showed how to introduce nested granularities of smaller submodels in one universal elastic model. 
In this work, we present MatMamba: a state space model which combines Matryoshka-style learning with Mamba2, by modifying the block to contain nested dimensions to enable joint training and adaptive inference. MatMamba allows for efficient and adaptive deployment across various model sizes. We train a single large MatMamba model and are able to get a number of smaller nested models for free -- while maintaining or improving upon the performance of a baseline smaller model trained from scratch. We train language and image models at a variety of parameter sizes from 35M to 1.4B. Our results on ImageNet and FineWeb show that MatMamba models scale comparably to Transformers, while having more efficient inference characteristics. This makes MatMamba a practically viable option for deploying large-scale models in an elastic way based on the available inference compute. Code and models are open sourced at \url{https://github.com/ScaledFoundations/MatMamba} <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.06718v1-abstract-full').style.display = 'none'; document.getElementById('2410.06718v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.00425">arXiv:2410.00425</a> <span> [<a href="https://arxiv.org/pdf/2410.00425">pdf</a>, <a href="https://arxiv.org/format/2410.00425">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> ManiSkill3: GPU Parallelized Robotics Simulation and Rendering for Generalizable Embodied AI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tao%2C+S">Stone Tao</a>, <a href="/search/cs?searchtype=author&query=Xiang%2C+F">Fanbo Xiang</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Arth Shukla</a>, <a href="/search/cs?searchtype=author&query=Qin%2C+Y">Yuzhe Qin</a>, <a href="/search/cs?searchtype=author&query=Hinrichsen%2C+X">Xander Hinrichsen</a>, <a href="/search/cs?searchtype=author&query=Yuan%2C+X">Xiaodi Yuan</a>, <a href="/search/cs?searchtype=author&query=Bao%2C+C">Chen Bao</a>, <a href="/search/cs?searchtype=author&query=Lin%2C+X">Xinsong Lin</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+Y">Yulin Liu</a>, <a href="/search/cs?searchtype=author&query=Chan%2C+T">Tse-kai Chan</a>, <a href="/search/cs?searchtype=author&query=Gao%2C+Y">Yuan Gao</a>, <a href="/search/cs?searchtype=author&query=Li%2C+X">Xuanlin Li</a>, <a href="/search/cs?searchtype=author&query=Mu%2C+T">Tongzhou Mu</a>, <a href="/search/cs?searchtype=author&query=Xiao%2C+N">Nan Xiao</a>, <a href="/search/cs?searchtype=author&query=Gurha%2C+A">Arnav Gurha</a>, <a 
href="/search/cs?searchtype=author&query=Huang%2C+Z">Zhiao Huang</a>, <a href="/search/cs?searchtype=author&query=Calandra%2C+R">Roberto Calandra</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+R">Rui Chen</a>, <a href="/search/cs?searchtype=author&query=Luo%2C+S">Shan Luo</a>, <a href="/search/cs?searchtype=author&query=Su%2C+H">Hao Su</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.00425v1-abstract-short" style="display: inline;"> Simulation has enabled unprecedented compute-scalable approaches to robot learning. However, many existing simulation frameworks typically support a narrow range of scenes/tasks and lack features critical for scaling generalizable robotics and sim2real. We introduce and open source ManiSkill3, the fastest state-visual GPU parallelized robotics simulator with contact-rich physics targeting generali… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00425v1-abstract-full').style.display = 'inline'; document.getElementById('2410.00425v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.00425v1-abstract-full" style="display: none;"> Simulation has enabled unprecedented compute-scalable approaches to robot learning. However, many existing simulation frameworks typically support a narrow range of scenes/tasks and lack features critical for scaling generalizable robotics and sim2real. We introduce and open source ManiSkill3, the fastest state-visual GPU parallelized robotics simulator with contact-rich physics targeting generalizable manipulation. ManiSkill3 supports GPU parallelization of many aspects including simulation+rendering, heterogeneous simulation, pointclouds/voxels visual input, and more. 
Simulation with rendering on ManiSkill3 can run 10-1000x faster with 2-3x less GPU memory usage than other platforms, achieving up to 30,000+ FPS in benchmarked environments due to minimal python/pytorch overhead in the system, simulation on the GPU, and the use of the SAPIEN parallel rendering system. Tasks that used to take hours to train can now take minutes. We further provide the most comprehensive range of GPU parallelized environments/tasks spanning 12 distinct domains including but not limited to mobile manipulation for tasks such as drawing, humanoids, and dextrous manipulation in realistic scenes designed by artists or real-world digital twins. In addition, millions of demonstration frames are provided from motion planning, RL, and teleoperation. ManiSkill3 also provides a comprehensive set of baselines that span popular RL and learning-from-demonstrations algorithms. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.00425v1-abstract-full').style.display = 'none'; document.getElementById('2410.00425v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project website: http://maniskill.ai/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.07858">arXiv:2407.07858</a> <span> [<a href="https://arxiv.org/pdf/2407.07858">pdf</a>, <a href="https://arxiv.org/format/2407.07858">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> FACTS About Building Retrieval Augmented Generation-based Chatbots </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Akkiraju%2C+R">Rama Akkiraju</a>, <a href="/search/cs?searchtype=author&query=Xu%2C+A">Anbang Xu</a>, <a href="/search/cs?searchtype=author&query=Bora%2C+D">Deepak Bora</a>, <a href="/search/cs?searchtype=author&query=Yu%2C+T">Tan Yu</a>, <a href="/search/cs?searchtype=author&query=An%2C+L">Lu An</a>, <a href="/search/cs?searchtype=author&query=Seth%2C+V">Vishal Seth</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Aaditya Shukla</a>, <a href="/search/cs?searchtype=author&query=Gundecha%2C+P">Pritam Gundecha</a>, <a href="/search/cs?searchtype=author&query=Mehta%2C+H">Hridhay Mehta</a>, <a href="/search/cs?searchtype=author&query=Jha%2C+A">Ashwin Jha</a>, <a href="/search/cs?searchtype=author&query=Raj%2C+P">Prithvi Raj</a>, <a href="/search/cs?searchtype=author&query=Balasubramanian%2C+A">Abhinav Balasubramanian</a>, <a href="/search/cs?searchtype=author&query=Maram%2C+M">Murali Maram</a>, <a href="/search/cs?searchtype=author&query=Muthusamy%2C+G">Guru Muthusamy</a>, <a 
href="/search/cs?searchtype=author&query=Annepally%2C+S+R">Shivakesh Reddy Annepally</a>, <a href="/search/cs?searchtype=author&query=Knowles%2C+S">Sidney Knowles</a>, <a href="/search/cs?searchtype=author&query=Du%2C+M">Min Du</a>, <a href="/search/cs?searchtype=author&query=Burnett%2C+N">Nick Burnett</a>, <a href="/search/cs?searchtype=author&query=Javiya%2C+S">Sean Javiya</a>, <a href="/search/cs?searchtype=author&query=Marannan%2C+A">Ashok Marannan</a>, <a href="/search/cs?searchtype=author&query=Kumari%2C+M">Mamta Kumari</a>, <a href="/search/cs?searchtype=author&query=Jha%2C+S">Surbhi Jha</a>, <a href="/search/cs?searchtype=author&query=Dereszenski%2C+E">Ethan Dereszenski</a>, <a href="/search/cs?searchtype=author&query=Chakraborty%2C+A">Anupam Chakraborty</a>, <a href="/search/cs?searchtype=author&query=Ranjan%2C+S">Subhash Ranjan</a> , et al. (13 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.07858v1-abstract-short" style="display: inline;"> Enterprise chatbots, powered by generative AI, are emerging as key applications to enhance employee productivity. Retrieval Augmented Generation (RAG), Large Language Models (LLMs), and orchestration frameworks like Langchain and Llamaindex are crucial for building these chatbots. However, creating effective enterprise chatbots is challenging and requires meticulous RAG pipeline engineering. This… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.07858v1-abstract-full').style.display = 'inline'; document.getElementById('2407.07858v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.07858v1-abstract-full" style="display: none;"> Enterprise chatbots, powered by generative AI, are emerging as key applications to enhance employee productivity. 
Retrieval Augmented Generation (RAG), Large Language Models (LLMs), and orchestration frameworks like Langchain and Llamaindex are crucial for building these chatbots. However, creating effective enterprise chatbots is challenging and requires meticulous RAG pipeline engineering. This includes fine-tuning embeddings and LLMs, extracting documents from vector databases, rephrasing queries, reranking results, designing prompts, honoring document access controls, providing concise responses, including references, safeguarding personal information, and building orchestration agents. We present a framework for building RAG-based chatbots based on our experience with three NVIDIA chatbots: for IT/HR benefits, financial earnings, and general content. Our contributions are three-fold: introducing the FACTS framework (Freshness, Architectures, Cost, Testing, Security), presenting fifteen RAG pipeline control points, and providing empirical results on accuracy-latency tradeoffs between large and small LLMs. To the best of our knowledge, this is the first paper of its kind that provides a holistic view of the factors as well as solutions for building secure enterprise-grade chatbots." <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.07858v1-abstract-full').style.display = 'none'; document.getElementById('2407.07858v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 6 figures, 2 tables, Preprint submission to ACM CIKM 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.05315">arXiv:2407.05315</a> <span> [<a href="https://arxiv.org/pdf/2407.05315">pdf</a>, <a href="https://arxiv.org/format/2407.05315">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Algebraic Topology">math.AT</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.engappai.2023.107719">10.1016/j.engappai.2023.107719 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Topological Persistence Guided Knowledge Distillation for Wearable Sensor Data </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Jeon%2C+E+S">Eun Som Jeon</a>, <a href="/search/cs?searchtype=author&query=Choi%2C+H">Hongjun Choi</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ankita Shukla</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Y">Yuan Wang</a>, <a href="/search/cs?searchtype=author&query=Lee%2C+H">Hyunglae Lee</a>, <a href="/search/cs?searchtype=author&query=Buman%2C+M+P">Matthew P. 
Buman</a>, <a href="/search/cs?searchtype=author&query=Turaga%2C+P">Pavan Turaga</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.05315v1-abstract-short" style="display: inline;"> Deep learning methods have achieved a lot of success in various applications involving converting wearable sensor data to actionable health insights. A common application areas is activity recognition, where deep-learning methods still suffer from limitations such as sensitivity to signal quality, sensor characteristic variations, and variability between subjects. To mitigate these issues, robust… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.05315v1-abstract-full').style.display = 'inline'; document.getElementById('2407.05315v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.05315v1-abstract-full" style="display: none;"> Deep learning methods have achieved a lot of success in various applications involving converting wearable sensor data to actionable health insights. A common application areas is activity recognition, where deep-learning methods still suffer from limitations such as sensitivity to signal quality, sensor characteristic variations, and variability between subjects. To mitigate these issues, robust features obtained by topological data analysis (TDA) have been suggested as a potential solution. However, there are two significant obstacles to using topological features in deep learning: (1) large computational load to extract topological features using TDA, and (2) different signal representations obtained from deep learning and TDA which makes fusion difficult. 
In this paper, to enable integration of the strengths of topological methods in deep-learning for time-series data, we propose to use two teacher networks, one trained on the raw time-series data, and another trained on persistence images generated by TDA methods. The distilled student model utilizes only the raw time-series data at test-time. This approach addresses both issues. The use of KD with multiple teachers utilizes complementary information, and results in a compact model with strong supervisory features and an integrated richer representation. To assimilate desirable information from different modalities, we design new constraints, including orthogonality imposed on feature correlation maps for improving feature expressiveness and allowing the student to easily learn from the teacher. Also, we apply an annealing strategy in KD for fast saturation and better accommodation from different features, while the knowledge gap between the teachers and student is reduced. Finally, a robust student model is distilled, which uses only the time-series data as an input, while implicitly preserving topological features. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.05315v1-abstract-full').style.display = 'none'; document.getElementById('2407.05315v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Engineering Applications of Artificial Intelligence 130, 107719</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Engineering Applications of Artificial Intelligence, 130, 107719 (2024) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.03379">arXiv:2405.03379</a> <span> [<a href="https://arxiv.org/pdf/2405.03379">pdf</a>, <a href="https://arxiv.org/format/2405.03379">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Reverse Forward Curriculum Learning for Extreme Sample and Demonstration Efficiency in Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tao%2C+S">Stone Tao</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Arth Shukla</a>, <a href="/search/cs?searchtype=author&query=Chan%2C+T">Tse-kai Chan</a>, <a href="/search/cs?searchtype=author&query=Su%2C+H">Hao Su</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.03379v1-abstract-short" style="display: inline;"> Reinforcement learning (RL) presents a promising framework to learn policies through environment interaction, but often requires an infeasible amount of interaction data to solve complex tasks from sparse rewards. 
One direction includes augmenting RL with offline data demonstrating desired tasks, but past work often require a lot of high-quality demonstration data that is difficult to obtain, espe… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.03379v1-abstract-full').style.display = 'inline'; document.getElementById('2405.03379v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.03379v1-abstract-full" style="display: none;"> Reinforcement learning (RL) presents a promising framework to learn policies through environment interaction, but often requires an infeasible amount of interaction data to solve complex tasks from sparse rewards. One direction includes augmenting RL with offline data demonstrating desired tasks, but past work often require a lot of high-quality demonstration data that is difficult to obtain, especially for domains such as robotics. Our approach consists of a reverse curriculum followed by a forward curriculum. Unique to our approach compared to past work is the ability to efficiently leverage more than one demonstration via a per-demonstration reverse curriculum generated via state resets. The result of our reverse curriculum is an initial policy that performs well on a narrow initial state distribution and helps overcome difficult exploration problems. A forward curriculum is then used to accelerate the training of the initial policy to perform well on the full initial state distribution of the task and improve demonstration and sample efficiency. We show how the combination of a reverse curriculum and forward curriculum in our method, RFCL, enables significant improvements in demonstration and sample efficiency compared against various state-of-the-art learning-from-demonstration baselines, even solving previously unsolvable tasks that require high precision and control. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.03379v1-abstract-full').style.display = 'none'; document.getElementById('2405.03379v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at The Twelfth International Conference on Learning Representations (ICLR 2024). Website: https://reverseforward-cl.github.io/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.01592">arXiv:2405.01592</a> <span> [<a href="https://arxiv.org/pdf/2405.01592">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Text and Audio Simplification: Human vs. 
ChatGPT </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Leroy%2C+G">Gondy Leroy</a>, <a href="/search/cs?searchtype=author&query=Kauchak%2C+D">David Kauchak</a>, <a href="/search/cs?searchtype=author&query=Harber%2C+P">Philip Harber</a>, <a href="/search/cs?searchtype=author&query=Pal%2C+A">Ankit Pal</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Akash Shukla</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.01592v1-abstract-short" style="display: inline;"> Text and audio simplification to increase information comprehension are important in healthcare. With the introduction of ChatGPT, an evaluation of its simplification performance is needed. We provide a systematic comparison of human and ChatGPT simplified texts using fourteen metrics indicative of text difficulty. We briefly introduce our online editor where these simplification tools, including… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.01592v1-abstract-full').style.display = 'inline'; document.getElementById('2405.01592v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.01592v1-abstract-full" style="display: none;"> Text and audio simplification to increase information comprehension are important in healthcare. With the introduction of ChatGPT, an evaluation of its simplification performance is needed. We provide a systematic comparison of human and ChatGPT simplified texts using fourteen metrics indicative of text difficulty. We briefly introduce our online editor where these simplification tools, including ChatGPT, are available. We scored twelve corpora using our metrics: six text, one audio, and five ChatGPT simplified corpora. 
We then compare these corpora with texts simplified and verified in a prior user study. Finally, a medical domain expert evaluated these texts and five, new ChatGPT simplified versions. We found that simple corpora show higher similarity with the human simplified texts. ChatGPT simplification moves metrics in the right direction. The medical domain expert evaluation showed a preference for the ChatGPT style, but the text itself was rated lower for content retention. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.01592v1-abstract-full').style.display = 'none'; document.getElementById('2405.01592v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">AMIA Summit, Boston, 2024</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> H.4 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.10212">arXiv:2404.10212</a> <span> [<a href="https://arxiv.org/pdf/2404.10212">pdf</a>, <a href="https://arxiv.org/format/2404.10212">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> LWIRPOSE: A novel LWIR Thermal Image Dataset and Benchmark </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Upadhyay%2C+A">Avinash Upadhyay</a>, <a href="/search/cs?searchtype=author&query=Dhupar%2C+B">Bhipanshu 
Dhupar</a>, <a href="/search/cs?searchtype=author&query=Sharma%2C+M">Manoj Sharma</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ankit Shukla</a>, <a href="/search/cs?searchtype=author&query=Abraham%2C+A">Ajith Abraham</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.10212v1-abstract-short" style="display: inline;"> Human pose estimation faces hurdles in real-world applications due to factors like lighting changes, occlusions, and cluttered environments. We introduce a unique RGB-Thermal Nearly Paired and Annotated 2D Pose Dataset, comprising over 2,400 high-quality LWIR (thermal) images. Each image is meticulously annotated with 2D human poses, offering a valuable resource for researchers and practitioners.… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.10212v1-abstract-full').style.display = 'inline'; document.getElementById('2404.10212v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.10212v1-abstract-full" style="display: none;"> Human pose estimation faces hurdles in real-world applications due to factors like lighting changes, occlusions, and cluttered environments. We introduce a unique RGB-Thermal Nearly Paired and Annotated 2D Pose Dataset, comprising over 2,400 high-quality LWIR (thermal) images. Each image is meticulously annotated with 2D human poses, offering a valuable resource for researchers and practitioners. This dataset, captured from seven actors performing diverse everyday activities like sitting, eating, and walking, facilitates pose estimation on occlusion and other challenging scenarios. We benchmark state-of-the-art pose estimation methods on the dataset to showcase its potential, establishing a strong baseline for future research. 
Our results demonstrate the dataset's effectiveness in promoting advancements in pose estimation for various applications, including surveillance, healthcare, and sports analytics. The dataset and code are available at https://github.com/avinres/LWIRPOSE <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.10212v1-abstract-full').style.display = 'none'; document.getElementById('2404.10212v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted in ICIP2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.00613">arXiv:2404.00613</a> <span> [<a href="https://arxiv.org/pdf/2404.00613">pdf</a>, <a href="https://arxiv.org/ps/2404.00613">ps</a>, <a href="https://arxiv.org/format/2404.00613">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> On $(θ, Θ)$-cyclic codes and their applications in constructing QECCs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shukla%2C+A+K">Awadhesh Kumar Shukla</a>, <a href="/search/cs?searchtype=author&query=Pathak%2C+S">Sachin Pathak</a>, <a href="/search/cs?searchtype=author&query=Pandey%2C+O+P">Om Prakash Pandey</a>, <a href="/search/cs?searchtype=author&query=Mishra%2C+V">Vipul Mishra</a>, <a href="/search/cs?searchtype=author&query=Upadhyay%2C+A+K">Ashish Kumar Upadhyay</a> </p> <p class="abstract mathjax"> 
<span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.00613v1-abstract-short" style="display: inline;"> Let $\mathbb F_q$ be a finite field, where $q$ is an odd prime power. Let $R=\mathbb{F}_q+u\mathbb{F}_q+v\mathbb{F}_q+uv\mathbb F_q$ with $u^2=u,v^2=v,uv=vu$. In this paper, we study the algebraic structure of $(θ, Θ)$-cyclic codes of block length $(r,s )$ over $\mathbb{F}_qR.$ Specifically, we analyze the structure of these codes as left $R[x:Θ]$-submodules of… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.00613v1-abstract-full').style.display = 'inline'; document.getElementById('2404.00613v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.00613v1-abstract-full" style="display: none;"> Let $\mathbb F_q$ be a finite field, where $q$ is an odd prime power. Let $R=\mathbb{F}_q+u\mathbb{F}_q+v\mathbb{F}_q+uv\mathbb F_q$ with $u^2=u,v^2=v,uv=vu$. In this paper, we study the algebraic structure of $(θ, Θ)$-cyclic codes of block length $(r,s )$ over $\mathbb{F}_qR.$ Specifically, we analyze the structure of these codes as left $R[x:Θ]$-submodules of $\mathfrak{R}_{r,s} = \frac{\mathbb{F}_q[x:θ]}{\langle x^r-1\rangle} \times \frac{R[x:Θ]}{\langle x^s-1\rangle}$. Our investigation involves determining generator polynomials and minimal generating sets for this family of codes. Further, we discuss the algebraic structure of separable codes. A relationship between the generator polynomials of $(θ, Θ)$-cyclic codes over $\mathbb F_qR$ and their duals is established. Moreover, we calculate the generator polynomials of dual of $(θ, Θ)$-cyclic codes. As an application of our study, we provide a construction of quantum error-correcting codes (QECCs) from $(θ, Θ)$-cyclic codes of block length $(r,s)$ over $\mathbb{F}_qR$. 
We support our theoretical results with illustrative examples. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.00613v1-abstract-full').style.display = 'none'; document.getElementById('2404.00613v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">30 pages, 4 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.03737">arXiv:2402.03737</a> <span> [<a href="https://arxiv.org/pdf/2402.03737">pdf</a>, <a href="https://arxiv.org/ps/2402.03737">ps</a>, <a href="https://arxiv.org/format/2402.03737">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Differentially Private High Dimensional Bandits </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Apurv Shukla</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short 
has-text-grey-dark mathjax" id="2402.03737v1-abstract-short" style="display: inline;"> We consider a high-dimensional stochastic contextual linear bandit problem when the parameter vector is $s_{0}$-sparse and the decision maker is subject to privacy constraints under both central and local models of differential privacy. We present PrivateLASSO, a differentially private LASSO bandit algorithm. PrivateLASSO is based on two sub-routines: (i) a sparse hard-thresholding-based privacy m… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.03737v1-abstract-full').style.display = 'inline'; document.getElementById('2402.03737v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.03737v1-abstract-full" style="display: none;"> We consider a high-dimensional stochastic contextual linear bandit problem when the parameter vector is $s_{0}$-sparse and the decision maker is subject to privacy constraints under both central and local models of differential privacy. We present PrivateLASSO, a differentially private LASSO bandit algorithm. PrivateLASSO is based on two sub-routines: (i) a sparse hard-thresholding-based privacy mechanism and (ii) an episodic thresholding rule for identifying the support of the parameter $θ$. We prove minimax private lower bounds and establish privacy and utility guarantees for PrivateLASSO for the central model under standard assumptions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.03737v1-abstract-full').style.display = 'none'; document.getElementById('2402.03737v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.09185">arXiv:2401.09185</a> <span> [<a href="https://arxiv.org/pdf/2401.09185">pdf</a>, <a href="https://arxiv.org/format/2401.09185">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Programming Languages">cs.PL</span> </div> </div> <p class="title is-5 mathjax"> Behavior Trees with Dataflow: Coordinating Reactive Tasks in Lingua Franca </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Schulz-Rosengarten%2C+A">Alexander Schulz-Rosengarten</a>, <a href="/search/cs?searchtype=author&query=Ahmad%2C+A">Akash Ahmad</a>, <a href="/search/cs?searchtype=author&query=Clement%2C+M">Malte Clement</a>, <a href="/search/cs?searchtype=author&query=von+Hanxleden%2C+R">Reinhard von Hanxleden</a>, <a href="/search/cs?searchtype=author&query=Asch%2C+B">Benjamin Asch</a>, <a href="/search/cs?searchtype=author&query=Lohstroh%2C+M">Marten Lohstroh</a>, <a href="/search/cs?searchtype=author&query=Lee%2C+E+A">Edward A. Lee</a>, <a href="/search/cs?searchtype=author&query=Araya%2C+G+Q">Gustavo Quiros Araya</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ankit Shukla</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.09185v1-abstract-short" style="display: inline;"> Behavior Trees (BTs) provide a lean set of control flow elements that are easily composable in a modular tree structure. They are well established for modeling the high-level behavior of non-player characters in computer games and recently gained popularity in other areas such as industrial automation. While BTs nicely express control, data handling aspects so far must be provided separately, e. 
g… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.09185v1-abstract-full').style.display = 'inline'; document.getElementById('2401.09185v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.09185v1-abstract-full" style="display: none;"> Behavior Trees (BTs) provide a lean set of control flow elements that are easily composable in a modular tree structure. They are well established for modeling the high-level behavior of non-player characters in computer games and recently gained popularity in other areas such as industrial automation. While BTs nicely express control, data handling aspects so far must be provided separately, e. g. in the form of blackboards. This may hamper reusability and can be a source of nondeterminism. We here present a dataflow extension to BTs that explicitly models data relations and communication. We provide a combined textual/graphical approach in line with modern, productivity-enhancing pragmatics-aware modeling techniques. We realized and validated that approach in the recently introduced polyglot coordination language Lingua Franca (LF). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.09185v1-abstract-full').style.display = 'none'; document.getElementById('2401.09185v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.02158">arXiv:2401.02158</a> <span> [<a href="https://arxiv.org/pdf/2401.02158">pdf</a>, <a href="https://arxiv.org/format/2401.02158">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Shayona@SMM4H23: COVID-19 Self diagnosis classification using BERT and LightGBM models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chavda%2C+R">Rushi Chavda</a>, <a href="/search/cs?searchtype=author&query=Makwana%2C+D">Darshan Makwana</a>, <a href="/search/cs?searchtype=author&query=Patel%2C+V">Vraj Patel</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Anupam Shukla</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.02158v1-abstract-short" style="display: inline;"> This paper describes approaches and results for shared Task 1 and 4 of SMMH4-23 by Team Shayona. Shared Task-1 was binary classification of english tweets self-reporting a COVID-19 diagnosis, and Shared Task-4 was Binary classification of English Reddit posts self-reporting a social anxiety disorder diagnosis. Our team has achieved the highest f1-score 0.94 in Task-1 among all participants. 
We hav… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.02158v1-abstract-full').style.display = 'inline'; document.getElementById('2401.02158v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.02158v1-abstract-full" style="display: none;"> This paper describes approaches and results for shared Task 1 and 4 of SMMH4-23 by Team Shayona. Shared Task-1 was binary classification of english tweets self-reporting a COVID-19 diagnosis, and Shared Task-4 was Binary classification of English Reddit posts self-reporting a social anxiety disorder diagnosis. Our team has achieved the highest f1-score 0.94 in Task-1 among all participants. We have leveraged the Transformer model (BERT) in combination with the LightGBM model for both tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.02158v1-abstract-full').style.display = 'none'; document.getElementById('2401.02158v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.16338">arXiv:2311.16338</a> <span> [<a href="https://arxiv.org/pdf/2311.16338">pdf</a>, <a href="https://arxiv.org/format/2311.16338">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Releasing the CRaQAn (Coreference Resolution in Question-Answering): An open-source dataset and dataset creation methodology using instruction-following models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Grzywinski%2C+R">Rob Grzywinski</a>, <a href="/search/cs?searchtype=author&query=D%27Arcy%2C+J">Joshua D'Arcy</a>, <a href="/search/cs?searchtype=author&query=Naidoff%2C+R">Rob Naidoff</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ashish Shukla</a>, <a href="/search/cs?searchtype=author&query=Browne%2C+A">Alex Browne</a>, <a href="/search/cs?searchtype=author&query=Gibbons%2C+R">Ren Gibbons</a>, <a href="/search/cs?searchtype=author&query=Bent%2C+B">Brinnae Bent</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.16338v1-abstract-short" style="display: inline;"> Instruction-following language models demand robust methodologies for information retrieval to augment instructions for question-answering applications. A primary challenge is the resolution of coreferences in the context of chunking strategies for long documents. 
The critical barrier to experimentation of handling coreferences is a lack of open source datasets, specifically in question-answering… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.16338v1-abstract-full').style.display = 'inline'; document.getElementById('2311.16338v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.16338v1-abstract-full" style="display: none;"> Instruction-following language models demand robust methodologies for information retrieval to augment instructions for question-answering applications. A primary challenge is the resolution of coreferences in the context of chunking strategies for long documents. The critical barrier to experimentation of handling coreferences is a lack of open source datasets, specifically in question-answering tasks that require coreference resolution. In this work we present our Coreference Resolution in Question-Answering (CRaQAn) dataset, an open-source dataset that caters to the nuanced information retrieval requirements of coreference resolution in question-answering tasks by providing over 250 question-answer pairs containing coreferences. To develop this dataset, we developed a novel approach for creating high-quality datasets using an instruction-following model (GPT-4) and a Recursive Criticism and Improvement Loop. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.16338v1-abstract-full').style.display = 'none'; document.getElementById('2311.16338v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">NeurIPS 2023 Workshop on Instruction Tuning and Instruction Following</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.05079">arXiv:2311.05079</a> <span> [<a href="https://arxiv.org/pdf/2311.05079">pdf</a>, <a href="https://arxiv.org/format/2311.05079">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Social Media Bot Detection using Dropout-GAN </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Anant Shukla</a>, <a href="/search/cs?searchtype=author&query=Jurecek%2C+M">Martin Jurecek</a>, <a href="/search/cs?searchtype=author&query=Stamp%2C+M">Mark Stamp</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.05079v1-abstract-short" style="display: inline;"> Bot activity on social media platforms is a pervasive problem, undermining the credibility of online discourse and potentially leading to cybercrime. We propose an approach to bot detection using Generative Adversarial Networks (GAN). 
We discuss how we overcome the issue of mode collapse by utilizing multiple discriminators to train against one generator, while decoupling the discriminator to perf… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.05079v1-abstract-full').style.display = 'inline'; document.getElementById('2311.05079v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.05079v1-abstract-full" style="display: none;"> Bot activity on social media platforms is a pervasive problem, undermining the credibility of online discourse and potentially leading to cybercrime. We propose an approach to bot detection using Generative Adversarial Networks (GAN). We discuss how we overcome the issue of mode collapse by utilizing multiple discriminators to train against one generator, while decoupling the discriminator to perform social media bot detection and utilizing the generator for data augmentation. In terms of classification accuracy, our approach outperforms the state-of-the-art techniques in this field. We also show how the generator in the GAN can be used to evade such a classification technique. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.05079v1-abstract-full').style.display = 'none'; document.getElementById('2311.05079v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.00887">arXiv:2310.00887</a> <span> [<a href="https://arxiv.org/pdf/2310.00887">pdf</a>, <a href="https://arxiv.org/format/2310.00887">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> GRID: A Platform for General Robot Intelligence Development </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Vemprala%2C+S">Sai Vemprala</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+S">Shuhang Chen</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Abhinav Shukla</a>, <a href="/search/cs?searchtype=author&query=Narayanan%2C+D">Dinesh Narayanan</a>, <a href="/search/cs?searchtype=author&query=Kapoor%2C+A">Ashish Kapoor</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.00887v2-abstract-short" style="display: inline;"> Developing machine intelligence abilities in robots and autonomous systems is an expensive and time consuming process. Existing solutions are tailored to specific applications and are harder to generalize. Furthermore, scarcity of training data adds a layer of complexity in deploying deep machine learning models. 
We present a new platform for General Robot Intelligence Development (GRID) to addres… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.00887v2-abstract-full').style.display = 'inline'; document.getElementById('2310.00887v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.00887v2-abstract-full" style="display: none;"> Developing machine intelligence abilities in robots and autonomous systems is an expensive and time consuming process. Existing solutions are tailored to specific applications and are harder to generalize. Furthermore, scarcity of training data adds a layer of complexity in deploying deep machine learning models. We present a new platform for General Robot Intelligence Development (GRID) to address both of these issues. The platform enables robots to learn, compose and adapt skills to their physical capabilities, environmental constraints and goals. The platform addresses AI problems in robotics via foundation models that know the physical world. GRID is designed from the ground up to be extensible to accommodate new types of robots, vehicles, hardware platforms and software protocols. In addition, the modular design enables various deep ML components and existing foundation models to be easily usable in a wider variety of robot-centric problems. We demonstrate the platform in various aerial robotics scenarios and demonstrate how the platform dramatically accelerates development of machine intelligent robots. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.00887v2-abstract-full').style.display = 'none'; document.getElementById('2310.00887v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.00207">arXiv:2310.00207</a> <span> [<a href="https://arxiv.org/pdf/2310.00207">pdf</a>, <a href="https://arxiv.org/ps/2310.00207">ps</a>, <a href="https://arxiv.org/format/2310.00207">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Detecting Unseen Multiword Expressions in American Sign Language </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Kezar%2C+L">Lee Kezar</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Aryan Shukla</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.00207v1-abstract-short" style="display: inline;"> Multiword expressions present unique challenges in many translation tasks. 
In an attempt to ultimately apply a multiword expression detection system to the translation of American Sign Language, we built and tested two systems that apply word embeddings from GloVe to determine whether or not the word embeddings of lexemes can be used to predict whether or not those lexemes compose a multiword expr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.00207v1-abstract-full').style.display = 'inline'; document.getElementById('2310.00207v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.00207v1-abstract-full" style="display: none;"> Multiword expressions present unique challenges in many translation tasks. In an attempt to ultimately apply a multiword expression detection system to the translation of American Sign Language, we built and tested two systems that apply word embeddings from GloVe to determine whether or not the word embeddings of lexemes can be used to predict whether or not those lexemes compose a multiword expression. It became apparent that word embeddings carry data that can detect non-compositionality with decent accuracy. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.00207v1-abstract-full').style.display = 'none'; document.getElementById('2310.00207v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Technical report, unpublished</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2308.12129">arXiv:2308.12129</a> <span> [<a href="https://arxiv.org/pdf/2308.12129">pdf</a>, <a href="https://arxiv.org/format/2308.12129">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> Resiliency Analysis of LLM generated models for Industrial Automation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ogundare%2C+O">Oluwatosin Ogundare</a>, <a href="/search/cs?searchtype=author&query=Araya%2C+G+Q">Gustavo Quiros Araya</a>, <a href="/search/cs?searchtype=author&query=Akrotirianakis%2C+I">Ioannis Akrotirianakis</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ankit Shukla</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2308.12129v1-abstract-short" style="display: inline;"> This paper proposes a study of the resilience and efficiency of automatically generated industrial automation and control systems using Large Language Models (LLMs). The approach involves modeling the system using percolation theory to estimate its resilience and formulating the design problem as an optimization problem subject to constraints. 
Techniques from stochastic optimization and regret ana… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.12129v1-abstract-full').style.display = 'inline'; document.getElementById('2308.12129v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2308.12129v1-abstract-full" style="display: none;"> This paper proposes a study of the resilience and efficiency of automatically generated industrial automation and control systems using Large Language Models (LLMs). The approach involves modeling the system using percolation theory to estimate its resilience and formulating the design problem as an optimization problem subject to constraints. Techniques from stochastic optimization and regret analysis are used to find a near-optimal solution with provable regret bounds. The study aims to provide insights into the effectiveness and reliability of automatically generated systems in industrial automation and control, and to identify potential areas for improvement in their design and implementation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2308.12129v1-abstract-full').style.display = 'none'; document.getElementById('2308.12129v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 August, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 Pages, Conference Manuscript</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.15768">arXiv:2306.15768</a> <span> [<a href="https://arxiv.org/pdf/2306.15768">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> An Efficient Deep Convolutional Neural Network Model For Yoga Pose Recognition Using Single Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yadav%2C+S+K">Santosh Kumar Yadav</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Apurv Shukla</a>, <a href="/search/cs?searchtype=author&query=Tiwari%2C+K">Kamlesh Tiwari</a>, <a href="/search/cs?searchtype=author&query=Pandey%2C+H+M">Hari Mohan Pandey</a>, <a href="/search/cs?searchtype=author&query=Akbar%2C+S+A">Shaik Ali Akbar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.15768v1-abstract-short" style="display: inline;"> Pose recognition deals with designing algorithms to locate human body joints in a 2D/3D space and run inference on the estimated joint locations for predicting the poses. Yoga poses consist of some very complex postures. It imposes various challenges on the computer vision algorithms like occlusion, inter-class similarity, intra-class variability, viewpoint complexity, etc. 
This paper presents YPo… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.15768v1-abstract-full').style.display = 'inline'; document.getElementById('2306.15768v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.15768v1-abstract-full" style="display: none;"> Pose recognition deals with designing algorithms to locate human body joints in a 2D/3D space and run inference on the estimated joint locations for predicting the poses. Yoga poses consist of some very complex postures. It imposes various challenges on the computer vision algorithms like occlusion, inter-class similarity, intra-class variability, viewpoint complexity, etc. This paper presents YPose, an efficient deep convolutional neural network (CNN) model to recognize yoga asanas from RGB images. The proposed model consists of four steps as follows: (a) first, the region of interest (ROI) is segmented using segmentation based approaches to extract the ROI from the original images; (b) second, these refined images are passed to a CNN architecture based on the backbone of EfficientNets for feature extraction; (c) third, dense refinement blocks, adapted from the architecture of densely connected networks are added to learn more diversified features; and (d) fourth, global average pooling and fully connected layers are applied for the classification of the multi-level hierarchy of the yoga poses. The proposed model has been tested on the Yoga-82 dataset. It is a publicly available benchmark dataset for yoga pose recognition. Experimental results show that the proposed model achieves the state-of-the-art on this dataset. The proposed model obtained an accuracy of 93.28%, which is an improvement over the earlier state-of-the-art (79.35%) with a margin of approximately 13.9%. The code will be made publicly available. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.15768v1-abstract-full').style.display = 'none'; document.getElementById('2306.15768v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.06414">arXiv:2305.06414</a> <span> [<a href="https://arxiv.org/pdf/2305.06414">pdf</a>, <a href="https://arxiv.org/format/2305.06414">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Emerging Technologies">cs.ET</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Dynamical Systems">math.DS</span> </div> </div> <p class="title is-5 mathjax"> Self-contained relaxation-based dynamical Ising machines </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Erementchouk%2C+M">Mikhail Erementchouk</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Aditya Shukla</a>, <a href="/search/cs?searchtype=author&query=Mazumder%2C+P">Pinaki Mazumder</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.06414v1-abstract-short" style="display: inline;"> Dynamical Ising machines are continuous dynamical systems evolving from a generic initial state to a state strongly related to the ground state of the classical Ising model on a graph. 
Reaching the ground state is equivalent to finding the maximum (weighted) cut of the graph, which presents the Ising machines as an alternative way to solving and investigating NP-complete problems. Among the dynami… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.06414v1-abstract-full').style.display = 'inline'; document.getElementById('2305.06414v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.06414v1-abstract-full" style="display: none;"> Dynamical Ising machines are continuous dynamical systems evolving from a generic initial state to a state strongly related to the ground state of the classical Ising model on a graph. Reaching the ground state is equivalent to finding the maximum (weighted) cut of the graph, which presents the Ising machines as an alternative way to solving and investigating NP-complete problems. Among the dynamical models driving the Ising machines, relaxation-based models are especially interesting because of their relations with guarantees of performance achieved in time scaling polynomially with the problem size. However, the terminal states of such machines are essentially non-binary, which necessitates special post-processing relying on disparate computing. We show that an Ising machine implementing a special dynamical system (called \mdII{}) solves the rounding problem dynamically. We prove that the \mdII-machine starting from an arbitrary non-binary state terminates in a state, which trivially rounds to a binary state with the cut at least as big as obtained after the optimal rounding of the initial state. Besides showing that relaxation-based dynamical Ising machines can be made self-contained, our findings demonstrate that dynamical systems can directly perform complex information processing tasks. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.06414v1-abstract-full').style.display = 'none'; document.getElementById('2305.06414v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">22 pages, 5 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 90C27; 37B15; 82C20 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.03235">arXiv:2305.03235</a> <span> [<a href="https://arxiv.org/pdf/2305.03235">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Emerging Technologies">cs.ET</span> </div> </div> <p class="title is-5 mathjax"> Hardware in Loop Learning with Spin Stochastic Neurons </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Islam%2C+A+N+M+N">A N M Nafiul Islam</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+K">Kezhou Yang</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A+K">Amit K. 
Shukla</a>, <a href="/search/cs?searchtype=author&query=Khanal%2C+P">Pravin Khanal</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+B">Bowei Zhou</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+W">Wei-Gang Wang</a>, <a href="/search/cs?searchtype=author&query=Sengupta%2C+A">Abhronil Sengupta</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.03235v3-abstract-short" style="display: inline;"> Despite the promise of superior efficiency and scalability, real-world deployment of emerging nanoelectronic platforms for brain-inspired computing have been limited thus far, primarily because of inter-device variations and intrinsic non-idealities. In this work, we demonstrate mitigating these issues by performing learning directly on practical devices through a hardware-in-loop approach, utiliz… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.03235v3-abstract-full').style.display = 'inline'; document.getElementById('2305.03235v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.03235v3-abstract-full" style="display: none;"> Despite the promise of superior efficiency and scalability, real-world deployment of emerging nanoelectronic platforms for brain-inspired computing have been limited thus far, primarily because of inter-device variations and intrinsic non-idealities. In this work, we demonstrate mitigating these issues by performing learning directly on practical devices through a hardware-in-loop approach, utilizing stochastic neurons based on heavy metal/ferromagnetic spin-orbit torque heterostructures. 
We characterize the probabilistic switching and device-to-device variability of our fabricated devices of various sizes to showcase the effect of device dimension on the neuronal dynamics and its consequent impact on network-level performance. The efficacy of the hardware-in-loop scheme is illustrated in a deep learning scenario achieving equivalent software performance. This work paves the way for future large-scale implementations of neuromorphic hardware and realization of truly autonomous edge-intelligent devices. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.03235v3-abstract-full').style.display = 'none'; document.getElementById('2305.03235v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.09948">arXiv:2304.09948</a> <span> [<a href="https://arxiv.org/pdf/2304.09948">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Catch Me If You Can: Identifying Fraudulent Physician Reviews with Large Language Models Using Generative Pre-Trained Transformers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shukla%2C+A+D">Aishwarya Deep Shukla</a>, <a href="/search/cs?searchtype=author&query=Agarwal%2C+L">Laksh Agarwal</a>, <a href="/search/cs?searchtype=author&query=Goh%2C+J+M">Jie Mein Goh</a>, <a href="/search/cs?searchtype=author&query=Gao%2C+G">Guodong Gao</a>, <a href="/search/cs?searchtype=author&query=Agarwal%2C+R">Ritu Agarwal</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.09948v1-abstract-short" style="display: inline;"> The proliferation of fake reviews of doctors has potentially detrimental consequences for patient well-being and has prompted concern among consumer protection groups and regulatory bodies. 
Yet despite significant advancements in the fields of machine learning and natural language processing, there remains limited comprehension of the characteristics differentiating fraudulent from authentic revie… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.09948v1-abstract-full').style.display = 'inline'; document.getElementById('2304.09948v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.09948v1-abstract-full" style="display: none;"> The proliferation of fake reviews of doctors has potentially detrimental consequences for patient well-being and has prompted concern among consumer protection groups and regulatory bodies. Yet despite significant advancements in the fields of machine learning and natural language processing, there remains limited comprehension of the characteristics differentiating fraudulent from authentic reviews. This study utilizes a novel pre-labeled dataset of 38048 physician reviews to establish the effectiveness of large language models in classifying reviews. Specifically, we compare the performance of traditional ML models, such as logistic regression and support vector machines, to generative pre-trained transformer models. Furthermore, we use GPT4, the newest model in the GPT family, to uncover the key dimensions along which fake and genuine physician reviews differ. Our findings reveal significantly superior performance of GPT-3 over traditional ML models in this context. Additionally, our analysis suggests that GPT3 requires a smaller training sample than traditional models, suggesting its appropriateness for tasks with scarce training data. Moreover, the superiority of GPT3 performance increases in the cold start context i.e., when there are no prior reviews of a doctor. Finally, we employ GPT4 to reveal the crucial dimensions that distinguish fake physician reviews. 
In sharp contrast to previous findings in the literature that were obtained using simulated data, our findings from a real-world dataset show that fake reviews are generally more clinically detailed, more reserved in sentiment, and have better structure and grammar than authentic ones. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.09948v1-abstract-full').style.display = 'none'; document.getElementById('2304.09948v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.16024">arXiv:2303.16024</a> <span> [<a href="https://arxiv.org/pdf/2303.16024">pdf</a>, <a href="https://arxiv.org/format/2303.16024">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> Egocentric Auditory Attention Localization in Conversations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ryan%2C+F">Fiona Ryan</a>, <a href="/search/cs?searchtype=author&query=Jiang%2C+H">Hao Jiang</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Abhinav Shukla</a>, <a href="/search/cs?searchtype=author&query=Rehg%2C+J+M">James M. 
Rehg</a>, <a href="/search/cs?searchtype=author&query=Ithapu%2C+V+K">Vamsi Krishna Ithapu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.16024v1-abstract-short" style="display: inline;"> In a noisy conversation environment such as a dinner party, people often exhibit selective auditory attention, or the ability to focus on a particular speaker while tuning out others. Recognizing who somebody is listening to in a conversation is essential for developing technologies that can understand social behavior and devices that can augment human hearing by amplifying particular sound source… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.16024v1-abstract-full').style.display = 'inline'; document.getElementById('2303.16024v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.16024v1-abstract-full" style="display: none;"> In a noisy conversation environment such as a dinner party, people often exhibit selective auditory attention, or the ability to focus on a particular speaker while tuning out others. Recognizing who somebody is listening to in a conversation is essential for developing technologies that can understand social behavior and devices that can augment human hearing by amplifying particular sound sources. The computer vision and audio research communities have made great strides towards recognizing sound sources and speakers in scenes. In this work, we take a step further by focusing on the problem of localizing auditory attention targets in egocentric video, or detecting who in a camera wearer's field of view they are listening to. 
To tackle the new and challenging Selective Auditory Attention Localization problem, we propose an end-to-end deep learning approach that uses egocentric video and multichannel audio to predict the heatmap of the camera wearer's auditory attention. Our approach leverages spatiotemporal audiovisual features and holistic reasoning about the scene to make predictions, and outperforms a set of baselines on a challenging multi-speaker conversation dataset. Project page: https://fkryan.github.io/saal <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.16024v1-abstract-full').style.display = 'none'; document.getElementById('2303.16024v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.11873">arXiv:2303.11873</a> <span> [<a href="https://arxiv.org/pdf/2303.11873">pdf</a>, <a href="https://arxiv.org/format/2303.11873">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> A Tale of Two Circuits: Grokking as Competition of Sparse and Dense Subnetworks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Merrill%2C+W">William Merrill</a>, <a href="/search/cs?searchtype=author&query=Tsilivis%2C+N">Nikolaos Tsilivis</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Aman Shukla</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2303.11873v1-abstract-short" style="display: inline;"> Grokking is a phenomenon where a model trained on an algorithmic task first overfits but, then, after a large amount of additional training, undergoes a phase transition to generalize perfectly. We empirically study the internal structure of networks undergoing grokking on the sparse parity task, and find that the grokking phase transition corresponds to the emergence of a sparse subnetwork that d… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.11873v1-abstract-full').style.display = 'inline'; document.getElementById('2303.11873v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.11873v1-abstract-full" style="display: none;"> Grokking is a phenomenon where a model trained on an algorithmic task first overfits but, then, after a large amount of additional training, undergoes a phase transition to generalize perfectly. We empirically study the internal structure of networks undergoing grokking on the sparse parity task, and find that the grokking phase transition corresponds to the emergence of a sparse subnetwork that dominates model predictions. On an optimization level, we find that this subnetwork arises when a small subset of neurons undergoes rapid norm growth, whereas the other neurons in the network decay slowly in norm. Thus, we suggest that the grokking phase transition can be understood to emerge from competition of two largely distinct subnetworks: a dense one that dominates before the transition and generalizes poorly, and a sparse one that dominates afterwards. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.11873v1-abstract-full').style.display = 'none'; document.getElementById('2303.11873v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published at the Workshop on Understanding Foundation Models at ICLR 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.11424">arXiv:2303.11424</a> <span> [<a href="https://arxiv.org/pdf/2303.11424">pdf</a>, <a href="https://arxiv.org/format/2303.11424">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Polynomial Implicit Neural Representations For Large Diverse Datasets </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Singh%2C+R">Rajhans Singh</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ankita Shukla</a>, <a href="/search/cs?searchtype=author&query=Turaga%2C+P">Pavan Turaga</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.11424v1-abstract-short" style="display: inline;"> Implicit neural representations (INR) have gained significant popularity for signal and image representation for many end-tasks, such as superresolution, 3D modeling, and more. 
Most INR architectures rely on sinusoidal positional encoding, which accounts for high-frequency information in data. However, the finite encoding size restricts the model's representational power. Higher representational p… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.11424v1-abstract-full').style.display = 'inline'; document.getElementById('2303.11424v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.11424v1-abstract-full" style="display: none;"> Implicit neural representations (INR) have gained significant popularity for signal and image representation for many end-tasks, such as superresolution, 3D modeling, and more. Most INR architectures rely on sinusoidal positional encoding, which accounts for high-frequency information in data. However, the finite encoding size restricts the model's representational power. Higher representational power is needed to go from representing a single given image to representing large and diverse datasets. Our approach addresses this gap by representing an image with a polynomial function and eliminates the need for positional encodings. Therefore, to achieve a progressively higher degree of polynomial representation, we use element-wise multiplications between features and affine-transformed coordinate locations after every ReLU layer. The proposed method is evaluated qualitatively and quantitatively on large datasets like ImageNet. The proposed Poly-INR model performs comparably to state-of-the-art generative models without any convolution, normalization, or self-attention layers, and with far fewer trainable parameters. With much fewer training parameters and higher representative power, our approach paves the way for broader adoption of INR models for generative modeling tasks in complex domains. 
The code is available at \url{https://github.com/Rajhans0/Poly_INR} <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.11424v1-abstract-full').style.display = 'none'; document.getElementById('2303.11424v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at CVPR 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.07451">arXiv:2303.07451</a> <span> [<a href="https://arxiv.org/pdf/2303.07451">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> DRISHTI: Visual Navigation Assistant for Visually Impaired </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Joshi%2C+M">Malay Joshi</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Aditi Shukla</a>, <a href="/search/cs?searchtype=author&query=Srivastava%2C+J">Jayesh Srivastava</a>, <a href="/search/cs?searchtype=author&query=Rastogi%2C+M">Manya Rastogi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.07451v1-abstract-short" style="display: inline;"> In today's society, where independent living is becoming increasingly important, it can be 
extremely constricting for those who are blind. Blind and visually impaired (BVI) people face challenges because they need manual support to prompt information about their environment. In this work, we took our first step towards developing an affordable and high-performing eye wearable assistive device, DRI… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.07451v1-abstract-full').style.display = 'inline'; document.getElementById('2303.07451v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.07451v1-abstract-full" style="display: none;"> In today's society, where independent living is becoming increasingly important, it can be extremely constricting for those who are blind. Blind and visually impaired (BVI) people face challenges because they need manual support to prompt information about their environment. In this work, we took our first step towards developing an affordable and high-performing eye wearable assistive device, DRISHTI, to provide visual navigation assistance for BVI people. This system comprises a camera module, ESP32 processor, Bluetooth module, smartphone and speakers. Using artificial intelligence, this system is proposed to detect and understand the nature of the users' path and obstacles ahead of the user in that path and then inform BVI users about it via audio output to enable them to acquire directions by themselves on their journey. This first step discussed in this paper involves establishing a proof-of-concept of achieving the right balance of affordability and performance by testing an initial software integration of a currency detection algorithm on a low-cost embedded arrangement. This work will lay the foundation for our upcoming works toward achieving the goal of assisting the maximum of BVI people around the globe in moving independently. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.07451v1-abstract-full').style.display = 'none'; document.getElementById('2303.07451v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Paper presented at International Conference on Advancements and Key Challenges in Green Energy and Computing (AKGEC 2023) is accepted to be published in the proceedings of the Journal of Physics</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.07201">arXiv:2303.07201</a> <span> [<a href="https://arxiv.org/pdf/2303.07201">pdf</a>, <a href="https://arxiv.org/format/2303.07201">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> An evaluation of Google Translate for Sanskrit to English translation via sentiment and semantic analysis </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Akshat Shukla</a>, <a href="/search/cs?searchtype=author&query=Bansal%2C+C">Chaarvi Bansal</a>, <a href="/search/cs?searchtype=author&query=Badhe%2C+S">Sushrut Badhe</a>, <a href="/search/cs?searchtype=author&query=Ranjan%2C+M">Mukul Ranjan</a>, <a href="/search/cs?searchtype=author&query=Chandra%2C+R">Rohitash Chandra</a> </p> <p class="abstract mathjax"> <span 
class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.07201v1-abstract-short" style="display: inline;"> Google Translate has been prominent for language translation; however, limited work has been done in evaluating the quality of translation when compared to human experts. Sanskrit is one of the oldest written languages in the world. In 2022, the Sanskrit language was added to the Google Translate engine. Sanskrit is known as the mother of languages such as Hindi and an ancient source of the Indo-Euro… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.07201v1-abstract-full').style.display = 'inline'; document.getElementById('2303.07201v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.07201v1-abstract-full" style="display: none;"> Google Translate has been prominent for language translation; however, limited work has been done in evaluating the quality of translation when compared to human experts. Sanskrit is one of the oldest written languages in the world. In 2022, the Sanskrit language was added to the Google Translate engine. Sanskrit is known as the mother of languages such as Hindi and an ancient source of the Indo-European group of languages. Sanskrit is the original language for sacred Hindu texts such as the Bhagavad Gita. In this study, we present a framework that evaluates the Google Translate for Sanskrit using the Bhagavad Gita. We first publish a translation of the Bhagavad Gita in Sanskrit using Google Translate. Our framework then compares Google Translate version of Bhagavad Gita with expert translations using sentiment and semantic analysis via BERT-based language models. Our results indicate that in terms of sentiment and semantic analysis, there is low level of similarity in selected verses of Google Translate when compared to expert translations. 
In the qualitative evaluation, we find that Google Translate is unsuitable for translation of certain Sanskrit words and phrases due to its poetic nature, contextual significance, metaphor and imagery. The mistranslations are not surprising since the Bhagavad Gita is known as a difficult text not only to translate, but also to interpret since it relies on contextual, philosophical and historical information. Our framework lays the foundation for automatic evaluation of other languages by Google Translate. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.07201v1-abstract-full').style.display = 'none'; document.getElementById('2303.07201v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2302.14130">arXiv:2302.14130</a> <span> [<a href="https://arxiv.org/pdf/2302.14130">pdf</a>, <a href="https://arxiv.org/format/2302.14130">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.neucom.2022.11.029">10.1016/j.neucom.2022.11.029 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Leveraging Angular Distributions for Improved Knowledge Distillation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a 
href="/search/cs?searchtype=author&query=Jeon%2C+E+S">Eun Som Jeon</a>, <a href="/search/cs?searchtype=author&query=Choi%2C+H">Hongjun Choi</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ankita Shukla</a>, <a href="/search/cs?searchtype=author&query=Turaga%2C+P">Pavan Turaga</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2302.14130v1-abstract-short" style="display: inline;"> Knowledge distillation as a broad class of methods has led to the development of lightweight and memory efficient models, using a pre-trained model with a large capacity (teacher network) to train a smaller model (student network). Recently, additional variations for knowledge distillation, utilizing activation maps of intermediate layers as the source of knowledge, have been studied. Generally, i… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.14130v1-abstract-full').style.display = 'inline'; document.getElementById('2302.14130v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2302.14130v1-abstract-full" style="display: none;"> Knowledge distillation as a broad class of methods has led to the development of lightweight and memory efficient models, using a pre-trained model with a large capacity (teacher network) to train a smaller model (student network). Recently, additional variations for knowledge distillation, utilizing activation maps of intermediate layers as the source of knowledge, have been studied. Generally, in computer vision applications, it is seen that the feature activation learned by a higher capacity model contains richer knowledge, highlighting complete objects while focusing less on the background. 
Based on this observation, we leverage the dual ability of the teacher to accurately distinguish between positive (relevant to the target object) and negative (irrelevant) areas. We propose a new loss function for distillation, called angular margin-based distillation (AMD) loss. AMD loss uses the angular distance between positive and negative features by projecting them onto a hypersphere, motivated by the near angular distributions seen in many feature extractors. Then, we create a more attentive feature that is angularly distributed on the hypersphere by introducing an angular margin to the positive feature. Transferring such knowledge from the teacher network enables the student model to harness the higher discrimination of positive and negative features for the teacher, thus distilling superior student models. The proposed method is evaluated for various student-teacher network pairs on four public datasets. Furthermore, we show that the proposed method has advantages in compatibility with other learning techniques, such as using fine-grained features, augmentation, and other distillation methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.14130v1-abstract-full').style.display = 'none'; document.getElementById('2302.14130v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Neurocomputing, Volume 518, 21 January 2023, Pages 466-481</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Neurocomputing, Volume 518, 2023, Pages 466-481 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.12861">arXiv:2212.12861</a> <span> [<a href="https://arxiv.org/pdf/2212.12861">pdf</a>, <a href="https://arxiv.org/format/2212.12861">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Quantum Physics">quant-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Graphics">cs.GR</span> </div> </div> <p class="title is-5 mathjax"> An efficient quantum-classical hybrid algorithm for distorted alphanumeric character identification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Pal%2C+A">Ankur Pal</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Abhishek Shukla</a>, <a href="/search/cs?searchtype=author&query=Pathak%2C+A">Anirban Pathak</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.12861v1-abstract-short" style="display: inline;"> An algorithm for image processing is proposed. The proposed algorithm, which can be viewed as a quantum-classical hybrid algorithm, can transform a low-resolution bitonal image of a character from the set of alphanumeric characters (A-Z, 0-9) into a high-resolution image. 
The quantum part of the proposed algorithm fruitfully utilizes a variant of Grover's search algorithm, known as the fixed point… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.12861v1-abstract-full').style.display = 'inline'; document.getElementById('2212.12861v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.12861v1-abstract-full" style="display: none;"> An algorithm for image processing is proposed. The proposed algorithm, which can be viewed as a quantum-classical hybrid algorithm, can transform a low-resolution bitonal image of a character from the set of alphanumeric characters (A-Z, 0-9) into a high-resolution image. The quantum part of the proposed algorithm fruitfully utilizes a variant of Grover's search algorithm, known as the fixed point search algorithm. Further, the quantum part of the algorithm is simulated using CQASM and the advantage of the algorithm is established through the complexity analysis. Additional analysis has also revealed that this scheme for optical character recognition (OCR) leads to high confidence value and generally works in a more efficient manner compared to the existing classical, quantum, and hybrid algorithms for a similar task. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.12861v1-abstract-full').style.display = 'none'; document.getElementById('2212.12861v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">A quantum-assisted algorithm for optical character recognition (OCR) is proposed using fixed point Grover's algorithm</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.01745">arXiv:2212.01745</a> <span> [<a href="https://arxiv.org/pdf/2212.01745">pdf</a>, <a href="https://arxiv.org/format/2212.01745">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Design of an All-Purpose Terrace Farming Robot </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mohta%2C+V">Vibhakar Mohta</a>, <a href="/search/cs?searchtype=author&query=Patnaik%2C+A">Adarsh Patnaik</a>, <a href="/search/cs?searchtype=author&query=Panda%2C+S+K">Shivam Kumar Panda</a>, <a href="/search/cs?searchtype=author&query=Krishnan%2C+S+V">Siva Vignesh Krishnan</a>, <a href="/search/cs?searchtype=author&query=Gupta%2C+A">Abhinav Gupta</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Abhay Shukla</a>, <a href="/search/cs?searchtype=author&query=Wadhwa%2C+G">Gauri Wadhwa</a>, <a href="/search/cs?searchtype=author&query=Verma%2C+S">Shrey Verma</a>, <a href="/search/cs?searchtype=author&query=Bandopadhyay%2C+A">Aditya Bandopadhyay</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.01745v1-abstract-short" style="display: inline;"> Automation in farming processes is a growing field of research in both academia and industries. A considerable amount of work has been put into this field to develop systems robust enough for farming. 
Terrace farming, in particular, provides a varying set of challenges, including robust stair climbing methods and stable navigation in unstructured terrains. We propose the design of a novel autonomo… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.01745v1-abstract-full').style.display = 'inline'; document.getElementById('2212.01745v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.01745v1-abstract-full" style="display: none;"> Automation in farming processes is a growing field of research in both academia and industries. A considerable amount of work has been put into this field to develop systems robust enough for farming. Terrace farming, in particular, provides a varying set of challenges, including robust stair climbing methods and stable navigation in unstructured terrains. We propose the design of a novel autonomous terrace farming robot, Aarohi, that can effectively climb steep terraces of considerable heights and execute several farming operations. The design optimisation strategy for the overall mechanical structure is elucidated. Further, the embedded and software architecture along with fail-safe strategies are presented for a working prototype. Algorithms for autonomous traversal over the terrace steps using the scissor lift mechanism and performing various farming operations have also been discussed. The adaptability of the design to specific operational requirements and modular farm tools allow Aarohi to be customised for a wide variety of use cases. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.01745v1-abstract-full').style.display = 'none'; document.getElementById('2212.01745v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.16172">arXiv:2211.16172</a> <span> [<a href="https://arxiv.org/pdf/2211.16172">pdf</a>, <a href="https://arxiv.org/format/2211.16172">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computers and Society">cs.CY</span> </div> </div> <p class="title is-5 mathjax"> Learnings from Technological Interventions in a Low Resource Language: Enhancing Information Access in Gondi </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mehta%2C+D">Devansh Mehta</a>, <a href="/search/cs?searchtype=author&query=Diddee%2C+H">Harshita Diddee</a>, <a href="/search/cs?searchtype=author&query=Saxena%2C+A">Ananya Saxena</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Anurag Shukla</a>, <a href="/search/cs?searchtype=author&query=Santy%2C+S">Sebastin Santy</a>, <a href="/search/cs?searchtype=author&query=Mothilal%2C+R+K">Ramaravind Kommiya Mothilal</a>, <a href="/search/cs?searchtype=author&query=Srivastava%2C+B+M+L">Brij Mohan Lal Srivastava</a>, <a href="/search/cs?searchtype=author&query=Sharma%2C+A">Alok Sharma</a>, <a href="/search/cs?searchtype=author&query=Prasad%2C+V">Vishnu Prasad</a>, <a 
href="/search/cs?searchtype=author&query=U%2C+V">Venkanna U</a>, <a href="/search/cs?searchtype=author&query=Bali%2C+K">Kalika Bali</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.16172v1-abstract-short" style="display: inline;"> The primary obstacle to developing technologies for low-resource languages is the lack of representative, usable data. In this paper, we report the deployment of technology-driven data collection methods for creating a corpus of more than 60,000 translations from Hindi to Gondi, a low-resource vulnerable language spoken by around 2.3 million tribal people in south and central India. During this pr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.16172v1-abstract-full').style.display = 'inline'; document.getElementById('2211.16172v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.16172v1-abstract-full" style="display: none;"> The primary obstacle to developing technologies for low-resource languages is the lack of representative, usable data. In this paper, we report the deployment of technology-driven data collection methods for creating a corpus of more than 60,000 translations from Hindi to Gondi, a low-resource vulnerable language spoken by around 2.3 million tribal people in south and central India. 
During this process, we help expand information access in Gondi across 2 different dimensions (a) The creation of linguistic resources that can be used by the community, such as a dictionary, children's stories, Gondi translations from multiple sources and an Interactive Voice Response (IVR) based mass awareness platform; (b) Enabling its use in the digital domain by developing a Hindi-Gondi machine translation model, which is compressed by nearly 4 times to enable its edge deployment on low-resource edge devices and in areas of little to no internet connectivity. We also present preliminary evaluations of utilizing the developed machine translation model to provide assistance to volunteers who are involved in collecting more data for the target language. Through these interventions, we not only created a refined and evaluated corpus of 26,240 Hindi-Gondi translations that was used for building the translation model but also engaged nearly 850 community members who can help take Gondi onto the internet. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.16172v1-abstract-full').style.display = 'none'; document.getElementById('2211.16172v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">In Submission (Revised) to Language Resources and Evaluation Journal. 
arXiv admin note: text overlap with arXiv:2004.10270</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.05100">arXiv:2211.05100</a> <span> [<a href="https://arxiv.org/pdf/2211.05100">pdf</a>, <a href="https://arxiv.org/format/2211.05100">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> BLOOM: A 176B-Parameter Open-Access Multilingual Language Model </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Workshop%2C+B">BigScience Workshop</a>, <a href="/search/cs?searchtype=author&query=%3A"> :</a>, <a href="/search/cs?searchtype=author&query=Scao%2C+T+L">Teven Le Scao</a>, <a href="/search/cs?searchtype=author&query=Fan%2C+A">Angela Fan</a>, <a href="/search/cs?searchtype=author&query=Akiki%2C+C">Christopher Akiki</a>, <a href="/search/cs?searchtype=author&query=Pavlick%2C+E">Ellie Pavlick</a>, <a href="/search/cs?searchtype=author&query=Ili%C4%87%2C+S">Suzana Ili膰</a>, <a href="/search/cs?searchtype=author&query=Hesslow%2C+D">Daniel Hesslow</a>, <a href="/search/cs?searchtype=author&query=Castagn%C3%A9%2C+R">Roman Castagn茅</a>, <a href="/search/cs?searchtype=author&query=Luccioni%2C+A+S">Alexandra Sasha Luccioni</a>, <a href="/search/cs?searchtype=author&query=Yvon%2C+F">Fran莽ois Yvon</a>, <a href="/search/cs?searchtype=author&query=Gall%C3%A9%2C+M">Matthias Gall茅</a>, <a href="/search/cs?searchtype=author&query=Tow%2C+J">Jonathan Tow</a>, <a href="/search/cs?searchtype=author&query=Rush%2C+A+M">Alexander M. 
Rush</a>, <a href="/search/cs?searchtype=author&query=Biderman%2C+S">Stella Biderman</a>, <a href="/search/cs?searchtype=author&query=Webson%2C+A">Albert Webson</a>, <a href="/search/cs?searchtype=author&query=Ammanamanchi%2C+P+S">Pawan Sasanka Ammanamanchi</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+T">Thomas Wang</a>, <a href="/search/cs?searchtype=author&query=Sagot%2C+B">Beno卯t Sagot</a>, <a href="/search/cs?searchtype=author&query=Muennighoff%2C+N">Niklas Muennighoff</a>, <a href="/search/cs?searchtype=author&query=del+Moral%2C+A+V">Albert Villanova del Moral</a>, <a href="/search/cs?searchtype=author&query=Ruwase%2C+O">Olatunji Ruwase</a>, <a href="/search/cs?searchtype=author&query=Bawden%2C+R">Rachel Bawden</a>, <a href="/search/cs?searchtype=author&query=Bekman%2C+S">Stas Bekman</a>, <a href="/search/cs?searchtype=author&query=McMillan-Major%2C+A">Angelina McMillan-Major</a> , et al. (369 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.05100v4-abstract-short" style="display: inline;"> Large language models (LLMs) have been shown to be able to perform new tasks based on a few demonstrations or natural language instructions. While these capabilities have led to widespread adoption, most LLMs are developed by resource-rich organizations and are frequently kept from the public. 
As a step towards democratizing this powerful technology, we present BLOOM, a 176B-parameter open-access… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.05100v4-abstract-full').style.display = 'inline'; document.getElementById('2211.05100v4-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.05100v4-abstract-full" style="display: none;"> Large language models (LLMs) have been shown to be able to perform new tasks based on a few demonstrations or natural language instructions. While these capabilities have led to widespread adoption, most LLMs are developed by resource-rich organizations and are frequently kept from the public. As a step towards democratizing this powerful technology, we present BLOOM, a 176B-parameter open-access language model designed and built thanks to a collaboration of hundreds of researchers. BLOOM is a decoder-only Transformer language model that was trained on the ROOTS corpus, a dataset comprising hundreds of sources in 46 natural and 13 programming languages (59 in total). We find that BLOOM achieves competitive performance on a wide variety of benchmarks, with stronger results after undergoing multitask prompted finetuning. To facilitate future research and applications using LLMs, we publicly release our models and code under the Responsible AI License. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.05100v4-abstract-full').style.display = 'none'; document.getElementById('2211.05100v4-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.03946">arXiv:2211.03946</a> <span> [<a href="https://arxiv.org/pdf/2211.03946">pdf</a>, <a href="https://arxiv.org/format/2211.03946">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Understanding the Role of Mixup in Knowledge Distillation: An Empirical Study </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Choi%2C+H">Hongjun Choi</a>, <a href="/search/cs?searchtype=author&query=Jeon%2C+E+S">Eun Som Jeon</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ankita Shukla</a>, <a href="/search/cs?searchtype=author&query=Turaga%2C+P">Pavan Turaga</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.03946v2-abstract-short" style="display: inline;"> Mixup is a popular data augmentation technique based on creating new samples by linear interpolation between two given data samples, to improve both the generalization and robustness of the trained model. 
Knowledge distillation (KD), on the other hand, is widely used for model compression and transfer learning, which involves using a larger network's implicit knowledge to guide the learning of a s… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.03946v2-abstract-full').style.display = 'inline'; document.getElementById('2211.03946v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.03946v2-abstract-full" style="display: none;"> Mixup is a popular data augmentation technique based on creating new samples by linear interpolation between two given data samples, to improve both the generalization and robustness of the trained model. Knowledge distillation (KD), on the other hand, is widely used for model compression and transfer learning, which involves using a larger network's implicit knowledge to guide the learning of a smaller network. At first glance, these two techniques seem very different, however, we found that "smoothness" is the connecting link between the two and is also a crucial attribute in understanding KD's interplay with mixup. Although many mixup variants and distillation methods have been proposed, much remains to be understood regarding the role of a mixup in knowledge distillation. In this paper, we present a detailed empirical study on various important dimensions of compatibility between mixup and knowledge distillation. We also scrutinize the behavior of the networks trained with a mixup in the light of knowledge distillation through extensive analysis, visualizations, and comprehensive experiments on image classification. Finally, based on our findings, we suggest improved strategies to guide the student network to enhance its effectiveness. Additionally, the findings of this study provide insightful suggestions to researchers and practitioners that commonly use techniques from KD. 
Our code is available at https://github.com/hchoi71/MIX-KD. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.03946v2-abstract-full').style.display = 'none'; document.getElementById('2211.03946v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To be presented at WACV 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2210.07544">arXiv:2210.07544</a> <span> [<a href="https://arxiv.org/pdf/2210.07544">pdf</a>, <a href="https://arxiv.org/format/2210.07544">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> </div> <p class="title is-5 mathjax"> Legal Case Document Summarization: Extractive and Abstractive Methods and their Evaluation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Abhay Shukla</a>, <a href="/search/cs?searchtype=author&query=Bhattacharya%2C+P">Paheli Bhattacharya</a>, <a href="/search/cs?searchtype=author&query=Poddar%2C+S">Soham Poddar</a>, <a href="/search/cs?searchtype=author&query=Mukherjee%2C+R">Rajdeep Mukherjee</a>, <a href="/search/cs?searchtype=author&query=Ghosh%2C+K">Kripabandhu Ghosh</a>, <a 
href="/search/cs?searchtype=author&query=Goyal%2C+P">Pawan Goyal</a>, <a href="/search/cs?searchtype=author&query=Ghosh%2C+S">Saptarshi Ghosh</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2210.07544v1-abstract-short" style="display: inline;"> Summarization of legal case judgement documents is a challenging problem in Legal NLP. However, not much analyses exist on how different families of summarization models (e.g., extractive vs. abstractive) perform when applied to legal case documents. This question is particularly important since many recent transformer-based abstractive summarization models have restrictions on the number of input… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.07544v1-abstract-full').style.display = 'inline'; document.getElementById('2210.07544v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2210.07544v1-abstract-full" style="display: none;"> Summarization of legal case judgement documents is a challenging problem in Legal NLP. However, not much analyses exist on how different families of summarization models (e.g., extractive vs. abstractive) perform when applied to legal case documents. This question is particularly important since many recent transformer-based abstractive summarization models have restrictions on the number of input tokens, and legal documents are known to be very long. Also, it is an open question on how best to evaluate legal case document summarization systems. In this paper, we carry out extensive experiments with several extractive and abstractive summarization methods (both supervised and unsupervised) over three legal summarization datasets that we have developed. 
Our analyses, that includes evaluation by law practitioners, lead to several interesting insights on legal summarization in specific and long document summarization in general. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.07544v1-abstract-full').style.display = 'none'; document.getElementById('2210.07544v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at The 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing (AACL-IJCNLP), 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.11632">arXiv:2209.11632</a> <span> [<a href="https://arxiv.org/pdf/2209.11632">pdf</a>, <a href="https://arxiv.org/format/2209.11632">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> </div> <p class="title is-5 mathjax"> Facilitating Change Implementation for Continuous ML-Safety Assurance </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Cheng%2C+C">Chih-Hong Cheng</a>, <a href="/search/cs?searchtype=author&query=Doan%2C+N+A+V">Nguyen Anh Vu Doan</a>, <a href="/search/cs?searchtype=author&query=Balu%2C+B">Balahari Balu</a>, <a href="/search/cs?searchtype=author&query=Schwaiger%2C+F">Franziska Schwaiger</a>, <a 
href="/search/cs?searchtype=author&query=Seferis%2C+E">Emmanouil Seferis</a>, <a href="/search/cs?searchtype=author&query=Burton%2C+S">Simon Burton</a>, <a href="/search/cs?searchtype=author&query=Qamsane%2C+Y">Yassine Qamsane</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ankit Shukla</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+Y">Yinchong Yang</a>, <a href="/search/cs?searchtype=author&query=Wu%2C+Z">Zhiliang Wu</a>, <a href="/search/cs?searchtype=author&query=Hapfelmeier%2C+A">Andreas Hapfelmeier</a>, <a href="/search/cs?searchtype=author&query=Thon%2C+I">Ingo Thon</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.11632v1-abstract-short" style="display: inline;"> We propose a method for deploying a safety-critical machine-learning component into continuously evolving environments where an increased degree of automation in the engineering process is desired. We associate semantic tags with the safety case argumentation and turn each piece of evidence into a quantitative metric or a logic formula. With proper tool support, the impact can be characterized by… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.11632v1-abstract-full').style.display = 'inline'; document.getElementById('2209.11632v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.11632v1-abstract-full" style="display: none;"> We propose a method for deploying a safety-critical machine-learning component into continuously evolving environments where an increased degree of automation in the engineering process is desired. We associate semantic tags with the safety case argumentation and turn each piece of evidence into a quantitative metric or a logic formula. 
With proper tool support, the impact can be characterized by a query over the safety argumentation tree to highlight evidence turning invalid. The concept is exemplified using a vision-based emergency braking system of an autonomous guided vehicle for factory automation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.11632v1-abstract-full').style.display = 'none'; document.getElementById('2209.11632v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.14760">arXiv:2205.14760</a> <span> [<a href="https://arxiv.org/pdf/2205.14760">pdf</a>, <a href="https://arxiv.org/format/2205.14760">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Emerging Technologies">cs.ET</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Statistical Mechanics">cond-mat.stat-mech</span> </div> </div> <p class="title is-5 mathjax"> Scalable almost-linear dynamical Ising machines </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Aditya Shukla</a>, <a href="/search/cs?searchtype=author&query=Erementchouk%2C+M">Mikhail Erementchouk</a>, <a href="/search/cs?searchtype=author&query=Mazumder%2C+P">Pinaki Mazumder</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.14760v1-abstract-short" style="display: inline;"> The past decade has seen the emergence of Ising machines targeting hard 
combinatorial optimization problems by minimizing the Ising Hamiltonian with spins represented by continuous dynamical variables. However, capabilities of these machines at larger scales are yet to be fully explored. We investigate an Ising machine based on a network of almost-linearly coupled analog spins. We show that such n… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.14760v1-abstract-full').style.display = 'inline'; document.getElementById('2205.14760v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.14760v1-abstract-full" style="display: none;"> The past decade has seen the emergence of Ising machines targeting hard combinatorial optimization problems by minimizing the Ising Hamiltonian with spins represented by continuous dynamical variables. However, capabilities of these machines at larger scales are yet to be fully explored. We investigate an Ising machine based on a network of almost-linearly coupled analog spins. We show that such networks leverage the computational resource similar to that of the semidefinite positive relaxation of the Ising model. We estimate the expected performance of the almost-linear machine and benchmark it on a set of {0,1}-weighted graphs. We show that the running time of the investigated machine scales polynomially (linearly with the number of edges in the connectivity graph). As an example of the physical realization of the machine, we present a CMOS-compatible implementation comprising an array of vertices efficiently storing the continuous spins on charged capacitors and communicating externally via analog current. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.14760v1-abstract-full').style.display = 'none'; document.getElementById('2205.14760v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.11722">arXiv:2205.11722</a> <span> [<a href="https://arxiv.org/pdf/2205.11722">pdf</a>, <a href="https://arxiv.org/format/2205.11722">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Improving Shape Awareness and Interpretability in Deep Networks Using Geometric Moments </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Singh%2C+R">Rajhans Singh</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ankita Shukla</a>, <a href="/search/cs?searchtype=author&query=Turaga%2C+P">Pavan Turaga</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.11722v2-abstract-short" style="display: inline;"> Deep networks for image classification often rely more on texture information than object shape. While efforts have been made to make deep-models shape-aware, it is often difficult to make such models simple, interpretable, or rooted in known mathematical definitions of shape. 
This paper presents a deep-learning model inspired by geometric moments, a classically well understood approach to measure… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.11722v2-abstract-full').style.display = 'inline'; document.getElementById('2205.11722v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.11722v2-abstract-full" style="display: none;"> Deep networks for image classification often rely more on texture information than object shape. While efforts have been made to make deep-models shape-aware, it is often difficult to make such models simple, interpretable, or rooted in known mathematical definitions of shape. This paper presents a deep-learning model inspired by geometric moments, a classically well understood approach to measure shape-related properties. The proposed method consists of a trainable network for generating coordinate bases and affine parameters for making the features geometrically invariant yet in a task-specific manner. The proposed model improves the final feature's interpretation. We demonstrate the effectiveness of our method on standard image classification datasets. The proposed model achieves higher classification performance compared to the baseline and standard ResNet models while substantially improving interpretability. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.11722v2-abstract-full').style.display = 'none'; document.getElementById('2205.11722v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at CVPR 2023 Workshop: Deep Learning for Geometric Computing</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2204.02591">arXiv:2204.02591</a> <span> [<a href="https://arxiv.org/pdf/2204.02591">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Contextual Attention Mechanism, SRGAN Based Inpainting System for Eliminating Interruptions from Images </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Darapaneni%2C+N">Narayana Darapaneni</a>, <a href="/search/cs?searchtype=author&query=Kherde%2C+V">Vaibhav Kherde</a>, <a href="/search/cs?searchtype=author&query=Rao%2C+K">Kameswara Rao</a>, <a href="/search/cs?searchtype=author&query=Nikam%2C+D">Deepali Nikam</a>, <a href="/search/cs?searchtype=author&query=Katdare%2C+S">Swanand Katdare</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Anima Shukla</a>, <a href="/search/cs?searchtype=author&query=Lomate%2C+A">Anagha Lomate</a>, <a href="/search/cs?searchtype=author&query=Paduri%2C+A+R">Anwesh Reddy Paduri</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2204.02591v1-abstract-short" style="display: inline;"> The new alternative is to use deep learning to inpaint any image by utilizing image classification and computer vision techniques. 
In general, image inpainting is a task of recreating or reconstructing any broken image which could be a photograph or oil/acrylic painting. With the advancement in the field of Artificial Intelligence, this topic has become popular among AI enthusiasts. With our appro… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.02591v1-abstract-full').style.display = 'inline'; document.getElementById('2204.02591v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2204.02591v1-abstract-full" style="display: none;"> The new alternative is to use deep learning to inpaint any image by utilizing image classification and computer vision techniques. In general, image inpainting is a task of recreating or reconstructing any broken image which could be a photograph or oil/acrylic painting. With the advancement in the field of Artificial Intelligence, this topic has become popular among AI enthusiasts. With our approach, we propose an initial end-to-end pipeline for inpainting images using a complete Machine Learning approach instead of a conventional application-based approach. We first use the YOLO model to automatically identify and localize the object we wish to remove from the image. Using the result obtained from the model we can generate a mask for the same. After this, we provide the masked image and original image to the GAN model which uses the Contextual Attention method to fill in the region. It consists of two generator networks and two discriminator networks and is also called a coarse-to-fine network structure. The two generators use fully convolutional networks while the global discriminator gets hold of the entire image as input while the local discriminator gets the grip of the filled region as input. 
The contextual Attention mechanism is proposed to effectively borrow the neighbor information from distant spatial locations for reconstructing the missing pixels. The third part of our implementation uses SRGAN to resolve the inpainted image back to its original size. Our work is inspired by the paper Free-Form Image Inpainting with Gated Convolution and Generative Image Inpainting with Contextual Attention. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.02591v1-abstract-full').style.display = 'none'; document.getElementById('2204.02591v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.00111">arXiv:2201.00111</a> <span> [<a href="https://arxiv.org/pdf/2201.00111">pdf</a>, <a href="https://arxiv.org/format/2201.00111">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/JIOT.2021.3139038">10.1109/JIOT.2021.3139038 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Role of Data Augmentation Strategies in Knowledge Distillation for Wearable Sensor Data </p> <p 
class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Jeon%2C+E+S">Eun Som Jeon</a>, <a href="/search/cs?searchtype=author&query=Som%2C+A">Anirudh Som</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ankita Shukla</a>, <a href="/search/cs?searchtype=author&query=Hasanaj%2C+K">Kristina Hasanaj</a>, <a href="/search/cs?searchtype=author&query=Buman%2C+M+P">Matthew P. Buman</a>, <a href="/search/cs?searchtype=author&query=Turaga%2C+P">Pavan Turaga</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.00111v1-abstract-short" style="display: inline;"> Deep neural networks are parametrized by several thousands or millions of parameters, and have shown tremendous success in many classification problems. However, the large number of parameters makes it difficult to integrate these models into edge devices such as smartphones and wearable devices. To address this problem, knowledge distillation (KD) has been widely employed, that uses a pre-trained… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.00111v1-abstract-full').style.display = 'inline'; document.getElementById('2201.00111v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.00111v1-abstract-full" style="display: none;"> Deep neural networks are parametrized by several thousands or millions of parameters, and have shown tremendous success in many classification problems. However, the large number of parameters makes it difficult to integrate these models into edge devices such as smartphones and wearable devices. To address this problem, knowledge distillation (KD) has been widely employed, that uses a pre-trained high capacity network to train a much smaller network, suitable for edge devices. 
In this paper, for the first time, we study the applicability and challenges of using KD for time-series data for wearable devices. Successful application of KD requires specific choices of data augmentation methods during training. However, it is not yet known if there exists a coherent strategy for choosing an augmentation approach during KD. In this paper, we report the results of a detailed study that compares and contrasts various common choices and some hybrid data augmentation strategies in KD based human activity analysis. Research in this area is often limited as there are not many comprehensive databases available in the public domain from wearable devices. Our study considers databases from small scale publicly available to one derived from a large scale interventional study into human activity and sedentary behavior. We find that the choice of data augmentation techniques during KD have a variable level of impact on end performance, and find that the optimal network choice as well as data augmentation strategies are specific to a dataset at hand. However, we also conclude with a general set of recommendations that can provide a strong baseline performance across databases. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.00111v1-abstract-full').style.display = 'none'; document.getElementById('2201.00111v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2112.11044">arXiv:2112.11044</a> <span> [<a href="https://arxiv.org/pdf/2112.11044">pdf</a>, <a href="https://arxiv.org/ps/2112.11044">ps</a>, <a href="https://arxiv.org/format/2112.11044">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Logic in Computer Science">cs.LO</span> </div> </div> <p class="title is-5 mathjax"> Extending Merge Resolution to a Family of Proof Systems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chede%2C+S">Sravanthi Chede</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Anil Shukla</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2112.11044v1-abstract-short" style="display: inline;"> Merge Resolution (MRes [Beyersdorff et al. J. Autom. Reason.'2021]) is a recently introduced proof system for false QBFs. It stores the countermodels as merge maps. Merge maps are deterministic branching programs in which isomorphism checking is efficient making MRes a polynomial time verifiable proof system. In this paper, we introduce a family of proof systems MRes-R in which, the countermodel… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.11044v1-abstract-full').style.display = 'inline'; document.getElementById('2112.11044v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2112.11044v1-abstract-full" style="display: none;"> Merge Resolution (MRes [Beyersdorff et al. J. Autom. 
Reason.'2021]) is a recently introduced proof system for false QBFs. It stores the countermodels as merge maps. Merge maps are deterministic branching programs in which isomorphism checking is efficient making MRes a polynomial time verifiable proof system. In this paper, we introduce a family of proof systems MRes-R in which, the countermodels are stored in any pre-fixed complete representation R, instead of merge maps. Hence corresponding to each such R, we have a sound and refutationally complete QBF-proof system in MRes-R. To handle arbitrary representations for the strategies, we introduce consistency checking rules in MRes-R instead of isomorphism checking. As a result these proof systems are not polynomial time verifiable. Consequently, the paper shows that using merge maps is too restrictive and can be replaced with arbitrary representations leading to several interesting proof systems. Exploring proof theoretic properties of MRes-R, we show that eFrege+$\forall$red simulates all valid refutations from proof systems in MRes-R. In order to simulate arbitrary representations in MRes-R, we first represent the steps used by the proof systems as a new complete structure. Consequently, the corresponding proof system belonging to MRes-R is able to simulate all proof systems in MRes-R. Finally, we simulate this proof system via eFrege+$\forall$red using the ideas from [Chew et al. ECCC.'2021]. On the lower bound side, we show that the completion principle formulas from [Jonata et al. Theor. Comput. Sci.'2015] which are shown to be hard for regular MRes in [Beyersdorff et al. FSTTCS.'2020], are also hard for any regular proof system in MRes-R. Thereby, the paper lifts the lower bound of regular MRes to an entire class of proof systems, which use some complete representation, including those undiscovered, instead of merge maps. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2112.11044v1-abstract-full').style.display = 'none'; document.getElementById('2112.11044v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">27 pages, 4 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 03F20 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> F.2.2 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.14579">arXiv:2111.14579</a> <span> [<a href="https://arxiv.org/pdf/2111.14579">pdf</a>, <a href="https://arxiv.org/format/2111.14579">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3493425.3502751">10.1145/3493425.3502751 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Shortcutting Fast Failover Routes in the Data Plane </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Apoorv Shukla</a>, 
<a href="/search/cs?searchtype=author&query=Foerster%2C+K">Klaus-Tycho Foerster</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.14579v1-abstract-short" style="display: inline;"> In networks, availability is of paramount importance. As link failures are disruptive, modern networks in turn provide Fast ReRoute (FRR) mechanisms to rapidly restore connectivity. However, existing FRR approaches heavily impact performance until the slower convergence protocols kick in. The fast failover routes commonly involve unnecessary loops and detours, disturbing other traffic while causin… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.14579v1-abstract-full').style.display = 'inline'; document.getElementById('2111.14579v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.14579v1-abstract-full" style="display: none;"> In networks, availability is of paramount importance. As link failures are disruptive, modern networks in turn provide Fast ReRoute (FRR) mechanisms to rapidly restore connectivity. However, existing FRR approaches heavily impact performance until the slower convergence protocols kick in. The fast failover routes commonly involve unnecessary loops and detours, disturbing other traffic while causing costly packet loss. In this paper, we make a case for augmenting FRR mechanisms to avoid such inefficiencies. We introduce ShortCut that routes the packets in a loop free fashion, avoiding costly detours and decreasing link load. ShortCut achieves this by leveraging data plane programmability: when a loop is locally observed, it can be removed by short-cutting the respective route parts. As such, ShortCut is topology-independent and agnostic to the type of FRR currently deployed. 
Our first experimental simulations show that ShortCut can outperform control plane convergence mechanisms; moreover avoiding loops and keeping packet loss minimal opposed to existing FRR mechanisms. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.14579v1-abstract-full').style.display = 'none'; document.getElementById('2111.14579v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear at the ACM/IEEE Symposium on Architectures for Networking and Communications Systems 2021 (ANCS'21)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.14053">arXiv:2111.14053</a> <span> [<a href="https://arxiv.org/pdf/2111.14053">pdf</a>, <a href="https://arxiv.org/format/2111.14053">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Biomolecules">q-bio.BM</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Biological Physics">physics.bio-ph</span> </div> </div> <p class="title is-5 mathjax"> Towards Conditional Generation of Minimal Action Potential Pathways for Molecular Dynamics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Cava%2C+J+K">John Kevin Cava</a>, <a 
href="/search/cs?searchtype=author&query=Vant%2C+J">John Vant</a>, <a href="/search/cs?searchtype=author&query=Ho%2C+N">Nicholas Ho</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ankita Shukla</a>, <a href="/search/cs?searchtype=author&query=Turaga%2C+P">Pavan Turaga</a>, <a href="/search/cs?searchtype=author&query=Maciejewski%2C+R">Ross Maciejewski</a>, <a href="/search/cs?searchtype=author&query=Singharoy%2C+A">Abhishek Singharoy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.14053v2-abstract-short" style="display: inline;"> In this paper, we utilized generative models, and reformulate it for problems in molecular dynamics (MD) simulation, by introducing an MD potential energy component to our generative model. By incorporating potential energy as calculated from TorchMD into a conditional generative framework, we attempt to construct a low-potential energy route of transformation between the helix~$\rightarrow$~coil… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.14053v2-abstract-full').style.display = 'inline'; document.getElementById('2111.14053v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.14053v2-abstract-full" style="display: none;"> In this paper, we utilized generative models, and reformulate it for problems in molecular dynamics (MD) simulation, by introducing an MD potential energy component to our generative model. By incorporating potential energy as calculated from TorchMD into a conditional generative framework, we attempt to construct a low-potential energy route of transformation between the helix~$\rightarrow$~coil structures of a protein. 
We show how to add an additional loss function to conditional generative models, motivated by potential energy of molecular configurations, and also present an optimization technique for such an augmented loss function. Our results show the benefit of this additional loss term on synthesizing realistic molecular trajectories. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.14053v2-abstract-full').style.display = 'none'; document.getElementById('2111.14053v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 28 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to ELLIS ML4Molecules Workshop 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.12798">arXiv:2111.12798</a> <span> [<a href="https://arxiv.org/pdf/2111.12798">pdf</a>, <a href="https://arxiv.org/format/2111.12798">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Geometric Priors for Scientific Generative Models in Inertial Confinement Fusion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ankita Shukla</a>, <a href="/search/cs?searchtype=author&query=Anirudh%2C+R">Rushil Anirudh</a>, <a 
href="/search/cs?searchtype=author&query=Kur%2C+E">Eugene Kur</a>, <a href="/search/cs?searchtype=author&query=Thiagarajan%2C+J+J">Jayaraman J. Thiagarajan</a>, <a href="/search/cs?searchtype=author&query=Bremer%2C+P">Peer-Timo Bremer</a>, <a href="/search/cs?searchtype=author&query=Spears%2C+B+K">Brian K. Spears</a>, <a href="/search/cs?searchtype=author&query=Ma%2C+T">Tammy Ma</a>, <a href="/search/cs?searchtype=author&query=Turaga%2C+P">Pavan Turaga</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.12798v1-abstract-short" style="display: inline;"> In this paper, we develop a Wasserstein autoencoder (WAE) with a hyperspherical prior for multimodal data in the application of inertial confinement fusion. Unlike a typical hyperspherical generative model that requires computationally inefficient sampling from distributions like the von Mises-Fisher, we sample from a normal distribution followed by a projection layer before the generator. Finally,… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.12798v1-abstract-full').style.display = 'inline'; document.getElementById('2111.12798v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.12798v1-abstract-full" style="display: none;"> In this paper, we develop a Wasserstein autoencoder (WAE) with a hyperspherical prior for multimodal data in the application of inertial confinement fusion. Unlike a typical hyperspherical generative model that requires computationally inefficient sampling from distributions like the von Mises-Fisher, we sample from a normal distribution followed by a projection layer before the generator. 
Finally, to determine the validity of the generated samples, we exploit a known relationship between the modalities in the dataset as a scientific constraint, and study different properties of the proposed model. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.12798v1-abstract-full').style.display = 'none'; document.getElementById('2111.12798v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 November, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, 4 figures, Fourth Workshop on Machine Learning and the Physical Sciences, NeurIPS 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.08429">arXiv:2110.08429</a> <span> [<a href="https://arxiv.org/pdf/2110.08429">pdf</a>, <a href="https://arxiv.org/format/2110.08429">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> TorchEsegeta: Framework for Interpretability and Explainability of Image-based Deep Learning Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chatterjee%2C+S">Soumick Chatterjee</a>, <a href="/search/cs?searchtype=author&query=Das%2C+A">Arnab Das</a>, <a href="/search/cs?searchtype=author&query=Mandal%2C+C">Chirag Mandal</a>, <a 
href="/search/cs?searchtype=author&query=Mukhopadhyay%2C+B">Budhaditya Mukhopadhyay</a>, <a href="/search/cs?searchtype=author&query=Vipinraj%2C+M">Manish Vipinraj</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Aniruddh Shukla</a>, <a href="/search/cs?searchtype=author&query=Rao%2C+R+N">Rajatha Nagaraja Rao</a>, <a href="/search/cs?searchtype=author&query=Sarasaen%2C+C">Chompunuch Sarasaen</a>, <a href="/search/cs?searchtype=author&query=Speck%2C+O">Oliver Speck</a>, <a href="/search/cs?searchtype=author&query=N%C3%BCrnberger%2C+A">Andreas Nürnberger</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.08429v2-abstract-short" style="display: inline;"> Clinicians are often very sceptical about applying automatic image processing approaches, especially deep learning based methods, in practice. One main reason for this is the black-box nature of these approaches and the inherent problem of missing insights of the automatically derived decisions. In order to increase trust in these methods, this paper presents approaches that help to interpret and… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.08429v2-abstract-full').style.display = 'inline'; document.getElementById('2110.08429v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.08429v2-abstract-full" style="display: none;"> Clinicians are often very sceptical about applying automatic image processing approaches, especially deep learning based methods, in practice. One main reason for this is the black-box nature of these approaches and the inherent problem of missing insights of the automatically derived decisions. 
In order to increase trust in these methods, this paper presents approaches that help to interpret and explain the results of deep learning algorithms by depicting the anatomical areas which influence the decision of the algorithm most. Moreover, this research presents a unified framework, TorchEsegeta, for applying various interpretability and explainability techniques for deep learning models and generate visual interpretations and explanations for clinicians to corroborate their clinical findings. In addition, this will aid in gaining confidence in such methods. The framework builds on existing interpretability and explainability techniques that are currently focusing on classification models, extending them to segmentation tasks. In addition, these methods have been adapted to 3D models for volumetric analysis. The proposed framework provides methods to quantitatively compare visual explanations using infidelity and sensitivity metrics. This framework can be used by data scientists to perform post-hoc interpretations and explanations of their models, develop more explainable tools and present the findings to clinicians to increase their faith in such models. The proposed framework was evaluated based on a use case scenario of vessel segmentation models trained on Time-of-flight (TOF) Magnetic Resonance Angiogram (MRA) images of the human brain. Quantitative and qualitative results of a comparative study of different models and interpretability methods are presented. Furthermore, this paper provides an extensive overview of several existing interpretability and explainability methods. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.08429v2-abstract-full').style.display = 'none'; document.getElementById('2110.08429v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.01904">arXiv:2110.01904</a> <span> [<a href="https://arxiv.org/pdf/2110.01904">pdf</a>, <a href="https://arxiv.org/format/2110.01904">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Software Engineering">cs.SE</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.cosrev.2022.100496">10.1016/j.cosrev.2022.100496 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> System Security Assurance: A Systematic Literature Review </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ankur Shukla</a>, <a href="/search/cs?searchtype=author&query=Katt%2C+B">Basel Katt</a>, <a href="/search/cs?searchtype=author&query=Nweke%2C+L+O">Livinus Obiora Nweke</a>, <a href="/search/cs?searchtype=author&query=Yeng%2C+P+K">Prosper Kandabongee Yeng</a>, <a 
href="/search/cs?searchtype=author&query=Weldehawaryat%2C+G+K">Goitom Kahsay Weldehawaryat</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.01904v2-abstract-short" style="display: inline;"> System security assurance provides the confidence that security features, practices, procedures, and architecture of software systems mediate and enforce the security policy and are resilient against security failure and attacks. Alongside the significant benefits of security assurance, the evolution of new information and communication technology (ICT) introduces new challenges regarding informat… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.01904v2-abstract-full').style.display = 'inline'; document.getElementById('2110.01904v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.01904v2-abstract-full" style="display: none;"> System security assurance provides the confidence that security features, practices, procedures, and architecture of software systems mediate and enforce the security policy and are resilient against security failure and attacks. Alongside the significant benefits of security assurance, the evolution of new information and communication technology (ICT) introduces new challenges regarding information protection. Security assurance methods based on the traditional tools, techniques, and procedures may fail to account new challenges due to poor requirement specifications, static nature, and poor development processes. The common criteria (CC) commonly used for security evaluation and certification process also comes with many limitations and challenges. 
In this paper, extensive efforts have been made to study the state-of-the-art, limitations and future research directions for security assurance of the ICT and cyber-physical systems (CPS) in a wide range of domains. We conducted a systematic review of requirements, processes, and activities involved in system security assurance including security requirements, security metrics, system and environments and assurance methods. We highlighted the challenges and gaps that have been identified by the existing literature related to system security assurance and corresponding solutions. Finally, we discussed the limitations of the present methods and future research directions. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.01904v2-abstract-full').style.display = 'none'; document.getElementById('2110.01904v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Report number:</span> Volume 45 </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Computer Science Review, Volume 45, 2022, 100496 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.03760">arXiv:2108.03760</a> <span> [<a href="https://arxiv.org/pdf/2108.03760">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Symptom based Hierarchical Classification of Diabetes and Thyroid disorders using Fuzzy Cognitive Maps </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shukla%2C+A+M">Anand M. Shukla</a>, <a href="/search/cs?searchtype=author&query=Pandit%2C+P+D">Pooja D. Pandit</a>, <a href="/search/cs?searchtype=author&query=Purandare%2C+V+M">Vasudev M. Purandare</a>, <a href="/search/cs?searchtype=author&query=Srinivasaraghavan%2C+A">Anuradha Srinivasaraghavan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2108.03760v1-abstract-short" style="display: inline;"> Fuzzy Cognitive Maps (FCMs) are soft computing technique that follows an approach similar to human reasoning and human decision-making process, making them a valuable modeling and simulation methodology. 
Medical Decision Systems are complex systems consisting of many factors that may be complementary, contradictory, and competitive; these factors influence each other and determine the overall diag… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.03760v1-abstract-full').style.display = 'inline'; document.getElementById('2108.03760v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2108.03760v1-abstract-full" style="display: none;"> Fuzzy Cognitive Maps (FCMs) are soft computing technique that follows an approach similar to human reasoning and human decision-making process, making them a valuable modeling and simulation methodology. Medical Decision Systems are complex systems consisting of many factors that may be complementary, contradictory, and competitive; these factors influence each other and determine the overall diagnosis with a different degree. Thus, FCMs are suitable to model Medical Decision Support Systems. The proposed work therefore uses FCMs arranged in hierarchical structure to classify between Diabetes, Thyroid disorders and their subtypes. Subtypes include type 1 and type 2 for diabetes and hyperthyroidism and hypothyroidism for thyroid. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.03760v1-abstract-full').style.display = 'none'; document.getElementById('2108.03760v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2021. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.09320">arXiv:2107.09320</a> <span> [<a href="https://arxiv.org/pdf/2107.09320">pdf</a>, <a href="https://arxiv.org/format/2107.09320">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Logic in Computer Science">cs.LO</span> </div> </div> <p class="title is-5 mathjax"> QRAT Polynomially Simulates Merge Resolution </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chede%2C+S">Sravanthi Chede</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Anil Shukla</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2107.09320v1-abstract-short" style="display: inline;"> Merge Resolution (MRes [Beyersdorff et al. J. Autom. Reason.'2021] ) is a refutational proof system for quantified Boolean formulas (QBF). Each line of MRes consists of clauses with only existential literals, together with information of countermodels stored as merge maps. As a result, MRes has strategy extraction by design. The QRAT [Heule et al. J. Autom. Reason.'2017] proof system was designed… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.09320v1-abstract-full').style.display = 'inline'; document.getElementById('2107.09320v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2107.09320v1-abstract-full" style="display: none;"> Merge Resolution (MRes [Beyersdorff et al. J. Autom. Reason.'2021] ) is a refutational proof system for quantified Boolean formulas (QBF). 
Each line of MRes consists of clauses with only existential literals, together with information of countermodels stored as merge maps. As a result, MRes has strategy extraction by design. The QRAT [Heule et al. J. Autom. Reason.'2017] proof system was designed to capture QBF preprocessing. QRAT can simulate both the expansion-based proof system $\forall$Exp+Res and CDCL-based QBF proof system LD-Q-Res. A family of false QBFs called SquaredEquality formulas were introduced in [Beyersdorff et al. J. Autom. Reason.'2021] and shown to be easy for MRes but need exponential size proofs in Q-Res, QU-Res, CP+$\forall$red, $\forall$Exp+Res, IR-calc and reductionless LD-Q-Res. As a result none of these systems can simulate MRes. In this paper, we show a short QRAT refutation of the SquaredEquality formulas. We further show that QRAT strictly p-simulates MRes. Besides highlighting the power of QRAT system, this work also presents the first simulation result for MRes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.09320v1-abstract-full').style.display = 'none'; document.getElementById('2107.09320v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages, 1 figure</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 03F20 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> F.2.2 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2107.04547">arXiv:2107.04547</a> <span> [<a href="https://arxiv.org/pdf/2107.04547">pdf</a>, <a href="https://arxiv.org/ps/2107.04547">ps</a>, <a href="https://arxiv.org/format/2107.04547">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Complexity">cs.CC</span> </div> </div> <p class="title is-5 mathjax"> Does QRAT simulate IR-calc? QRAT simulation algorithm for $\forall$Exp+Res cannot be lifted to IR-calc </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chede%2C+S">Sravanthi Chede</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Anil Shukla</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2107.04547v1-abstract-short" style="display: inline;"> We show that the QRAT simulation algorithm of $\forall$Exp+Res from [B. Kiesl and M. Seidl, 2019] cannot be lifted to IR-calc. </span> <span class="abstract-full has-text-grey-dark mathjax" id="2107.04547v1-abstract-full" style="display: none;"> We show that the QRAT simulation algorithm of $\forall$Exp+Res from [B. Kiesl and M. Seidl, 2019] cannot be lifted to IR-calc. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.04547v1-abstract-full').style.display = 'none'; document.getElementById('2107.04547v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 2 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 03F20 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> F.2.2 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2103.01046">arXiv:2103.01046</a> <span> [<a href="https://arxiv.org/pdf/2103.01046">pdf</a>, <a href="https://arxiv.org/ps/2103.01046">ps</a>, <a href="https://arxiv.org/format/2103.01046">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Logic in Computer Science">cs.LO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Programming Languages">cs.PL</span> </div> </div> <p class="title is-5 mathjax"> Extending Prolog for Quantified Boolean Horn Formulas </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Mallick%2C+A">Anish Mallick</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Anil Shukla</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short 
has-text-grey-dark mathjax" id="2103.01046v1-abstract-short" style="display: inline;"> Prolog is a well known declarative programming language based on propositional Horn formulas. It is useful in various areas, including artificial intelligence, automated theorem proving, mathematical logic and so on. An active research area for many years is to extend Prolog to larger classes of logic. Some important extensions of it includes the constraint logic programming, and the object orient… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.01046v1-abstract-full').style.display = 'inline'; document.getElementById('2103.01046v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2103.01046v1-abstract-full" style="display: none;"> Prolog is a well known declarative programming language based on propositional Horn formulas. It is useful in various areas, including artificial intelligence, automated theorem proving, mathematical logic and so on. An active research area for many years is to extend Prolog to larger classes of logic. Some important extensions of it includes the constraint logic programming, and the object oriented logic programming. However, it cannot solve problems having arbitrary quantified Horn formulas. To be precise, the facts, rules and queries in Prolog are not allowed to have arbitrary quantified variables. The paper overcomes this major limitations of Prolog by extending it for the quantified Boolean Horn formulas. We achieved this by extending the SLD-resolution proof system for quantified Boolean Horn formulas, followed by proposing an efficient model for implementation. The paper shows that the proposed implementation also supports the first-order predicate Horn logic with arbitrary quantified variables. The paper also introduces for the first time, a declarative programming for the quantified Boolean Horn formulas. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2103.01046v1-abstract-full').style.display = 'none'; document.getElementById('2103.01046v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.08360">arXiv:2102.08360</a> <span> [<a href="https://arxiv.org/pdf/2102.08360">pdf</a>, <a href="https://arxiv.org/format/2102.08360">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Interpretable COVID-19 Chest X-Ray Classification via Orthogonality Constraint </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Wang%2C+E+Y">Ella Y. Wang</a>, <a href="/search/cs?searchtype=author&query=Som%2C+A">Anirudh Som</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Ankita Shukla</a>, <a href="/search/cs?searchtype=author&query=Choi%2C+H">Hongjun Choi</a>, <a href="/search/cs?searchtype=author&query=Turaga%2C+P">Pavan Turaga</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.08360v3-abstract-short" style="display: inline;"> Deep neural networks have increasingly been used as an auxiliary tool in healthcare applications, due to their ability to improve performance of several diagnosis tasks. 
However, these methods are not widely adopted in clinical settings due to the practical limitations in the reliability, generalizability, and interpretability of deep learning based systems. As a result, methods have been develope… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.08360v3-abstract-full').style.display = 'inline'; document.getElementById('2102.08360v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.08360v3-abstract-full" style="display: none;"> Deep neural networks have increasingly been used as an auxiliary tool in healthcare applications, due to their ability to improve performance of several diagnosis tasks. However, these methods are not widely adopted in clinical settings due to the practical limitations in the reliability, generalizability, and interpretability of deep learning based systems. As a result, methods have been developed that impose additional constraints during network training to gain more control as well as improve interpretabilty, facilitating their acceptance in healthcare community. In this work, we investigate the benefit of using Orthogonal Spheres (OS) constraint for classification of COVID-19 cases from chest X-ray images. The OS constraint can be written as a simple orthonormality term which is used in conjunction with the standard cross-entropy loss during classification network training. Previous studies have demonstrated significant benefits in applying such constraints to deep learning models. Our findings corroborate these observations, indicating that the orthonormality loss function effectively produces improved semantic localization via GradCAM visualizations, enhanced classification performance, and reduced model calibration error. 
Our approach achieves an improvement in accuracy of 1.6% and 4.8% for two- and three-class classification, respectively; similar results are found for models with data augmentation applied. In addition to these findings, our work also presents a new application of the OS regularizer in healthcare, increasing the post-hoc interpretability and performance of deep learning models for COVID-19 classification to facilitate adoption of these methods in clinical settings. We also identify the limitations of our strategy that can be explored for further research in future. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.08360v3-abstract-full').style.display = 'none'; document.getElementById('2102.08360v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 December, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in the 2021 ACM CHIL Workshop track. 
An extended version of this work is under consideration at Pattern Recognition Letters</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2010.02429">arXiv:2010.02429</a> <span> [<a href="https://arxiv.org/pdf/2010.02429">pdf</a>, <a href="https://arxiv.org/ps/2010.02429">ps</a>, <a href="https://arxiv.org/format/2010.02429">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Modeling Preconditions in Text with a Crowd-sourced Dataset </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Kwon%2C+H">Heeyoung Kwon</a>, <a href="/search/cs?searchtype=author&query=Koupaee%2C+M">Mahnaz Koupaee</a>, <a href="/search/cs?searchtype=author&query=Singh%2C+P">Pratyush Singh</a>, <a href="/search/cs?searchtype=author&query=Sawhney%2C+G">Gargi Sawhney</a>, <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Anmol Shukla</a>, <a href="/search/cs?searchtype=author&query=Kallur%2C+K+K">Keerthi Kumar Kallur</a>, <a href="/search/cs?searchtype=author&query=Chambers%2C+N">Nathanael Chambers</a>, <a href="/search/cs?searchtype=author&query=Balasubramanian%2C+N">Niranjan Balasubramanian</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2010.02429v3-abstract-short" style="display: inline;"> Preconditions provide a form of logical connection between events that explains why some events occur together and information that is complementary to the more widely studied relations such as causation, temporal ordering, entailment, and discourse relations. 
Modeling preconditions in text has been hampered in part due to the lack of large scale labeled data grounded in text. This paper introduce… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.02429v3-abstract-full').style.display = 'inline'; document.getElementById('2010.02429v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2010.02429v3-abstract-full" style="display: none;"> Preconditions provide a form of logical connection between events that explains why some events occur together and information that is complementary to the more widely studied relations such as causation, temporal ordering, entailment, and discourse relations. Modeling preconditions in text has been hampered in part due to the lack of large scale labeled data grounded in text. This paper introduces PeKo, a crowd-sourced annotation of preconditions between event pairs in newswire, an order of magnitude larger than prior text annotations. To complement this new corpus, we also introduce two challenge tasks aimed at modeling preconditions: (i) Precondition Identification -- a standard classification task defined over pairs of event mentions, and (ii) Precondition Generation -- a generative task aimed at testing a more general ability to reason about a given event. Evaluation on both tasks shows that modeling preconditions is challenging even for today's large language models (LM). This suggests that precondition knowledge is not easily accessible in LM-derived representations alone. Our generation results show that fine-tuning an LM on PeKo yields better conditional relations than when trained on raw text or temporally-ordered corpora. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2010.02429v3-abstract-full').style.display = 'none'; document.getElementById('2010.02429v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2020. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2008.10277">arXiv:2008.10277</a> <span> [<a href="https://arxiv.org/pdf/2008.10277">pdf</a>, <a href="https://arxiv.org/format/2008.10277">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Sample-Rank: Weak Multi-Objective Recommendations Using Rejection Sampling </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Shukla%2C+A">Abhay Shukla</a>, <a href="/search/cs?searchtype=author&query=Sathyanarayana%2C+J">Jairaj Sathyanarayana</a>, <a href="/search/cs?searchtype=author&query=Banerjee%2C+D">Dipyaman Banerjee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2008.10277v1-abstract-short" style="display: inline;"> Online food ordering marketplaces are multi-stakeholder systems where recommendations impact the experience and growth of each participant in the system. 
A recommender system in this setting has to encapsulate the objectives and constraints of different stakeholders in order to find utility of an item for recommendation. Constrained-optimization based approaches to this problem typically involve c… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.10277v1-abstract-full').style.display = 'inline'; document.getElementById('2008.10277v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2008.10277v1-abstract-full" style="display: none;"> Online food ordering marketplaces are multi-stakeholder systems where recommendations impact the experience and growth of each participant in the system. A recommender system in this setting has to encapsulate the objectives and constraints of different stakeholders in order to find utility of an item for recommendation. Constrained-optimization based approaches to this problem typically involve complex formulations and have high computational complexity in production settings involving millions of entities. Simplifications and relaxation techniques (for example, scalarization) help but introduce sub-optimality and can be time-consuming due to the amount of tuning needed. In this paper, we introduce a method involving multi-goal sampling followed by ranking for user-relevance (Sample-Rank), to nudge recommendations towards multi-objective (MO) goals of the marketplace. The proposed method's novelty is that it reduces the MO recommendation problem to sampling from a desired multi-goal distribution then using it to build a production-friendly learning-to-rank (LTR) model. In offline experiments we show that we are able to bias recommendations towards MO criteria with acceptable trade-offs in metrics like AUC and NDCG. 
We also show results from a large-scale online A/B experiment where this approach gave a statistically significant lift of 2.64% in average revenue per order (RPO) (objective #1) with no drop in conversion rate (CR) (objective #2) while holding the average last-mile traversed flat (objective #3), vs. the baseline ranking method. This method also significantly reduces time to model development and deployment in MO settings and allows for trivial extensions to more objectives and other types of LTR models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2008.10277v1-abstract-full').style.display = 'none'; document.getElementById('2008.10277v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 August, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2020. </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Shukla%2C+A&amp;start=50" class="pagination-next">Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Shukla%2C+A&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Shukla%2C+A&amp;start=50" class="pagination-link" aria-label="Page 2">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Shukla%2C+A&amp;start=100" class="pagination-link" aria-label="Page 3">3 </a> </li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div
class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a 
href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 
21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>