CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 134 results for author: <span class="mathjax">Sharma, G</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Sharma%2C+G">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Sharma, G"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Sharma%2C+G&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Sharma, G"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Sharma%2C+G&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Sharma%2C+G&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Sharma%2C+G&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Sharma%2C+G&amp;start=100" class="pagination-link " aria-label="Page 3" aria-current="page">3 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2503.16216">arXiv:2503.16216</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2503.16216">pdf</a>, <a href="https://arxiv.org/format/2503.16216">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Structures and 
Algorithms">cs.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Dispersion is (Almost) Optimal under (A)synchrony </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kshemkalyani%2C+A+D">Ajay D. Kshemkalyani</a>, <a href="/search/cs?searchtype=author&amp;query=Kumar%2C+M">Manish Kumar</a>, <a href="/search/cs?searchtype=author&amp;query=Molla%2C+A+R">Anisur Rahaman Molla</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gokarna Sharma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2503.16216v1-abstract-short" style="display: inline;"> The dispersion problem has received much attention recently in the distributed computing literature. In this problem, $k\leq n$ agents placed initially arbitrarily on the nodes of an $n$-node, $m$-edge anonymous graph of maximum degree $螖$ have to reposition autonomously to reach a configuration in which each agent is on a distinct node of the graph. Dispersion is interesting as well as important&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.16216v1-abstract-full').style.display = 'inline'; document.getElementById('2503.16216v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2503.16216v1-abstract-full" style="display: none;"> The dispersion problem has received much attention recently in the distributed computing literature. 
In this problem, $k\leq n$ agents placed initially arbitrarily on the nodes of an $n$-node, $m$-edge anonymous graph of maximum degree $螖$ have to reposition autonomously to reach a configuration in which each agent is on a distinct node of the graph. Dispersion is interesting as well as important due to its connections to many fundamental coordination problems by mobile agents on graphs, such as exploration, scattering, load balancing, relocation of self-driven electric cars (robots) to recharge stations (nodes), etc. The objective has been to provide a solution that optimizes simultaneously time and memory complexities. There exist graphs for which the lower bound on time complexity is $惟(k)$. Memory complexity is $惟(\log k)$ per agent independent of graph topology. The state-of-the-art algorithms have (i) time complexity $O(k\log^2k)$ and memory complexity $O(\log(k+螖))$ under the synchronous setting [DISC&#39;24] and (ii) time complexity $O(\min\{m,k螖\})$ and memory complexity $O(\log(k+螖))$ under the asynchronous setting [OPODIS&#39;21]. In this paper, we improve substantially on this state-of-the-art. Under the synchronous setting as in [DISC&#39;24], we present the first optimal $O(k)$ time algorithm keeping memory complexity $O(\log (k+螖))$. Under the asynchronous setting as in [OPODIS&#39;21], we present the first algorithm with time complexity $O(k\log k)$ keeping memory complexity $O(\log (k+螖))$, which is time-optimal within an $O(\log k)$ factor despite asynchrony. Both results were obtained through novel techniques to quickly find empty nodes to settle agents, which may be of independent interest. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.16216v1-abstract-full').style.display = 'none'; document.getElementById('2503.16216v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 March, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">24 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2503.15297">arXiv:2503.15297</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2503.15297">pdf</a>, <a href="https://arxiv.org/format/2503.15297">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> Probabilistic Delay Forecasting in 5G Using Recurrent and Attention-Based Architectures </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mostafavi%2C+S">Samie Mostafavi</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G+P">Gourav Prateek Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Traboulsi%2C+A">Ahmad Traboulsi</a>, <a href="/search/cs?searchtype=author&amp;query=Gross%2C+J">James Gross</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2503.15297v1-abstract-short" style="display: inline;"> With the emergence of new application areas such as cyber-physical systems and human-in-the-loop applications ensuring a specific level of 
end-to-end network latency with high reliability (e.g., 99.9%) is becoming increasingly critical. To align wireless links with these reliability requirements, it is essential to analyze and control network latency in terms of its full probability distribution.&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.15297v1-abstract-full').style.display = 'inline'; document.getElementById('2503.15297v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2503.15297v1-abstract-full" style="display: none;"> With the emergence of new application areas such as cyber-physical systems and human-in-the-loop applications ensuring a specific level of end-to-end network latency with high reliability (e.g., 99.9%) is becoming increasingly critical. To align wireless links with these reliability requirements, it is essential to analyze and control network latency in terms of its full probability distribution. However, in a wireless link, the distribution may vary over time, making this task particularly challenging. We propose predicting the latency distribution using state-of-the-art data-driven techniques that leverage historical network information. Our approach tokenizes network state information and processes it using temporal deep-learning architectures-namely LSTM and Transformer models-to capture both short- and long-term delay dependencies. These models output parameters for a chosen parametric density via a mixture density network with Gaussian mixtures, yielding multi-step probabilistic forecasts of future delays. To validate our proposed approach, we implemented and tested these methods using a time-synchronized, SDR-based OpenAirInterface 5G testbed to collect and preprocess network-delay data. 
Our experiments show that the Transformer model achieves lower negative log-likelihood and mean absolute error than both LSTM and feed-forward baselines in challenging scenarios, while also providing insights into model complexity and training/inference overhead. This framework enables more informed decision-making for adaptive scheduling and resource allocation, paving the way toward enhanced QoS in evolving 5G and 6G networks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.15297v1-abstract-full').style.display = 'none'; document.getElementById('2503.15297v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 March, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2503.13486">arXiv:2503.13486</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2503.13486">pdf</a>, <a href="https://arxiv.org/format/2503.13486">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Machine learning for triage of strokes with large vessel occlusion using photoplethysmography biomarkers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Goda%2C+M+%C3%81">M谩rton 脕. 
Goda</a>, <a href="/search/cs?searchtype=author&amp;query=Badge%2C+H">Helen Badge</a>, <a href="/search/cs?searchtype=author&amp;query=Khan%2C+J">Jasmeen Khan</a>, <a href="/search/cs?searchtype=author&amp;query=Solewicz%2C+Y">Yosef Solewicz</a>, <a href="/search/cs?searchtype=author&amp;query=Davoodi%2C+M">Moran Davoodi</a>, <a href="/search/cs?searchtype=author&amp;query=Teramayi%2C+R">Rumbidzai Teramayi</a>, <a href="/search/cs?searchtype=author&amp;query=Cordato%2C+D">Dennis Cordato</a>, <a href="/search/cs?searchtype=author&amp;query=Lin%2C+L">Longting Lin</a>, <a href="/search/cs?searchtype=author&amp;query=Christie%2C+L">Lauren Christie</a>, <a href="/search/cs?searchtype=author&amp;query=Blair%2C+C">Christopher Blair</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gagan Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Parsons%2C+M">Mark Parsons</a>, <a href="/search/cs?searchtype=author&amp;query=Behar%2C+J+A">Joachim A. Behar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2503.13486v1-abstract-short" style="display: inline;"> Objective. Large vessel occlusion (LVO) stroke presents a major challenge in clinical practice due to the potential for poor outcomes with delayed treatment. Treatment for LVO involves highly specialized care, in particular endovascular thrombectomy, and is available only at certain hospitals. Therefore, prehospital identification of LVO by emergency ambulance services, can be critical for triagin&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.13486v1-abstract-full').style.display = 'inline'; document.getElementById('2503.13486v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2503.13486v1-abstract-full" style="display: none;"> Objective. 
Large vessel occlusion (LVO) stroke presents a major challenge in clinical practice due to the potential for poor outcomes with delayed treatment. Treatment for LVO involves highly specialized care, in particular endovascular thrombectomy, and is available only at certain hospitals. Therefore, prehospital identification of LVO by emergency ambulance services, can be critical for triaging LVO stroke patients directly to a hospital with access to endovascular therapy. Clinical scores exist to help distinguish LVO from less severe strokes, but they are based on a series of examinations that can take minutes and may be impractical for patients with dementia or those who cannot follow commands due to their stroke. There is a need for a fast and reliable method to aid in the early identification of LVO. In this study, our objective was to assess the feasibility of using 30-second photoplethysmography (PPG) recording to assist in recognizing LVO stroke. Method. A total of 88 patients, including 25 with LVO, 27 with stroke mimic (SM), and 36 non-LVO stroke patients (NL), were recorded at the Liverpool Hospital emergency department in Sydney, Australia. Demographics (age, sex), as well as morphological features and beating rate variability measures, were extracted from the PPG. A binary classification approach was employed to differentiate between LVO stroke and NL+SM (NL.SM). A 2:1 train-test split was stratified and repeated randomly across 100 iterations. Results. The best model achieved a median test set area under the receiver operating characteristic curve (AUROC) of 0.77 (0.71--0.82). \textit{Conclusion.} Our study demonstrates the potential of utilizing a 30-second PPG recording for identifying LVO stroke. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.13486v1-abstract-full').style.display = 'none'; document.getElementById('2503.13486v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 March, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2503.11077">arXiv:2503.11077</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2503.11077">pdf</a>, <a href="https://arxiv.org/format/2503.11077">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> SmartShards: Churn-Tolerant Continuously Available Distributed Ledger </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Oglio%2C+J">Joseph Oglio</a>, <a href="/search/cs?searchtype=author&amp;query=Nesterenko%2C+M">Mikhail Nesterenko</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gokarna Sharma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2503.11077v1-abstract-short" style="display: inline;"> We present SmartShards: a new sharding algorithm for improving Byzantine tolerance and churn resistance in blockchains. Our algorithm places a peer in multiple shards to create an overlap. This simplifies cross-shard communication and shard membership management. We describe SmartShards, prove it correct and evaluate its performance. 
We propose several SmartShards extensions: defense against a s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.11077v1-abstract-full').style.display = 'inline'; document.getElementById('2503.11077v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2503.11077v1-abstract-full" style="display: none;"> We present SmartShards: a new sharding algorithm for improving Byzantine tolerance and churn resistance in blockchains. Our algorithm places a peer in multiple shards to create an overlap. This simplifies cross-shard communication and shard membership management. We describe SmartShards, prove it correct and evaluate its performance. We propose several SmartShards extensions: defense against a slowly adaptive adversary, combining transactions into blocks, fortification against the join/leave attack. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.11077v1-abstract-full').style.display = 'none'; document.getElementById('2503.11077v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 March, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2025. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2503.03489">arXiv:2503.03489</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2503.03489">pdf</a>, <a href="https://arxiv.org/format/2503.03489">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Federated Learning for Predicting Mild Cognitive Impairment to Dementia Conversion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gaurang Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Moradi%2C+E">Elaheh Moradi</a>, <a href="/search/cs?searchtype=author&amp;query=Pajula%2C+J">Juha Pajula</a>, <a href="/search/cs?searchtype=author&amp;query=Hilvo%2C+M">Mika Hilvo</a>, <a href="/search/cs?searchtype=author&amp;query=Tohka%2C+J">Jussi Tohka</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2503.03489v1-abstract-short" style="display: inline;"> Dementia is a progressive condition that impairs an individual&#39;s cognitive health and daily functioning, with mild cognitive impairment (MCI) often serving as its precursor. 
The prediction of MCI to dementia conversion has been well studied, but previous studies have almost always focused on traditional Machine Learning (ML) based methods that require sharing sensitive clinical information to trai&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.03489v1-abstract-full').style.display = 'inline'; document.getElementById('2503.03489v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2503.03489v1-abstract-full" style="display: none;"> Dementia is a progressive condition that impairs an individual&#39;s cognitive health and daily functioning, with mild cognitive impairment (MCI) often serving as its precursor. The prediction of MCI to dementia conversion has been well studied, but previous studies have almost always focused on traditional Machine Learning (ML) based methods that require sharing sensitive clinical information to train predictive models. This study proposes a privacy-enhancing solution using Federated Learning (FL) to train predictive models for MCI to dementia conversion without sharing sensitive data, leveraging socio demographic and cognitive measures. We simulated and compared two network architectures, Peer to Peer (P2P) and client-server, to enable collaborative learning. Our results demonstrated that FL had comparable predictive performance to centralized ML, and each clinical site showed similar performance without sharing local data. Moreover, the predictive performance of FL models was superior to site specific models trained without collaboration. This work highlights that FL can eliminate the need for data sharing without compromising model efficacy. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2503.03489v1-abstract-full').style.display = 'none'; document.getElementById('2503.03489v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 March, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This work has been submitted to the IEEE for possible publication</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.16127">arXiv:2502.16127</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.16127">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> A New Era of Elections: Leveraging Blockchain for Fair and Transparent Voting </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chouhan%2C+S">Suniti Chouhan</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gajanand Sharma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.16127v1-abstract-short" style="display: inline;"> This study presents a blockchain-based voting system aimed at enhancing election security, transparency, and integrity. Traditional voting methods face growing risks of tampering, making it crucial to explore innovative solutions. 
Our proposed system combines blockchain&#39;s immutable, decentralized ledger with advanced voter identity verification techniques, including digital identity validation thr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.16127v1-abstract-full').style.display = 'inline'; document.getElementById('2502.16127v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.16127v1-abstract-full" style="display: none;"> This study presents a blockchain-based voting system aimed at enhancing election security, transparency, and integrity. Traditional voting methods face growing risks of tampering, making it crucial to explore innovative solutions. Our proposed system combines blockchain&#39;s immutable, decentralized ledger with advanced voter identity verification techniques, including digital identity validation through Aadhaar and Driving Licenses (secured via BLAKE2b-512 hashing), biometric fingerprint authentication, and a picture rotation pattern for added security. Votes are recorded transparently and securely on a blockchain, with a consensus mechanism ensuring data integrity and reducing the risk of unauthorized alterations. Security analysis indicates that this multi-layered approach significantly reduces impersonation risks, while blockchain ensures accurate, private, and tamper-resistant vote recording. The findings support that a blockchain-based voting system with robust identity checks offers a trustworthy alternative to traditional methods, with potential for even greater refinement in secure and transparent elections. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.16127v1-abstract-full').style.display = 'none'; document.getElementById('2502.16127v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.11595">arXiv:2502.11595</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.11595">pdf</a>, <a href="https://arxiv.org/format/2502.11595">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> End-to-End Reliability in Wireless IEEE 802.1Qbv Time-Sensitive Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Egger%2C+S">S. Egger</a>, <a href="/search/cs?searchtype=author&amp;query=Gross%2C+J">J. Gross</a>, <a href="/search/cs?searchtype=author&amp;query=Sachs%2C+J">J. Sachs</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G+P">G. P. Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Becker%2C+C">C. Becker</a>, <a href="/search/cs?searchtype=author&amp;query=D%C3%BCrr%2C+F">F. Dürr</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.11595v1-abstract-short" style="display: inline;"> Industrial cyber-physical systems require dependable network communication with formal end-to-end reliability guarantees. 
Striving towards this goal, recent efforts aim to advance the integration of 5G into Time-Sensitive Networking (TSN). However, we show that IEEE 802.1Qbv TSN schedulers that are unattuned to 5G packet delay variations may jeopardize any reliability guarantees provided by the 5G&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.11595v1-abstract-full').style.display = 'inline'; document.getElementById('2502.11595v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.11595v1-abstract-full" style="display: none;"> Industrial cyber-physical systems require dependable network communication with formal end-to-end reliability guarantees. Striving towards this goal, recent efforts aim to advance the integration of 5G into Time-Sensitive Networking (TSN). However, we show that IEEE 802.1Qbv TSN schedulers that are unattuned to 5G packet delay variations may jeopardize any reliability guarantees provided by the 5G system. We demonstrate this on a case where a 99.99% reliability in the inner 5G network diminishes to below 10% when looking at end-to-end communication in TSN. In this paper, we overcome this shortcoming by introducing Full Interleaving Packet Scheduling (FIPS) as a wireless-friendly IEEE 802.1Qbv scheduler. To the best of our knowledge, FIPS is the first to provide formal end-to-end QoS guarantees in wireless TSN. FIPS allows a controlled batching of TSN streams, which improves schedulability in terms of the number of wireless TSN streams by a factor of up to x45. Even in failure cases, FIPS isolates the otherwise cascading QoS violations to the affected streams and protects all other streams. With formal end-to-end reliability, improved schedulability, and fault isolation, FIPS makes a substantial advance towards dependability in wireless TSN. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.11595v1-abstract-full').style.display = 'none'; document.getElementById('2502.11595v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Preprint with extended appendix</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.05836">arXiv:2502.05836</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.05836">pdf</a>, <a href="https://arxiv.org/format/2502.05836">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> LegalSeg: Unlocking the Structure of Indian Legal Judgments Through Rhetorical Role Classification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nigam%2C+S+K">Shubham Kumar Nigam</a>, <a href="/search/cs?searchtype=author&amp;query=Dubey%2C+T">Tanmay Dubey</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Govind Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Shallum%2C+N">Noel Shallum</a>, <a 
href="/search/cs?searchtype=author&amp;query=Ghosh%2C+K">Kripabandhu Ghosh</a>, <a href="/search/cs?searchtype=author&amp;query=Bhattacharya%2C+A">Arnab Bhattacharya</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.05836v1-abstract-short" style="display: inline;"> In this paper, we address the task of semantic segmentation of legal documents through rhetorical role classification, with a focus on Indian legal judgments. We introduce LegalSeg, the largest annotated dataset for this task, comprising over 7,000 documents and 1.4 million sentences, labeled with 7 rhetorical roles. To benchmark performance, we evaluate multiple state-of-the-art models, including&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.05836v1-abstract-full').style.display = 'inline'; document.getElementById('2502.05836v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.05836v1-abstract-full" style="display: none;"> In this paper, we address the task of semantic segmentation of legal documents through rhetorical role classification, with a focus on Indian legal judgments. We introduce LegalSeg, the largest annotated dataset for this task, comprising over 7,000 documents and 1.4 million sentences, labeled with 7 rhetorical roles. To benchmark performance, we evaluate multiple state-of-the-art models, including Hierarchical BiLSTM-CRF, TransformerOverInLegalBERT (ToInLegalBERT), Graph Neural Networks (GNNs), and Role-Aware Transformers, alongside an exploratory RhetoricLLaMA, an instruction-tuned large language model. Our results demonstrate that models incorporating broader context, structural relationships, and sequential sentence information outperform those relying solely on sentence-level features. 
Additionally, we conducted experiments using surrounding context and predicted or actual labels of neighboring sentences to assess their impact on classification accuracy. Despite these advancements, challenges persist in distinguishing between closely related roles and addressing class imbalance. Our work underscores the potential of advanced techniques for improving legal document understanding and sets a strong foundation for future research in legal NLP. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.05836v1-abstract-full').style.display = 'none'; document.getElementById('2502.05836v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted on NAACL 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.13889">arXiv:2501.13889</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2501.13889">pdf</a>, <a href="https://arxiv.org/format/2501.13889">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Generating Realistic Forehead-Creases for User Verification via Conditioned Piecewise Polynomial Curves </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tandon%2C+A">Abhishek Tandon</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Geetanjali Sharma</a>, <a 
href="/search/cs?searchtype=author&amp;query=Jaswal%2C+G">Gaurav Jaswal</a>, <a href="/search/cs?searchtype=author&amp;query=Nigam%2C+A">Aditya Nigam</a>, <a href="/search/cs?searchtype=author&amp;query=Ramachandra%2C+R">Raghavendra Ramachandra</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.13889v1-abstract-short" style="display: inline;"> We propose a trait-specific image generation method that models forehead creases geometrically using B-spline and Bézier curves. This approach ensures the realistic generation of both principal creases and non-prominent crease patterns, effectively constructing detailed and authentic forehead-crease images. These geometrically rendered images serve as visual prompts for a diffusion-based Edge-to-I&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.13889v1-abstract-full').style.display = 'inline'; document.getElementById('2501.13889v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2501.13889v1-abstract-full" style="display: none;"> We propose a trait-specific image generation method that models forehead creases geometrically using B-spline and Bézier curves. This approach ensures the realistic generation of both principal creases and non-prominent crease patterns, effectively constructing detailed and authentic forehead-crease images. These geometrically rendered images serve as visual prompts for a diffusion-based Edge-to-Image translation model, which generates corresponding mated samples. The resulting novel synthetic identities are then used to train a forehead-crease verification network. 
To enhance intra-subject diversity in the generated samples, we employ two strategies: (a) perturbing the control points of B-splines under defined constraints to maintain label consistency, and (b) applying image-level augmentations to the geometric visual prompts, such as dropout and elastic transformations, specifically tailored to crease patterns. By integrating the proposed synthetic dataset with real-world data, our method significantly improves the performance of forehead-crease verification systems under a cross-database verification protocol. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.13889v1-abstract-full').style.display = 'none'; document.getElementById('2501.13889v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at WACV-W 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.18987">arXiv:2411.18987</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.18987">pdf</a>, <a href="https://arxiv.org/format/2411.18987">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Combinatorics">math.CO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Discrete Mathematics">cs.DM</span> </div> </div> <p class="title is-5 mathjax"> Complexity Issues Concerning the Quadruple Roman Domination Problem in Graphs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Palagiri%2C+V+S+R">V. S. R. Palagiri</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G+P">G. P. Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Yero%2C+I+G">I. G. Yero</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.18987v1-abstract-short" style="display: inline;"> Given a graph $G$ with vertex set $V(G)$, a mapping $h : V(G) \rightarrow \lbrace 0, 1, 2, 3, 4, 5 \rbrace$ is called a quadruple Roman dominating function (4RDF) for $G$ if it holds the following. 
Every vertex $x$ such that $h(x)\in \{0,1,2, 3\}$ satisfies that $h(N[x]) = \sum_{v\in N[x]} h(v) \geq |\{y:y \in N(x) \; \text{and} \; h(y) \neq 0\}|+4$, where $N(x)$ and $N[x]$ stand for the open and&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.18987v1-abstract-full').style.display = 'inline'; document.getElementById('2411.18987v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.18987v1-abstract-full" style="display: none;"> Given a graph $G$ with vertex set $V(G)$, a mapping $h : V(G) \rightarrow \lbrace 0, 1, 2, 3, 4, 5 \rbrace$ is called a quadruple Roman dominating function (4RDF) for $G$ if it holds the following. Every vertex $x$ such that $h(x)\in \{0,1,2, 3\}$ satisfies that $h(N[x]) = \sum_{v\in N[x]} h(v) \geq |\{y:y \in N(x) \; \text{and} \; h(y) \neq 0\}|+4$, where $N(x)$ and $N[x]$ stand for the open and closed neighborhood of $x$, respectively. The smallest possible weight $\sum_{x \in V(G)} h(x)$ among all possible 4RDFs $h$ for $G$ is the quadruple Roman domination number of $G$, denoted by $γ_{[4R]}(G)$. This work is focused on complexity aspects for the problem of computing the value of this parameter for several graph classes. Specifically, it is shown that the decision problem concerning $γ_{[4R]}(G)$ is NP-complete when restricted to star convex bipartite, comb convex bipartite, split and planar graphs. In contrast, it is also proved that such problem can be efficiently solved for threshold graphs where an exact solution is demonstrated, while for graphs having an efficient dominating set, tight upper and lower bounds in terms of the classical domination number are given. In addition, some approximation results to the problem are given. That is, we show that the problem cannot be approximated within $(1 - ε) \ln |V|$ for any $ε&gt; 0$ unless $P=NP$. 
An approximation algorithm for it is proposed, and its APX-completeness proved, when graphs of maximum degree four are considered. Finally, an integer linear programming formulation for our problem is presented. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.18987v1-abstract-full').style.display = 'none'; document.getElementById('2411.18987v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.08490">arXiv:2411.08490</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.08490">pdf</a>, <a href="https://arxiv.org/ps/2411.08490">ps</a>, <a href="https://arxiv.org/format/2411.08490">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Impact of Iris Pigmentation on Performance Bias in Visible Iris Verification Systems: A Comparative Study </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Geetanjali Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Tandon%2C+A">Abhishek Tandon</a>, <a href="/search/cs?searchtype=author&amp;query=Jaswal%2C+G">Gaurav Jaswal</a>, <a href="/search/cs?searchtype=author&amp;query=Nigam%2C+A">Aditya Nigam</a>, <a href="/search/cs?searchtype=author&amp;query=Ramachandra%2C+R">Raghavendra 
Ramachandra</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.08490v1-abstract-short" style="display: inline;"> Iris recognition technology plays a critical role in biometric identification systems, but its performance can be affected by variations in iris pigmentation. In this work, we investigate the impact of iris pigmentation on the efficacy of biometric recognition systems, focusing on a comparative analysis of blue and dark irises. Data sets were collected using multiple devices, including P1, P2, a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08490v1-abstract-full').style.display = 'inline'; document.getElementById('2411.08490v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.08490v1-abstract-full" style="display: none;"> Iris recognition technology plays a critical role in biometric identification systems, but its performance can be affected by variations in iris pigmentation. In this work, we investigate the impact of iris pigmentation on the efficacy of biometric recognition systems, focusing on a comparative analysis of blue and dark irises. Data sets were collected using multiple devices, including P1, P2, and P3 smartphones [4], to assess the robustness of the systems in different capture environments [19]. Both traditional machine learning techniques and deep learning models were used, namely Open-Iris, ViT-b, and ResNet50, to evaluate performance metrics such as Equal Error Rate (EER) and True Match Rate (TMR). Our results indicate that iris recognition systems generally exhibit higher accuracy for blue irises compared to dark irises. 
Furthermore, we examined the generalization capabilities of these systems across different iris colors and devices, finding that while training on diverse datasets enhances recognition performance, the degree of improvement is contingent on the specific model and device used. Our analysis also identifies inherent biases in recognition performance related to iris color and cross-device variability. These findings underscore the need for more inclusive dataset collection and model refinement to reduce bias and promote equitable biometric recognition across varying iris pigmentation and device configurations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.08490v1-abstract-full').style.display = 'none'; document.getElementById('2411.08490v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 5 figures, 5 Tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2411.07405">arXiv:2411.07405</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2411.07405">pdf</a>, <a href="https://arxiv.org/format/2411.07405">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Quality of Control based Resource Dimensioning for Collaborative Edge Robotics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Roy%2C+N">Neelabhro Roy</a>, <a href="/search/cs?searchtype=author&amp;query=Dhullipalla%2C+M+H">Mani H. Dhullipalla</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G+P">Gourav Prateek Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Dimarogonas%2C+D+V">Dimos V. Dimarogonas</a>, <a href="/search/cs?searchtype=author&amp;query=Gross%2C+J">James Gross</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2411.07405v1-abstract-short" style="display: inline;"> With the increasing focus on flexible automation, which emphasizes systems capable of adapting to varied tasks and conditions, exploring future deployments of cloud and edge-based network infrastructures in robotic systems becomes crucial. 
This work examines how wireless solutions could support the shift from rigid, wired setups toward more adaptive, flexible automation in industrial environments&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07405v1-abstract-full').style.display = 'inline'; document.getElementById('2411.07405v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2411.07405v1-abstract-full" style="display: none;"> With the increasing focus on flexible automation, which emphasizes systems capable of adapting to varied tasks and conditions, exploring future deployments of cloud and edge-based network infrastructures in robotic systems becomes crucial. This work examines how wireless solutions could support the shift from rigid, wired setups toward more adaptive, flexible automation in industrial environments. We provide a quality of control (QoC) based abstraction for robotic workloads, parameterized on loop latency and reliability, and jointly optimize system performance. The setup involves collaborative robots working on distributed tasks, underscoring how wireless communication can enable more dynamic coordination in flexible automation systems. We use our abstraction to optimally maximize the QoC ensuring efficient operation even under varying network conditions. Additionally, our solution allocates the communication resources in time slots, optimizing the balance between communication and control costs. Our simulation results highlight that minimizing the delay in the system may not always ensure the best QoC but can lead to substantial gains in QoC if delays are sometimes relaxed, allowing more packets to be delivered reliably. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2411.07405v1-abstract-full').style.display = 'none'; document.getElementById('2411.07405v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in IEEE CCNC 2025</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.10020">arXiv:2409.10020</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.10020">pdf</a>, <a href="https://arxiv.org/format/2409.10020">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1016/j.future.2024.05.032">10.1016/j.future.2024.05.032 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Li-MSD: A lightweight mitigation solution for DAO insider attack in RPL-based IoT </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Verma%2C+A">Abhishek Verma</a>, <a href="/search/cs?searchtype=author&amp;query=Verma%2C+S+K">Sachin Kumar Verma</a>, <a 
href="/search/cs?searchtype=author&amp;query=Pandey%2C+A+C">Avinash Chandra Pandey</a>, <a href="/search/cs?searchtype=author&amp;query=Grover%2C+J">Jyoti Grover</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Girish Sharma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.10020v1-abstract-short" style="display: inline;"> Many IoT applications run on a wireless infrastructure supported by resource-constrained nodes which is popularly known as Low-Power and Lossy Networks (LLNs). Currently, LLNs play a vital role in digital transformation of industries. The resource limitations of LLNs restrict the usage of traditional routing protocols and therefore require an energy-efficient routing solution. IETF&#39;s Routing Proto&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.10020v1-abstract-full').style.display = 'inline'; document.getElementById('2409.10020v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.10020v1-abstract-full" style="display: none;"> Many IoT applications run on a wireless infrastructure supported by resource-constrained nodes which is popularly known as Low-Power and Lossy Networks (LLNs). Currently, LLNs play a vital role in digital transformation of industries. The resource limitations of LLNs restrict the usage of traditional routing protocols and therefore require an energy-efficient routing solution. IETF&#39;s Routing Protocol for Low-power Lossy Networks (RPL, pronounced &#39;ripple&#39;) is one of the most popular energy-efficient protocols for LLNs, specified in RFC 6550. In RPL, Destination Advertisement Object (DAO) control message is transmitted by a child node to pass on its reachability information to its immediate parent or root node. 
An attacker may exploit the insecure DAO sending mechanism of RPL to perform &#39;DAO insider attack&#39; by transmitting DAO multiple times. This paper shows that an aggressive DAO insider attacker can drastically degrade network performance. We propose a Lightweight Mitigation Solution for DAO insider attack, which is termed as &#39;Li-MSD&#39;. Li-MSD uses a blacklisting strategy to mitigate the attack and restore RPL performance, significantly. By using simulations, it is shown that Li-MSD outperforms the existing solution in the literature. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.10020v1-abstract-full').style.display = 'none'; document.getElementById('2409.10020v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Future Generation Computer Systems, 159, 327-339 (2024) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.15693">arXiv:2408.15693</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.15693">pdf</a>, <a href="https://arxiv.org/format/2408.15693">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Synthetic Forehead-creases Biometric Generation for Reliable User Verification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Tandon%2C+A">Abhishek Tandon</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Geetanjali Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Jaswal%2C+G">Gaurav Jaswal</a>, <a href="/search/cs?searchtype=author&amp;query=Nigam%2C+A">Aditya Nigam</a>, <a href="/search/cs?searchtype=author&amp;query=Ramachandra%2C+R">Raghavendra Ramachandra</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.15693v1-abstract-short" style="display: inline;"> Recent studies have emphasized the potential of forehead-crease patterns as an alternative for face, iris, and periocular recognition, presenting contactless and convenient solutions, particularly in situations where faces are covered by surgical masks. 
However, collecting forehead data presents challenges, including cost and time constraints, as developing and optimizing forehead verification met&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.15693v1-abstract-full').style.display = 'inline'; document.getElementById('2408.15693v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.15693v1-abstract-full" style="display: none;"> Recent studies have emphasized the potential of forehead-crease patterns as an alternative for face, iris, and periocular recognition, presenting contactless and convenient solutions, particularly in situations where faces are covered by surgical masks. However, collecting forehead data presents challenges, including cost and time constraints, as developing and optimizing forehead verification methods requires a substantial number of high-quality images. To tackle these challenges, the generation of synthetic biometric data has gained traction due to its ability to protect privacy while enabling effective training of deep learning-based biometric verification methods. In this paper, we present a new framework to synthesize forehead-crease image data while maintaining important features, such as uniqueness and realism. The proposed framework consists of two main modules: a Subject-Specific Generation Module (SSGM), based on an image-to-image Brownian Bridge Diffusion Model (BBDM), which learns a one-to-many mapping between image pairs to generate identity-aware synthetic forehead creases corresponding to real subjects, and a Subject-Agnostic Generation Module (SAGM), which samples new synthetic identities with assistance from the SSGM. We evaluate the diversity and realism of the generated forehead-crease images primarily using the Fréchet Inception Distance (FID) and the Structural Similarity Index Measure (SSIM). 
In addition, we assess the utility of synthetically generated forehead-crease images using a forehead-crease verification system (FHCVS). The results indicate an improvement in the verification accuracy of the FHCVS by utilizing synthetic data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.15693v1-abstract-full').style.display = 'none'; document.getElementById('2408.15693v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at Generative AI for Futuristic Biometrics - IJCB&#39;24 Special Session</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.02165">arXiv:2408.02165</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.02165">pdf</a>, <a href="https://arxiv.org/format/2408.02165">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> SelfBC: Self Behavior Cloning for Offline Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Liu%2C+S">Shirong Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Bai%2C+C">Chenjia Bai</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+Z">Zixian Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Hao Zhang</a>, <a 
href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gaurav Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yang Liu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.02165v1-abstract-short" style="display: inline;"> Policy constraint methods in offline reinforcement learning employ additional regularization techniques to constrain the discrepancy between the learned policy and the offline dataset. However, these methods tend to result in overly conservative policies that resemble the behavior policy, thus limiting their performance. We investigate this limitation and attribute it to the static nature of tradi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.02165v1-abstract-full').style.display = 'inline'; document.getElementById('2408.02165v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.02165v1-abstract-full" style="display: none;"> Policy constraint methods in offline reinforcement learning employ additional regularization techniques to constrain the discrepancy between the learned policy and the offline dataset. However, these methods tend to result in overly conservative policies that resemble the behavior policy, thus limiting their performance. We investigate this limitation and attribute it to the static nature of traditional constraints. In this paper, we propose a novel dynamic policy constraint that restricts the learned policy on the samples generated by the exponential moving average of previously learned policies. By integrating this self-constraint mechanism into off-policy methods, our method facilitates the learning of non-conservative policies while avoiding policy collapse in the offline setting. 
Theoretical results show that our approach results in a nearly monotonically improved reference policy. Extensive experiments on the D4RL MuJoCo domain demonstrate that our proposed method achieves state-of-the-art performance among the policy constraint methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.02165v1-abstract-full').style.display = 'none'; document.getElementById('2408.02165v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.01895">arXiv:2408.01895</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.01895">pdf</a>, <a href="https://arxiv.org/format/2408.01895">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3654777.3676415">10.1145/3654777.3676415 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Computational Trichromacy Reconstruction: Empowering the Color-Vision Deficient to Recognize Colors Using Augmented Reality </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhu%2C+Y">Yuhao Zhu</a>, <a 
href="/search/cs?searchtype=author&amp;query=Chen%2C+E">Ethan Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Hascup%2C+C">Colin Hascup</a>, <a href="/search/cs?searchtype=author&amp;query=Yan%2C+Y">Yukang Yan</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gaurav Sharma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.01895v2-abstract-short" style="display: inline;"> We propose an assistive technology that helps individuals with Color Vision Deficiencies (CVD) to recognize/name colors. A dichromat&#39;s color perception is a reduced two-dimensional (2D) subset of a normal trichromat&#39;s three dimensional color (3D) perception, leading to confusion when visual stimuli that appear identical to the dichromat are referred to by different color names. Using our proposed&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.01895v2-abstract-full').style.display = 'inline'; document.getElementById('2408.01895v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.01895v2-abstract-full" style="display: none;"> We propose an assistive technology that helps individuals with Color Vision Deficiencies (CVD) to recognize/name colors. A dichromat&#39;s color perception is a reduced two-dimensional (2D) subset of a normal trichromat&#39;s three dimensional color (3D) perception, leading to confusion when visual stimuli that appear identical to the dichromat are referred to by different color names. Using our proposed system, CVD individuals can interactively induce distinct perceptual changes to originally confusing colors via a computational color space transformation. 
By combining their original 2D percepts for colors with the discriminative changes, a three dimensional color space is reconstructed, where the dichromat can learn to resolve color name confusions and accurately recognize colors. Our system is implemented as an Augmented Reality (AR) interface on smartphones, where users interactively control the rotation through swipe gestures and observe the induced color shifts in the camera view or in a displayed image. Through psychophysical experiments and a longitudinal user study, we demonstrate that such rotational color shifts have discriminative power (initially confusing colors become distinct under rotation) and exhibit structured perceptual shifts dichromats can learn with modest training. The AR App is also evaluated in two real-world scenarios (building with lego blocks and interpreting artistic works); users all report positive experience in using the App to recognize object colors that they otherwise could not. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.01895v2-abstract-full').style.display = 'none'; document.getElementById('2408.01895v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2408.01085">arXiv:2408.01085</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2408.01085">pdf</a>, <a href="https://arxiv.org/format/2408.01085">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Effect of Fog Particle Size Distribution on 3D Object Detection Under Adverse Weather Conditions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shinde%2C+A">Ajinkya Shinde</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gaurav Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Pattanaik%2C+M">Manisha Pattanaik</a>, <a href="/search/cs?searchtype=author&amp;query=Singh%2C+S+N">Sri Niwas Singh</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2408.01085v1-abstract-short" style="display: inline;"> LiDAR-based sensors employing optical spectrum signals play a vital role in providing significant information about the target objects in autonomous driving vehicle systems. However, the presence of fog in the atmosphere severely degrades the overall system&#39;s performance. This manuscript analyzes the role of fog particle size distributions in 3D object detection under adverse weather conditions. 
W&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.01085v1-abstract-full').style.display = 'inline'; document.getElementById('2408.01085v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2408.01085v1-abstract-full" style="display: none;"> LiDAR-based sensors employing optical spectrum signals play a vital role in providing significant information about the target objects in autonomous driving vehicle systems. However, the presence of fog in the atmosphere severely degrades the overall system&#39;s performance. This manuscript analyzes the role of fog particle size distributions in 3D object detection under adverse weather conditions. We utilise Mie theory and meteorological optical range (MOR) to calculate the attenuation and backscattering coefficient values for point cloud generation and analyze the overall system&#39;s accuracy in Car, Cyclist, and Pedestrian case scenarios under easy, medium and hard detection difficulties. Gamma and Junge (Power-Law) distributions are employed to mathematically model the fog particle size distribution under strong and moderate advection fog environments. Subsequently, we modified the KITTI dataset based on the backscattering coefficient values and trained it on the PV-RCNN++ deep neural network model for Car, Cyclist, and Pedestrian cases under different detection difficulties. The result analysis shows a significant variation in the system&#39;s accuracy concerning the changes in target object dimensionality, the nature of the fog environment and increasing detection difficulties, with the Car exhibiting the highest accuracy of around 99% and the Pedestrian showing the lowest accuracy of around 73%. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2408.01085v1-abstract-full').style.display = 'none'; document.getElementById('2408.01085v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.19779">arXiv:2407.19779</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.19779">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Synthesizing Scientific Summaries: An Extractive and Abstractive Approach </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Grishma Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Paretkar%2C+A">Aditi Paretkar</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+D">Deepak Sharma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.19779v1-abstract-short" style="display: inline;"> The availability of a vast array of research papers in any area of study, necessitates the need of automated summarisation systems that can present the key research conducted and their corresponding findings. 
Scientific paper summarisation is a challenging task for various reasons including token length limits in modern transformer models and corresponding memory and compute requirements for long&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.19779v1-abstract-full').style.display = 'inline'; document.getElementById('2407.19779v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.19779v1-abstract-full" style="display: none;"> The availability of a vast array of research papers in any area of study, necessitates the need of automated summarisation systems that can present the key research conducted and their corresponding findings. Scientific paper summarisation is a challenging task for various reasons including token length limits in modern transformer models and corresponding memory and compute requirements for long text. A significant amount of work has been conducted in this area, with approaches that modify the attention mechanisms of existing transformer models and others that utilise discourse information to capture long range dependencies in research papers. In this paper, we propose a hybrid methodology for research paper summarisation which incorporates an extractive and abstractive approach. We use the extractive approach to capture the key findings of research, and pair it with the introduction of the paper which captures the motivation for research. We use two models based on unsupervised learning for the extraction stage and two transformer language models, resulting in four combinations for our hybrid approach. The performances of the models are evaluated on three metrics and we present our findings in this paper. We find that using certain combinations of hyper parameters, it is possible for automated summarisation systems to exceed the abstractiveness of summaries written by humans. 
Finally, we state our future scope of research in extending this methodology to summarisation of generalised long documents. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.19779v1-abstract-full').style.display = 'none'; document.getElementById('2407.19779v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">the paper consists of 10 pages , 5 figures and 4 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.04497">arXiv:2406.04497</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.04497">pdf</a>, <a href="https://arxiv.org/format/2406.04497">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> Consensus Through Knot Discovery in Asynchronous Dynamic Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bricker%2C+R">Rachel Bricker</a>, <a href="/search/cs?searchtype=author&amp;query=Nesterenko%2C+M">Mikhail Nesterenko</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gokarna Sharma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.04497v1-abstract-short" style="display: inline;"> We state the Problem of Knot Identification as a way to achieve consensus in 
dynamic networks. The network adversary is asynchronous and not oblivious. The network may be disconnected throughout the computation. We determine the necessary and sufficient conditions for the existence of a solution to the Knot Identification Problem: the knots must be observable by all processes and the first observe&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.04497v1-abstract-full').style.display = 'inline'; document.getElementById('2406.04497v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.04497v1-abstract-full" style="display: none;"> We state the Problem of Knot Identification as a way to achieve consensus in dynamic networks. The network adversary is asynchronous and not oblivious. The network may be disconnected throughout the computation. We determine the necessary and sufficient conditions for the existence of a solution to the Knot Identification Problem: the knots must be observable by all processes and the first observed knot must be the same for all processes. We present an algorithm KIA that solves it. We conduct KIA performance evaluation. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.04497v1-abstract-full').style.display = 'none'; document.getElementById('2406.04497v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.07146">arXiv:2405.07146</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.07146">pdf</a>, <a href="https://arxiv.org/format/2405.07146">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> TRAIL: Cross-Shard Validation for Cryptocurrency Byzantine Shard Protection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jacovetty%2C+M">Mitch Jacovetty</a>, <a href="/search/cs?searchtype=author&amp;query=Oglio%2C+J">Joseph Oglio</a>, <a href="/search/cs?searchtype=author&amp;query=Nesterenko%2C+M">Mikhail Nesterenko</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gokarna Sharma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.07146v1-abstract-short" style="display: inline;"> We present TRAIL: an algorithm that uses a novel consensus procedure to tolerate failed or malicious shards within a blockchain-based cryptocurrency. Our algorithm takes a new approach of selecting validator shards for each transaction from those that previously held the assets being transferred. This approach ensures the algorithm&#39;s robustness and efficiency. 
TRAIL is presented using PBFT for int&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.07146v1-abstract-full').style.display = 'inline'; document.getElementById('2405.07146v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.07146v1-abstract-full" style="display: none;"> We present TRAIL: an algorithm that uses a novel consensus procedure to tolerate failed or malicious shards within a blockchain-based cryptocurrency. Our algorithm takes a new approach of selecting validator shards for each transaction from those that previously held the assets being transferred. This approach ensures the algorithm&#39;s robustness and efficiency. TRAIL is presented using PBFT for internal shard transaction processing and a modified version of PBFT for external cross-shard validation. We describe TRAIL, prove it correct, analyze its message complexity, and evaluate its performance. We propose various TRAIL optimizations: we describe how it can be adapted to other Byzantine-tolerant consensus algorithms, how a complete system may be built on the basis of it, and how TRAIL can be applied to existing and future sharded blockchains. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.07146v1-abstract-full').style.display = 'none'; document.getElementById('2405.07146v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.06617">arXiv:2405.06617</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.06617">pdf</a>, <a href="https://arxiv.org/ps/2405.06617">ps</a>, <a href="https://arxiv.org/format/2405.06617">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> Optimal Uniform Circle Formation by Asynchronous Luminous Robots </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Feletti%2C+C">Caterina Feletti</a>, <a href="/search/cs?searchtype=author&amp;query=Pattanayak%2C+D">Debasish Pattanayak</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gokarna Sharma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.06617v1-abstract-short" style="display: inline;"> We study the {\sc Uniform Circle Formation} ({\sc UCF}) problem for a swarm of $n$ autonomous mobile robots operating in \emph{Look-Compute-Move} (LCM) cycles on the Euclidean plane. We assume our robots are \emph{luminous}, i.e. embedded with a persistent light that can assume a color chosen from a fixed palette, and \emph{opaque}, i.e. not able to see beyond a collinear robot. 
Robots are said to&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.06617v1-abstract-full').style.display = 'inline'; document.getElementById('2405.06617v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.06617v1-abstract-full" style="display: none;"> We study the {\sc Uniform Circle Formation} ({\sc UCF}) problem for a swarm of $n$ autonomous mobile robots operating in \emph{Look-Compute-Move} (LCM) cycles on the Euclidean plane. We assume our robots are \emph{luminous}, i.e. embedded with a persistent light that can assume a color chosen from a fixed palette, and \emph{opaque}, i.e. not able to see beyond a collinear robot. Robots are said to \emph{collide} if they share positions or their paths intersect within concurrent LCM cycles. To solve {\sc UCF}, a swarm of $n$ robots must autonomously arrange themselves so that each robot occupies a vertex of the same regular $n$-gon not fixed in advance. In terms of efficiency, the goal is to design an algorithm that optimizes (or provides a tradeoff between) two fundamental performance metrics: \emph{(i)} the execution time and \emph{(ii)} the size of the color palette. There exists an $O(1)$-time $O(1)$-color algorithm for this problem under the fully synchronous and semi-synchronous schedulers and a $O(\log\log n)$-time $O(1)$-color or $O(1)$-time $O(\sqrt{n})$-color algorithm under the asynchronous scheduler, avoiding collisions. In this paper, we develop a deterministic algorithm solving {\sc UCF} avoiding collisions in $O(1)$-time with $O(1)$ colors under the asynchronous scheduler, which is asymptotically optimal with respect to both time and number of colors used, the first such result. Furthermore, the algorithm proposed here minimizes for the first time what we call the \emph{computational SEC}, i.e. the smallest circular area where robots operate throughout the whole algorithm. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.06617v1-abstract-full').style.display = 'none'; document.getElementById('2405.06617v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">33 pages, 15 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.10151">arXiv:2404.10151</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.10151">pdf</a>, <a href="https://arxiv.org/format/2404.10151">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> Distributing Context-Aware Shared Memory Data Structures: A Case Study on Singly-Linked Lists </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ravishankar%2C+R">Raaghav Ravishankar</a>, <a href="/search/cs?searchtype=author&amp;query=Kulkarni%2C+S">Sandeep Kulkarni</a>, <a href="/search/cs?searchtype=author&amp;query=Peri%2C+S">Sathya Peri</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gokarna Sharma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.10151v2-abstract-short" style="display: inline;"> In this paper, we study the partitioning of a context-aware shared memory data structure so that it can be implemented as a 
distributed data structure running on multiple machines. By context-aware data structures, we mean that the result of an operation not only depends upon the value of the shared data but also upon the previous operations performed by the same client. While there is substantial&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.10151v2-abstract-full').style.display = 'inline'; document.getElementById('2404.10151v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.10151v2-abstract-full" style="display: none;"> In this paper, we study the partitioning of a context-aware shared memory data structure so that it can be implemented as a distributed data structure running on multiple machines. By context-aware data structures, we mean that the result of an operation not only depends upon the value of the shared data but also upon the previous operations performed by the same client. While there is substantial work on designing distributed data structures, designing distributed context-aware data structures has not received much attention. We focus on singly-linked lists as a case study of the context-aware data structure. We start with a shared memory context-aware lock-free singly-linked list and show how it can be transformed into a distributed lock-free context-aware singly-linked list. The main challenge in such a transformation is to preserve properties of client-visible operations of the underlying data structure. We present two protocols that preserve these properties of client-visible operations of the linked list. In the first protocol, the distribution is done in the background as a low priority task, while in the second protocol the client-visible operations help the task of distribution without affecting client latency. In both protocols, the client-visible operations remain lock-free. 
Also, our transformation approach does not utilize any hardware primitives (except a compare-and-swap operation on a single word). We note that our transformation is generic and can be used for other lock-free context-aware data structures that can be constructed from singly-linked lists. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.10151v2-abstract-full').style.display = 'none'; document.getElementById('2404.10151v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.09591">arXiv:2404.09591</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.09591">pdf</a>, <a href="https://arxiv.org/format/2404.09591">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> 3D Gaussian Splatting as Markov Chain Monte Carlo </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kheradmand%2C+S">Shakiba Kheradmand</a>, <a href="/search/cs?searchtype=author&amp;query=Rebain%2C+D">Daniel Rebain</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gopal Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Sun%2C+W">Weiwei Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Tseng%2C+J">Jeff Tseng</a>, <a href="/search/cs?searchtype=author&amp;query=Isack%2C+H">Hossam Isack</a>, <a 
href="/search/cs?searchtype=author&amp;query=Kar%2C+A">Abhishek Kar</a>, <a href="/search/cs?searchtype=author&amp;query=Tagliasacchi%2C+A">Andrea Tagliasacchi</a>, <a href="/search/cs?searchtype=author&amp;query=Yi%2C+K+M">Kwang Moo Yi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.09591v3-abstract-short" style="display: inline;"> While 3D Gaussian Splatting has recently become popular for neural rendering, current methods rely on carefully engineered cloning and splitting strategies for placing Gaussians, which can lead to poor-quality renderings, and reliance on a good initialization. In this work, we rethink the set of 3D Gaussians as a random sample drawn from an underlying probability distribution describing the physic&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.09591v3-abstract-full').style.display = 'inline'; document.getElementById('2404.09591v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.09591v3-abstract-full" style="display: none;"> While 3D Gaussian Splatting has recently become popular for neural rendering, current methods rely on carefully engineered cloning and splitting strategies for placing Gaussians, which can lead to poor-quality renderings, and reliance on a good initialization. In this work, we rethink the set of 3D Gaussians as a random sample drawn from an underlying probability distribution describing the physical representation of the scene-in other words, Markov Chain Monte Carlo (MCMC) samples. Under this view, we show that the 3D Gaussian updates can be converted as Stochastic Gradient Langevin Dynamics (SGLD) updates by simply introducing noise. 
We then rewrite the densification and pruning strategies in 3D Gaussian Splatting as simply a deterministic state transition of MCMC samples, removing these heuristics from the framework. To do so, we revise the &#39;cloning&#39; of Gaussians into a relocalization scheme that approximately preserves sample probability. To encourage efficient use of Gaussians, we introduce a regularizer that promotes the removal of unused Gaussians. On various standard evaluation scenes, we show that our method provides improved rendering quality, easy control over the number of Gaussians, and robustness to initialization. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.09591v3-abstract-full').style.display = 'none'; document.getElementById('2404.09591v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.01689">arXiv:2404.01689</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.01689">pdf</a>, <a href="https://arxiv.org/format/2404.01689">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/ANTS59832.2023.10469481">10.1109/ANTS59832.2023.10469481 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> A Lightweight Security Solution for Mitigation of Hatchetman Attack in RPL-based 6LoWPAN </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Girish Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Grover%2C+J">Jyoti Grover</a>, <a href="/search/cs?searchtype=author&amp;query=Verma%2C+A">Abhishek Verma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.01689v1-abstract-short" style="display: inline;"> In recent times, the Internet of Things (IoT) has a significant rise in industries, and we live in the era of Industry 4.0, where each device is connected to the Internet from small to big. These devices are Artificial Intelligence (AI) enabled and are capable of perspective analytics. By 2023, it&#39;s anticipated that over 14 billion smart devices will be available on the Internet. 
These application&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.01689v1-abstract-full').style.display = 'inline'; document.getElementById('2404.01689v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.01689v1-abstract-full" style="display: none;"> In recent times, the Internet of Things (IoT) has a significant rise in industries, and we live in the era of Industry 4.0, where each device is connected to the Internet from small to big. These devices are Artificial Intelligence (AI) enabled and are capable of perspective analytics. By 2023, it&#39;s anticipated that over 14 billion smart devices will be available on the Internet. These applications operate in a wireless environment where memory, power, and other resource limitations apply to the nodes. In addition, the conventional routing method is ineffective in networks with limited resource devices, lossy links, and slow data rates. Routing Protocol for Low Power and Lossy Networks (RPL), a new routing protocol for such networks, was proposed by the IETF&#39;s ROLL group. RPL operates in two modes: Storing and Non-Storing. In Storing mode, each node have the information to reach to other node. In Non-Storing mode, the routing information lies with the root node only. The attacker may exploit the Non-Storing feature of the RPL. When the root node transmits User Datagram Protocol~(UDP) or control message packet to the child nodes, the routing information is stored in the extended header of the IPv6 packet. The attacker may modify the address from the source routing header which leads to Denial of Service (DoS) attack. This attack is RPL specific which is known as Hatchetman attack. This paper shows significant degradation in terms of network performance when an attacker exploits this feature. 
We also propose a lightweight mitigation of Hatchetman attack using game theoretic approach to detect the Hatchetman attack in IoT. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.01689v1-abstract-full').style.display = 'none'; document.getElementById('2404.01689v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.19057">arXiv:2403.19057</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.19057">pdf</a>, <a href="https://arxiv.org/ps/2403.19057">ps</a>, <a href="https://arxiv.org/format/2403.19057">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Equity in Healthcare: Analyzing Disparities in Machine Learning Predictions of Diabetic Patient Readmissions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Al-Zanbouri%2C+Z">Zainab Al-Zanbouri</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gauri Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Raza%2C+S">Shaina Raza</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.19057v1-abstract-short" style="display: inline;"> This study investigates how machine learning (ML) models can predict hospital readmissions for diabetic patients fairly and accurately across different demographics (age, gender, race). 
We compared models like Deep Learning, Generalized Linear Models, Gradient Boosting Machines (GBM), and Naive Bayes. GBM stood out with an F1-score of 84.3% and accuracy of 82.2%, accurately predicting readmissions&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.19057v1-abstract-full').style.display = 'inline'; document.getElementById('2403.19057v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.19057v1-abstract-full" style="display: none;"> This study investigates how machine learning (ML) models can predict hospital readmissions for diabetic patients fairly and accurately across different demographics (age, gender, race). We compared models like Deep Learning, Generalized Linear Models, Gradient Boosting Machines (GBM), and Naive Bayes. GBM stood out with an F1-score of 84.3% and accuracy of 82.2%, accurately predicting readmissions across demographics. A fairness analysis was conducted across all the models. GBM minimized disparities in predictions, achieving balanced results across genders and races. It showed low False Discovery Rates (FDR) (6-7%) and False Positive Rates (FPR) (5%) for both genders. Additionally, FDRs remained low for racial groups, such as African Americans (8%) and Asians (7%). Similarly, FPRs were consistent across age groups (4%) for both patients under 40 and those above 40, indicating its precision and ability to reduce bias. These findings emphasize the importance of choosing ML models carefully to ensure both accuracy and fairness for all patients. By showcasing effectiveness of various models with fairness metrics, this study promotes personalized medicine and the need for fair ML algorithms in healthcare. This can ultimately reduce disparities and improve outcomes for diabetic patients of all backgrounds. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.19057v1-abstract-full').style.display = 'none'; document.getElementById('2403.19057v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.16202">arXiv:2403.16202</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.16202">pdf</a>, <a href="https://arxiv.org/format/2403.16202">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> FH-SSTNet: Forehead Creases based User Verification using Spatio-Spatial Temporal Network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Geetanjali Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Jaswal%2C+G">Gaurav Jaswal</a>, <a href="/search/cs?searchtype=author&amp;query=Nigam%2C+A">Aditya Nigam</a>, <a href="/search/cs?searchtype=author&amp;query=Ramachandra%2C+R">Raghavendra Ramachandra</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.16202v1-abstract-short" style="display: inline;"> Biometric authentication, which utilizes contactless features, such as forehead patterns, has become increasingly important for identity verification and access management. The proposed method is based on learning a 3D spatio-spatial temporal convolution to create detailed pictures of forehead patterns. 
We introduce a new CNN model called the Forehead Spatio-Spatial Temporal Network (FH-SSTNet), w&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.16202v1-abstract-full').style.display = 'inline'; document.getElementById('2403.16202v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.16202v1-abstract-full" style="display: none;"> Biometric authentication, which utilizes contactless features, such as forehead patterns, has become increasingly important for identity verification and access management. The proposed method is based on learning a 3D spatio-spatial temporal convolution to create detailed pictures of forehead patterns. We introduce a new CNN model called the Forehead Spatio-Spatial Temporal Network (FH-SSTNet), which utilizes a 3D CNN architecture with triplet loss to capture distinguishing features. We enhance the model&#39;s discrimination capability using Arcloss in the network&#39;s head. Experimentation on the Forehead Creases version 1 (FH-V1) dataset, containing 247 unique subjects, demonstrates the superior performance of FH-SSTNet compared to existing methods and pre-trained CNNs like ResNet50, especially for forehead-based user verification. The results demonstrate the superior performance of FH-SSTNet for forehead-based user verification, confirming its effectiveness in identity authentication. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.16202v1-abstract-full').style.display = 'none'; document.getElementById('2403.16202v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">6 pages, 5 Figure, IWBF conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.13716">arXiv:2403.13716</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.13716">pdf</a>, <a href="https://arxiv.org/ps/2403.13716">ps</a>, <a href="https://arxiv.org/format/2403.13716">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> </div> </div> <p class="title is-5 mathjax"> Agent-based Leader Election, MST, and Beyond </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kshemkalyani%2C+A+D">Ajay D. Kshemkalyani</a>, <a href="/search/cs?searchtype=author&amp;query=Kumar%2C+M">Manish Kumar</a>, <a href="/search/cs?searchtype=author&amp;query=Molla%2C+A+R">Anisur Rahaman Molla</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gokarna Sharma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.13716v2-abstract-short" style="display: inline;"> Leader election is one of the fundamental and well-studied problems in distributed computing. In this paper, we initiate the study of leader election using mobile agents. Suppose $n$ agents are positioned initially arbitrarily on the nodes of an arbitrary, anonymous, $n$-node, $m$-edge graph $G$. 
The agents relocate themselves autonomously on the nodes of $G$ and elect an agent as a leader such th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.13716v2-abstract-full').style.display = 'inline'; document.getElementById('2403.13716v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.13716v2-abstract-full" style="display: none;"> Leader election is one of the fundamental and well-studied problems in distributed computing. In this paper, we initiate the study of leader election using mobile agents. Suppose $n$ agents are positioned initially arbitrarily on the nodes of an arbitrary, anonymous, $n$-node, $m$-edge graph $G$. The agents relocate themselves autonomously on the nodes of $G$ and elect an agent as a leader such that the leader agent knows it is a leader and the other agents know they are not leaders. The objective is to minimize time and memory requirements. Following the literature, we consider the synchronous setting in which each agent performs its operations synchronously with others and hence the time complexity can be measured in rounds. The quest in this paper is to provide solutions without agents knowing any graph parameter, such as $n$, a priori. We first establish that, without agents knowing any graph parameter a priori, there exists a deterministic algorithm to elect an agent as a leader in $O(m)$ rounds with $O(n\log n)$ bits at each agent. Using this leader election result, we develop a deterministic algorithm for agents to construct a minimum spanning tree of $G$ in $O(m+n\log n)$ rounds using $O(n \log n)$ bits memory at each agent, without agents knowing any graph parameter a priori. 
Finally, using the same leader election result, we provide improved time/memory results for other fundamental distributed graph problems, namely, gathering, maximal independent set, and minimal dominating sets, removing the assumptions on agents knowing graph parameters a priori. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.13716v2-abstract-full').style.display = 'none'; document.getElementById('2403.13716v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">25 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.12256">arXiv:2403.12256</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.12256">pdf</a>, <a href="https://arxiv.org/format/2403.12256">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> BeRGeR: Byzantine-Robust Geometric Routing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zaz%2C+B">Brown Zaz</a>, <a href="/search/cs?searchtype=author&amp;query=Nesterenko%2C+M">Mikhail Nesterenko</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gokarna Sharma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span 
class="abstract-short has-text-grey-dark mathjax" id="2403.12256v1-abstract-short" style="display: inline;"> We present BeRGeR: the first asynchronous geometric routing algorithm that guarantees delivery of a message despite a Byzantine fault without relying on cryptographic primitives or randomization. The communication graph is a planar embedding that remains three-connected if all edges intersecting the source-target line segment are removed. We prove the algorithm correct and estimate its message com&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.12256v1-abstract-full').style.display = 'inline'; document.getElementById('2403.12256v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.12256v1-abstract-full" style="display: none;"> We present BeRGeR: the first asynchronous geometric routing algorithm that guarantees delivery of a message despite a Byzantine fault without relying on cryptographic primitives or randomization. The communication graph is a planar embedding that remains three-connected if all edges intersecting the source-target line segment are removed. We prove the algorithm correct and estimate its message complexity. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.12256v1-abstract-full').style.display = 'none'; document.getElementById('2403.12256v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.08061">arXiv:2402.08061</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.08061">pdf</a>, <a href="https://arxiv.org/format/2402.08061">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Portobello: Extending Driving Simulation from the Lab to the Road </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bu%2C+F">Fanjun Bu</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+S">Stacey Li</a>, <a href="/search/cs?searchtype=author&amp;query=Goedicke%2C+D">David Goedicke</a>, <a href="/search/cs?searchtype=author&amp;query=Colley%2C+M">Mark Colley</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gyanendra Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Yasuda%2C+H">Hiroshi Yasuda</a>, <a href="/search/cs?searchtype=author&amp;query=Ju%2C+W">Wendy Ju</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.08061v1-abstract-short" style="display: inline;"> In automotive user interface design, testing often starts with lab-based driving simulators and migrates toward on-road studies to mitigate risks. Mixed reality (XR) helps translate virtual study designs to the real road to increase ecological validity. 
However, researchers rarely run the same study in both in-lab and on-road simulators due to the challenges of replicating studies in both physical&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.08061v1-abstract-full').style.display = 'inline'; document.getElementById('2402.08061v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.08061v1-abstract-full" style="display: none;"> In automotive user interface design, testing often starts with lab-based driving simulators and migrates toward on-road studies to mitigate risks. Mixed reality (XR) helps translate virtual study designs to the real road to increase ecological validity. However, researchers rarely run the same study in both in-lab and on-road simulators due to the challenges of replicating studies in both physical and virtual worlds. To provide a common infrastructure to port in-lab study designs on-road, we built a platform-portable infrastructure, Portobello, to enable us to run twinned physical-virtual studies. As a proof-of-concept, we extended the on-road simulator XR-OOM with Portobello. We ran a within-subjects, autonomous-vehicle crosswalk cooperation study (N=32) both in-lab and on-road to investigate study design portability and platform-driven influences on study outcomes. To our knowledge, this is the first system that enables the twinning of studies originally designed for in-lab simulators to be carried out in an on-road platform. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.08061v1-abstract-full').style.display = 'none'; document.getElementById('2402.08061v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">CHI 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.15194">arXiv:2401.15194</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2401.15194">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Multimodality in Group Communication Research </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lange%2C+R">Robin Lange</a>, <a href="/search/cs?searchtype=author&amp;query=Welles%2C+B+F">Brooke Foucault Welles</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gyanendra Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Radke%2C+R+J">Richard J. Radke</a>, <a href="/search/cs?searchtype=author&amp;query=Garcia%2C+J+O">Javier O. 
Garcia</a>, <a href="/search/cs?searchtype=author&amp;query=Riedl%2C+C">Christoph Riedl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.15194v1-abstract-short" style="display: inline;"> Team interactions are often multisensory, requiring members to pick up on verbal, visual, spatial and body language cues. Multimodal research, research that captures multiple modes of communication such as audio and visual signals, is therefore integral to understanding these multisensory group communication processes. This type of research has gained traction in biomedical engineering and neurosc&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.15194v1-abstract-full').style.display = 'inline'; document.getElementById('2401.15194v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.15194v1-abstract-full" style="display: none;"> Team interactions are often multisensory, requiring members to pick up on verbal, visual, spatial and body language cues. Multimodal research, research that captures multiple modes of communication such as audio and visual signals, is therefore integral to understanding these multisensory group communication processes. This type of research has gained traction in biomedical engineering and neuroscience, but it is unclear the extent to which communication and management researchers conduct multimodal research. Our study finds that despite its utility, multimodal research is underutilized in the communication and management literatures. This paper then covers introductory guidelines for creating new multimodal research including considerations for sensors, data integration and ethical considerations. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.15194v1-abstract-full').style.display = 'none'; document.getElementById('2401.15194v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">27 pages, 3 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.09856">arXiv:2401.09856</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2401.09856">pdf</a>, <a href="https://arxiv.org/format/2401.09856">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> </div> <p class="title is-5 mathjax"> EDAF: An End-to-End Delay Analytics Framework for 5G-and-Beyond Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mostafavi%2C+S">Samie Mostafavi</a>, <a href="/search/cs?searchtype=author&amp;query=Tillner%2C+M">Marius Tillner</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G+P">Gourav Prateek Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Gross%2C+J">James Gross</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.09856v1-abstract-short" style="display: inline;"> Supporting applications in emerging domains like cyber-physical systems and human-in-the-loop scenarios typically requires adherence to strict 
end-to-end delay guarantees. Contributions of many tandem processes unfolding layer by layer within the wireless network result in violations of delay constraints, thereby severely degrading application performance. Meeting the application&#39;s stringent requi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.09856v1-abstract-full').style.display = 'inline'; document.getElementById('2401.09856v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.09856v1-abstract-full" style="display: none;"> Supporting applications in emerging domains like cyber-physical systems and human-in-the-loop scenarios typically requires adherence to strict end-to-end delay guarantees. Contributions of many tandem processes unfolding layer by layer within the wireless network result in violations of delay constraints, thereby severely degrading application performance. Meeting the application&#39;s stringent requirements necessitates coordinated optimization of the end-to-end delay by fine-tuning all contributing processes. To achieve this task, we designed and implemented EDAF, a framework to decompose packets&#39; end-to-end delays and determine each component&#39;s significance for 5G network. We showcase EDAF on OpenAirInterface 5G uplink, modified to report timestamps across the data plane. By applying the obtained insights, we optimized end-to-end uplink delay by eliminating segmentation and frame-alignment delays, decreasing average delay from 12ms to 4ms. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.09856v1-abstract-full').style.display = 'none'; document.getElementById('2401.09856v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to the 11th International Workshop on Computer and Networking Experimental Research using Testbeds (CNERT 2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.08164">arXiv:2401.08164</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2401.08164">pdf</a>, <a href="https://arxiv.org/format/2401.08164">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> EEG-based Cognitive Load Estimation of Acoustic Parameters for Data Sonification </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gulshan Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Madan%2C+S">Surbhi Madan</a>, <a href="/search/cs?searchtype=author&amp;query=Bilalpur%2C+M">Maneesh Bilalpur</a>, <a href="/search/cs?searchtype=author&amp;query=Dhall%2C+A">Abhinav Dhall</a>, <a href="/search/cs?searchtype=author&amp;query=Subramanian%2C+R">Ramanathan Subramanian</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" 
id="2401.08164v1-abstract-short" style="display: inline;"> Sonification is a data visualization technique which expresses data attributes via psychoacoustic parameters, which are non-speech audio signals used to convey information. This paper investigates the binary estimation of cognitive load induced by psychoacoustic parameters conveying the focus level of an astronomical image via Electroencephalogram (EEG) embeddings. Employing machine learning and d&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.08164v1-abstract-full').style.display = 'inline'; document.getElementById('2401.08164v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.08164v1-abstract-full" style="display: none;"> Sonification is a data visualization technique which expresses data attributes via psychoacoustic parameters, which are non-speech audio signals used to convey information. This paper investigates the binary estimation of cognitive load induced by psychoacoustic parameters conveying the focus level of an astronomical image via Electroencephalogram (EEG) embeddings. Employing machine learning and deep learning methodologies, we demonstrate that EEG signals are reliable for (a) binary estimation of cognitive load, (b) isolating easy vs difficult visual-to-auditory perceptual mappings, and (c) capturing perceptual similarities among psychoacoustic parameters. Our key findings reveal that (1) EEG embeddings can reliably measure cognitive load, achieving a peak F1-score of 0.98; (2) Extreme focus levels are easier to detect via auditory mappings than intermediate ones, and (3) psychoacoustic parameters inducing comparable cognitive load levels tend to generate similar EEG encodings. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.08164v1-abstract-full').style.display = 'none'; document.getElementById('2401.08164v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.03483">arXiv:2312.03483</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.03483">pdf</a>, <a href="https://arxiv.org/format/2312.03483">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Exploring Answer Information Methods for Question Generation with Transformers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Chafekar%2C+T">Talha Chafekar</a>, <a href="/search/cs?searchtype=author&amp;query=Hussain%2C+A">Aafiya Hussain</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Grishma Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+D">Deepak Sharma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.03483v1-abstract-short" style="display: inline;"> There has been a lot of work in question generation where different methods to provide target answers as input, have been employed. This experimentation has been mostly carried out for RNN based models. 
We use three different methods and their combinations for incorporating answer information and explore their effect on several automatic evaluation metrics. The methods that are used are answer pro&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.03483v1-abstract-full').style.display = 'inline'; document.getElementById('2312.03483v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.03483v1-abstract-full" style="display: none;"> There has been a lot of work in question generation where different methods to provide target answers as input, have been employed. This experimentation has been mostly carried out for RNN based models. We use three different methods and their combinations for incorporating answer information and explore their effect on several automatic evaluation metrics. The methods that are used are answer prompting, using a custom product method using answer embeddings and encoder outputs, choosing sentences from the input paragraph that have answer related information, and using a separate cross-attention attention block in the decoder which attends to the answer. We observe that answer prompting without any additional modes obtains the best scores across rouge, meteor scores. Additionally, we use a custom metric to calculate how many of the generated questions have the same answer, as the answer which is used to generate them. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.03483v1-abstract-full').style.display = 'none'; document.getElementById('2312.03483v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.02593">arXiv:2312.02593</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.02593">pdf</a>, <a href="https://arxiv.org/format/2312.02593">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> 6D Assembly Pose Estimation by Point Cloud Registration for Robot Manipulation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Samarawickrama%2C+K">K. Samarawickrama</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">G. Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Angleraud%2C+A">A. Angleraud</a>, <a href="/search/cs?searchtype=author&amp;query=Pieters%2C+R">R. Pieters</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.02593v1-abstract-short" style="display: inline;"> The demands on robotic manipulation skills to perform challenging tasks have drastically increased in recent times. To perform these tasks with dexterity, robots require perception tools to understand the scene and extract useful information that transforms to robot control inputs. 
To this end, recent research has introduced various object pose estimation and grasp pose detection methods that yiel&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.02593v1-abstract-full').style.display = 'inline'; document.getElementById('2312.02593v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.02593v1-abstract-full" style="display: none;"> The demands on robotic manipulation skills to perform challenging tasks have drastically increased in recent times. To perform these tasks with dexterity, robots require perception tools to understand the scene and extract useful information that transforms to robot control inputs. To this end, recent research has introduced various object pose estimation and grasp pose detection methods that yield precise results. Assembly pose estimation is a secondary yet highly desirable skill in robotic assembling as it requires more detailed information on object placement as compared to bin picking and pick-and-place tasks. However, it has been often overlooked in research due to the complexity of integration in an agile framework. To address this issue, we propose an assembly pose estimation method with RGB-D input and 3D CAD models of the associated objects. The framework consists of semantic segmentation of the scene and registering point clouds of local surfaces against target point clouds derived from CAD models to estimate 6D poses. We show that our method can deliver sufficient accuracy for assembling object assemblies using evaluation metrics and demonstrations. 
The source code and dataset for the work can be found at: https://github.com/KulunuOS/6DAPose <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.02593v1-abstract-full').style.display = 'none'; document.getElementById('2312.02593v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.02362">arXiv:2312.02362</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.02362">pdf</a>, <a href="https://arxiv.org/format/2312.02362">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Graphics">cs.GR</span> </div> </div> <p class="title is-5 mathjax"> PointNeRF++: A multi-scale, point-based Neural Radiance Field </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sun%2C+W">Weiwei Sun</a>, <a href="/search/cs?searchtype=author&amp;query=Trulls%2C+E">Eduard Trulls</a>, <a href="/search/cs?searchtype=author&amp;query=Tseng%2C+Y">Yang-Che Tseng</a>, <a href="/search/cs?searchtype=author&amp;query=Sambandam%2C+S">Sneha Sambandam</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gopal Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Tagliasacchi%2C+A">Andrea Tagliasacchi</a>, <a 
href="/search/cs?searchtype=author&amp;query=Yi%2C+K+M">Kwang Moo Yi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.02362v2-abstract-short" style="display: inline;"> Point clouds offer an attractive source of information to complement images in neural scene representations, especially when few images are available. Neural rendering methods based on point clouds do exist, but they do not perform well when the point cloud quality is low -- e.g., sparse or incomplete, which is often the case with real-world data. We overcome these problems with a simple represent&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.02362v2-abstract-full').style.display = 'inline'; document.getElementById('2312.02362v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.02362v2-abstract-full" style="display: none;"> Point clouds offer an attractive source of information to complement images in neural scene representations, especially when few images are available. Neural rendering methods based on point clouds do exist, but they do not perform well when the point cloud quality is low -- e.g., sparse or incomplete, which is often the case with real-world data. We overcome these problems with a simple representation that aggregates point clouds at multiple scale levels with sparse voxel grids at different resolutions. To deal with point cloud sparsity, we average across multiple scale levels -- but only among those that are valid, i.e., that have enough neighboring points in proximity to the ray of a pixel. To help model areas without points, we add a global voxel at the coarsest scale, thus unifying ``classical&#39;&#39; and point-based NeRF formulations. 
We validate our method on the NeRF Synthetic, ScanNet, and KITTI-360 datasets, outperforming the state of the art, with a significant gap compared to other NeRF-based methods, especially on more challenging scenes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.02362v2-abstract-full').style.display = 'none'; document.getElementById('2312.02362v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project website: https://pointnerfpp.github.io/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.02202">arXiv:2312.02202</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.02202">pdf</a>, <a href="https://arxiv.org/format/2312.02202">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Graphics">cs.GR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Volumetric Rendering with Baked Quadrature Fields </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gopal Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Rebain%2C+D">Daniel Rebain</a>, <a href="/search/cs?searchtype=author&amp;query=Yi%2C+K+M">Kwang Moo Yi</a>, <a 
href="/search/cs?searchtype=author&amp;query=Tagliasacchi%2C+A">Andrea Tagliasacchi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.02202v2-abstract-short" style="display: inline;"> We propose a novel Neural Radiance Field (NeRF) representation for non-opaque scenes that enables fast inference by utilizing textured polygons. Despite the high-quality novel view rendering that NeRF provides, a critical limitation is that it relies on volume rendering that can be computationally expensive and does not utilize the advancements in modern graphics hardware. Many existing methods fa&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.02202v2-abstract-full').style.display = 'inline'; document.getElementById('2312.02202v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.02202v2-abstract-full" style="display: none;"> We propose a novel Neural Radiance Field (NeRF) representation for non-opaque scenes that enables fast inference by utilizing textured polygons. Despite the high-quality novel view rendering that NeRF provides, a critical limitation is that it relies on volume rendering that can be computationally expensive and does not utilize the advancements in modern graphics hardware. Many existing methods fall short when it comes to modelling volumetric effects as they rely purely on surface rendering. We thus propose to model the scene with polygons, which can then be used to obtain the quadrature points required to model volumetric effects, and also their opacity and colour from the texture. To obtain such polygonal mesh, we train a specialized field whose zero-crossings would correspond to the quadrature points when volume rendering, and perform marching cubes on this field. 
We then perform ray-tracing and utilize the ray-tracing shader to obtain the final colour image. Our method allows an easy integration with existing graphics frameworks allowing rendering speed of over 100 frames-per-second for a $1920\times1080$ image, while still being able to represent non-opaque objects. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.02202v2-abstract-full').style.display = 'none'; document.getElementById('2312.02202v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.00075">arXiv:2312.00075</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.00075">pdf</a>, <a href="https://arxiv.org/format/2312.00075">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Accelerating Neural Field Training via Soft Mining </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kheradmand%2C+S">Shakiba Kheradmand</a>, <a href="/search/cs?searchtype=author&amp;query=Rebain%2C+D">Daniel Rebain</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gopal Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Isack%2C+H">Hossam Isack</a>, <a href="/search/cs?searchtype=author&amp;query=Kar%2C+A">Abhishek Kar</a>, <a href="/search/cs?searchtype=author&amp;query=Tagliasacchi%2C+A">Andrea Tagliasacchi</a>, <a 
href="/search/cs?searchtype=author&amp;query=Yi%2C+K+M">Kwang Moo Yi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.00075v1-abstract-short" style="display: inline;"> We present an approach to accelerate Neural Field training by efficiently selecting sampling locations. While Neural Fields have recently become popular, they are often trained by uniformly sampling the training domain, or through handcrafted heuristics. We show that improved convergence and final training quality can be achieved by a soft mining technique based on importance sampling: rather than ei&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.00075v1-abstract-full').style.display = 'inline'; document.getElementById('2312.00075v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.00075v1-abstract-full" style="display: none;"> We present an approach to accelerate Neural Field training by efficiently selecting sampling locations. While Neural Fields have recently become popular, they are often trained by uniformly sampling the training domain, or through handcrafted heuristics. We show that improved convergence and final training quality can be achieved by a soft mining technique based on importance sampling: rather than either considering or ignoring a pixel completely, we weigh the corresponding loss by a scalar. To implement our idea we use Langevin Monte-Carlo sampling. We show that by doing so, regions with higher error are being selected more frequently, leading to more than 2x improvement in convergence speed. The code and related resources for this study are publicly available at https://ubc-vision.github.io/nf-soft-mining/. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.00075v1-abstract-full').style.display = 'none'; document.getElementById('2312.00075v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.00065">arXiv:2312.00065</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.00065">pdf</a>, <a href="https://arxiv.org/format/2312.00065">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Unsupervised Keypoints from Pretrained Diffusion Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hedlin%2C+E">Eric Hedlin</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gopal Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Mahajan%2C+S">Shweta Mahajan</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+X">Xingzhe He</a>, <a href="/search/cs?searchtype=author&amp;query=Isack%2C+H">Hossam Isack</a>, <a href="/search/cs?searchtype=author&amp;query=Rhodin%2C+A+K+H">Abhishek Kar Helge Rhodin</a>, <a href="/search/cs?searchtype=author&amp;query=Tagliasacchi%2C+A">Andrea Tagliasacchi</a>, <a href="/search/cs?searchtype=author&amp;query=Yi%2C+K+M">Kwang Moo Yi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.00065v3-abstract-short" style="display: inline;"> Unsupervised learning 
of keypoints and landmarks has seen significant progress with the help of modern neural network architectures, but performance is yet to match the supervised counterpart, making their practicability questionable. We leverage the emergent knowledge within text-to-image diffusion models, towards more robust unsupervised keypoints. Our core idea is to find text embeddings that w&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.00065v3-abstract-full').style.display = 'inline'; document.getElementById('2312.00065v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.00065v3-abstract-full" style="display: none;"> Unsupervised learning of keypoints and landmarks has seen significant progress with the help of modern neural network architectures, but performance is yet to match the supervised counterpart, making their practicability questionable. We leverage the emergent knowledge within text-to-image diffusion models, towards more robust unsupervised keypoints. Our core idea is to find text embeddings that would cause the generative model to consistently attend to compact regions in images (i.e. keypoints). To do so, we simply optimize the text embedding such that the cross-attention maps within the denoising network are localized as Gaussians with small standard deviations. We validate our performance on multiple datasets: the CelebA, CUB-200-2011, Tai-Chi-HD, DeepFashion, and Human3.6m datasets. We achieve significantly improved accuracy, sometimes even outperforming supervised ones, particularly for data that is non-aligned and less curated. 
Our code is publicly available and can be found through our project page: https://ubc-vision.github.io/StableKeypoints/ <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.00065v3-abstract-full').style.display = 'none'; document.getElementById('2312.00065v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.18300">arXiv:2311.18300</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2311.18300">pdf</a>, <a href="https://arxiv.org/format/2311.18300">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Multi-label Annotation for Visual Multi-Task Learning Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">G. Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Angleraud%2C+A">A. Angleraud</a>, <a href="/search/cs?searchtype=author&amp;query=Pieters%2C+R">R. Pieters</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.18300v1-abstract-short" style="display: inline;"> Deep learning requires large amounts of data, and a well-defined pipeline for labeling and augmentation. 
Current solutions support numerous computer vision tasks with dedicated annotation types and formats, such as bounding boxes, polygons, and key points. These annotations can be combined into a single data format to benefit approaches such as multi-task models. However, to our knowledge, no avai&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.18300v1-abstract-full').style.display = 'inline'; document.getElementById('2311.18300v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.18300v1-abstract-full" style="display: none;"> Deep learning requires large amounts of data, and a well-defined pipeline for labeling and augmentation. Current solutions support numerous computer vision tasks with dedicated annotation types and formats, such as bounding boxes, polygons, and key points. These annotations can be combined into a single data format to benefit approaches such as multi-task models. However, to our knowledge, no available labeling tool supports the export functionality for a combined benchmark format, and no augmentation library supports transformations for the combination of all. In this work, these functionalities are presented, with visual data annotation and augmentation to train a multi-task model (object detection, segmentation, and key point extraction). The tools are demonstrated in two robot perception use cases. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.18300v1-abstract-full').style.display = 'none'; document.getElementById('2311.18300v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, accepted to IEEE International Conference on Robotic Computing</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.18285">arXiv:2311.18285</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2311.18285">pdf</a>, <a href="https://arxiv.org/format/2311.18285">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Co-speech gestures for human-robot collaboration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ekrekli%2C+A">A. Ekrekli</a>, <a href="/search/cs?searchtype=author&amp;query=Angleraud%2C+A">A. Angleraud</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">G. Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Pieters%2C+R">R. Pieters</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.18285v1-abstract-short" style="display: inline;"> Collaboration between human and robot requires effective modes of communication to assign robot tasks and coordinate activities. As communication can utilize different modalities, a multi-modal approach can be more expressive than single modal models alone. In this work we propose a co-speech gesture model that can assign robot tasks for human-robot collaboration. 
Human gestures and speech, detect&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.18285v1-abstract-full').style.display = 'inline'; document.getElementById('2311.18285v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.18285v1-abstract-full" style="display: none;"> Collaboration between human and robot requires effective modes of communication to assign robot tasks and coordinate activities. As communication can utilize different modalities, a multi-modal approach can be more expressive than single modal models alone. In this work we propose a co-speech gesture model that can assign robot tasks for human-robot collaboration. Human gestures and speech, detected by computer vision and speech recognition, can thus refer to objects in the scene and apply robot actions to them. We present an experimental evaluation of the multi-modal co-speech model with a real-world industrial use case. Results demonstrate that multi-modal communication is easy to achieve and can provide benefits for collaboration with respect to single modal tools. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.18285v1-abstract-full').style.display = 'none'; document.getElementById('2311.18285v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">5 pages, accepted to IEEE International Conference on Robotics Computing</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.05709">arXiv:2311.05709</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2311.05709">pdf</a>, <a href="https://arxiv.org/ps/2311.05709">ps</a>, <a href="https://arxiv.org/format/2311.05709">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> OmniVec: Learning robust representations with cross modal sharing </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Srivastava%2C+S">Siddharth Srivastava</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gaurav Sharma</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.05709v1-abstract-short" style="display: inline;"> Majority of research in learning based methods has been towards designing and training networks for specific tasks. However, many of the learning based tasks, across modalities, share commonalities and could be potentially tackled in a joint framework. We present an approach in such direction, to learn multiple tasks, in multiple modalities, with a unified architecture. 
The proposed network is com&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.05709v1-abstract-full').style.display = 'inline'; document.getElementById('2311.05709v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.05709v1-abstract-full" style="display: none;"> Majority of research in learning based methods has been towards designing and training networks for specific tasks. However, many of the learning based tasks, across modalities, share commonalities and could be potentially tackled in a joint framework. We present an approach in such direction, to learn multiple tasks, in multiple modalities, with a unified architecture. The proposed network is composed of task specific encoders, a common trunk in the middle, followed by task specific prediction heads. We first pre-train it by self-supervised masked training, followed by sequential training for the different tasks. We train the network on all major modalities, e.g.\ visual, audio, text and 3D, and report results on $22$ diverse and challenging public benchmarks. We demonstrate empirically that, using a joint network to train across modalities leads to meaningful information sharing and this allows us to achieve state-of-the-art results on most of the benchmarks. We also show generalization of the trained network on cross-modal tasks as well as unseen datasets and tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.05709v1-abstract-full').style.display = 'none'; document.getElementById('2311.05709v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to WACV 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.01279">arXiv:2311.01279</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2311.01279">pdf</a>, <a href="https://arxiv.org/format/2311.01279">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3583740.3626819">10.1145/3583740.3626819 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> ExPECA: An Experimental Platform for Trustworthy Edge Computing Applications </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mostafavi%2C+S">Samie Mostafavi</a>, <a href="/search/cs?searchtype=author&amp;query=Moothedath%2C+V+N">Vishnu Narayanan Moothedath</a>, <a href="/search/cs?searchtype=author&amp;query=R%C3%B6nngren%2C+S">Stefan Rönngren</a>, <a href="/search/cs?searchtype=author&amp;query=Roy%2C+N">Neelabhro Roy</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G+P">Gourav Prateek Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Seo%2C+S">Sangwon Seo</a>, <a href="/search/cs?searchtype=author&amp;query=Mu%C3%B1oz%2C+M+O">Manuel Olguín Muñoz</a>, <a href="/search/cs?searchtype=author&amp;query=Gross%2C+J">James Gross</a> </p> <p class="abstract mathjax"> <span 
class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2311.01279v1-abstract-short" style="display: inline;"> This paper presents ExPECA, an edge computing and wireless communication research testbed designed to tackle two pressing challenges: comprehensive end-to-end experimentation and high levels of experimental reproducibility. Leveraging OpenStack-based Chameleon Infrastructure (CHI) framework for its proven flexibility and ease of operation, ExPECA is located in a unique, isolated underground facili&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.01279v1-abstract-full').style.display = 'inline'; document.getElementById('2311.01279v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2311.01279v1-abstract-full" style="display: none;"> This paper presents ExPECA, an edge computing and wireless communication research testbed designed to tackle two pressing challenges: comprehensive end-to-end experimentation and high levels of experimental reproducibility. Leveraging OpenStack-based Chameleon Infrastructure (CHI) framework for its proven flexibility and ease of operation, ExPECA is located in a unique, isolated underground facility, providing a highly controlled setting for wireless experiments. The testbed is engineered to facilitate integrated studies of both communication and computation, offering a diverse array of Software-Defined Radios (SDR) and Commercial Off-The-Shelf (COTS) wireless and wired links, as well as containerized computational environments. We exemplify the experimental possibilities of the testbed using OpenRTiST, a latency-sensitive, bandwidth-intensive application, and analyze its performance. 
Lastly, we highlight an array of research domains and experimental setups that stand to gain from ExPECA&#39;s features, including closed-loop applications and time-sensitive networking. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2311.01279v1-abstract-full').style.display = 'none'; document.getElementById('2311.01279v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.18912">arXiv:2310.18912</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.18912">pdf</a>, <a href="https://arxiv.org/format/2310.18912">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/TKDE.2024.3377229">10.1109/TKDE.2024.3377229 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Sentence Bag Graph Formulation for Biomedical Distant Supervision Relation Extraction </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zhang%2C+H">Hao Zhang</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+Y">Yang Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+X">Xiaoyan Liu</a>, <a 
href="/search/cs?searchtype=author&amp;query=Liang%2C+T">Tianming Liang</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gaurav Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Xue%2C+L">Liang Xue</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+M">Maozu Guo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.18912v1-abstract-short" style="display: inline;"> We introduce a novel graph-based framework for alleviating key challenges in distantly-supervised relation extraction and demonstrate its effectiveness in the challenging and important domain of biomedical data. Specifically, we propose a graph view of sentence bags referring to an entity pair, which enables message-passing based aggregation of information related to the entity pair over the sente&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.18912v1-abstract-full').style.display = 'inline'; document.getElementById('2310.18912v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.18912v1-abstract-full" style="display: none;"> We introduce a novel graph-based framework for alleviating key challenges in distantly-supervised relation extraction and demonstrate its effectiveness in the challenging and important domain of biomedical data. Specifically, we propose a graph view of sentence bags referring to an entity pair, which enables message-passing based aggregation of information related to the entity pair over the sentence bag. The proposed framework alleviates the common problem of noisy labeling in distantly supervised relation extraction and also effectively incorporates inter-dependencies between sentences within a bag. 
Extensive experiments on two large-scale biomedical relation datasets and the widely utilized NYT dataset demonstrate that our proposed framework significantly outperforms the state-of-the-art methods for biomedical distant supervision relation extraction while also providing excellent performance for relation extraction in the general text mining domain. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.18912v1-abstract-full').style.display = 'none'; document.getElementById('2310.18912v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">in IEEE Transactions on Knowledge and Data Engineering, 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.10765">arXiv:2309.10765</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.10765">pdf</a>, <a href="https://arxiv.org/format/2309.10765">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3581783.3612858">10.1145/3581783.3612858 <i class="fa 
fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> MAGIC-TBR: Multiview Attention Fusion for Transformer-based Bodily Behavior Recognition in Group Settings </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Madan%2C+S">Surbhi Madan</a>, <a href="/search/cs?searchtype=author&amp;query=Jain%2C+R">Rishabh Jain</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gulshan Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Subramanian%2C+R">Ramanathan Subramanian</a>, <a href="/search/cs?searchtype=author&amp;query=Dhall%2C+A">Abhinav Dhall</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2309.10765v1-abstract-short" style="display: inline;"> Bodily behavioral language is an important social cue, and its automated analysis helps in enhancing the understanding of artificial intelligence systems. Furthermore, behavioral language cues are essential for active engagement in social agent-based user interactions. Despite the progress made in computer vision for tasks like head and body pose estimation, there is still a need to explore the de&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.10765v1-abstract-full').style.display = 'inline'; document.getElementById('2309.10765v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2309.10765v1-abstract-full" style="display: none;"> Bodily behavioral language is an important social cue, and its automated analysis helps in enhancing the understanding of artificial intelligence systems. Furthermore, behavioral language cues are essential for active engagement in social agent-based user interactions. 
Despite the progress made in computer vision for tasks like head and body pose estimation, there is still a need to explore the detection of finer behaviors such as gesturing, grooming, or fumbling. This paper proposes a multiview attention fusion method named MAGIC-TBR that combines features extracted from videos and their corresponding Discrete Cosine Transform coefficients via a transformer-based approach. The experiments are conducted on the BBSI dataset and the results demonstrate the effectiveness of the proposed feature fusion with multiview attention. The code is available at: https://github.com/surbhimadan92/MAGIC-TBR <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2309.10765v1-abstract-full').style.display = 'none'; document.getElementById('2309.10765v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4 pages, 2 Tables and 3 Figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.10648">arXiv:2307.10648</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.10648">pdf</a>, <a href="https://arxiv.org/format/2307.10648">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Data-Driven Latency Probability Prediction for Wireless Networks: Focusing on Tail Probabilities </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Mostafavi%2C+S">Samie Mostafavi</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G+P">Gourav Prateek Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Gross%2C+J">James Gross</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.10648v1-abstract-short" style="display: inline;"> With the emergence of new application areas, such as cyber-physical systems and human-in-the-loop applications, there is a need to guarantee a certain level of end-to-end network latency with extremely high reliability, e.g., 99.999%. 
While mechanisms specified under IEEE 802.1as time-sensitive networking (TSN) can be used to achieve these requirements for switched Ethernet networks, implementing&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.10648v1-abstract-full').style.display = 'inline'; document.getElementById('2307.10648v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.10648v1-abstract-full" style="display: none;"> With the emergence of new application areas, such as cyber-physical systems and human-in-the-loop applications, there is a need to guarantee a certain level of end-to-end network latency with extremely high reliability, e.g., 99.999%. While mechanisms specified under IEEE 802.1as time-sensitive networking (TSN) can be used to achieve these requirements for switched Ethernet networks, implementing TSN mechanisms in wireless networks is challenging due to their stochastic nature. To conform the wireless link to a reliability level of 99.999%, the behavior of extremely rare outliers in the latency probability distribution, or the tail of the distribution, must be analyzed and controlled. This work proposes predicting the tail of the latency distribution using state-of-the-art data-driven approaches, such as mixture density networks (MDN) and extreme value mixture models, to estimate the likelihood of rare latencies conditioned on the network parameters, which can be used to make more informed decisions in wireless transmission. Actual latency measurements of IEEE 802.11g (WiFi), commercial private and a software-defined 5G network are used to benchmark the proposed approaches and evaluate their sensitivities concerning the tail probabilities. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.10648v1-abstract-full').style.display = 'none'; document.getElementById('2307.10648v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Submitted to IEEE Global Communications (GLOBECOM) 2023 conference</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.05451">arXiv:2307.05451</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.05451">pdf</a>, <a href="https://arxiv.org/format/2307.05451">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Detection Threshold of Audio Haptic Asynchrony in a Driving Context </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gyanendra Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Yasuda%2C+H">Hiroshi Yasuda</a>, <a href="/search/cs?searchtype=author&amp;query=Kuehner%2C+M">Manuel Kuehner</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.05451v1-abstract-short" style="display: inline;"> In order to provide perceptually accurate multimodal feedback during driving situations, it is vital to understand the threshold at which drivers are able to recognize asynchrony between multiple incoming stimuli. 
In this work, we investigated and report the \textit{detection threshold} (DT) of asynchrony between audio and haptic feedback, in the context of a force feedback steering wheel. We desig&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.05451v1-abstract-full').style.display = 'inline'; document.getElementById('2307.05451v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.05451v1-abstract-full" style="display: none;"> In order to provide perceptually accurate multimodal feedback during driving situations, it is vital to understand the threshold at which drivers are able to recognize asynchrony between multiple incoming stimuli. In this work, we investigated and report the \textit{detection threshold} (DT) of asynchrony between audio and haptic feedback, in the context of a force feedback steering wheel. We designed the experiment to loosely resemble a driving situation where the haptic feedback was provided through a steering wheel (\textit{Sensodrive}), while the accompanying audio was played through noise cancelling headphones. Both feedbacks were designed to resemble rumble strips, that are generally installed on the side of major roadways as a safety tool. The results indicate that, for $50\%$ of the participants, asynchrony was detectable outside the range of -75 ms and 110 ms, where the former is related to perceiving audio before haptic and vice versa for the latter. We were also able to concur with previous studies, which state that latency is perceivable at a lower threshold when audio precedes haptic stimuli. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.05451v1-abstract-full').style.display = 'none'; document.getElementById('2307.05451v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 9 figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 91E30 <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> H.1.2 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.15581">arXiv:2305.15581</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.15581">pdf</a>, <a href="https://arxiv.org/format/2305.15581">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Unsupervised Semantic Correspondence Using Stable Diffusion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Hedlin%2C+E">Eric Hedlin</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gopal Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Mahajan%2C+S">Shweta Mahajan</a>, <a href="/search/cs?searchtype=author&amp;query=Isack%2C+H">Hossam Isack</a>, <a href="/search/cs?searchtype=author&amp;query=Kar%2C+A">Abhishek Kar</a>, <a href="/search/cs?searchtype=author&amp;query=Tagliasacchi%2C+A">Andrea Tagliasacchi</a>, <a 
href="/search/cs?searchtype=author&amp;query=Yi%2C+K+M">Kwang Moo Yi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.15581v2-abstract-short" style="display: inline;"> Text-to-image diffusion models are now capable of generating images that are often indistinguishable from real images. To generate such images, these models must understand the semantics of the objects they are asked to generate. In this work we show that, without any training, one can leverage this semantic knowledge within diffusion models to find semantic correspondences - locations in multiple&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.15581v2-abstract-full').style.display = 'inline'; document.getElementById('2305.15581v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.15581v2-abstract-full" style="display: none;"> Text-to-image diffusion models are now capable of generating images that are often indistinguishable from real images. To generate such images, these models must understand the semantics of the objects they are asked to generate. In this work we show that, without any training, one can leverage this semantic knowledge within diffusion models to find semantic correspondences - locations in multiple images that have the same semantic meaning. Specifically, given an image, we optimize the prompt embeddings of these models for maximum attention on the regions of interest. These optimized embeddings capture semantic information about the location, which can then be transferred to another image. 
By doing so we obtain results on par with the strongly supervised state of the art on the PF-Willow dataset and significantly outperform (20.9% relative for the SPair-71k dataset) any existing weakly or unsupervised method on PF-Willow, CUB-200 and SPair-71k datasets. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.15581v2-abstract-full').style.display = 'none'; document.getElementById('2305.15581v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 24 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Project website: https://github.com/ubc-vision/LDM_correspondences</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.07898">arXiv:2305.07898</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.07898">pdf</a>, <a href="https://arxiv.org/format/2305.07898">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> </div> </div> <p class="title is-5 mathjax"> Network-GIANT: Fully distributed Newton-type optimization via harmonic Hessian consensus </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Maritan%2C+A">Alessio Maritan</a>, <a 
href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Ganesh Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Schenato%2C+L">Luca Schenato</a>, <a href="/search/cs?searchtype=author&amp;query=Dey%2C+S">Subhrakanti Dey</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.07898v2-abstract-short" style="display: inline;"> This paper considers the problem of distributed multi-agent learning, where the global aim is to minimize a sum of local objective (empirical loss) functions through local optimization and information exchange between neighbouring nodes. We introduce a Newton-type fully distributed optimization algorithm, Network-GIANT, which is based on GIANT, a Federated learning algorithm that relies on a centr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.07898v2-abstract-full').style.display = 'inline'; document.getElementById('2305.07898v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.07898v2-abstract-full" style="display: none;"> This paper considers the problem of distributed multi-agent learning, where the global aim is to minimize a sum of local objective (empirical loss) functions through local optimization and information exchange between neighbouring nodes. We introduce a Newton-type fully distributed optimization algorithm, Network-GIANT, which is based on GIANT, a Federated learning algorithm that relies on a centralized parameter server. The Network-GIANT algorithm is designed via a combination of gradient-tracking and a Newton-type iterative algorithm at each node with consensus based averaging of local gradient and Newton updates. 
We prove that our algorithm guarantees semi-global and exponential convergence to the exact solution over the network assuming strongly convex and smooth loss functions. We provide empirical evidence of the superior convergence performance of Network-GIANT over other state-of-art distributed learning algorithms such as Network-DANE and Newton-Raphson Consensus. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.07898v2-abstract-full').style.display = 'none'; document.getElementById('2305.07898v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.05255">arXiv:2305.05255</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.05255">pdf</a>, <a href="https://arxiv.org/format/2305.05255">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Human-Computer Interaction">cs.HC</span> </div> </div> <p class="title is-5 mathjax"> Emolysis: A Multimodal Open-Source Group Emotion Analysis and Visualization Toolkit </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ghosh%2C+S">Shreya Ghosh</a>, <a href="/search/cs?searchtype=author&amp;query=Cai%2C+Z">Zhixi Cai</a>, <a href="/search/cs?searchtype=author&amp;query=Gupta%2C+P">Parul Gupta</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Garima Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Dhall%2C+A">Abhinav Dhall</a>, <a 
href="/search/cs?searchtype=author&amp;query=Hayat%2C+M">Munawar Hayat</a>, <a href="/search/cs?searchtype=author&amp;query=Gedeon%2C+T">Tom Gedeon</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.05255v3-abstract-short" style="display: inline;"> Automatic group emotion recognition plays an important role in understanding complex human-human interaction. This paper introduces, Emolysis, a Python-based, standalone open-source group emotion analysis toolkit for use in different social situations upon getting consent from the users. Given any input video, Emolysis processes synchronized multimodal input and maps it to group level emotion, val&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.05255v3-abstract-full').style.display = 'inline'; document.getElementById('2305.05255v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.05255v3-abstract-full" style="display: none;"> Automatic group emotion recognition plays an important role in understanding complex human-human interaction. This paper introduces, Emolysis, a Python-based, standalone open-source group emotion analysis toolkit for use in different social situations upon getting consent from the users. Given any input video, Emolysis processes synchronized multimodal input and maps it to group level emotion, valence and arousal. Additionally, the toolkit supports major mobile and desktop platforms (Android, iOS, Windows). The Emolysis platform also comes with an intuitive graphical user interface that allows users to select different modalities and target persons for more fine-grained emotion analysis. 
Emolysis is freely available for academic research and encourages application developers to extend it to application specific environments on top of the existing system. We believe that the extension mechanism is quite straightforward. Our code models and interface are available at https://github.com/ControlNet/emolysis. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.05255v3-abstract-full').style.display = 'none'; document.getElementById('2305.05255v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted by ACII Demo 2024. 
Both Shreya Ghosh and Zhixi Cai contributed equally to this research</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2304.07245">arXiv:2304.07245</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2304.07245">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Machine Learning-Based Multi-Objective Design Exploration Of Flexible Disc Elements </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+G">Gehendra Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Mun%2C+S">Sungkwang Mun</a>, <a href="/search/cs?searchtype=author&amp;query=Lee%2C+N">Nayeon Lee</a>, <a href="/search/cs?searchtype=author&amp;query=Peterson%2C+L">Luke Peterson</a>, <a href="/search/cs?searchtype=author&amp;query=Tellkamp%2C+D">Daniela Tellkamp</a>, <a href="/search/cs?searchtype=author&amp;query=Nellippallil%2C+A+B">Anand Balu Nellippallil</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2304.07245v1-abstract-short" style="display: inline;"> Design exploration is an important step in the engineering design process. This involves the search for design/s that meet the specified design criteria and accomplishes the predefined objective/s. In recent years, machine learning-based approaches have been widely used in engineering design problems. 
This paper showcases Artificial Neural Network (ANN) architecture applied to an engineering desig&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.07245v1-abstract-full').style.display = 'inline'; document.getElementById('2304.07245v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.07245v1-abstract-full" style="display: none;"> Design exploration is an important step in the engineering design process. This involves the search for design/s that meet the specified design criteria and accomplishes the predefined objective/s. In recent years, machine learning-based approaches have been widely used in engineering design problems. This paper showcases Artificial Neural Network (ANN) architecture applied to an engineering design problem to explore and identify improved design solutions. The case problem of this study is the design of flexible disc elements used in disc couplings. We are required to improve the design of the disc elements by lowering the mass and stress without lowering the torque transmission and misalignment capability. To accomplish this objective, we employ ANN coupled with genetic algorithm in the design exploration step to identify designs that meet the specified criteria (torque and misalignment) while having minimum mass and stress. The results are comparable to the optimized results obtained from the traditional response surface method. This can have huge advantage when we are evaluating conceptual designs against multiple conflicting requirements. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.07245v1-abstract-full').style.display = 'none'; document.getElementById('2304.07245v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Sharma%2C+G&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Sharma%2C+G&amp;start=0" class="pagination-link is-current" aria-label="Go to page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Sharma%2C+G&amp;start=50" class="pagination-link " aria-label="Go to page 2">2 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Sharma%2C+G&amp;start=100" class="pagination-link " aria-label="Go to page 3">3 </a> </li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" 
role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 
0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 
21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10