Search | arXiv e-print repository

<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 56 results for author: <span class="mathjax">Pathak, S</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Pathak%2C+S">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Pathak, S"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Pathak%2C+S&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Pathak, S"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Pathak%2C+S&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Pathak%2C+S&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Pathak%2C+S&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.01816">arXiv:2502.01816</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2502.01816">pdf</a>, <a href="https://arxiv.org/format/2502.01816">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Low Resource Video Super-resolution using Memory and Residual Deformable Convolutions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Viswanathan%2C+K">Kavitha Viswanathan</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shashwat Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Bharambe%2C+P">Piyush Bharambe</a>, <a href="/search/cs?searchtype=author&amp;query=Choudhary%2C+H">Harsh Choudhary</a>, <a href="/search/cs?searchtype=author&amp;query=Sethi%2C+A">Amit Sethi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.01816v1-abstract-short" style="display: inline;"> Transformer-based video super-resolution (VSR) models have set new benchmarks in recent years, but their substantial computational demands make most of them unsuitable for deployment on resource-constrained devices. Achieving a balance between model complexity and output quality remains a formidable challenge in VSR. 
2. arXiv:2501.11709 [pdf, other] (cs.SE)
   Towards Detecting Prompt Knowledge Gaps for Improved LLM-guided Issue Resolution
   Authors: Ramtin Ehsani, Sakshi Pathak, Preetha Chatterjee
   Abstract: Large language models (LLMs) have become essential in software development, especially for issue resolution. However, despite their widespread use, significant challenges persist in the quality of LLM responses to issue-resolution queries. LLM interactions often yield incorrect, incomplete, or ambiguous information, largely due to knowledge gaps in prompt design, which can lead to unproductive exchanges and reduced developer productivity. In this paper, we analyze 433 developer-ChatGPT conversations within GitHub issue threads to examine the impact of prompt knowledge gaps and conversation styles on issue resolution. We identify four main knowledge gaps in developer prompts: Missing Context, Missing Specifications, Multiple Context, and Unclear Instructions. Assuming that conversations within closed issues contributed to successful resolutions while those in open issues did not, we find that ineffective conversations contain knowledge gaps in 44.6% of prompts, compared to only 12.6% in effective ones. Additionally, we observe seven distinct conversational styles, with Directive Prompting, Chain of Thought, and Responsive Feedback being the most prevalent. We find that knowledge gaps are present in all styles of conversations, with Missing Context being the most repeated challenge developers face in issue-resolution conversations. Based on our analysis, we identify key textual and code-related heuristics (Specificity, Contextual Richness, and Clarity) that are associated with successful issue closure and help assess prompt quality. These heuristics lay the foundation for an automated tool that can dynamically flag unclear prompts and suggest structured improvements. To test feasibility, we developed a lightweight browser extension prototype for detecting prompt gaps that can be easily adapted to other tools within developer workflows.
   Submitted 25 February, 2025; v1 submitted 20 January, 2025; originally announced January 2025.
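The four gap categories above lend themselves to simple textual checks. Here is a hedged Python sketch of a rule-based flagger; the specific rules are invented stand-ins for the heuristics the paper derives empirically.

```python
# Toy flags for the paper's four prompt knowledge gaps. The rules below are
# illustrative assumptions, not the paper's validated heuristics.
import re

def flag_prompt_gaps(prompt: str) -> list[str]:
    flags = []
    if "```" not in prompt and not re.search(r"Traceback|error:", prompt, re.I):
        flags.append("Missing Context: no code snippet or error output")
    if not re.search(r"\b(expected|should|want|need)\b", prompt, re.I):
        flags.append("Missing Specifications: no stated expected behavior")
    if prompt.count("```") > 4:
        flags.append("Multiple Context: several unrelated code blocks")
    if not re.search(r"\b(fix|explain|write|how|why|what)\b", prompt, re.I):
        flags.append("Unclear Instructions: no explicit request")
    return flags

print(flag_prompt_gaps("My build fails, please help"))
```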
3. arXiv:2501.03533 [pdf] (cs.CV), doi:10.1080/01691864.2024.2422968
   Anomaly Triplet-Net: Progress Recognition Model Using Deep Metric Learning Considering Occlusion for Manual Assembly Work
   Authors: Takumi Kitsukawa, Kazuma Miura, Shigeki Yumoto, Sarthak Pathak, Alessandro Moro, Kazunori Umeda
   Abstract: In this paper, a progress recognition method that considers occlusion, based on deep metric learning, is proposed to visualize the product assembly process in a factory. First, the target assembly product is detected from images acquired by a fixed-point camera installed in the factory, using a deep learning-based object detection method. Next, the detection area is cropped from the image. Finally, by applying a classification method based on deep metric learning to the cropped image, the progress of the product assembly work is estimated as a rough progress step. As a specific progress estimation model, we propose an Anomaly Triplet-Net that adds anomaly samples to Triplet Loss for progress estimation considering occlusion. In experiments, an 82.9% success rate is achieved by the progress estimation method using Anomaly Triplet-Net. We also experimented with the practicality of the sequence of detection, cropping, and progress estimation, and confirmed the effectiveness of the overall system.
   Submitted 7 January, 2025; originally announced January 2025.
   Comments: This paper has been peer-reviewed, revised, and published in Advanced Robotics.
   Journal ref: Advanced Robotics (2024)
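One plausible reading of "adds anomaly samples to Triplet Loss" is an extra triplet term that pushes embeddings of anomalous (e.g., occluded) crops away from the anchor. A sketch under that assumption; the margin and weighting are arbitrary choices, not values from the paper.

```python
import torch
import torch.nn.functional as F

def anomaly_triplet_loss(anchor, positive, negative, anomaly,
                         margin=1.0, anomaly_weight=0.5):
    # Standard triplet term over normal samples.
    base = F.triplet_margin_loss(anchor, positive, negative, margin=margin)
    # Assumed extra term: treat anomaly (occluded) samples as negatives too.
    extra = F.triplet_margin_loss(anchor, positive, anomaly, margin=margin)
    return base + anomaly_weight * extra

emb = lambda: torch.randn(8, 128)
print(anomaly_triplet_loss(emb(), emb(), emb(), emb()))
```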
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2501.03533v1-abstract-full').style.display = 'none'; document.getElementById('2501.03533v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 January, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This paper has been peer-reviewed, revised, and published in Advanced Robotics</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Advanced Robotics(2024) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2412.12122">arXiv:2412.12122</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2412.12122">pdf</a>, <a href="https://arxiv.org/format/2412.12122">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> </div> </div> <p class="title is-5 mathjax"> Inverse design of potential metastructures inspired from Indian medieval architectural elements </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Bhattacharya%2C+B">Bishakh Bhattacharya</a>, <a href="/search/cs?searchtype=author&amp;query=Gupta%2C+T">Tanuj Gupta</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+A+K">Arun Kumar Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Dwivedi%2C+A">Ankur Dwivedi</a>, <a href="/search/cs?searchtype=author&amp;query=Gupta%2C+V">Vivek Gupta</a>, <a href="/search/cs?searchtype=author&amp;query=Sahana%2C+S">Subhadeep Sahana</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Suryansh Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Awasthi%2C+A">Ashish Awasthi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2412.12122v2-abstract-short" style="display: inline;"> In this study, we immerse in the intricate world of patterns, examining the structural details of Indian medieval architecture for the discovery of motifs with great application potential from the mechanical metastructure perspective. The motifs that specifically engrossed us are derived from the tomb of I&#39;timad-ud-Daula, situated in the city of Agra, close to the Taj Mahal. 
5. arXiv:2412.10411 [pdf, other] (q-bio.QM, cs.AI)
   Pre-trained protein language model for codon optimization
   Authors: Shashank Pathak, Guohui Lin
   Abstract: Motivation: Codon optimization of Open Reading Frame (ORF) sequences is essential for enhancing mRNA stability and expression in applications like mRNA vaccines, where codon choice can significantly affect protein yield, which in turn directly impacts immune strength. In this work, we investigate the use of a pre-trained protein language model (PPLM) to obtain a rich representation of amino acids that can be utilized for codon optimization. This leaves us with a simpler fine-tuning task over the PPLM for optimizing ORF sequences. Results: The ORFs generated by our proposed models outperformed their natural counterparts encoding the same proteins on computational metrics for stability and expression. They also demonstrated enhanced performance against the benchmark ORFs used in mRNA vaccines for the SARS-CoV-2 viral spike protein and the varicella-zoster virus (VZV). These results highlight the potential of adapting a PPLM for designing ORFs tailored to encode target antigens in mRNA vaccines.
   Submitted 7 December, 2024; originally announced December 2024.
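Framing codon optimization as fine-tuning over a frozen PPLM reduces, roughly, to predicting one of the 64 codons per amino-acid position. A sketch of such a head; a generic TransformerEncoder stands in for the real pre-trained model, and the head design is an assumption.

```python
import torch
import torch.nn as nn

class CodonOptimizer(nn.Module):
    def __init__(self, d_model=128, n_codons=64):
        super().__init__()
        self.pplm = nn.TransformerEncoder(        # stand-in for a real PPLM
            nn.TransformerEncoderLayer(d_model, nhead=8, batch_first=True),
            num_layers=2)
        for p in self.pplm.parameters():
            p.requires_grad = False               # keep the PPLM frozen
        self.head = nn.Linear(d_model, n_codons)  # per-residue codon logits

    def forward(self, aa_embeddings):             # (batch, seq_len, d_model)
        return self.head(self.pplm(aa_embeddings))

logits = CodonOptimizer()(torch.randn(2, 100, 128))
print(logits.shape)  # (2, 100, 64): a codon distribution per amino acid
```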
6. arXiv:2411.17580 [pdf, other] (cs.CV)
   Revisiting Point Cloud Completion: Are We Ready For The Real-World?
   Authors: Stuti Pathak, Prashant Kumar, Nicholus Mboga, Gunther Steenackers, Rudi Penne
   Abstract: Point clouds acquired in constrained and challenging real-world settings are incomplete, non-uniformly sparse, or both. These obstacles present acute challenges for a vital task: point cloud completion. Using tools from Algebraic Topology and Persistent Homology ($\mathcal{PH}$), we demonstrate that current benchmark synthetic point clouds lack the rich topological features that are important constituents of point clouds captured in realistic settings. To facilitate research in this direction, we contribute the first real-world industrial point cloud dataset for point cloud completion, RealPC, a diverse set of rich and varied point clouds consisting of $\sim$40,000 pairs across 21 categories of industrial structures in railway establishments. Our benchmark results on several strong baselines reveal a striking observation: the existing methods are tailored for synthetic datasets and fail miserably in real-world settings. Building on our observation that RealPC exhibits several 0- and 1-dimensional $\mathcal{PH}$-based topological features, we demonstrate the potential of integrating Homology-based topological priors into existing works. More specifically, we present how 0-dimensional $\mathcal{PH}$ priors, which extract the global topology of a complete shape in the form of a 3-D skeleton, can assist a model in generating topologically consistent complete shapes.
   Submitted 31 December, 2024; v1 submitted 26 November, 2024; originally announced November 2024.
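The $\mathcal{PH}$ features referenced above can be computed directly from a point cloud. A short sketch using the ripser library; the lifetime threshold for calling a feature "significant" is an assumption.

```python
# Compute 0- and 1-dimensional persistence diagrams for a point cloud,
# the kind of PH features the paper uses to compare synthetic vs. real data.
import numpy as np
from ripser import ripser

points = np.random.rand(200, 3)              # stand-in for a real point cloud
diagrams = ripser(points, maxdim=1)["dgms"]  # [H0 diagram, H1 diagram]

for dim, dgm in enumerate(diagrams):
    lifetimes = dgm[:, 1] - dgm[:, 0]
    significant = np.sum(lifetimes[np.isfinite(lifetimes)] > 0.1)
    print(f"H{dim}: {significant} features with lifetime > 0.1")
```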
7. arXiv:2411.09618 [pdf, other] (physics.med-ph, cs.LG), doi:10.59275/j.melba.2024-9c68
   MICCAI-CDMRI 2023 QuantConn Challenge Findings on Achieving Robust Quantitative Connectivity through Harmonized Preprocessing of Diffusion MRI
   Authors: Nancy R. Newlin, Kurt Schilling, Serge Koudoro, Bramsh Qamar Chandio, Praitayini Kanakaraj, Daniel Moyer, Claire E. Kelly, Sila Genc, Jian Chen, Joseph Yuan-Mou Yang, Ye Wu, Yifei He, Jiawei Zhang, Qingrun Zeng, Fan Zhang, Nagesh Adluru, Vishwesh Nath, Sudhir Pathak, Walter Schneider, Anurag Gade, Yogesh Rathi, Tom Hendriks, Anna Vilanova, Maxime Chamberland, Tomasz Pieciak, et al. (11 additional authors not shown)
   Abstract: White matter alterations are increasingly implicated in neurological diseases and their progression. International-scale studies use diffusion-weighted magnetic resonance imaging (DW-MRI) to qualitatively identify changes in white matter microstructure and connectivity. Yet, quantitative analysis of DW-MRI data is hindered by inconsistencies stemming from varying acquisition protocols. There is a pressing need to harmonize the preprocessing of DW-MRI datasets to ensure the derivation of robust quantitative diffusion metrics across acquisitions. In the MICCAI-CDMRI 2023 QuantConn challenge, participants were provided raw data from the same individuals collected on the same scanner but with two different acquisitions, and were tasked with preprocessing the DW-MRI to minimize acquisition differences while retaining biological variation. Submissions were evaluated on the reproducibility and comparability of cross-acquisition bundle-wise microstructure measures, bundle shape features, and connectomics. The key innovations of the QuantConn challenge are that (1) we assess bundles and tractography in the context of harmonization for the first time, (2) we assess connectomics in the context of harmonization for the first time, and (3) we have 10x more subjects than the prior harmonization challenge, MUSHAC, and 100x more than SuperMUDI. We find that bundle surface area, fractional anisotropy, connectome assortativity, betweenness centrality, edge count, modularity, nodal strength, and participation coefficient measures are most biased by acquisition, and that machine learning voxel-wise correction, RISH mapping, and NeSH methods effectively reduce these biases. In addition, microstructure measures AD, MD, RD, bundle length, connectome density, efficiency, and path length are least biased by these acquisition differences.
   Submitted 14 November, 2024; originally announced November 2024.
   Comments: Accepted for publication at the Journal of Machine Learning for Biomedical Imaging (MELBA) https://melba-journal.org/2024/019
   Journal ref: Machine Learning for Biomedical Imaging 2 (2024)
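The challenge's core measurement is how much a bundle-wise metric differs between two acquisitions of the same subjects. A toy numpy sketch of such a bias statistic on synthetic fractional-anisotropy values; the challenge's actual evaluation metrics are more elaborate.

```python
import numpy as np

rng = np.random.default_rng(0)
fa_acq_a = rng.normal(0.45, 0.05, size=(30, 20))             # subjects x bundles
fa_acq_b = fa_acq_a + rng.normal(0.02, 0.01, size=(30, 20))  # second acquisition

paired_diff = fa_acq_b - fa_acq_a
bias_per_bundle = paired_diff.mean(axis=0)                   # systematic offset
relative_bias = np.abs(paired_diff).mean() / fa_acq_a.mean() # assumed summary stat
print(f"max bundle bias: {bias_per_bundle.max():.3f}, relative: {relative_bias:.3f}")
```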
8. arXiv:2410.21721 [pdf, other] (cs.CV)
   DiffSTR: Controlled Diffusion Models for Scene Text Removal
   Authors: Sanhita Pathak, Vinay Kaushik, Brejesh Lall
   Abstract: To prevent unauthorized use of text in images, Scene Text Removal (STR) has become a crucial task. It focuses on automatically removing text and replacing it with a natural, text-less background while preserving significant details such as texture, color, and contrast. Despite its importance in privacy protection, STR faces several challenges, including boundary artifacts, inconsistent texture and color, and preserving correct shadows. Most STR approaches estimate a text region mask to train a model, solving for image translation or inpainting to generate a text-free image. Thus, the quality of the generated image depends on the accuracy of the inpainting mask and the generator's capability. In this work, we leverage the superior capabilities of diffusion models in generating high-quality, consistent images to address the STR problem. We introduce a ControlNet diffusion model, treating STR as an inpainting task. To enhance the model's robustness, we develop a mask pretraining pipeline to condition our diffusion model. This involves training a masked autoencoder (MAE) using a combination of box masks and coarse stroke masks, and fine-tuning it using masks derived from our novel segmentation-based mask refinement framework. This framework iteratively refines an initial mask and segments it using the SLIC and Hierarchical Feature Selection (HFS) algorithms to produce an accurate final text mask. This improves mask prediction and utilizes rich textural information in natural scene images to provide accurate inpainting masks. Experiments on the SCUT-EnsText and SCUT-Syn datasets demonstrate that our method significantly outperforms existing state-of-the-art techniques.
   Submitted 29 October, 2024; originally announced October 2024.
   Comments: 11 pages, 6 figures, 3 tables
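The mask refinement step segments an initial text mask with superpixels. Here is a hedged sketch of one SLIC-based refinement round (the paper also uses HFS, omitted here); the overlap threshold is an assumption.

```python
import numpy as np
from skimage.segmentation import slic

def refine_text_mask(image, initial_mask, n_segments=400, overlap=0.3):
    segments = slic(image, n_segments=n_segments, compactness=10)
    refined = np.zeros_like(initial_mask, dtype=bool)
    for label in np.unique(segments):
        region = segments == label
        # Keep a superpixel if enough of it overlaps the initial text mask.
        if initial_mask[region].mean() > overlap:
            refined |= region
    return refined

image = np.random.rand(64, 64, 3)
mask = np.zeros((64, 64), dtype=bool)
mask[20:30, 10:50] = True
print(refine_text_mask(image, mask).sum())
```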
9. arXiv:2408.00118 [pdf, other] (cs.CL, cs.AI)
   Gemma 2: Improving Open Language Models at a Practical Size
   Authors: Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, Johan Ferret, Peter Liu, Pouya Tafti, Abe Friesen, Michelle Casbon, Sabela Ramos, Ravin Kumar, Charline Le Lan, Sammy Jerome, Anton Tsitsulin, Nino Vieillard, Piotr Stanczyk, Sertan Girgin, Nikola Momchev, Matt Hoffman, et al. (173 additional authors not shown)
   Abstract: In this work, we introduce Gemma 2, a new addition to the Gemma family of lightweight, state-of-the-art open models, ranging in scale from 2 billion to 27 billion parameters. In this new version, we apply several known technical modifications to the Transformer architecture, such as interleaving local-global attentions (Beltagy et al., 2020a) and group-query attention (Ainslie et al., 2023). We also train the 2B and 9B models with knowledge distillation (Hinton et al., 2015) instead of next-token prediction. The resulting models deliver the best performance for their size, and even offer competitive alternatives to models that are 2-3 times bigger. We release all our models to the community.
   Submitted 2 October, 2024; v1 submitted 31 July, 2024; originally announced August 2024.
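Interleaved local-global attention alternates sliding-window layers with full-attention layers. A minimal sketch using PyTorch's scaled_dot_product_attention; the window size and layer schedule are illustrative, not the released Gemma 2 configuration.

```python
import torch
import torch.nn.functional as F

def attention_mask(seq_len, local, window=128):
    causal = torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool))
    if not local:
        return causal
    idx = torch.arange(seq_len)
    in_window = (idx[:, None] - idx[None, :]) < window
    return causal & in_window        # causal AND within the sliding window

def layer(x, local):
    mask = attention_mask(x.shape[1], local)
    return F.scaled_dot_product_attention(x, x, x, attn_mask=mask)

x = torch.randn(1, 256, 64)
for i in range(4):                   # alternate local / global layers
    x = layer(x, local=(i % 2 == 0))
print(x.shape)
```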
10. arXiv:2408.00112 [pdf, other] (cs.CV)
    Automated Sperm Morphology Analysis Based on Instance-Aware Part Segmentation
    Authors: Wenyuan Chen, Haocong Song, Changsheng Dai, Aojun Jiang, Guanqiao Shan, Hang Liu, Yanlong Zhou, Khaled Abdalla, Shivani N Dhanani, Katy Fatemeh Moosavi, Shruti Pathak, Clifford Librach, Zhuoran Zhang, Yu Sun
    Abstract: Traditional sperm morphology analysis is based on tedious manual annotation. Automated morphology analysis of a high number of sperm requires accurate segmentation of each sperm part and quantitative morphology evaluation. State-of-the-art instance-aware part segmentation networks follow a "detect-then-segment" paradigm. However, due to sperm's slim shape, their segmentation suffers from large context loss and feature distortion caused by bounding box cropping and resizing during ROI Align. Moreover, morphology measurement of the sperm tail is demanding because of its long, curved shape and uneven width. This paper presents techniques to measure sperm morphology parameters automatically and quantitatively. A novel attention-based instance-aware part segmentation network is designed to reconstruct lost contexts outside bounding boxes and to fix distorted features, by refining preliminary segmented masks through merging features extracted by a feature pyramid network. An automated centerline-based tail morphology measurement method is also proposed, in which an outlier filtering method and an endpoint detection algorithm are designed to accurately reconstruct tail endpoints. Experimental results demonstrate that the proposed network outperformed the state-of-the-art top-down RP-R-CNN by 9.2% on $\text{AP}^{p}_{\text{vol}}$, and the proposed automated tail morphology measurement method achieved high measurement accuracies of 95.34%, 96.39%, and 91.2% for length, width, and curvature, respectively.
    Submitted 31 July, 2024; originally announced August 2024.
    Comments: Accepted to ICRA 2024
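A centerline-based tail measurement can be sketched with standard tools: skeletonize the tail mask for a centerline, then read widths off a distance transform. This is an assumed simplification of the paper's method, which additionally applies outlier filtering and endpoint detection.

```python
import numpy as np
from skimage.morphology import skeletonize
from scipy.ndimage import distance_transform_edt

def tail_morphology(tail_mask: np.ndarray):
    skeleton = skeletonize(tail_mask)            # 1-pixel-wide centerline
    length = skeleton.sum()                      # crude length in pixels
    dist = distance_transform_edt(tail_mask)     # distance to background
    width = 2.0 * dist[skeleton].mean()          # mean diameter along centerline
    return length, width

mask = np.zeros((64, 64), dtype=bool)
mask[30:34, 5:60] = True                         # toy tail region
print(tail_morphology(mask))
```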
11. arXiv:2406.02184 [pdf, other] (cs.CV)
    GraVITON: Graph based garment warping with attention guided inversion for Virtual-tryon
    Authors: Sanhita Pathak, Vinay Kaushik, Brejesh Lall
    Abstract: Virtual try-on, a rapidly evolving field in computer vision, is transforming e-commerce by improving customer experiences through precise garment warping and seamless integration onto the human body. Existing methods such as TPS and flow address garment warping but overlook the finer contextual details. In this paper, we introduce a novel graph-based warping technique which emphasizes the value of context in garment flow. Our graph-based warping module generates a warped garment as well as a coarse person image, which is utilised by a simple refinement network to give a coarse virtual try-on image. The proposed work exploits a latent diffusion model to generate the final try-on, treating garment transfer as an inpainting task. The diffusion model is conditioned with decoupled cross-attention-based inversion of visual and textual information. We introduce an occlusion-aware warping constraint that generates a dense warped garment, without any holes or occlusion. Our method, validated on the VITON-HD and Dresscode datasets, showcases substantial state-of-the-art qualitative and quantitative results, showing considerable improvement in garment warping, texture preservation, and overall realism.
    Submitted 4 June, 2024; originally announced June 2024.
    Comments: 18 pages, 7 figures, 6 tables
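A dense flow can warp the garment via grid sampling, and one way to read the occlusion-aware constraint is as a penalty on abrupt flow changes that would fold the garment and create holes. A sketch under those assumptions; the paper's actual constraint may differ.

```python
import torch
import torch.nn.functional as F

def warp(garment, flow):
    b, _, h, w = garment.shape
    ys, xs = torch.meshgrid(torch.linspace(-1, 1, h),
                            torch.linspace(-1, 1, w), indexing="ij")
    base = torch.stack([xs, ys], dim=-1).expand(b, h, w, 2)
    return F.grid_sample(garment, base + flow.permute(0, 2, 3, 1),
                         align_corners=True)

def occlusion_penalty(flow):
    # Penalize abrupt flow changes, which fold the garment when warping.
    dx = (flow[:, :, :, 1:] - flow[:, :, :, :-1]).abs().mean()
    dy = (flow[:, :, 1:, :] - flow[:, :, :-1, :]).abs().mean()
    return dx + dy

garment = torch.randn(1, 3, 64, 48)
flow = torch.zeros(1, 2, 64, 48)
print(warp(garment, flow).shape, occlusion_penalty(flow).item())
```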
While existing methods such as TPS and flow address garment warping, they overlook the finer contextual details. In this paper, we introduce a novel graph-based warping technique which emphasizes the value of context in garment flow. Our graph-based warping module generates a warped garment as well as a coarse person image, which is utilised by a simple refinement network to produce a coarse virtual tryon image. The proposed work exploits a latent diffusion model to generate the final tryon, treating garment transfer as an inpainting task. The diffusion model is conditioned with decoupled cross-attention based inversion of visual and textual information. We introduce an occlusion-aware warping constraint that generates a dense warped garment without holes or occlusions. Our method, validated on VITON-HD and Dresscode datasets, showcases substantial state-of-the-art qualitative and quantitative results, showing considerable improvement in garment warping, texture preservation, and overall realism. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.02184v1-abstract-full').style.display = 'none'; document.getElementById('2406.02184v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">18 pages, 7 Figures and 6 Tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.19179">arXiv:2405.19179</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.19179">pdf</a>, <a href="https://arxiv.org/format/2405.19179">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Model Agnostic Defense against Adversarial Patch Attacks on Object Detection in Unmanned Aerial Vehicles </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Saurabh Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Shrestha%2C+S">Samridha Shrestha</a>, <a href="/search/cs?searchtype=author&amp;query=AlMahmoud%2C+A">Abdelrahman AlMahmoud</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.19179v1-abstract-short" style="display: inline;"> Object detection forms a key component in Unmanned Aerial Vehicles (UAVs) for completing high-level tasks that depend on the awareness of objects on the ground from an aerial perspective. In that scenario, adversarial patch attacks on an onboard object detector can severely impair the performance of upstream tasks.
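<p class="is-size-7"><em>Editor&#39;s note:</em> the paper below formulates patch defense as occlusion removal. As a schematic stand-in for that idea (the authors train a dedicated defense network; this is not it), classical OpenCV inpainting illustrates the remove-and-restore step, assuming a patch mask has already been localized.</p>
<pre><code>
# Occlusion-removal schematic: mask out a suspect patch region and
# reconstruct the pixels under it with classical inpainting.
import cv2
import numpy as np

def remove_patch(image, patch_mask):
    """image: HxWx3 uint8 frame; patch_mask: HxW uint8, 255 where a patch sits."""
    return cv2.inpaint(image, patch_mask, inpaintRadius=3, flags=cv2.INPAINT_TELEA)

frame = np.full((120, 160, 3), 128, dtype=np.uint8)   # stand-in UAV frame
mask = np.zeros((120, 160), dtype=np.uint8)
mask[40:70, 60:100] = 255                             # detected patch region
clean = remove_patch(frame, mask)
print(clean.shape)  # restored frame ready for the downstream object detector
</code></pre>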
This paper proposes a novel model-agnostic defense mechanism against the threat of&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.19179v1-abstract-full').style.display = 'inline'; document.getElementById('2405.19179v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.19179v1-abstract-full" style="display: none;"> Object detection forms a key component in Unmanned Aerial Vehicles (UAVs) for completing high-level tasks that depend on the awareness of objects on the ground from an aerial perspective. In that scenario, adversarial patch attacks on an onboard object detector can severely impair the performance of upstream tasks. This paper proposes a novel model-agnostic defense mechanism against the threat of adversarial patch attacks in the context of UAV-based object detection. We formulate adversarial patch defense as an occlusion removal task. The proposed defense method can neutralize adversarial patches located on objects of interest, without exposure to adversarial patches during training. Our lightweight single-stage defense approach allows us to maintain a model-agnostic nature that, once deployed, does not need to be updated in response to changes in the object detection pipeline. The evaluations in digital and physical domains show the feasibility of our method for deployment in UAV object detection pipelines, by significantly decreasing the Attack Success Ratio without incurring substantial processing costs. As a result, the proposed defense solution can improve the reliability of object detection for UAVs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.19179v1-abstract-full').style.display = 'none'; document.getElementById('2405.19179v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">submitted to IROS 2024</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.4.4; I.4.9 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.18631">arXiv:2404.18631</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.18631">pdf</a>, <a href="https://arxiv.org/format/2404.18631">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Feature importance to explain multimodal prediction models. A clinical use case </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=van+de+Beld%2C+J">Jorn-Jan van de Beld</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shreyasi Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Geerdink%2C+J">Jeroen Geerdink</a>, <a href="/search/cs?searchtype=author&amp;query=Hegeman%2C+J+H">Johannes H.
Hegeman</a>, <a href="/search/cs?searchtype=author&amp;query=Seifert%2C+C">Christin Seifert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.18631v1-abstract-short" style="display: inline;"> Surgery to treat elderly hip fracture patients may cause complications that can lead to early mortality. An early warning system for complications could prompt clinicians to monitor high-risk patients more carefully and address potential complications early, or inform the patient. In this work, we develop a multimodal deep-learning model for post-operative mortality prediction using pre-operative&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.18631v1-abstract-full').style.display = 'inline'; document.getElementById('2404.18631v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.18631v1-abstract-full" style="display: none;"> Surgery to treat elderly hip fracture patients may cause complications that can lead to early mortality. An early warning system for complications could prompt clinicians to monitor high-risk patients more carefully and address potential complications early, or inform the patient. In this work, we develop a multimodal deep-learning model for post-operative mortality prediction using pre-operative and per-operative data from elderly hip fracture patients. Specifically, we include static patient data, hip and chest images before surgery in pre-operative data, vital signals, and medications administered during surgery in per-operative data. We extract features from image modalities using ResNet and from vital signals using LSTM. Explainable model outcomes are essential for clinical applicability; therefore, we compute Shapley values to explain the predictions of our multimodal black box model. We find that i) Shapley values can be used to estimate the relative contribution of each modality both locally and globally, and ii) a modified version of the chain rule can be used to propagate Shapley values through a sequence of models, supporting interpretable local explanations. Our findings imply that a multimodal combination of black box models can be explained by propagating Shapley values through the model sequence. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.18631v1-abstract-full').style.display = 'none'; document.getElementById('2404.18631v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024.
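<p class="is-size-7"><em>Editor&#39;s note:</em> a toy illustration of the two ideas in this abstract: exact Shapley values by subset enumeration, and propagating attributions through a two-stage model f(g(x)). The tiny linear models g and f below are hypothetical stand-ins for the encoders and fusion head, and the propagation shown is only chain-rule-like, not the authors&#39; exact modified chain rule.</p>
<pre><code>
# Exact Shapley values by subset enumeration (toy scale only).
from itertools import combinations
from math import factorial

def shapley(f, x, baseline):
    n = len(x)
    phi = [0.0] * n
    for i in range(n):
        others = [j for j in range(n) if j != i]
        for k in range(n):
            for S in combinations(others, k):
                w = factorial(len(S)) * factorial(n - len(S) - 1) / factorial(n)
                with_i = [x[j] if (j in S or j == i) else baseline[j] for j in range(n)]
                without = [x[j] if j in S else baseline[j] for j in range(n)]
                phi[i] += w * (f(with_i) - f(without))
    return phi

# Two-stage model: g maps raw features to intermediate scores, f consumes them.
g = lambda x: [x[0] + x[1], x[2]]          # modality encoder (toy)
f = lambda z: 2 * z[0] + 3 * z[1]          # fusion head (toy)
x, b = [1.0, 2.0, 3.0], [0.0, 0.0, 0.0]

phi_z = shapley(f, g(x), g(b))             # attribution of intermediate features
phi_x = shapley(lambda v: f(g(v)), x, b)   # end-to-end attribution
print(phi_z, phi_x)  # in this linear toy, phi_z[0] splits across x0 and x1
</code></pre>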
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at World Conference on Explainable Artificial Intelligence; 19 pages, 2 figures, 7 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.07839">arXiv:2404.07839</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.07839">pdf</a>, <a href="https://arxiv.org/format/2404.07839">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> RecurrentGemma: Moving Past Transformers for Efficient Open Language Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Botev%2C+A">Aleksandar Botev</a>, <a href="/search/cs?searchtype=author&amp;query=De%2C+S">Soham De</a>, <a href="/search/cs?searchtype=author&amp;query=Smith%2C+S+L">Samuel L Smith</a>, <a href="/search/cs?searchtype=author&amp;query=Fernando%2C+A">Anushan Fernando</a>, <a href="/search/cs?searchtype=author&amp;query=Muraru%2C+G">George-Cristian Muraru</a>, <a href="/search/cs?searchtype=author&amp;query=Haroun%2C+R">Ruba Haroun</a>, <a href="/search/cs?searchtype=author&amp;query=Berrada%2C+L">Leonard Berrada</a>, <a href="/search/cs?searchtype=author&amp;query=Pascanu%2C+R">Razvan Pascanu</a>, <a href="/search/cs?searchtype=author&amp;query=Sessa%2C+P+G">Pier Giuseppe Sessa</a>, <a href="/search/cs?searchtype=author&amp;query=Dadashi%2C+R">Robert Dadashi</a>, <a href="/search/cs?searchtype=author&amp;query=Hussenot%2C+L">Léonard Hussenot</a>, <a href="/search/cs?searchtype=author&amp;query=Ferret%2C+J">Johan Ferret</a>, <a href="/search/cs?searchtype=author&amp;query=Girgin%2C+S">Sertan Girgin</a>, <a href="/search/cs?searchtype=author&amp;query=Bachem%2C+O">Olivier Bachem</a>, <a href="/search/cs?searchtype=author&amp;query=Andreev%2C+A">Alek Andreev</a>, <a href="/search/cs?searchtype=author&amp;query=Kenealy%2C+K">Kathleen Kenealy</a>, <a href="/search/cs?searchtype=author&amp;query=Mesnard%2C+T">Thomas Mesnard</a>, <a href="/search/cs?searchtype=author&amp;query=Hardin%2C+C">Cassidy Hardin</a>, <a href="/search/cs?searchtype=author&amp;query=Bhupatiraju%2C+S">Surya Bhupatiraju</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shreya Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Sifre%2C+L">Laurent Sifre</a>, <a href="/search/cs?searchtype=author&amp;query=Rivi%C3%A8re%2C+M">Morgane Rivière</a>, <a href="/search/cs?searchtype=author&amp;query=Kale%2C+M+S">Mihir Sanjay Kale</a>, <a href="/search/cs?searchtype=author&amp;query=Love%2C+J">Juliette Love</a>, <a href="/search/cs?searchtype=author&amp;query=Tafti%2C+P">Pouya Tafti</a> , et al. (37 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.07839v2-abstract-short" style="display: inline;"> We introduce RecurrentGemma, a family of open language models which uses Google&#39;s novel Griffin architecture.
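<p class="is-size-7"><em>Editor&#39;s note:</em> the abstract goes on to credit Griffin&#39;s fixed-size recurrent state for cheap long-sequence inference. A toy NumPy sketch of a gated linear recurrence makes the point that memory stays constant in sequence length; the decay and parameterization here are illustrative, not Griffin&#39;s actual layer.</p>
<pre><code>
# Linear recurrence with a fixed-size state (illustrative, not Griffin itself).
import numpy as np

def linear_recurrence(x, a, B):
    """x: (T, d_in) inputs; a: (d_state,) decay in (0, 1); B: (d_state, d_in)."""
    h = np.zeros(a.shape)            # fixed-size state, independent of T
    states = []
    for x_t in x:                    # O(T) time, O(1) memory in sequence length
        h = a * h + B @ x_t          # elementwise decay + input projection
        states.append(h.copy())
    return np.stack(states)

rng = np.random.default_rng(0)
T, d_in, d_state = 1000, 4, 8
out = linear_recurrence(rng.normal(size=(T, d_in)),
                        a=np.full(d_state, 0.9),
                        B=rng.normal(size=(d_state, d_in)))
print(out.shape)  # (1000, 8): the state stays 8-dim no matter how long T grows
</code></pre>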
Griffin combines linear recurrences with local attention to achieve excellent performance on language. It has a fixed-sized state, which reduces memory use and enables efficient inference on long sequences. We provide two sizes of models, containing 2B and 9B parameters, and provide pre-tr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.07839v2-abstract-full').style.display = 'inline'; document.getElementById('2404.07839v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.07839v2-abstract-full" style="display: none;"> We introduce RecurrentGemma, a family of open language models which uses Google&#39;s novel Griffin architecture. Griffin combines linear recurrences with local attention to achieve excellent performance on language. It has a fixed-sized state, which reduces memory use and enables efficient inference on long sequences. We provide two sizes of models, containing 2B and 9B parameters, and provide pre-trained and instruction tuned variants for both. Our models achieve comparable performance to similarly-sized Gemma baselines despite being trained on fewer tokens. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.07839v2-abstract-full').style.display = 'none'; document.getElementById('2404.07839v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 August, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.01234">arXiv:2404.01234</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.01234">pdf</a>, <a href="https://arxiv.org/format/2404.01234">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Logic">math.LO</span> </div> </div> <p class="title is-5 mathjax"> GFLean: An Autoformalisation Framework for Lean via GF </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shashank Pathak</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.01234v1-abstract-short" style="display: inline;"> We present an autoformalisation framework for the Lean theorem prover, called GFLean. GFLean uses a high-level grammar writing tool called Grammatical Framework (GF) for parsing and linearisation. GFLean is implemented in Haskell. We explain the functionalities of GFLean, its inner working and discuss its limitations. 
We also discuss how we can use neural network based translation programs and rul&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.01234v1-abstract-full').style.display = 'inline'; document.getElementById('2404.01234v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.01234v1-abstract-full" style="display: none;"> We present an autoformalisation framework for the Lean theorem prover, called GFLean. GFLean uses a high-level grammar writing tool called Grammatical Framework (GF) for parsing and linearisation. GFLean is implemented in Haskell. We explain the functionalities of GFLean, its inner working and discuss its limitations. We also discuss how we can use neural network based translation programs and rule based translation programs together, complementing each other, to build robust autoformalisation frameworks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.01234v1-abstract-full').style.display = 'none'; document.getElementById('2404.01234v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">19 Pages, 3 Figures</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.7 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2404.00613">arXiv:2404.00613</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2404.00613">pdf</a>, <a href="https://arxiv.org/ps/2404.00613">ps</a>, <a href="https://arxiv.org/format/2404.00613">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> On $(θ, Θ)$-cyclic codes and their applications in constructing QECCs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shukla%2C+A+K">Awadhesh Kumar Shukla</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Sachin Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Pandey%2C+O+P">Om Prakash Pandey</a>, <a href="/search/cs?searchtype=author&amp;query=Mishra%2C+V">Vipul Mishra</a>, <a href="/search/cs?searchtype=author&amp;query=Upadhyay%2C+A+K">Ashish Kumar Upadhyay</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2404.00613v1-abstract-short" style="display: inline;"> Let $\mathbb F_q$ be a finite field, where $q$ is an odd prime power. Let $R=\mathbb{F}_q+u\mathbb{F}_q+v\mathbb{F}_q+uv\mathbb F_q$ with $u^2=u,v^2=v,uv=vu$.
In this paper, we study the algebraic structure of $(θ, Θ)$-cyclic codes of block length $(r,s)$ over $\mathbb{F}_qR.$ Specifically, we analyze the structure of these codes as left $R[x:Θ]$-submodules of&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.00613v1-abstract-full').style.display = 'inline'; document.getElementById('2404.00613v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2404.00613v1-abstract-full" style="display: none;"> Let $\mathbb F_q$ be a finite field, where $q$ is an odd prime power. Let $R=\mathbb{F}_q+u\mathbb{F}_q+v\mathbb{F}_q+uv\mathbb F_q$ with $u^2=u,v^2=v,uv=vu$. In this paper, we study the algebraic structure of $(θ, Θ)$-cyclic codes of block length $(r,s)$ over $\mathbb{F}_qR.$ Specifically, we analyze the structure of these codes as left $R[x:Θ]$-submodules of $\mathfrak{R}_{r,s} = \frac{\mathbb{F}_q[x:θ]}{\langle x^r-1\rangle} \times \frac{R[x:Θ]}{\langle x^s-1\rangle}$. Our investigation involves determining generator polynomials and minimal generating sets for this family of codes. Further, we discuss the algebraic structure of separable codes. A relationship between the generator polynomials of $(θ, Θ)$-cyclic codes over $\mathbb F_qR$ and their duals is established. Moreover, we calculate the generator polynomials of the duals of $(θ, Θ)$-cyclic codes. As an application of our study, we provide a construction of quantum error-correcting codes (QECCs) from $(θ, Θ)$-cyclic codes of block length $(r,s)$ over $\mathbb{F}_qR$. We support our theoretical results with illustrative examples. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2404.00613v1-abstract-full').style.display = 'none'; document.getElementById('2404.00613v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2024.
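<p class="is-size-7"><em>Editor&#39;s note:</em> background sketch for readers new to the area: a classical cyclic code is generated by a polynomial g(x) dividing x^r - 1 over F_q and is closed under cyclic shifts. The paper&#39;s $(θ, Θ)$-skew setting generalizes this commutative special case, which is all the toy code below shows (q = 3, r = 4 are arbitrary choices).</p>
<pre><code>
# Classical cyclic code from a generator polynomial (commutative special case).
import numpy as np
from itertools import product

q, r = 3, 4
g = np.array([1, 0, 1])        # g(x) = x^2 + 1 divides x^4 - 1 over F_3

def polymul_mod(a, b):
    """Multiply polynomials in F_q[x]/(x^r - 1); exponents fold mod r."""
    out = np.zeros(r, dtype=int)
    for i, ai in enumerate(a):
        for j, bj in enumerate(b):
            out[(i + j) % r] = (out[(i + j) % r] + int(ai) * int(bj)) % q
    return out

k = r - (len(g) - 1)           # dimension: messages have deg(m) + deg(g) below r
code = {tuple(polymul_mod(m, g)) for m in product(range(q), repeat=k)}
shifted_ok = all(tuple(np.roll(c, 1)) in code for c in code)
print(len(code), shifted_ok)   # 9 codewords; True: every cyclic shift stays inside
</code></pre>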
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">30 pages, 4 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.20260">arXiv:2403.20260</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.20260">pdf</a>, <a href="https://arxiv.org/format/2403.20260">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Prototype-based Interpretable Breast Cancer Prediction Models: Analysis and Challenges </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shreyasi Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Schl%C3%B6tterer%2C+J">Jörg Schlötterer</a>, <a href="/search/cs?searchtype=author&amp;query=Veltman%2C+J">Jeroen Veltman</a>, <a href="/search/cs?searchtype=author&amp;query=Geerdink%2C+J">Jeroen Geerdink</a>, <a href="/search/cs?searchtype=author&amp;query=van+Keulen%2C+M">Maurice van Keulen</a>, <a href="/search/cs?searchtype=author&amp;query=Seifert%2C+C">Christin Seifert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.20260v3-abstract-short" style="display: inline;"> Deep learning models have achieved high performance in medical applications; however, their adoption in clinical practice is hindered by their black-box nature. Self-explainable models, like prototype-based models, can be especially beneficial as they are interpretable by design. However, if the learnt prototypes are of low quality, then the prototype-based models are no better than black-box models. Havi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.20260v3-abstract-full').style.display = 'inline'; document.getElementById('2403.20260v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.20260v3-abstract-full" style="display: none;"> Deep learning models have achieved high performance in medical applications; however, their adoption in clinical practice is hindered by their black-box nature. Self-explainable models, like prototype-based models, can be especially beneficial as they are interpretable by design. However, if the learnt prototypes are of low quality, then the prototype-based models are no better than black-box models. Having high-quality prototypes is a prerequisite for a truly interpretable model. In this work, we propose a prototype evaluation framework for coherence (PEF-C) for quantitatively evaluating the quality of the prototypes based on domain knowledge. We show the use of PEF-C in the context of breast cancer prediction using mammography. Existing works on prototype-based models on breast cancer prediction using mammography have focused on improving the classification performance of prototype-based models compared to black-box models and have evaluated prototype quality through anecdotal evidence. We are the first to go beyond anecdotal evidence and evaluate the quality of the mammography prototypes systematically using our PEF-C.
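<p class="is-size-7"><em>Editor&#39;s note:</em> a hypothetical toy metric in the spirit of prototype quality evaluation: the fraction of a prototype&#39;s top activations that fall inside an annotated ROI, one crude notion of &#34;purity&#34;. This illustrates the kind of quantitative check the abstract argues for; it is not PEF-C itself, and all names and thresholds are invented.</p>
<pre><code>
# Toy "purity" score for a prototype activation map (NOT PEF-C).
import numpy as np

def prototype_purity(activation, roi_mask, top_frac=0.02):
    """activation: (H, W) prototype similarity map; roi_mask: binary ROI."""
    thresh = np.quantile(activation, 1.0 - top_frac)   # keep the top 2% of pixels
    hot = activation >= thresh
    inside = np.logical_and(hot, roi_mask.astype(bool)).sum()
    return inside / max(hot.sum(), 1)

rng = np.random.default_rng(1)
act = rng.random((64, 64))
act[20:30, 20:30] += 2.0           # the prototype fires inside this block
roi = np.zeros((64, 64), dtype=int)
roi[16:36, 16:36] = 1              # annotated lesion region
print(prototype_purity(act, roi))  # 1.0 here: all top activations fall in the ROI
</code></pre>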
Specifically, we apply three state-of-the-art prototype-based models, ProtoPNet, BRAIxProtoPNet++, and PIP-Net, to mammography images for breast cancer prediction and evaluate these models w.r.t. i) classification performance, and ii) quality of the prototypes, on three public datasets. Our results show that prototype-based models are competitive with black-box models in terms of classification performance, and achieve a higher score in detecting ROIs. However, the quality of the prototypes is not yet sufficient and can be improved in terms of relevance, purity, and the variety of prototypes learned. We call on the XAI community to systematically evaluate the quality of the prototypes to check their true usability in high-stakes decisions and improve such models further. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.20260v3-abstract-full').style.display = 'none'; document.getElementById('2403.20260v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at World Conference on Explainable Artificial Intelligence. Cham: Springer Nature Switzerland, 2024; 21 pages, 5 figures, 3 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.08295">arXiv:2403.08295</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.08295">pdf</a>, <a href="https://arxiv.org/format/2403.08295">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Gemma: Open Models Based on Gemini Research and Technology </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gemma+Team"> Gemma Team</a>, <a href="/search/cs?searchtype=author&amp;query=Mesnard%2C+T">Thomas Mesnard</a>, <a href="/search/cs?searchtype=author&amp;query=Hardin%2C+C">Cassidy Hardin</a>, <a href="/search/cs?searchtype=author&amp;query=Dadashi%2C+R">Robert Dadashi</a>, <a href="/search/cs?searchtype=author&amp;query=Bhupatiraju%2C+S">Surya Bhupatiraju</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shreya Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Sifre%2C+L">Laurent Sifre</a>, <a href="/search/cs?searchtype=author&amp;query=Rivi%C3%A8re%2C+M">Morgane Rivière</a>, <a href="/search/cs?searchtype=author&amp;query=Kale%2C+M+S">Mihir Sanjay Kale</a>, <a href="/search/cs?searchtype=author&amp;query=Love%2C+J">Juliette Love</a>, <a href="/search/cs?searchtype=author&amp;query=Tafti%2C+P">Pouya Tafti</a>, <a href="/search/cs?searchtype=author&amp;query=Hussenot%2C+L">Léonard Hussenot</a>, <a href="/search/cs?searchtype=author&amp;query=Sessa%2C+P+G">Pier Giuseppe Sessa</a>, <a href="/search/cs?searchtype=author&amp;query=Chowdhery%2C+A">Aakanksha Chowdhery</a>, <a
href="/search/cs?searchtype=author&amp;query=Roberts%2C+A">Adam Roberts</a>, <a href="/search/cs?searchtype=author&amp;query=Barua%2C+A">Aditya Barua</a>, <a href="/search/cs?searchtype=author&amp;query=Botev%2C+A">Alex Botev</a>, <a href="/search/cs?searchtype=author&amp;query=Castro-Ros%2C+A">Alex Castro-Ros</a>, <a href="/search/cs?searchtype=author&amp;query=Slone%2C+A">Ambrose Slone</a>, <a href="/search/cs?searchtype=author&amp;query=H%C3%A9liou%2C+A">Am茅lie H茅liou</a>, <a href="/search/cs?searchtype=author&amp;query=Tacchetti%2C+A">Andrea Tacchetti</a>, <a href="/search/cs?searchtype=author&amp;query=Bulanova%2C+A">Anna Bulanova</a>, <a href="/search/cs?searchtype=author&amp;query=Paterson%2C+A">Antonia Paterson</a>, <a href="/search/cs?searchtype=author&amp;query=Tsai%2C+B">Beth Tsai</a>, <a href="/search/cs?searchtype=author&amp;query=Shahriari%2C+B">Bobak Shahriari</a> , et al. (83 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.08295v4-abstract-short" style="display: inline;"> This work introduces Gemma, a family of lightweight, state-of-the art open models built from the research and technology used to create Gemini models. Gemma models demonstrate strong performance across academic benchmarks for language understanding, reasoning, and safety. We release two sizes of models (2 billion and 7 billion parameters), and provide both pretrained and fine-tuned checkpoints. Ge&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.08295v4-abstract-full').style.display = 'inline'; document.getElementById('2403.08295v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.08295v4-abstract-full" style="display: none;"> This work introduces Gemma, a family of lightweight, state-of-the art open models built from the research and technology used to create Gemini models. Gemma models demonstrate strong performance across academic benchmarks for language understanding, reasoning, and safety. We release two sizes of models (2 billion and 7 billion parameters), and provide both pretrained and fine-tuned checkpoints. Gemma outperforms similarly sized open models on 11 out of 18 text-based tasks, and we present comprehensive evaluations of safety and responsibility aspects of the models, alongside a detailed description of model development. We believe the responsible release of LLMs is critical for improving the safety of frontier models, and for enabling the next wave of LLM innovations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.08295v4-abstract-full').style.display = 'none'; document.getElementById('2403.08295v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.07750">arXiv:2403.07750</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.07750">pdf</a>, <a href="https://arxiv.org/format/2403.07750">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Synth$^2$: Boosting Visual-Language Models with Synthetic Captions and Image Embeddings </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sharifzadeh%2C+S">Sahand Sharifzadeh</a>, <a href="/search/cs?searchtype=author&amp;query=Kaplanis%2C+C">Christos Kaplanis</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shreya Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Kumaran%2C+D">Dharshan Kumaran</a>, <a href="/search/cs?searchtype=author&amp;query=Ilic%2C+A">Anastasija Ilic</a>, <a href="/search/cs?searchtype=author&amp;query=Mitrovic%2C+J">Jovana Mitrovic</a>, <a href="/search/cs?searchtype=author&amp;query=Blundell%2C+C">Charles Blundell</a>, <a href="/search/cs?searchtype=author&amp;query=Banino%2C+A">Andrea Banino</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.07750v2-abstract-short" style="display: inline;"> The creation of high-quality human-labeled image-caption datasets presents a significant bottleneck in the development of Visual-Language Models (VLMs). In this work, we investigate an approach that leverages the strengths of Large Language Models (LLMs) and image generation models to create synthetic image-text pairs for efficient and effective VLM training. Our method employs a pretrained text-t&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.07750v2-abstract-full').style.display = 'inline'; document.getElementById('2403.07750v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.07750v2-abstract-full" style="display: none;"> The creation of high-quality human-labeled image-caption datasets presents a significant bottleneck in the development of Visual-Language Models (VLMs). In this work, we investigate an approach that leverages the strengths of Large Language Models (LLMs) and image generation models to create synthetic image-text pairs for efficient and effective VLM training. Our method employs a pretrained text-to-image model to synthesize image embeddings from captions generated by an LLM. Despite the text-to-image model and VLM initially being trained on the same data, our approach leverages the image generator&#39;s ability to create novel compositions, resulting in synthetic image embeddings that expand beyond the limitations of the original dataset. Extensive experiments demonstrate that our VLM, finetuned on synthetic data, achieves comparable performance to models trained solely on human-annotated data, while requiring significantly less data. Furthermore, we perform a set of analyses of captions, which reveal that semantic diversity and balance are key aspects for better downstream performance.
Finally, we show that synthesizing images in the image embedding space is 25% faster than in the pixel space. We believe our work not only addresses a significant challenge in VLM training but also opens up promising avenues for the development of self-improving multi-modal models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.07750v2-abstract-full').style.display = 'none'; document.getElementById('2403.07750v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">9 pages, 6 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.16863">arXiv:2402.16863</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.16863">pdf</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Quantum Inspired Chaotic Salp Swarm Optimization for Dynamic Optimization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Sanjai Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Mani%2C+A">Ashish Mani</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+M">Mayank Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Chatterjee%2C+A">Amlan Chatterjee</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.16863v1-abstract-short" style="display: inline;"> Many real-world problems are dynamic optimization problems that are unknown beforehand. In practice, unpredictable events such as the arrival of new jobs, due date changes, reservation cancellations, and changes in parameters or constraints make the search environment dynamic. Many algorithms are designed to deal with stationary optimization problems, but these algorithms cannot handle dynamic optim&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.16863v1-abstract-full').style.display = 'inline'; document.getElementById('2402.16863v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.16863v1-abstract-full" style="display: none;"> Many real-world problems are dynamic optimization problems that are unknown beforehand. In practice, unpredictable events such as the arrival of new jobs, due date changes, reservation cancellations, and changes in parameters or constraints make the search environment dynamic. Many algorithms are designed to deal with stationary optimization problems, but these algorithms cannot handle dynamic optimization problems or manage them correctly.
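<p class="is-size-7"><em>Editor&#39;s note:</em> QCSSO builds on the standard Salp Swarm Algorithm. The sketch below implements one textbook SSA step (the leader follows the best-so-far &#34;food&#34; position, followers chain behind) on a toy objective; the quantum and chaotic operators that QSSO/QCSSO add are not reproduced here.</p>
<pre><code>
# One standard SSA step (base algorithm only; no quantum/chaotic operators).
import numpy as np

def ssa_step(pop, food, lb, ub, t, max_iter, rng):
    """pop: (n, d) salp positions; food: (d,) best solution found so far."""
    n, d = pop.shape
    c1 = 2.0 * np.exp(-(4.0 * t / max_iter) ** 2)   # exploration-to-exploitation decay
    new = pop.copy()
    for j in range(d):                               # leader follows the food source
        c2, c3 = rng.random(), rng.random()
        step = c1 * ((ub[j] - lb[j]) * c2 + lb[j])
        new[0, j] = food[j] + step if c3 >= 0.5 else food[j] - step
    for i in range(1, n):                            # followers chain behind
        new[i] = 0.5 * (pop[i] + new[i - 1])
    return np.clip(new, lb, ub)

rng = np.random.default_rng(0)
lb, ub = np.full(2, -5.0), np.full(2, 5.0)
pop = rng.uniform(lb, ub, size=(10, 2))
sphere = lambda x: (x ** 2).sum(axis=1)              # toy static objective
for t in range(50):
    food = pop[sphere(pop).argmin()]
    pop = ssa_step(pop, food, lb, ub, t, 50, rng)
print(sphere(pop).min())  # should approach 0 on this toy problem
</code></pre>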
Although some optimization algorithms have been proposed to deal with changes in dynamic environments, existing algorithms still leave room for improvement, especially in locating and following previously identified optima. With this in mind, we studied a variant of the Salp Swarm Algorithm (SSA) known as QSSO, which integrates the principles of quantum computing. An attempt is made to improve the overall performance of standard SSA so that it deals with dynamic environments effectively by locating and tracking the global optima of dynamic optimization problems (DOPs). This work extends QSSO into the Quantum-inspired Chaotic Salp Swarm Optimization (QCSSO) algorithm and details the various approaches considered while solving DOPs. A chaotic operator is employed together with quantum computing to respond to change and to increase individual searchability by improving population diversity and convergence speed. We evaluated QCSSO on a well-known generalized dynamic benchmark problem (GDBG) provided for CEC 2009, followed by a comparative numerical study with well-regarded algorithms. The introduced QCSSO proves to be a competitive algorithm for DOPs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.16863v1-abstract-full').style.display = 'none'; document.getElementById('2402.16863v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 2 figures, 1 algorithm</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.09757">arXiv:2402.09757</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.09757">pdf</a>, <a href="https://arxiv.org/ps/2402.09757">ps</a>, <a href="https://arxiv.org/format/2402.09757">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Combinatorics">math.CO</span> </div> </div> <p class="title is-5 mathjax"> Construction of CCC and ZCCS Through Additive Characters Over Galois Field </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ghosh%2C+G">Gobinda Ghosh</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Sachin Pathak</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.09757v3-abstract-short" style="display: inline;"> With the rapid progression of wireless communication technologies, especially multicarrier code-division multiple access (MC-CDMA), there is a need for advanced code construction methods. Traditional approaches, mainly based on generalized Boolean functions, have limitations in code length versatility.
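<p class="is-size-7"><em>Editor&#39;s note:</em> the complementary property that CCC/ZCCS constructions target can be seen in the smallest classical case: for a binary Golay pair, off-peak aperiodic autocorrelations cancel exactly. The check below only illustrates that target behaviour; the paper&#39;s construction instead uses additive characters over GF(p^r).</p>
<pre><code>
# Complementary-pair check (classical p=2 case, not the paper's construction).
import numpy as np

def aperiodic_autocorr(seq, shift):
    seq = np.asarray(seq)
    return int((seq[:len(seq) - shift] * seq[shift:]).sum())

a = [1, 1, 1, -1]   # classical length-4 Golay complementary pair
b = [1, 1, -1, 1]
for u in range(4):
    total = aperiodic_autocorr(a, u) + aperiodic_autocorr(b, u)
    print(u, total)  # 8 at u=0, exactly 0 at every nonzero shift
</code></pre>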
This paper introduces a novel approach to constructing complete complementary codes (CCC) and Z-com&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.09757v3-abstract-full').style.display = 'inline'; document.getElementById('2402.09757v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.09757v3-abstract-full" style="display: none;"> With the rapid progression of wireless communication technologies, especially multicarrier code-division multiple access (MC-CDMA), there is a need for advanced code construction methods. Traditional approaches, mainly based on generalized Boolean functions, have limitations in code length versatility. This paper introduces a novel approach to constructing complete complementary codes (CCC) and Z-complementary code sets (ZCCS) for reducing interference in MC-CDMA systems. The proposed construction, distinct from Boolean function-based approaches, employs additive characters over Galois fields GF($p^{r}$), where $p$ is prime and $r$ is a positive integer. First, we develop CCCs with lengths of $p^{r}$, which are then extended to construct ZCCS with both unreported lengths and sizes of $np^{r}$, where $n$ is an arbitrary positive integer. The versatility of this method is further highlighted as it includes the lengths of ZCCS reported in prior studies as special cases, underscoring the method&#39;s comprehensive nature and superiority. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.09757v3-abstract-full').style.display = 'none'; document.getElementById('2402.09757v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.08780">arXiv:2402.08780</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.08780">pdf</a>, <a href="https://arxiv.org/format/2402.08780">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Enhanced Deep Q-Learning for 2D Self-Driving Cars: Implementation and Evaluation on a Custom Track Environment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Sagar Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Shrestha%2C+B">Bidhya Shrestha</a>, <a href="/search/cs?searchtype=author&amp;query=Pahi%2C+K">Kritish Pahi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.08780v1-abstract-short" style="display: inline;"> This research project presents the implementation of a Deep Q-Learning Network (DQN) for a self-driving car on a 2-dimensional (2D) custom track, with the objective of enhancing the DQN network&#39;s performance.
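<p class="is-size-7"><em>Editor&#39;s note:</em> a minimal sketch of the two core DQN ingredients this abstract relies on, epsilon-greedy action selection and a TD-target update, sized for the 7 distance sensors it describes. The network width, action set, and hyperparameters are illustrative assumptions, and the priority-based selection of the modified DQN is not reproduced.</p>
<pre><code>
# Core DQN step: epsilon-greedy action choice and a TD-target update.
import random
import torch
import torch.nn as nn

n_sensors, n_actions, gamma = 7, 3, 0.99   # 7 distance sensors, toy action set
q_net = nn.Sequential(nn.Linear(n_sensors, 64), nn.ReLU(), nn.Linear(64, n_actions))
optimizer = torch.optim.Adam(q_net.parameters(), lr=1e-3)

def select_action(state, epsilon):
    if random.random() >= epsilon:                  # exploit: greedy on Q-values
        with torch.no_grad():
            return int(q_net(state).argmax())
    return random.randrange(n_actions)              # explore

def dqn_update(state, action, reward, next_state, done):
    q_sa = q_net(state)[action]
    with torch.no_grad():                           # TD target from next state
        target = reward + gamma * q_net(next_state).max() * (1.0 - done)
    loss = nn.functional.mse_loss(q_sa, target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# Toy usage with random transitions standing in for the Pygame track environment.
s, s2 = torch.rand(n_sensors), torch.rand(n_sensors)
a = select_action(s, epsilon=0.1)
dqn_update(s, a, reward=1.0, next_state=s2, done=0.0)
</code></pre>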
It encompasses the development of a custom driving environment using Pygame on a track surrounding the University of Memphis map, as well as the design and implementation of the DQN model. Th&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.08780v1-abstract-full').style.display = 'inline'; document.getElementById('2402.08780v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.08780v1-abstract-full" style="display: none;"> This research project presents the implementation of a Deep Q-Learning Network (DQN) for a self-driving car on a 2-dimensional (2D) custom track, with the objective of enhancing the DQN network&#39;s performance. It encompasses the development of a custom driving environment using Pygame on a track surrounding the University of Memphis map, as well as the design and implementation of the DQN model. The algorithm utilizes data from 7 sensors installed in the car, which measure the distance between the car and the track. These sensors are positioned in front of the vehicle, spaced 20 degrees apart, enabling them to sense a wide area ahead. We successfully implemented the DQN, as well as a modified version of the DQN with a priority-based action selection mechanism, which we refer to as the modified DQN. The model was trained over 1000 episodes, and the average reward received by the agent was found to be around 40, which is approximately 60% higher than that of the original DQN and around 50% higher than that of a vanilla neural network. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.08780v1-abstract-full').style.display = 'none'; document.getElementById('2402.08780v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">8 pages, 8 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.13791">arXiv:2312.13791</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.13791">pdf</a>, <a href="https://arxiv.org/format/2312.13791">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> </div> </div> <p class="title is-5 mathjax"> Parameterized Guarantees for Almost Envy-Free Allocations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Barman%2C+S">Siddharth Barman</a>, <a href="/search/cs?searchtype=author&amp;query=Kar%2C+D">Debajyoti Kar</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shraddha Pathak</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.13791v1-abstract-short" style="display: inline;"> We study fair allocation of indivisible goods among agents with additive valuations.
We obtain novel approximation guarantees for three of the strongest fairness notions in discrete fair division, namely envy-free up to the removal of any positively-valued good (EFx), pairwise maximin shares (PMMS), and envy-free up to the transfer of any positively-valued good (tEFx). Our approximation guarantees&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.13791v1-abstract-full').style.display = 'inline'; document.getElementById('2312.13791v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.13791v1-abstract-full" style="display: none;"> We study fair allocation of indivisible goods among agents with additive valuations. We obtain novel approximation guarantees for three of the strongest fairness notions in discrete fair division, namely envy-free up to the removal of any positively-valued good (EFx), pairwise maximin shares (PMMS), and envy-free up to the transfer of any positively-valued good (tEFx). Our approximation guarantees are in terms of an instance-dependent parameter $γ\in (0,1]$ that upper bounds, for each indivisible good in the given instance, the multiplicative range of nonzero values for the good across the agents. First, we consider allocations wherein, between any pair of agents and up to the removal of any positively-valued good, the envy is multiplicatively bounded. Specifically, the current work develops a polynomial-time algorithm that computes a $\left( \frac{2γ}{\sqrt{5+4γ}-1}\right)$-approximately EFx allocation for any given fair division instance with range parameter $γ\in (0,1]$. For instances with $γ\geq 0.511$, the obtained approximation guarantee for EFx surpasses the previously best-known approximation bound of $(φ-1) \approx 0.618$, where $φ$ denotes the golden ratio. Furthermore, for $γ\in (0,1]$, we develop a polynomial-time algorithm for finding allocations wherein the PMMS requirement is satisfied, between every pair of agents, within a multiplicative factor of $\frac{5}{6} γ$. En route to this result, we obtain novel existential and computational guarantees for $\frac{5}{6}$-approximately PMMS allocations under restricted additive valuations. Finally, we develop an algorithm that efficiently computes a $2γ$-approximately tEFx allocation. Specifically, we obtain existence and efficient computation of exact tEFx allocations for all instances with $γ\in [0.5, 1]$. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.13791v1-abstract-full').style.display = 'none'; document.getElementById('2312.13791v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023.
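<p class="is-size-7"><em>Editor&#39;s note:</em> a hedged sketch of the instance parameter and the EFx notion used above, for additive valuations. Reading the parameter as the smallest min/max ratio of nonzero values per good is the editor&#39;s interpretation of the abstract, and the valuation matrix is a made-up example.</p>
<pre><code>
# Instance parameter (one reading of the abstract) and an EFx check.
import numpy as np

def gamma_parameter(V):
    """V: (agents, goods) valuations; worst-case min/max ratio of nonzero values."""
    ratios = []
    for good in range(V.shape[1]):
        vals = V[:, good][V[:, good] > 0]
        ratios.append(vals.min() / vals.max())
    return min(ratios)

def is_efx(V, alloc):
    """EFx: no agent envies another bundle after removing any positively-valued
    good from that bundle. alloc: list of lists of good indices."""
    for i in range(V.shape[0]):
        own = V[i, alloc[i]].sum()
        for j in range(V.shape[0]):
            if i == j:
                continue
            for g in alloc[j]:
                if V[i, g] > 0 and V[i, alloc[j]].sum() - V[i, g] > own + 1e-12:
                    return False
    return True

V = np.array([[4.0, 2.0, 3.0, 1.0],
              [3.0, 4.0, 2.0, 2.0]])
print(gamma_parameter(V), is_efx(V, [[0, 3], [1, 2]]))   # 0.5 True
</code></pre>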
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">28 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.07395">arXiv:2312.07395</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.07395">pdf</a>, <a href="https://arxiv.org/format/2312.07395">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> A Simple Recipe for Contrastively Pre-training Video-First Encoders Beyond 16 Frames </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Papalampidi%2C+P">Pinelopi Papalampidi</a>, <a href="/search/cs?searchtype=author&amp;query=Koppula%2C+S">Skanda Koppula</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shreya Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Chiu%2C+J">Justin Chiu</a>, <a href="/search/cs?searchtype=author&amp;query=Heyward%2C+J">Joe Heyward</a>, <a href="/search/cs?searchtype=author&amp;query=Patraucean%2C+V">Viorica Patraucean</a>, <a href="/search/cs?searchtype=author&amp;query=Shen%2C+J">Jiajun Shen</a>, <a href="/search/cs?searchtype=author&amp;query=Miech%2C+A">Antoine Miech</a>, <a href="/search/cs?searchtype=author&amp;query=Zisserman%2C+A">Andrew Zisserman</a>, <a href="/search/cs?searchtype=author&amp;query=Nematzadeh%2C+A">Aida Nematzadeh</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.07395v2-abstract-short" style="display: inline;"> Understanding long, real-world videos requires modeling of long-range visual dependencies. To this end, we explore video-first architectures, building on the common paradigm of transferring large-scale, image--text models to video via shallow temporal fusion. However, we expose two limitations of the approach: (1) decreased spatial capabilities, likely due to poor video--language alignment in stan&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.07395v2-abstract-full').style.display = 'inline'; document.getElementById('2312.07395v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.07395v2-abstract-full" style="display: none;"> Understanding long, real-world videos requires modeling of long-range visual dependencies. To this end, we explore video-first architectures, building on the common paradigm of transferring large-scale, image--text models to video via shallow temporal fusion. However, we expose two limitations of the approach: (1) decreased spatial capabilities, likely due to poor video--language alignment in standard video datasets, and (2) higher memory consumption, bottlenecking the number of frames that can be processed. To mitigate the memory bottleneck, we systematically analyze the memory/accuracy trade-off of various efficient methods: factorized attention, parameter-efficient image-to-video adaptation, input masking, and multi-resolution patchification.
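<p class="is-size-7"><em>Editor&#39;s note:</em> the input-masking option listed here (and pushed to 75% in the next sentence) amounts to dropping most patch tokens before the encoder, shrinking memory roughly in proportion. A short PyTorch sketch with illustrative shapes follows; it is not the paper&#39;s training code.</p>
<pre><code>
# Random token masking for video patch tokens (illustrative shapes).
import torch

def mask_video_tokens(tokens, keep_ratio=0.25):
    """tokens: (B, N, D) patch tokens flattened over frames and space."""
    b, n, d = tokens.shape
    n_keep = max(1, int(n * keep_ratio))
    scores = torch.rand(b, n)                        # random per-token scores
    keep = scores.topk(n_keep, dim=1).indices        # indices of kept tokens
    batch_idx = torch.arange(b).unsqueeze(1)
    return tokens[batch_idx, keep], keep             # (B, n_keep, D)

frames, patches, dim = 64, 196, 768                  # 64 frames of 14x14 patches
video_tokens = torch.randn(2, frames * patches, dim)
kept, idx = mask_video_tokens(video_tokens, keep_ratio=0.25)
print(video_tokens.shape, "kept:", kept.shape)       # 12544 tokens down to 3136
</code></pre>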
Surprisingly, simply masking large portions of the video (up to 75%) during contrastive pre-training proves to be one of the most robust ways to scale encoders to videos up to 4.3 minutes at 1 FPS. Our simple approach for training long video-to-text models, which scales to 1B parameters, does not add new architectural complexity and is able to outperform the popular paradigm of using much larger LLMs as an information aggregator over segment-based information on benchmarks with long-range temporal dependencies (YouCook2, EgoSchema). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.07395v2-abstract-full').style.display = 'none'; document.getElementById('2312.07395v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 December, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.05328">arXiv:2312.05328</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.05328">pdf</a>, <a href="https://arxiv.org/format/2312.05328">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Bad Students Make Great Teachers: Active Learning Accelerates Large-Scale Visual Understanding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Evans%2C+T">Talfan Evans</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shreya Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Merzic%2C+H">Hamza Merzic</a>, <a href="/search/cs?searchtype=author&amp;query=Schwarz%2C+J">Jonathan Schwarz</a>, <a href="/search/cs?searchtype=author&amp;query=Tanno%2C+R">Ryutaro Tanno</a>, <a href="/search/cs?searchtype=author&amp;query=Henaff%2C+O+J">Olivier J. Henaff</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.05328v4-abstract-short" style="display: inline;"> Power-law scaling indicates that large-scale training with uniform sampling is prohibitively slow. Active learning methods aim to increase data efficiency by prioritizing learning on the most relevant examples. Despite their appeal, these methods have yet to be widely adopted since no one algorithm has been shown to a) generalize across models and tasks b) scale to large datasets and c) yield over&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.05328v4-abstract-full').style.display = 'inline'; document.getElementById('2312.05328v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.05328v4-abstract-full" style="display: none;"> Power-law scaling indicates that large-scale training with uniform sampling is prohibitively slow. Active learning methods aim to increase data efficiency by prioritizing learning on the most relevant examples. 
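<p class="is-size-7"><em>Editor&#39;s note:</em> a hedged sketch of proxy-based prioritization in the spirit of the &#34;learnability&#34; scores described below: rank examples by how much a small proxy still gets wrong relative to a trained reference, and feed the large model the top-ranked batch. The scoring rule is a simplified stand-in, not the paper&#39;s exact definition.</p>
<pre><code>
# Proxy-scored data prioritization (simplified stand-in for "learnability").
import numpy as np

def prioritize(proxy_loss, reference_loss, batch_size):
    """Select examples the proxy finds hard but a trained reference finds easy."""
    learnability = proxy_loss - reference_loss        # high = learnable, not noise
    return np.argsort(-learnability)[:batch_size]     # top-k example indices

rng = np.random.default_rng(0)
n = 10_000
proxy_loss = rng.gamma(2.0, 1.0, n)                   # current proxy losses
reference_loss = rng.gamma(1.0, 0.3, n)               # losses of a trained reference
batch = prioritize(proxy_loss, reference_loss, batch_size=256)
print(batch.shape, proxy_loss[batch].mean() > proxy_loss.mean())  # (256,) True
</code></pre>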
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.05328">arXiv:2312.05328</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.05328">pdf</a>, <a href="https://arxiv.org/format/2312.05328">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div>
<p class="title is-5 mathjax"> Bad Students Make Great Teachers: Active Learning Accelerates Large-Scale Visual Understanding </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Evans%2C+T">Talfan Evans</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shreya Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Merzic%2C+H">Hamza Merzic</a>, <a href="/search/cs?searchtype=author&amp;query=Schwarz%2C+J">Jonathan Schwarz</a>, <a href="/search/cs?searchtype=author&amp;query=Tanno%2C+R">Ryutaro Tanno</a>, <a href="/search/cs?searchtype=author&amp;query=Henaff%2C+O+J">Olivier J. Henaff</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Power-law scaling indicates that large-scale training with uniform sampling is prohibitively slow. Active learning methods aim to increase data efficiency by prioritizing learning on the most relevant examples. Despite their appeal, these methods have yet to be widely adopted since no single algorithm has been shown to a) generalize across models and tasks, b) scale to large datasets, and c) yield overall FLOP savings when accounting for the overhead of data selection. In this work, we propose a method which satisfies these three properties, leveraging small, cheap proxy models to estimate &#34;learnability&#34; scores for datapoints, which are used to prioritize data for the training of much larger models. As a result, our models require 46% and 51% fewer training updates and up to 25% less total computation to reach the same performance as uniformly trained visual classifiers on JFT and multimodal models on ALIGN. Finally, we find our data-prioritization scheme to be complementary with recent data-curation and learning objectives, yielding a new state-of-the-art in several multimodal transfer tasks. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Technical report</span> </p> </li>
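<p class="is-size-7"><em>Editor&#39;s note:</em> a common way to instantiate the proxy-based &#34;learnability&#34; idea sketched in this abstract is the difference between a small online learner&#39;s loss and a pretrained reference model&#39;s loss: examples the learner still gets wrong but the reference finds easy are worth training on. The snippet below is an illustrative toy under that assumption, not the paper&#39;s algorithm.</p>
<pre><code>import numpy as np

def learnability_scores(loss_learner, loss_reference):
    """High score: the current learner finds the example hard (high loss)
    while a trained reference model finds it easy (low loss)."""
    return loss_learner - loss_reference

# Toy: score a super-batch with the two cheap proxies, keep the top half
# as the actual batch for the (much larger) model being trained.
loss_learner = np.random.rand(512)
loss_reference = np.random.rand(512)
scores = learnability_scores(loss_learner, loss_reference)
selected = np.argsort(scores)[-256:]
print(selected.shape)   # (256,)
</code></pre>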
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2311.18281">arXiv:2311.18281</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2311.18281">pdf</a>, <a href="https://arxiv.org/format/2311.18281">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div>
<p class="title is-5 mathjax"> Utilizing Radiomic Feature Analysis For Automated MRI Keypoint Detection: Enhancing Graph Applications </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nasser%2C+S+A">Sahar Almahfouz Nasser</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shashwat Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Singhal%2C+K">Keshav Singhal</a>, <a href="/search/cs?searchtype=author&amp;query=Meena%2C+M">Mohit Meena</a>, <a href="/search/cs?searchtype=author&amp;query=Gupte%2C+N">Nihar Gupte</a>, <a href="/search/cs?searchtype=author&amp;query=Chinmaya%2C+A">Ananya Chinmaya</a>, <a href="/search/cs?searchtype=author&amp;query=Garg%2C+P">Prateek Garg</a>, <a href="/search/cs?searchtype=author&amp;query=Sethi%2C+A">Amit Sethi</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Graph neural networks (GNNs) present a promising alternative to CNNs and transformers in certain image processing applications due to their parameter-efficiency in modeling spatial relationships. Currently, a major area of research involves converting non-graph input data for use in GNN-based models, notably in scenarios where the data originates from images. One approach involves converting images into nodes by identifying significant keypoints within them. Super-Retina, a semi-supervised technique, has been utilized for detecting keypoints in retinal images. However, its limitations lie in the dependency on a small initial set of ground truth keypoints, which is progressively expanded to detect more keypoints. Having encountered difficulties in detecting consistent initial keypoints in brain images using SIFT and LoFTR, we propose a new approach: radiomic feature-based keypoint detection. We demonstrate the anatomical significance of the detected keypoints by showcasing their efficacy in improving registration processes guided by these keypoints. Subsequently, these keypoints are employed as the ground truth for the keypoint detection method (LK-SuperRetina). Furthermore, the study showcases the application of GNNs in image matching, highlighting their superior performance in terms of both the number of good matches and confidence scores. This research sets the stage for expanding the use of GNNs to various other tasks, including but not limited to image classification, segmentation, and registration. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 November, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2023. </p> </li>
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.20274">arXiv:2310.20274</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.20274">pdf</a>, <a href="https://arxiv.org/format/2310.20274">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3132847.3133141">10.1145/3132847.3133141 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div>
<p class="title is-5 mathjax"> Extracting Entities of Interest from Comparative Product Reviews </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Arora%2C+J">Jatin Arora</a>, <a href="/search/cs?searchtype=author&amp;query=Agrawal%2C+S">Sumit Agrawal</a>, <a href="/search/cs?searchtype=author&amp;query=Goyal%2C+P">Pawan Goyal</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Sayan Pathak</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> This paper presents a deep learning based approach to extract product comparison information out of user reviews on various e-commerce websites. Any comparative product review has three major entities of information: the names of the products being compared, the user opinion (predicate) and the feature or aspect under comparison. All these informing entities are dependent on each other and bound by the rules of the language in the review. We observe that their inter-dependencies can be captured well using LSTMs. We evaluate our system on existing manually labeled datasets and observe that it outperforms the existing Semantic Role Labeling (SRL) framework popular for this task. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Source Code: https://github.com/jatinarora2702/Review-Information-Extraction</span> </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.7; H.3.3 </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Proceedings of the 2017 ACM on Conference on Information and Knowledge Management, Pages 1975 - 1978 </p> </li>
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.12677">arXiv:2310.12677</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.12677">pdf</a>, <a href="https://arxiv.org/format/2310.12677">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div>
<p class="title is-5 mathjax"> Case-level Breast Cancer Prediction for Real Hospital Settings </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shreyasi Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Schl%C3%B6tterer%2C+J">Jörg Schlötterer</a>, <a href="/search/cs?searchtype=author&amp;query=Geerdink%2C+J">Jeroen Geerdink</a>, <a href="/search/cs?searchtype=author&amp;query=Veltman%2C+J">Jeroen Veltman</a>, <a href="/search/cs?searchtype=author&amp;query=van+Keulen%2C+M">Maurice van Keulen</a>, <a href="/search/cs?searchtype=author&amp;query=Strisciuglio%2C+N">Nicola Strisciuglio</a>, <a href="/search/cs?searchtype=author&amp;query=Seifert%2C+C">Christin Seifert</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Breast cancer prediction models for mammography assume that annotations are available for individual images or regions of interest (ROIs), and that there is a fixed number of images per patient. These assumptions do not hold in real hospital settings, where clinicians provide only a final diagnosis for the entire mammography exam (case). Since data in real hospital settings scales with continuous patient intake, while manual annotation efforts do not, we develop a framework for case-level breast cancer prediction that does not require any manual annotation and can be trained with the case labels readily available at the hospital. Specifically, we propose a two-level multi-instance learning (MIL) approach at patch and image level for case-level breast cancer prediction and evaluate it on two public and one private dataset. We propose a novel domain-specific MIL pooling, observing that breast cancer may occur in one breast side or both, while images of both breasts are taken as a precaution during mammography. We also propose a dynamic training procedure for training our MIL framework on a variable number of images per case. We show that our two-level MIL model can be applied in real hospital settings where only case labels and a variable number of images per case are available, without any loss in performance compared to models trained on image labels. Although trained only with weak (case-level) labels, the model can point out the breast side, mammography view and view region in which the abnormality lies. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">31 pages, 15 figures, 12 tables</span> </p> </li>
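<p class="is-size-7"><em>Editor&#39;s note:</em> the two-level, side-aware MIL pooling described above can be pictured in a few lines. This is a simplified stand-in for the paper&#39;s pooling (pool within each breast side, then across sides), with made-up scores and names.</p>
<pre><code>import torch

def case_level_pool(image_scores, side_ids):
    """Pool a variable number of per-image malignancy scores into one case score.

    image_scores: (N,) scores for the N images of one exam.
    side_ids:     (N,) 0 = left breast, 1 = right breast.
    Pool within a side first (an abnormality should recur across views of that
    side), then max over sides (cancer in either side makes the case positive).
    """
    side_scores = []
    for side in (0, 1):
        mask = side_ids == side
        if mask.any():
            side_scores.append(image_scores[mask].mean())
    return torch.stack(side_scores).max()

scores = torch.tensor([0.1, 0.2, 0.7, 0.9])        # e.g. L-CC, L-MLO, R-CC, R-MLO
print(case_level_pool(scores, torch.tensor([0, 0, 1, 1])))   # tensor(0.8000)
</code></pre>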
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.05024">arXiv:2310.05024</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.05024">pdf</a>, <a href="https://arxiv.org/format/2310.05024">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div>
<p class="title is-5 mathjax"> Single Stage Warped Cloth Learning and Semantic-Contextual Attention Feature Fusion for Virtual TryOn </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Sanhita Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Kaushik%2C+V">Vinay Kaushik</a>, <a href="/search/cs?searchtype=author&amp;query=Lall%2C+B">Brejesh Lall</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Image-based virtual try-on aims to fit an in-shop garment onto a clothed person image. Garment warping, which aligns the target garment with the corresponding body parts in the person image, is a crucial step in achieving this goal. Existing methods often use multi-stage frameworks to handle clothes warping, person body synthesis and try-on generation separately, or rely on noisy intermediate parser-based labels. We propose a novel single-stage framework that learns these steps implicitly, without explicit multi-stage learning. Our approach utilizes a novel semantic-contextual fusion attention module for garment-person feature fusion, enabling efficient and realistic cloth warping and body synthesis from target pose keypoints. By introducing a lightweight linear attention framework that attends to garment regions and fuses multiple sampled flow fields, we also address the misalignment and artifacts present in previous methods. To achieve simultaneous learning of the warped garment and the try-on result, we introduce a Warped Cloth Learning Module. Our proposed approach significantly improves the quality and efficiency of virtual try-on methods, providing users with a more reliable and realistic virtual try-on experience. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in ICME 2024</span> </p> </li>
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2309.11326">arXiv:2309.11326</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2309.11326">pdf</a>, <a href="https://arxiv.org/format/2309.11326">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div>
<p class="title is-5 mathjax"> How to turn your camera into a perfect pinhole model </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=De+Boi%2C+I">Ivan De Boi</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Stuti Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Oliveira%2C+M">Marina Oliveira</a>, <a href="/search/cs?searchtype=author&amp;query=Penne%2C+R">Rudi Penne</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Camera calibration is a first and fundamental step in various computer vision applications. Despite being an active field of research, Zhang&#39;s method remains widely used for camera calibration due to its implementation in popular toolboxes. However, this method initially assumes a pinhole model with oversimplified distortion models. In this work, we propose a novel approach that involves a pre-processing step to remove distortions from images by means of Gaussian processes. Our method does not need to assume any distortion model and can be applied to severely warped images, even in the case of multiple distortion sources, e.g., a fisheye image of a curved mirror reflection. The Gaussian processes capture all distortions and camera imperfections, resulting in virtual images as though taken by an ideal pinhole camera with square pixels. Furthermore, this ideal GP-camera only needs one image of a square grid calibration pattern. This model allows for a serious upgrade of many algorithms and applications that are designed in a pure projective geometry setting but whose performance is very sensitive to nonlinear lens distortions. We demonstrate the effectiveness of our method by simplifying Zhang&#39;s calibration method, reducing the number of parameters and getting rid of the distortion parameters and iterative optimization. We validate our method by means of synthetic data and real-world images. The contributions of this work include the construction of a virtual ideal pinhole camera using Gaussian processes, a simplified calibration method and lens distortion removal. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2023. </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 3 figures, conference CIARP</span> </p> </li>
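<p class="is-size-7"><em>Editor&#39;s note:</em> the core move in the abstract above, learning a model-free map from distorted to ideal pixel coordinates with Gaussian processes, can be sketched with an off-the-shelf GP regressor. The snippet below is a rough illustration only: the kernel, the synthetic corner data and the single-GP setup are assumptions, not the paper&#39;s method.</p>
<pre><code>import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

# Detected pixel positions of a square grid's corners in the distorted image,
# paired with the positions an ideal pinhole camera would produce.
# Placeholders here; in practice both come from the calibration pattern.
distorted_px = np.random.rand(64, 2) * 1000.0
ideal_px = distorted_px + 5.0 * np.sin(distorted_px / 200.0)   # fake distortion

gp = GaussianProcessRegressor(kernel=RBF(length_scale=200.0), normalize_y=True)
gp.fit(distorted_px, ideal_px)      # smooth map: distorted -> ideal coordinates

# Any pixel of a new image can now be remapped as if taken by the ideal camera.
print(gp.predict(np.array([[512.0, 384.0]])))
</code></pre>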
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.15225">arXiv:2303.15225</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2303.15225">pdf</a>, <a href="https://arxiv.org/format/2303.15225">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div>
<p class="title is-5 mathjax"> GP-PCS: One-shot Feature-Preserving Point Cloud Simplification with Gaussian Processes on Riemannian Manifolds </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Stuti Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=McDonald%2C+T+M">Thomas M. McDonald</a>, <a href="/search/cs?searchtype=author&amp;query=Sels%2C+S">Seppe Sels</a>, <a href="/search/cs?searchtype=author&amp;query=Penne%2C+R">Rudi Penne</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> The processing, storage and transmission of large-scale point clouds is an ongoing challenge in the computer vision community which hinders progress in the application of 3D models to real-world settings, such as autonomous driving, virtual reality and remote sensing. We propose a novel, one-shot point cloud simplification method which preserves both the salient structural features and the overall shape of a point cloud without any prior surface reconstruction step. Our method employs Gaussian processes suitable for functions defined on Riemannian manifolds, allowing us to model the surface variation function across any given point cloud. A simplified version of the original cloud is obtained by sequentially selecting points using a greedy sparsification scheme. The selection criterion used for this scheme ensures that the simplified cloud best represents the surface variation of the original point cloud. We evaluate our method on several benchmark and self-acquired point clouds, compare it to a range of existing methods, demonstrate its application in downstream tasks of registration and surface reconstruction, and show that our method is competitive both in terms of empirical performance and computational efficiency. The code is available at <a href="https://github.com/stutipathak5/gps-for-point-clouds">https://github.com/stutipathak5/gps-for-point-clouds</a>. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages, 4 figures, 2 tables in main; 6 pages, 7 figures and 2 tables in supplementary</span> </p> </li>
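<p class="is-size-7"><em>Editor&#39;s note:</em> the greedy sparsification loop in this abstract has a simple shape even with the manifold-GP machinery elided. Below is a toy surrogate: it repeatedly adds the point whose surface variation is least represented by the subset chosen so far, scoring candidates by variation times Euclidean distance to the subset. The scoring rule is a stand-in, not the paper&#39;s GP criterion.</p>
<pre><code>import numpy as np

def greedy_simplify(points, variation, m):
    """Pick m of N points, favouring high-variation points far from the subset."""
    chosen = [int(np.argmax(variation))]
    for _ in range(m - 1):
        d = np.linalg.norm(points[:, None, :] - points[chosen][None, :, :], axis=-1)
        score = variation * d.min(axis=1)    # high variation, far from subset
        score[chosen] = -np.inf              # never re-pick a chosen point
        chosen.append(int(np.argmax(score)))
    return np.asarray(chosen)

pts = np.random.rand(1000, 3)                # raw point cloud
var = np.random.rand(1000)                   # per-point surface variation estimate
print(greedy_simplify(pts, var, 50).shape)   # (50,)
</code></pre>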
href="/search/cs?searchtype=author&amp;query=Kashem%2C+S">Sheleem Kashem</a>, <a href="/search/cs?searchtype=author&amp;query=Loks-Thompson%2C+M">Maria Loks-Thompson</a>, <a href="/search/cs?searchtype=author&amp;query=Openshaw%2C+H">Hannah Openshaw</a>, <a href="/search/cs?searchtype=author&amp;query=Parker-Holder%2C+J">Jack Parker-Holder</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shreya Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Perez-Nieves%2C+N">Nicolas Perez-Nieves</a>, <a href="/search/cs?searchtype=author&amp;query=Rakicevic%2C+N">Nemanja Rakicevic</a>, <a href="/search/cs?searchtype=author&amp;query=Rockt%C3%A4schel%2C+T">Tim Rockt盲schel</a>, <a href="/search/cs?searchtype=author&amp;query=Schroecker%2C+Y">Yannick Schroecker</a>, <a href="/search/cs?searchtype=author&amp;query=Sygnowski%2C+J">Jakub Sygnowski</a>, <a href="/search/cs?searchtype=author&amp;query=Tuyls%2C+K">Karl Tuyls</a> , et al. (3 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2301.07608v1-abstract-short" style="display: inline;"> Foundation models have shown impressive adaptation and scalability in supervised and self-supervised learning problems, but so far these successes have not fully translated to reinforcement learning (RL). In this work, we demonstrate that training an RL agent at scale leads to a general in-context learning algorithm that can adapt to open-ended novel embodied 3D problems as quickly as humans. In a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.07608v1-abstract-full').style.display = 'inline'; document.getElementById('2301.07608v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2301.07608v1-abstract-full" style="display: none;"> Foundation models have shown impressive adaptation and scalability in supervised and self-supervised learning problems, but so far these successes have not fully translated to reinforcement learning (RL). In this work, we demonstrate that training an RL agent at scale leads to a general in-context learning algorithm that can adapt to open-ended novel embodied 3D problems as quickly as humans. In a vast space of held-out environment dynamics, our adaptive agent (AdA) displays on-the-fly hypothesis-driven exploration, efficient exploitation of acquired knowledge, and can successfully be prompted with first-person demonstrations. Adaptation emerges from three ingredients: (1) meta-reinforcement learning across a vast, smooth and diverse task distribution, (2) a policy parameterised as a large-scale attention-based memory architecture, and (3) an effective automated curriculum that prioritises tasks at the frontier of an agent&#39;s capabilities. We demonstrate characteristic scaling laws with respect to network size, memory length, and richness of the training task distribution. We believe our results lay the foundation for increasingly general and adaptive RL agents that perform well across ever-larger open-ended domains. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.07608v1-abstract-full').style.display = 'none'; document.getElementById('2301.07608v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2210.15104">arXiv:2210.15104</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2210.15104">pdf</a>, <a href="https://arxiv.org/ps/2210.15104">ps</a>, <a href="https://arxiv.org/format/2210.15104">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> TRScore: A Novel GPT-based Readability Scorer for ASR Segmentation and Punctuation model evaluation and selection </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Behre%2C+P">Piyush Behre</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+S">Sharman Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Shah%2C+A">Amy Shah</a>, <a href="/search/cs?searchtype=author&amp;query=Kesavamoorthy%2C+H">Harini Kesavamoorthy</a>, <a href="/search/cs?searchtype=author&amp;query=Chang%2C+S">Shuangyu Chang</a>, <a href="/search/cs?searchtype=author&amp;query=Zuo%2C+F">Fei Zuo</a>, <a href="/search/cs?searchtype=author&amp;query=Basoglu%2C+C">Chris Basoglu</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Sayan Pathak</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2210.15104v1-abstract-short" style="display: inline;"> Punctuation and Segmentation are key to readability in Automatic Speech Recognition (ASR), often evaluated using F1 scores that require high-quality human transcripts and do not reflect readability well. Human evaluation is expensive, time-consuming, and suffers from large inter-observer variability, especially in conversational speech devoid of strict grammatical structures. Large pre-trained mod&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.15104v1-abstract-full').style.display = 'inline'; document.getElementById('2210.15104v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2210.15104v1-abstract-full" style="display: none;"> Punctuation and Segmentation are key to readability in Automatic Speech Recognition (ASR), often evaluated using F1 scores that require high-quality human transcripts and do not reflect readability well. Human evaluation is expensive, time-consuming, and suffers from large inter-observer variability, especially in conversational speech devoid of strict grammatical structures. Large pre-trained models capture a notion of grammatical structure. We present TRScore, a novel readability measure using the GPT model to evaluate different segmentation and punctuation systems. We validate our approach with human experts. 
Additionally, our approach enables quantitative assessment of text post-processing techniques such as capitalization, inverse text normalization (ITN), and disfluency on overall readability, which traditional word error rate (WER) and slot error rate (SER) metrics fail to capture. TRScore is strongly correlated to traditional F1 and human readability scores, with Pearson&#39;s correlation coefficients of 0.67 and 0.98, respectively. It also eliminates the need for human transcriptions for model selection. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.15104v1-abstract-full').style.display = 'none'; document.getElementById('2210.15104v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2210.14446">arXiv:2210.14446</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2210.14446">pdf</a>, <a href="https://arxiv.org/format/2210.14446">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> Smart Speech Segmentation using Acousto-Linguistic Features with look-ahead </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Behre%2C+P">Piyush Behre</a>, <a href="/search/cs?searchtype=author&amp;query=Parihar%2C+N">Naveen Parihar</a>, <a href="/search/cs?searchtype=author&amp;query=Tan%2C+S">Sharman Tan</a>, <a href="/search/cs?searchtype=author&amp;query=Shah%2C+A">Amy Shah</a>, <a href="/search/cs?searchtype=author&amp;query=Sharma%2C+E">Eva Sharma</a>, <a href="/search/cs?searchtype=author&amp;query=Liu%2C+G">Geoffrey Liu</a>, <a href="/search/cs?searchtype=author&amp;query=Chang%2C+S">Shuangyu Chang</a>, <a href="/search/cs?searchtype=author&amp;query=Khalil%2C+H">Hosam Khalil</a>, <a href="/search/cs?searchtype=author&amp;query=Basoglu%2C+C">Chris Basoglu</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Sayan Pathak</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2210.14446v2-abstract-short" style="display: inline;"> Segmentation for continuous Automatic Speech Recognition (ASR) has traditionally used silence timeouts or voice activity detectors (VADs), which are both limited to acoustic features. This segmentation is often overly aggressive, given that people naturally pause to think as they speak. 
Consequently, segmentation happens mid-sentence, hindering both punctuation and downstream tasks like machine tr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.14446v2-abstract-full').style.display = 'inline'; document.getElementById('2210.14446v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2210.14446v2-abstract-full" style="display: none;"> Segmentation for continuous Automatic Speech Recognition (ASR) has traditionally used silence timeouts or voice activity detectors (VADs), which are both limited to acoustic features. This segmentation is often overly aggressive, given that people naturally pause to think as they speak. Consequently, segmentation happens mid-sentence, hindering both punctuation and downstream tasks like machine translation for which high-quality segmentation is critical. Model-based segmentation methods that leverage acoustic features are powerful, but without an understanding of the language itself, these approaches are limited. We present a hybrid approach that leverages both acoustic and language information to improve segmentation. Furthermore, we show that including one word as a look-ahead boosts segmentation quality. On average, our models improve segmentation-F0.5 score by 9.8% over baseline. We show that this approach works for multiple languages. For the downstream task of machine translation, it improves the translation BLEU score by an average of 1.05 points. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2210.14446v2-abstract-full').style.display = 'none'; document.getElementById('2210.14446v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.13395">arXiv:2207.13395</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2207.13395">pdf</a>, <a href="https://arxiv.org/ps/2207.13395">ps</a>, <a href="https://arxiv.org/format/2207.13395">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div> <p class="title is-5 mathjax"> A Direct Construction of 2D-CCC with Arbitrary Array Size and Flexible Set Size Using Multivariable Function </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ghosh%2C+G">Gobinda Ghosh</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Sachin Pathak</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.13395v8-abstract-short" style="display: inline;"> Recently, two-dimensional (2D) array codes have been found to have applications in wireless communication.In this paper, we propose direct construction of 2D complete complementary codes (2D-CCCs) with arbitrary array size and flexible set size using multivariable functions (MVF). 
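<p class="is-size-7"><em>Editor&#39;s note:</em> the hybrid cue-mixing with a one-word look-ahead described above reduces to a small scoring loop. The toy below mixes a pause-length cue with a language-model boundary probability that also sees the next word; the mixing weight, the saturation constant and the stand-in language model are all invented for illustration.</p>
<pre><code>def boundary_scores(words, pauses, lm_boundary_prob, alpha=0.6, lookahead=1):
    """Score a sentence boundary after word i from acoustic + linguistic cues."""
    scores = []
    for i in range(len(words) - lookahead):
        acoustic = min(pauses[i] / 0.5, 1.0)                 # saturate at 500 ms
        linguistic = lm_boundary_prob(words[: i + 1 + lookahead], i)
        scores.append(alpha * linguistic + (1 - alpha) * acoustic)
    return scores

# Toy usage: a boundary is likely after "today" (long pause + LM agreement).
words  = ["let", "us", "meet", "today", "the", "agenda", "is", "short"]
pauses = [0.05, 0.05, 0.10, 0.60, 0.05, 0.05, 0.05, 0.05]   # seconds after word i
fake_lm = lambda context, i: 0.9 if context[i] == "today" else 0.1
print(boundary_scores(words, pauses, fake_lm))
</code></pre>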
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.13395">arXiv:2207.13395</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2207.13395">pdf</a>, <a href="https://arxiv.org/ps/2207.13395">ps</a>, <a href="https://arxiv.org/format/2207.13395">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> </div> </div>
<p class="title is-5 mathjax"> A Direct Construction of 2D-CCC with Arbitrary Array Size and Flexible Set Size Using Multivariable Function </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ghosh%2C+G">Gobinda Ghosh</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Sachin Pathak</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Recently, two-dimensional (2D) array codes have been found to have applications in wireless communication. In this paper, we propose a direct construction of 2D complete complementary codes (2D-CCCs) with arbitrary array size and flexible set size using multivariable functions (MVF). The peak-to-mean envelope power ratio (PMEPR) properties of the row and column sequences of the constructed 2D-CCC arrays are investigated. The proposed construction generalizes many existing state-of-the-art constructions, such as Golay complementary pairs (GCP), one-dimensional (1D) CCC, and 2D Golay complementary array sets (2D-GCAS), and yields 2D-CCC with better parameters compared to existing work. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. </p> </li>
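<p class="is-size-7"><em>Editor&#39;s note:</em> as background for the abstract above (textbook material, not a statement from the paper): for a length-$L$ complex sequence $a=(a_0,\dots,a_{L-1})$ with associated OFDM-type signal $s_a(t)=\sum_{k=0}^{L-1}a_k e^{2\pi\sqrt{-1}\,kt}$, $t\in[0,1)$, the peak-to-mean envelope power ratio is $\mathrm{PMEPR}(a)=\sup_{t\in[0,1)}|s_a(t)|^2\big/\sum_{k=0}^{L-1}|a_k|^2$. Keeping the row and column PMEPR of an array small keeps transmit power amplifiers in their efficient linear range, which is why the property is tracked in constructions like the one above.</p>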
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.11578">arXiv:2207.11578</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2207.11578">pdf</a>, <a href="https://arxiv.org/format/2207.11578">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Theoretical Economics">econ.TH</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> </div> </div>
<p class="title is-5 mathjax"> A Scalable Bayesian Persuasion Framework for Epidemic Containment on Heterogeneous Networks </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shraddha Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Kulkarni%2C+A+A">Ankur A. Kulkarni</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> During an epidemic, the information available to individuals in the society deeply influences their beliefs about the epidemic spread, and consequently the preventive measures they take to stay safe from the infection. In this paper, we develop a scalable framework for ascertaining the optimal information disclosure a government must make to individuals in a networked society for the purpose of epidemic containment. This information design problem is complicated by the heterogeneous nature of the society, the positive externalities faced by individuals, and the variety in the public response to such disclosures. We use a networked public goods model to capture the underlying societal structure. Our first main result is a structural decomposition of the government&#39;s objectives into two independent components -- a component dependent on the utility function of individuals, and another dependent on properties of the underlying network. Since the network-dependent term in this decomposition is unaffected by the signals sent by the government, this characterization simplifies the problem of finding the optimal information disclosure policies. We find explicit conditions, in terms of risk aversion and prudence, under which no disclosure, full disclosure, exaggeration and downplay are the optimal policies. The structural decomposition results are also helpful in studying other forms of intervention, like incentive design and network design. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 91A28 </p> </li>
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 91A28 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2202.11454">arXiv:2202.11454</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2202.11454">pdf</a>, <a href="https://arxiv.org/ps/2202.11454">ps</a>, <a href="https://arxiv.org/format/2202.11454">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Discrete Mathematics">cs.DM</span> </div> </div> <p class="title is-5 mathjax"> On $Z_{p^r}Z_{p^r}Z_{p^s}$-Additive Cyclic Codes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Fern%C3%A1ndez-C%C3%B3rdoba%2C+C">Cristina Fern谩ndez-C贸rdoba</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Sachin Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Upadhyay%2C+A+K">Ashish Kumar Upadhyay</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2202.11454v1-abstract-short" style="display: inline;"> In this paper, we introduce $\mathbb{Z}_{p^r}\mathbb{Z}_{p^r}\mathbb{Z}_{p^s}$-additive cyclic codes for $r\leq s$. These codes can be identified as $\mathbb{Z}_{p^s}[x]$-submodules of $\mathbb{Z}_{p^r}[x]/\langle x^伪-1\rangle \times \mathbb{Z}_{p^r}[x]/\langle x^尾-1\rangle\times \mathbb{Z}_{p^s}[x]/\langle x^纬-1\rangle$. We determine the generator polynomials and minimal generating sets for this&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.11454v1-abstract-full').style.display = 'inline'; document.getElementById('2202.11454v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2202.11454v1-abstract-full" style="display: none;"> In this paper, we introduce $\mathbb{Z}_{p^r}\mathbb{Z}_{p^r}\mathbb{Z}_{p^s}$-additive cyclic codes for $r\leq s$. These codes can be identified as $\mathbb{Z}_{p^s}[x]$-submodules of $\mathbb{Z}_{p^r}[x]/\langle x^伪-1\rangle \times \mathbb{Z}_{p^r}[x]/\langle x^尾-1\rangle\times \mathbb{Z}_{p^s}[x]/\langle x^纬-1\rangle$. We determine the generator polynomials and minimal generating sets for this family of codes. Some previous works has been done for the case $p=2$ with $r=s=1$, $r=s=2$, and $r=1,s=2$. However, we show that in these previous works the classification of these codes were incomplete and the statements in this paper complete such classification. We also discuss the structure of separable $\mathbb{Z}_{p^r}\mathbb{Z}_{p^r}\mathbb{Z}_{p^s}$-additive cyclic codes and determine their generator polynomials. Further, we also study the duality of $\mathbb{Z}_{p^s}[x]$-submodules. As applications, we present some examples and construct some optimal binary codes. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2202.11454v1-abstract-full').style.display = 'none'; document.getElementById('2202.11454v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.08164">arXiv:2201.08164</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2201.08164">pdf</a>, <a href="https://arxiv.org/format/2201.08164">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3583558">10.1145/3583558 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> From Anecdotal Evidence to Quantitative Evaluation Methods: A Systematic Review on Evaluating Explainable AI </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nauta%2C+M">Meike Nauta</a>, <a href="/search/cs?searchtype=author&amp;query=Trienes%2C+J">Jan Trienes</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shreyasi Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+E">Elisa Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Peters%2C+M">Michelle Peters</a>, <a href="/search/cs?searchtype=author&amp;query=Schmitt%2C+Y">Yasmin Schmitt</a>, <a href="/search/cs?searchtype=author&amp;query=Schl%C3%B6tterer%2C+J">J枚rg Schl枚tterer</a>, <a href="/search/cs?searchtype=author&amp;query=van+Keulen%2C+M">Maurice van Keulen</a>, <a href="/search/cs?searchtype=author&amp;query=Seifert%2C+C">Christin Seifert</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.08164v3-abstract-short" style="display: inline;"> The rising popularity of explainable artificial intelligence (XAI) to understand high-performing black boxes raised the question of how to evaluate explanations of machine learning (ML) models. While interpretability and explainability are often presented as a subjectively validated binary property, we consider it a multi-faceted concept. We identify 12 conceptual properties, such as Compactness a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.08164v3-abstract-full').style.display = 'inline'; document.getElementById('2201.08164v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.08164v3-abstract-full" style="display: none;"> The rising popularity of explainable artificial intelligence (XAI) to understand high-performing black boxes raised the question of how to evaluate explanations of machine learning (ML) models. While interpretability and explainability are often presented as a subjectively validated binary property, we consider it a multi-faceted concept. 
<li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.08164">arXiv:2201.08164</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2201.08164">pdf</a>, <a href="https://arxiv.org/format/2201.08164">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3583558">10.1145/3583558 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div>
<p class="title is-5 mathjax"> From Anecdotal Evidence to Quantitative Evaluation Methods: A Systematic Review on Evaluating Explainable AI </p>
<p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Nauta%2C+M">Meike Nauta</a>, <a href="/search/cs?searchtype=author&amp;query=Trienes%2C+J">Jan Trienes</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shreyasi Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Nguyen%2C+E">Elisa Nguyen</a>, <a href="/search/cs?searchtype=author&amp;query=Peters%2C+M">Michelle Peters</a>, <a href="/search/cs?searchtype=author&amp;query=Schmitt%2C+Y">Yasmin Schmitt</a>, <a href="/search/cs?searchtype=author&amp;query=Schl%C3%B6tterer%2C+J">Jörg Schlötterer</a>, <a href="/search/cs?searchtype=author&amp;query=van+Keulen%2C+M">Maurice van Keulen</a>, <a href="/search/cs?searchtype=author&amp;query=Seifert%2C+C">Christin Seifert</a> </p>
<p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> The rising popularity of explainable artificial intelligence (XAI) to understand high-performing black boxes raised the question of how to evaluate explanations of machine learning (ML) models. While interpretability and explainability are often presented as a subjectively validated binary property, we consider explainability a multi-faceted concept. We identify 12 conceptual properties, such as Compactness and Correctness, that should be evaluated for comprehensively assessing the quality of an explanation. Our so-called Co-12 properties serve as a categorization scheme for systematically reviewing the evaluation practices of more than 300 papers published in the last 7 years at major AI and ML conferences that introduce an XAI method. We find that 1 in 3 papers evaluate exclusively with anecdotal evidence, and 1 in 5 papers evaluate with users. This survey also contributes to the call for objective, quantifiable evaluation methods by presenting an extensive overview of quantitative XAI evaluation methods. Our systematic collection of evaluation methods provides researchers and practitioners with concrete tools to thoroughly validate, benchmark and compare new and existing XAI methods. The Co-12 categorization scheme and our identified evaluation methods open up opportunities to include quantitative metrics as optimization criteria during model training in order to optimize for accuracy and interpretability simultaneously. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2022. </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published in ACM Computing Surveys (DOI http://dx.doi.org/10.1145/3583558). This ArXiv version includes the supplementary material. Website with categorization of XAI methods at https://utwente-dmb.github.io/xai-papers/</span> </p> </li>
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2107.09931v1-abstract-full').style.display = 'none'; document.getElementById('2107.09931v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 July, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2105.06424">arXiv:2105.06424</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2105.06424">pdf</a>, <a href="https://arxiv.org/format/2105.06424">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Programming Languages">cs.PL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Logic in Computer Science">cs.LO</span> </div> </div> <p class="title is-5 mathjax"> Stateless Model Checking under a Reads-Value-From Equivalence </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Agarwal%2C+P">Pratyush Agarwal</a>, <a href="/search/cs?searchtype=author&amp;query=Chatterjee%2C+K">Krishnendu Chatterjee</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shreya Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Pavlogiannis%2C+A">Andreas Pavlogiannis</a>, <a href="/search/cs?searchtype=author&amp;query=Toman%2C+V">Viktor Toman</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2105.06424v1-abstract-short" style="display: inline;"> Stateless model checking (SMC) is one of the standard approaches to the verification of concurrent programs. As scheduling non-determinism creates exponentially large spaces of thread interleavings, SMC attempts to partition this space into equivalence classes and explore only a few representatives from each class. The efficiency of this approach depends on two factors: (a) the coarseness of the p&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.06424v1-abstract-full').style.display = 'inline'; document.getElementById('2105.06424v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2105.06424v1-abstract-full" style="display: none;"> Stateless model checking (SMC) is one of the standard approaches to the verification of concurrent programs. As scheduling non-determinism creates exponentially large spaces of thread interleavings, SMC attempts to partition this space into equivalence classes and explore only a few representatives from each class. The efficiency of this approach depends on two factors: (a) the coarseness of the partitioning, and (b) the time to generate representatives in each class. For this reason, the search for coarse partitionings that are efficiently explorable is an active research challenge. In this work we present RVF-SMC, a new SMC algorithm that uses a novel \emph{reads-value-from (RVF)} partitioning. Intuitively, two interleavings are deemed equivalent if they agree on the value obtained in each read event, and read events induce consistent causal orderings between them. 

arXiv:2101.09844 [pdf, other]  physics.data-an cs.LG stat.ML
Pattern Ensembling for Spatial Trajectory Reconstruction
Authors: Shivam Pathak, Mingyi He, Sergey Malinchik, Stanislav Sobolevsky
Abstract: Digital sensing provides an unprecedented opportunity to assess and understand mobility. However, incompleteness, missing information, possible inaccuracies, and temporal heterogeneity in the geolocation data can undermine its applicability. As mobility patterns are often repeated, we propose a method to use similar trajectory patterns from the local vicinity and probabilistically ensemble them to robustly reconstruct missing or unreliable observations. We evaluate the proposed approach in comparison with traditional functional trajectory interpolation using a case of sea vessel trajectory data provided by the Automatic Identification System (AIS). By effectively leveraging the similarities in real-world trajectories, our pattern ensembling method helps to reconstruct missing trajectory segments of extended length and complex geometry. It can be used for locating mobile objects when temporarily unobserved, as well as for creating an evenly sampled trajectory interpolation useful for further trajectory mining.
Submitted 24 January, 2021; originally announced January 2021.
Comments: 11 pages, 5 figures. MSC Class: 68W99; ACM Class: I.5
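
A rough sketch of the pattern-ensembling idea: to reconstruct a missing segment between two observed fixes, collect historical trajectory segments from the local vicinity and average them, weighting each pattern by how well its endpoints match the gap. The function names and the inverse-distance weighting are illustrative assumptions, not the paper's exact estimator.

    import numpy as np

    def ensemble_reconstruct(start, end, patterns, n_points=20):
        """patterns: list of (k, 2) arrays of historical segments (lon, lat)."""
        weights, resampled = [], []
        for seg in patterns:
            # Endpoint mismatch drives the (unnormalized) pattern weight.
            err = np.linalg.norm(seg[0] - start) + np.linalg.norm(seg[-1] - end)
            weights.append(1.0 / (err + 1e-9))
            idx = np.linspace(0, len(seg) - 1, n_points).astype(int)
            resampled.append(seg[idx])             # put on a common "time" base
        w = np.array(weights) / np.sum(weights)
        return np.tensordot(w, np.stack(resampled), axes=1)  # weighted mean path

Unlike a functional interpolation between the two fixes, the weighted mean of real segments can follow the complex geometry (e.g., a shipping lane around a headland) that past trajectories actually took.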

arXiv:2010.01485 [pdf, other]  eess.IV cs.CV cs.LG
Improving Lesion Detection by exploring bias on Skin Lesion dataset
Authors: Anusua Trivedi, Sreya Muppalla, Shreyaan Pathak, Azadeh Mobasher, Pawel Janowski, Rahul Dodhia, Juan M. Lavista Ferres
Abstract: All datasets contain some biases, often unintentional, due to how they were acquired and annotated. These biases distort machine-learning models' performance, creating spurious correlations that the models can unfairly exploit or, contrarily, destroying clear correlations that the models could learn. With the popularity of deep learning models, automated skin lesion analysis is starting to play an essential role in the early detection of melanoma. The ISIC Archive is one of the most used skin lesion sources to benchmark deep learning-based tools. Bissoto et al. experimented with different bounding-box based masks and showed that deep learning models could classify skin lesion images without clinically meaningful information in the input data. Their findings seem confounding, since the ablated regions (random rectangular boxes) are not significant. The shape of the lesion is a crucial factor in the clinical characterization of a skin lesion. In that context, we performed a set of experiments that generate shape-preserving masks instead of rectangular bounding-box based masks. A deep learning model trained on these shape-preserving masked images does not outperform models trained on images without clinically meaningful information. That strongly suggests spurious correlations guiding the models. We propose the use of a generative adversarial network (GAN) to mitigate the underlying bias.
Submitted 4 October, 2020; originally announced October 2020.
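
A minimal sketch of the shape-preserving masking experiment: unlike a rectangular bounding box, the lesion pixels themselves are ablated using a segmentation mask, so the model still sees the lesion's silhouette but none of its clinically meaningful texture or color. This assumes a binary lesion mask is available alongside each image; the helper is illustrative, not the paper's code.

    import numpy as np

    def shape_preserving_mask(image, lesion_mask, fill=0):
        """image: (H, W, 3) array; lesion_mask: (H, W) boolean segmentation."""
        masked = image.copy()
        masked[lesion_mask] = fill   # erase texture and color, keep the outline
        return masked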

arXiv:2002.08820 [pdf]  eess.IV cs.CV q-bio.QM
Deep Learning Estimation of Multi-Tissue Constrained Spherical Deconvolution with Limited Single Shell DW-MRI
Authors: Vishwesh Nath, Sudhir K. Pathak, Kurt G. Schilling, Walt Schneider, Bennett A. Landman
Abstract: Diffusion-weighted magnetic resonance imaging (DW-MRI) is the only non-invasive approach for estimation of intra-voxel tissue microarchitecture and reconstruction of in vivo neural pathways for the human brain. With improvement in accelerated MRI acquisition technologies, DW-MRI protocols that make use of multiple levels of diffusion sensitization have gained popularity. A well-known advanced method for reconstruction of white matter microstructure that uses multi-shell data is multi-tissue constrained spherical deconvolution (MT-CSD). MT-CSD substantially improves the resolution of intra-voxel structure over the traditional single shell version, constrained spherical deconvolution (CSD). Herein, we explore the possibility of using deep learning on single shell data (the b=1000 s/mm2 shell from the Human Connectome Project (HCP)) to estimate the information content captured by 8th order MT-CSD using the full three-shell data (b=1000, 2000, and 3000 s/mm2 from HCP). Briefly, we examine two network architectures: 1) a sequential network of fully connected dense layers with a residual block in the middle (ResDNN); 2) a patch-based convolutional neural network with a residual block (ResCNN). For both networks an additional output block for estimation of voxel fractions was used with a modified loss function. Each approach was compared against the baseline of using MT-CSD on all data on 15 subjects from the HCP, divided into 5 training, 2 validation, and 8 testing subjects with a total of 6.7 million voxels. The fiber orientation distribution function (fODF) can be recovered with high correlation (0.77 vs 0.74 and 0.65) as compared to the ground truth of MT-CSD derived from the multi-shell DW-MRI acquisitions. Source code and models have been made publicly available.
Submitted 20 February, 2020; originally announced February 2020.
Comments: 10 pages, 7 figures
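
A sketch of the first architecture named in the abstract (ResDNN): fully connected layers with a residual block in the middle, plus the additional output head for tissue volume fractions. Layer widths and input size are assumptions for illustration; the paper's released code defines the actual configuration.

    import torch
    import torch.nn as nn

    class ResDNN(nn.Module):
        def __init__(self, n_in, n_fodf, n_frac=3, width=400):
            super().__init__()
            self.inp = nn.Sequential(nn.Linear(n_in, width), nn.ReLU())
            self.res = nn.Sequential(nn.Linear(width, width), nn.ReLU(),
                                     nn.Linear(width, width))
            self.fodf_head = nn.Linear(width, n_fodf)  # SH coefficients of fODF
            self.frac_head = nn.Linear(width, n_frac)  # per-tissue fractions

        def forward(self, x):
            h = self.inp(x)
            h = torch.relu(h + self.res(h))            # residual block
            return self.fodf_head(h), torch.softmax(self.frac_head(h), dim=-1)

    # One voxel's single-shell signal in; an 8th-order spherical-harmonic fODF
    # has (8+1)(8+2)/2 = 45 even-order coefficients out.
    model = ResDNN(n_in=90, n_fodf=45)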
Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Schilling%2C+K+G">Kurt G. Schilling</a>, <a href="/search/cs?searchtype=author&amp;query=Schneider%2C+W">Walt Schneider</a>, <a href="/search/cs?searchtype=author&amp;query=Landman%2C+B+A">Bennett A. Landman</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2002.08820v1-abstract-short" style="display: inline;"> Diffusion-weighted magnetic resonance imaging (DW-MRI) is the only non-invasive approach for estimation of intra-voxel tissue microarchitecture and reconstruction of in vivo neural pathways for the human brain. With improvement in accelerated MRI acquisition technologies, DW-MRI protocols that make use of multiple levels of diffusion sensitization have gained popularity. A well-known advanced meth&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.08820v1-abstract-full').style.display = 'inline'; document.getElementById('2002.08820v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2002.08820v1-abstract-full" style="display: none;"> Diffusion-weighted magnetic resonance imaging (DW-MRI) is the only non-invasive approach for estimation of intra-voxel tissue microarchitecture and reconstruction of in vivo neural pathways for the human brain. With improvement in accelerated MRI acquisition technologies, DW-MRI protocols that make use of multiple levels of diffusion sensitization have gained popularity. A well-known advanced method for reconstruction of white matter microstructure that uses multi-shell data is multi-tissue constrained spherical deconvolution (MT-CSD). MT-CSD substantially improves the resolution of intra-voxel structure over the traditional single shell version, constrained spherical deconvolution (CSD). Herein, we explore the possibility of using deep learning on single shell data (using the b=1000 s/mm2 from the Human Connectome Project (HCP)) to estimate the information content captured by 8th order MT-CSD using the full three shell data (b=1000, 2000, and 3000 s/mm2 from HCP). Briefly, we examine two network architectures: 1.) Sequential network of fully connected dense layers with a residual block in the middle (ResDNN), 2.) Patch based convolutional neural network with a residual block (ResCNN). For both networks an additional output block for estimation of voxel fraction was used with a modified loss function. Each approach was compared against the baseline of using MT-CSD on all data on 15 subjects from the HCP divided into 5 training, 2 validation, and 8 testing subjects with a total of 6.7 million voxels. The fiber orientation distribution function (fODF) can be recovered with high correlation (0.77 vs 0.74 and 0.65) as compared to the ground truth of MT-CST, which was derived from the multi-shell DW-MRI acquisitions. Source code and models have been made publicly available. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2002.08820v1-abstract-full').style.display = 'none'; document.getElementById('2002.08820v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 February, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">10 pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1912.01960">arXiv:1912.01960</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1912.01960">pdf</a>, <a href="https://arxiv.org/format/1912.01960">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Social and Information Networks">cs.SI</span> </div> </div> <p class="title is-5 mathjax"> Pattern and Anomaly Detection in Urban Temporal Networks </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=He%2C+M">Mingyi He</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shivam Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Muaz%2C+U">Urwa Muaz</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+J">Jingtian Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Saini%2C+S">Saloni Saini</a>, <a href="/search/cs?searchtype=author&amp;query=Malinchik%2C+S">Sergey Malinchik</a>, <a href="/search/cs?searchtype=author&amp;query=Sobolevsky%2C+S">Stanislav Sobolevsky</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1912.01960v1-abstract-short" style="display: inline;"> Broad spectrum of urban activities including mobility can be modeled as temporal networks evolving over time. Abrupt changes in urban dynamics caused by events such as disruption of civic operations, mass crowd gatherings, holidays and natural disasters are potentially reflected in these temporal mobility networks. Identification and early detecting of such abnormal developments is of critical imp&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1912.01960v1-abstract-full').style.display = 'inline'; document.getElementById('1912.01960v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1912.01960v1-abstract-full" style="display: none;"> Broad spectrum of urban activities including mobility can be modeled as temporal networks evolving over time. Abrupt changes in urban dynamics caused by events such as disruption of civic operations, mass crowd gatherings, holidays and natural disasters are potentially reflected in these temporal mobility networks. Identification and early detecting of such abnormal developments is of critical importance for transportation planning and security. Anomaly detection from high dimensional network data is a challenging task as edge level measurements often have low values and high variance resulting in high noise-to-signal ratio. In this study, we propose a generic three-phase pipeline approach to tackle curse of dimensionality and noisiness of the original data. Our pipeline consists of i) initial network aggregation leveraging community detection ii) unsupervised dimensionality reduction iii) clustering of the resulting representations for outlier detection. 

arXiv:1710.01420 [pdf, other]  cs.DB cs.LG
Usable & Scalable Learning Over Relational Data With Automatic Language Bias
Authors: Jose Picado, Arash Termehchy, Sudhanshu Pathak, Alan Fern, Praveen Ilango, Yunqiao Cai
Abstract: Relational databases are valuable resources for learning novel and interesting relations and concepts. In order to constrain the search through the large space of candidate definitions, users must tune the learning algorithm by specifying a language bias. Unfortunately, specifying the language bias is done via trial and error and is guided by the expert's intuitions. We propose AutoBias, a system that leverages information in the schema and content of the database to automatically induce the language bias used by popular relational learning systems. We show that AutoBias delivers the same accuracy as using manually-written language bias while imposing only a slight overhead on the running time of the learning algorithm.
Submitted 6 April, 2020; v1 submitted 3 October, 2017; originally announced October 2017.

arXiv:1609.05670 [pdf, ps, other]  cs.IT cs.NI
Load-aware Performance Analysis of Cell Center/Edge Users in Random HetNets
Authors: Praful D. Mankar, Goutam Das, S. S. Pathak
Abstract: For real-time traffic, the link quality and call blocking probability (both derived from the coverage probability) are poorer for cell edge users (CEUs) than for cell center users (CCUs), since signal reception in the cell center region is better than at the cell edge. In heterogeneous networks (HetNets), the uncoordinated channel access by different types of base stations determines the interference statistics, which in turn govern the coverage probability. Thus, spectrum allocation techniques have a major impact on the performance of CCUs and CEUs. In this paper, the performance of CCUs and CEUs in a random two-tier network is studied for two spectrum allocation techniques, namely 1) co-channel (CSA) and 2) shared (SSA) spectrum allocation. For the performance analysis, the widely accepted approach of modeling the tiers of a HetNet using independent homogeneous Poisson point processes (PPPs) is adopted to capture the spatial randomness in the locations of BSs. To incorporate the spatial randomness in service arrivals and to aid the load-aware analysis, the cellular traffic is modeled using a spatio-temporal PPP. Under this scenario, we develop an analytical framework to evaluate the load-aware performance, including coverage and blocking probabilities, of CCUs and CEUs under both spectrum allocation techniques. Further, we provide insight into the achievable area energy efficiency for SSA and CSA. The developed analytical framework is validated through extensive simulations. Finally, we demonstrate the impact of traffic load and femto access point density on the performance of CCUs/CEUs under CSA and SSA.
Submitted 9 March, 2017; v1 submitted 19 September, 2016; originally announced September 2016.
Comments: 13 pages and 11 figures. Submitted to IEEE Transactions on Vehicular Technology
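
A bare-bones Monte Carlo sketch of the modeling assumption used throughout this line of work: base stations drawn from a homogeneous PPP, a typical user at the origin served by the nearest BS, and coverage defined as the signal-to-interference ratio exceeding a threshold under Rayleigh fading. The density, path-loss exponent, and threshold are illustrative values only, not the paper's parameters.

    import numpy as np

    rng = np.random.default_rng(0)

    def coverage_probability(lam=1e-5, radius=5000, alpha=4, theta=1.0,
                             trials=2000):
        covered = 0
        for _ in range(trials):
            n = rng.poisson(lam * np.pi * radius ** 2)    # PPP on a disk
            if n < 2:
                continue
            d = np.sort(radius * np.sqrt(rng.random(n)))  # distances to origin
            h = rng.exponential(size=n)                   # Rayleigh fading powers
            signal = h[0] * d[0] ** (-alpha)              # nearest BS serves
            interference = np.sum(h[1:] * d[1:] ** (-alpha))
            covered += signal / interference > theta
        return covered / trials

    # Interference-limited, alpha=4, theta=0 dB: the classical closed form
    # gives 1/(1 + pi/4) ~ 0.56, which the simulation should approach.
    print(coverage_probability())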

arXiv:1609.05656 [pdf, ps, other]  cs.IT
Coverage Analysis of Two-Tier HetNets for Co-Channel, Orthogonal, and Partial Spectrum Sharing under Fractional Load Conditions
Authors: Praful D. Mankar, Goutam Das, S. S. Pathak
Abstract: In heterogeneous networks, the random deployment of femto access points (FAPs) and macro base stations (MBSs) with uncoordinated channel access imposes severe inter-tier interference. In real-life networks, the deployment of MBSs exhibits homogeneity, whereas FAPs characteristically form clusters, as in malls, apartments, offices, etc. Therefore, a composite model of the MBSs and the FAPs using a Poisson point process and a Poisson cluster process is employed for the evaluation of coverage probability. The scenario of real-time traffic for the macro tier and best-effort traffic for the femto tier is considered. Cognition is introduced in the clustered FAPs to control the inter-tier interference. Furthermore, the impact of macro-tier load is analyzed by exploiting the inherent coupling between the coverage probability and the activity factor of an MBS. Further, we study the effect of co-channel, orthogonal, and partial spectrum sharing modes on coverage for given parameters such as load condition, FAP/MBS density, etc. We provide simulation validation for the derived expressions of coverage and present a comparative analysis of the mentioned spectrum sharing modes.
Submitted 18 February, 2017; v1 submitted 19 September, 2016; originally announced September 2016.
Comments: 14 pages and 10 figures. Submitted to IEEE Transactions on Vehicular Technology
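
A short sketch of the composite spatial model in this abstract: macro BSs as a homogeneous PPP, and femto APs as a Poisson cluster process, here instantiated as a Thomas process (Gaussian clusters of daughter points around PPP parent points). The densities and cluster spread are placeholder values.

    import numpy as np

    rng = np.random.default_rng(1)

    def ppp(lam, size):
        n = rng.poisson(lam * size * size)
        return rng.uniform(0, size, (n, 2))              # homogeneous PPP

    def thomas_cluster_process(lam_parent, mean_daughters, sigma, size):
        parents = ppp(lam_parent, size)                  # cluster centers
        points = []
        for p in parents:
            m = rng.poisson(mean_daughters)              # FAPs in this cluster
            points.append(p + sigma * rng.standard_normal((m, 2)))
        return np.vstack(points) if points else np.empty((0, 2))

    mbs = ppp(1e-6, 10_000)                              # ~100 MBSs in a 10 km square
    fap = thomas_cluster_process(5e-6, 8, 40.0, 10_000)  # clustered femto APs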

arXiv:1606.05124 [pdf, other]  cs.RO cs.AI eess.SY
Robust Active Perception via Data-association aware Belief Space planning
Authors: Shashank Pathak, Antony Thomas, Asaf Feniger, Vadim Indelman
Abstract: We develop a belief space planning (BSP) approach that advances the state of the art by incorporating reasoning about data association (DA) within planning, while considering additional sources of uncertainty. Existing BSP approaches typically assume data association is given and perfect, an assumption that can be hard to justify when operating, in the presence of localization uncertainty, in ambiguous and perceptually aliased environments. In contrast, our data association aware belief space planning (DA-BSP) approach explicitly reasons about DA within belief evolution, and as such can better accommodate these challenging real-world scenarios. In particular, we show that due to perceptual aliasing the posterior belief becomes a mixture of probability distribution functions, and we design cost functions that measure the expected level of ambiguity and posterior uncertainty. Using these and standard costs (e.g. control penalty, distance to goal) within the objective function yields a general framework that reliably represents action impact and, in particular, is capable of active disambiguation. Our approach is thus applicable to robust active perception and autonomous navigation in perceptually aliased environments. We demonstrate key aspects in basic and realistic simulations.
Submitted 16 June, 2016; originally announced June 2016.
ACM Class: I.2.9; I.2.10; G.3
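
A toy sketch of the core observation in this abstract: when data association is ambiguous (perceptual aliasing), the posterior belief is a mixture of the posteriors conditioned on each association hypothesis, and the ambiguity can be scored, for example, by the entropy of the hypothesis weights. The 1-D setting, Gaussian likelihood, and measurement model z = landmark - position + noise are illustrative assumptions, not the paper's formulation.

    import numpy as np

    def mixture_posterior(prior_samples, z, landmarks, sigma=0.5):
        """Weight each data-association hypothesis by its marginal likelihood."""
        weights, posteriors = [], []
        for lm in landmarks:                    # hypothesis: z was caused by lm
            lik = np.exp(-0.5 * ((z - (lm - prior_samples)) / sigma) ** 2)
            weights.append(lik.mean())          # marginal likelihood of hypothesis
            posteriors.append(lik / lik.sum())  # per-sample posterior weights
        w = np.array(weights) / np.sum(weights)
        entropy = -np.sum(w * np.log(w + 1e-12))  # ambiguity term for the planner
        return w, posteriors, entropy

    # 1-D robot: two aliased landmarks yield nearly indistinguishable readings,
    # so both hypotheses keep significant weight and the entropy stays high.
    samples = np.random.default_rng(2).uniform(0, 10, 5000)
    w, _, H = mixture_posterior(samples, z=3.0, landmarks=[4.0, 9.0])

A planner that penalizes this entropy within its objective is pushed toward actions whose expected observations separate the hypotheses, which is precisely the active disambiguation the abstract describes.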

arXiv:1602.03273 [pdf, ps, other]  cs.DC
YTrace: End-to-end Performance Diagnosis in Large Cloud and Content Providers
Authors: Partha Kanuparthy, Yuchen Dai, Sudhir Pathak, Sambit Samal, Theophilus Benson, Mojgan Ghasemi, P. P. S. Narayan
Abstract: Content providers build serving stacks to deliver content to users. An important goal of a content provider is to ensure good user experience, since user experience has an impact on revenue. In this paper, we describe a system at Yahoo called YTrace that diagnoses bad user experience in near real time. We present the different components of YTrace for end-to-end multi-layer diagnosis (instrumentation, methods and backend system), and the system architecture for delivering diagnosis in near real time across all user sessions at Yahoo. YTrace diagnoses problems across service and network layers in the end-to-end path spanning user host, Internet, CDN and the datacenters, and has three diagnosis goals: detection, localization and root cause analysis (including cascading problems) of performance problems in user sessions with the cloud. The key component of the methods in YTrace is capturing and discovering causality, which we design based on a mix of instrumentation API, domain knowledge and blackbox methods. We show three case studies from production that span a large-scale distributed storage system, a datacenter-wide network, and an end-to-end video serving stack at Yahoo. We end by listing a number of open directions for performance diagnosis in cloud and content providers.
Submitted 25 May, 2016; v1 submitted 10 February, 2016; originally announced February 2016.
ACM Class: B.8.2; C.2.4; C.4
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1609.05656v2-abstract-full').style.display = 'none'; document.getElementById('1609.05656v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 February, 2017; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 September, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2016. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages and 10 figures. Submitted to IEEE Transaction on Vehicular Technology</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1606.05124">arXiv:1606.05124</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1606.05124">pdf</a>, <a href="https://arxiv.org/format/1606.05124">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Robust Active Perception via Data-association aware Belief Space planning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Shashank Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Thomas%2C+A">Antony Thomas</a>, <a href="/search/cs?searchtype=author&amp;query=Feniger%2C+A">Asaf Feniger</a>, <a href="/search/cs?searchtype=author&amp;query=Indelman%2C+V">Vadim Indelman</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1606.05124v1-abstract-short" style="display: inline;"> We develop a belief space planning (BSP) approach that advances the state of the art by incorporating reasoning about data association (DA) within planning, while considering additional sources of uncertainty. Existing BSP approaches typically assume data association is given and perfect, an assumption that can be harder to justify while operating, in the presence of localization uncertainty, in a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1606.05124v1-abstract-full').style.display = 'inline'; document.getElementById('1606.05124v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1606.05124v1-abstract-full" style="display: none;"> We develop a belief space planning (BSP) approach that advances the state of the art by incorporating reasoning about data association (DA) within planning, while considering additional sources of uncertainty. Existing BSP approaches typically assume data association is given and perfect, an assumption that can be harder to justify while operating, in the presence of localization uncertainty, in ambiguous and perceptually aliased environments. 
In contrast, our data association aware belief space planning (DA-BSP) approach explicitly reasons about DA within belief evolution, and as such can better accommodate these challenging real world scenarios. In particular, we show that due to perceptual aliasing, the posterior belief becomes a mixture of probability distribution functions, and design cost functions that measure the expected level of ambiguity and posterior uncertainty. Using these and standard costs (e.g.~control penalty, distance to goal) within the objective function, yields a general framework that reliably represents action impact, and in particular, capable of active disambiguation. Our approach is thus applicable to robust active perception and autonomous navigation in perceptually aliased environments. We demonstrate key aspects in basic and realistic simulations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1606.05124v1-abstract-full').style.display = 'none'; document.getElementById('1606.05124v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 June, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2016. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.9; I.2.10; G.3 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1602.03273">arXiv:1602.03273</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/1602.03273">pdf</a>, <a href="https://arxiv.org/ps/1602.03273">ps</a>, <a href="https://arxiv.org/format/1602.03273">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> YTrace: End-to-end Performance Diagnosis in Large Cloud and Content Providers </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kanuparthy%2C+P">Partha Kanuparthy</a>, <a href="/search/cs?searchtype=author&amp;query=Dai%2C+Y">Yuchen Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Pathak%2C+S">Sudhir Pathak</a>, <a href="/search/cs?searchtype=author&amp;query=Samal%2C+S">Sambit Samal</a>, <a href="/search/cs?searchtype=author&amp;query=Benson%2C+T">Theophilus Benson</a>, <a href="/search/cs?searchtype=author&amp;query=Ghasemi%2C+M">Mojgan Ghasemi</a>, <a href="/search/cs?searchtype=author&amp;query=Narayan%2C+P+P+S">P. P. S. Narayan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1602.03273v3-abstract-short" style="display: inline;"> Content providers build serving stacks to deliver content to users. An important goal of a content provider is to ensure good user experience, since user experience has an impact on revenue. In this paper, we describe a system at Yahoo called YTrace that diagnoses bad user experience in near real time. 
We present the different components of YTrace for end-to-end multi-layer diagnosis (instrumentat&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1602.03273v3-abstract-full').style.display = 'inline'; document.getElementById('1602.03273v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1602.03273v3-abstract-full" style="display: none;"> Content providers build serving stacks to deliver content to users. An important goal of a content provider is to ensure good user experience, since user experience has an impact on revenue. In this paper, we describe a system at Yahoo called YTrace that diagnoses bad user experience in near real time. We present the different components of YTrace for end-to-end multi-layer diagnosis (instrumentation, methods and backend system), and the system architecture for delivering diagnosis in near real time across all user sessions at Yahoo. YTrace diagnoses problems across service and network layers in the end-to-end path spanning user host, Internet, CDN and the datacenters, and has three diagnosis goals: detection, localization and root cause analysis (including cascading problems) of performance problems in user sessions with the cloud. The key component of the methods in YTrace is capturing and discovering causality, which we design based on a mix of instrumentation API, domain knowledge and blackbox methods. We show three case studies from production that span a large-scale distributed storage system, a datacenter-wide network, and an end-to-end video serving stack at Yahoo. We end by listing a number of open directions for performance diagnosis in cloud and content providers. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1602.03273v3-abstract-full').style.display = 'none'; document.getElementById('1602.03273v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 May, 2016; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 February, 2016; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2016. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> B.8.2; C.2.4; C.4 </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Pathak%2C+S&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Pathak%2C+S&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Pathak%2C+S&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 
24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10