Search | arXiv e-print repository
Showing 1–21 of 21 results for author: Gunasekara, C

Searching in archive cs. (Search in all archives: /search/?searchtype=author&query=Gunasekara%2C+C)
1. arXiv:2501.03468 [pdf, other] cs.CL cs.AI
   MTRAG: A Multi-Turn Conversational Benchmark for Evaluating Retrieval-Augmented Generation Systems
   Authors: Yannis Katsis, Sara Rosenthal, Kshitij Fadnis, Chulaka Gunasekara, Young-Suk Lee, Lucian Popa, Vraj Shah, Huaiyu Zhu, Danish Contractor, Marina Danilevsky
   Abstract: Retrieval-augmented generation (RAG) has recently become a very popular task for Large Language Models (LLMs). Evaluating them on multi-turn RAG conversations, where the system is asked to generate a response to a question in the context of a preceding conversation is an important and often overlooked task with several additional challenges. We present MTRAG: an end-to-end human-generated multi-turn RAG benchmark that reflects several real-world properties across diverse dimensions for evaluating the full RAG pipeline. MTRAG contains 110 conversations averaging 7.7 turns each across four domains for a total of 842 tasks. We also explore automation paths via synthetic data and LLM-as-a-Judge evaluation. Our human and automatic evaluations show that even state-of-the-art LLM RAG systems struggle on MTRAG. We demonstrate the need for strong retrieval and generation systems that can handle later turns, unanswerable questions, non-standalone questions, and multiple domains. MTRAG is available at https://github.com/ibm/mt-rag-benchmark.
   Submitted 6 January, 2025; originally announced January 2025.
2. arXiv:2410.21597 [pdf, other] cs.CL cs.AI cs.LG
   Reducing the Scope of Language Models with Circuit Breakers
   Authors: David Yunis, Siyu Huo, Chulaka Gunasekara, Danish Contractor
   Abstract: Language models are now deployed in a wide variety of user-facing applications, often for specific purposes like answering questions about documentation or acting as coding assistants. As these models are intended for particular purposes, they should not be able to answer irrelevant queries like requests for poetry or questions about physics, or even worse, queries that can only be answered by humans like sensitive company policies. Instead we would like them to only answer queries corresponding to desired behavior and refuse all other requests, which we refer to as scoping. We find that, despite the use of system prompts, two representative language models can be poorly scoped and respond to queries they should not be addressing. We then conduct a comprehensive empirical evaluation of methods which could be used for scoping the behavior of language models. Among many other results, we show that a recently-proposed method for general alignment, Circuit Breakers (CB), can be adapted to scope language models to very specific tasks like sentiment analysis or summarization or even tasks with finer-grained scoping (e.g. summarizing only news articles). When compared to standard methods like fine-tuning or preference learning, CB is more robust both for out of distribution tasks, and to adversarial prompting techniques. We also show that layering SFT and CB together often results in the best of both worlds: improved performance only on relevant queries, while rejecting irrelevant ones.
   Submitted 28 October, 2024; originally announced October 2024.
3. arXiv:2409.11500 [pdf, other] cs.CL cs.AI
   Multi-Document Grounded Multi-Turn Synthetic Dialog Generation
   Authors: Young-Suk Lee, Chulaka Gunasekara, Danish Contractor, Ramón Fernandez Astudillo, Radu Florian
   Abstract: We introduce a technique for multi-document grounded multi-turn synthetic dialog generation that incorporates three main ideas. First, we control the overall dialog flow using taxonomy-driven user queries that are generated with Chain-of-Thought (CoT) prompting. Second, we support the generation of multi-document grounded dialogs by mimicking real-world use of retrievers to update the grounding documents after every user-turn in the dialog. Third, we apply LLM-as-a-Judge to filter out queries with incorrect answers. Human evaluation of the synthetic dialog data suggests that the data is diverse, coherent, and includes mostly correct answers. Both human and automatic evaluations of answerable queries indicate that models fine-tuned on synthetic dialogs consistently out-perform those fine-tuned on existing human generated training data across four publicly available multi-turn document grounded benchmark test sets.
   Submitted 17 September, 2024; originally announced September 2024.
4. arXiv:2407.00121 [pdf, other] cs.LG cs.AI cs.CL
   Granite-Function Calling Model: Introducing Function Calling Abilities via Multi-task Learning of Granular Tasks
   Authors: Ibrahim Abdelaziz, Kinjal Basu, Mayank Agarwal, Sadhana Kumaravel, Matthew Stallone, Rameswar Panda, Yara Rizk, GP Bhargav, Maxwell Crouse, Chulaka Gunasekara, Shajith Ikbal, Sachin Joshi, Hima Karanam, Vineet Kumar, Asim Munawar, Sumit Neelam, Dinesh Raghu, Udit Sharma, Adriana Meza Soria, Dheeraj Sreedhar, Praveen Venkateswaran, Merve Unuvar, David Cox, Salim Roukos, Luis Lastras, et al. (1 additional author not shown)
   Abstract: Large language models (LLMs) have recently shown tremendous promise in serving as the backbone to agentic systems, as demonstrated by their performance in multi-faceted, challenging benchmarks like SWE-Bench and Agent-Bench. However, to realize the true potential of LLMs as autonomous agents, they must learn to identify, call, and interact with external tools and application program interfaces (APIs) to complete complex tasks. These tasks together are termed function calling. Endowing LLMs with function calling abilities leads to a myriad of advantages, such as access to current and domain-specific information in databases and knowledge sources, and the ability to outsource tasks that can be reliably performed by tools, e.g., a Python interpreter or calculator. While there has been significant progress in function calling with LLMs, there is still a dearth of open models that perform on par with proprietary LLMs like GPT, Claude, and Gemini. Therefore, in this work, we introduce the GRANITE-20B-FUNCTIONCALLING model under an Apache 2.0 license. The model is trained using a multi-task training approach on seven fundamental tasks encompassed in function calling, those being Nested Function Calling, Function Chaining, Parallel Functions, Function Name Detection, Parameter-Value Pair Detection, Next-Best Function, and Response Generation. We present a comprehensive evaluation on multiple out-of-domain datasets comparing GRANITE-20B-FUNCTIONCALLING to more than 15 other best proprietary and open models. GRANITE-20B-FUNCTIONCALLING provides the best performance among all open models on the Berkeley Function Calling Leaderboard and fourth overall. As a result of the diverse tasks and datasets used for training our model, we show that GRANITE-20B-FUNCTIONCALLING has better generalizability on multiple tasks in seven different evaluation datasets.
   Submitted 27 June, 2024; originally announced July 2024.
5. arXiv:2311.08705 [pdf, other] cs.CL
   Evaluating Robustness of Dialogue Summarization Models in the Presence of Naturally Occurring Variations
   Authors: Ankita Gupta, Chulaka Gunasekara, Hui Wan, Jatin Ganhotra, Sachindra Joshi, Marina Danilevsky
   Abstract: Dialogue summarization task involves summarizing long conversations while preserving the most salient information. Real-life dialogues often involve naturally occurring variations (e.g., repetitions, hesitations) and existing dialogue summarization models suffer from performance drop on such conversations. In this study, we systematically investigate the impact of such variations on state-of-the-art dialogue summarization models using publicly available datasets. To simulate real-life variations, we introduce two types of perturbations: utterance-level perturbations that modify individual utterances with errors and language variations, and dialogue-level perturbations that add non-informative exchanges (e.g., repetitions, greetings). We conduct our analysis along three dimensions of robustness: consistency, saliency, and faithfulness, which capture different aspects of the summarization model's performance. We find that both fine-tuned and instruction-tuned models are affected by input variations, with the latter being more susceptible, particularly to dialogue-level perturbations. We also validate our findings via human evaluation. Finally, we investigate if the robustness of fine-tuned models can be improved by training them with a fraction of perturbed data and observe that this approach is insufficient to address robustness challenges with current models and thus warrants a more thorough investigation to identify better solutions. Overall, our work highlights robustness challenges in dialogue summarization and provides insights for future research.
   Submitted 15 November, 2023; originally announced November 2023.
6. arXiv:2306.10452 [pdf, other] cs.CL
   MISMATCH: Fine-grained Evaluation of Machine-generated Text with Mismatch Error Types
   Authors: Keerthiram Murugesan, Sarathkrishna Swaminathan, Soham Dan, Subhajit Chaudhury, Chulaka Gunasekara, Maxwell Crouse, Diwakar Mahajan, Ibrahim Abdelaziz, Achille Fokoue, Pavan Kapanipathi, Salim Roukos, Alexander Gray
   Abstract: With the growing interest in large language models, the need for evaluating the quality of machine text compared to reference (typically human-generated) text has become focal attention. Most recent works focus either on task-specific evaluation metrics or study the properties of machine-generated text captured by the existing metrics. In this work, we propose a new evaluation scheme to model human judgments in 7 NLP tasks, based on the fine-grained mismatches between a pair of texts. Inspired by the recent efforts in several NLP tasks for fine-grained evaluation, we introduce a set of 13 mismatch error types such as spatial/geographic errors, entity errors, etc, to guide the model for better prediction of human judgments. We propose a neural framework for evaluating machine texts that uses these mismatch error types as auxiliary tasks and re-purposes the existing single-number evaluation metrics as additional scalar features, in addition to textual features extracted from the machine and reference texts. Our experiments reveal key insights about the existing metrics via the mismatch errors. We show that the mismatch errors between the sentence pairs on the held-out datasets from 7 NLP tasks align well with the human evaluation.
   Submitted 17 June, 2023; originally announced June 2023.
   Comments: Accepted at ACL 2023 (ACL Findings Long)
7. arXiv:2305.01628 [pdf, other] cs.CL cs.LG
   The Benefits of Bad Advice: Autocontrastive Decoding across Model Layers
   Authors: Ariel Gera, Roni Friedman, Ofir Arviv, Chulaka Gunasekara, Benjamin Sznajder, Noam Slonim, Eyal Shnarch
   Abstract: Applying language models to natural language processing tasks typically relies on the representations in the final model layer, as intermediate hidden layer representations are presumed to be less informative. In this work, we argue that due to the gradual improvement across model layers, additional information can be gleaned from the contrast between higher and lower layers during inference. Specifically, in choosing between the probable next token predictions of a generative model, the predictions of lower layers can be used to highlight which candidates are best avoided. We propose a novel approach that utilizes the contrast between layers to improve text generation outputs, and show that it mitigates degenerative behaviors of the model in open-ended generation, significantly improving the quality of generated texts. Furthermore, our results indicate that contrasting between model layers at inference time can yield substantial benefits to certain aspects of general language model capabilities, more effectively extracting knowledge during inference from a given set of model parameters.
   Submitted 2 May, 2023; originally announced May 2023.
   Comments: 9 pages, 8 figures; To be published in ACL 2023
8. arXiv:2301.01015 [pdf, other] cs.CV cs.AI cs.CL
   Semi-Structured Object Sequence Encoders
   Authors: Rudra Murthy V, Riyaz Bhat, Chulaka Gunasekara, Siva Sankalp Patel, Hui Wan, Tejas Indulal Dhamecha, Danish Contractor, Marina Danilevsky
   Abstract: In this paper we explore the task of modeling semi-structured object sequences; in particular, we focus our attention on the problem of developing a structure-aware input representation for such sequences. Examples of such data include user activity on websites, machine logs, and many others. This type of data is often represented as a sequence of sets of key-value pairs over time and can present modeling challenges due to an ever-increasing sequence length. We propose a two-part approach, which first considers each key independently and encodes a representation of its values over time; we then self-attend over these value-aware key representations to accomplish a downstream task. This allows us to operate on longer object sequences than existing methods. We introduce a novel shared-attention-head architecture between the two modules and present an innovative training schedule that interleaves the training of both modules with shared weights for some attention heads. Our experiments on multiple prediction tasks using real-world data demonstrate that our approach outperforms a unified network with hierarchical encoding, as well as other methods including a record-centric representation and a flattened representation of the sequence.
   Submitted 22 May, 2023; v1 submitted 3 January, 2023; originally announced January 2023.
9. arXiv:2203.15590 [pdf, other] cs.CL
   Heuristic-based Inter-training to Improve Few-shot Multi-perspective Dialog Summarization
   Authors: Benjamin Sznajder, Chulaka Gunasekara, Guy Lev, Sachin Joshi, Eyal Shnarch, Noam Slonim
   Abstract: Many organizations require their customer-care agents to manually summarize their conversations with customers. These summaries are vital for decision making purposes of the organizations. The perspective of the summary that is required to be created depends on the application of the summaries. With this work, we study the multi-perspective summarization of customer-care conversations between support agents and customers. We observe that there are different heuristics that are associated with summaries of different perspectives, and explore these heuristics to create weak-labeled data for intermediate training of the models before fine-tuning with scarce human annotated summaries. Most importantly, we show that our approach supports models to generate multi-perspective summaries with a very small amount of annotated data. For example, our approach achieves 94% of the performance (Rouge-2) of a model trained with the original data, by training only with 7% of the original data.
   Submitted 30 March, 2022; v1 submitted 29 March, 2022; originally announced March 2022.
   ACM Class: I.2.7
10. arXiv:2111.11894 [pdf, other] cs.CL
   TWEETSUMM -- A Dialog Summarization Dataset for Customer Service
   Authors: Guy Feigenblat, Chulaka Gunasekara, Benjamin Sznajder, Sachindra Joshi, David Konopnicki, Ranit Aharonov
   Abstract: In a typical customer service chat scenario, customers contact a support center to ask for help or raise complaints, and human agents try to solve the issues. In most cases, at the end of the conversation, agents are asked to write a short summary emphasizing the problem and the proposed solution, usually for the benefit of other agents that may have to deal with the same customer or issue. The goal of the present article is advancing the automation of this task. We introduce the first large scale, high quality, customer care dialog summarization dataset with close to 6500 human annotated summaries. The data is based on real-world customer support dialogs and includes both extractive and abstractive summaries. We also introduce a new unsupervised, extractive summarization method specific to dialogs.
   Submitted 23 November, 2021; originally announced November 2021.
   Journal ref: Findings of the Association for Computational Linguistics: EMNLP (2021) 245--260
11. arXiv:2106.03337 [pdf, other] cs.CL
   Summary Grounded Conversation Generation
   Authors: Chulaka Gunasekara, Guy Feigenblat, Benjamin Sznajder, Sachindra Joshi, David Konopnicki
   Abstract: Many conversation datasets have been constructed in the recent years using crowdsourcing. However, the data collection process can be time consuming and presents many challenges to ensure data quality. Since language generation has improved immensely in recent years with the advancement of pre-trained language models, we investigate how such models can be utilized to generate entire conversations, given only a summary of a conversation as the input. We explore three approaches to generate summary grounded conversations, and evaluate the generated conversations using automatic measures and human judgements. We also show that the accuracy of conversation summarization can be improved by augmenting a conversation summarization dataset with generated conversations.
   Submitted 7 June, 2021; originally announced June 2021.
   Comments: Findings of ACL - 2021, 9 pages
12. arXiv:2104.04488 [pdf, other] cs.CL
   Explaining Neural Network Predictions on Sentence Pairs via Learning Word-Group Masks
   Authors: Hanjie Chen, Song Feng, Jatin Ganhotra, Hui Wan, Chulaka Gunasekara, Sachindra Joshi, Yangfeng Ji
   Abstract: Explaining neural network models is important for increasing their trustworthiness in real-world applications. Most existing methods generate post-hoc explanations for neural network models by identifying individual feature attributions or detecting interactions between adjacent features. However, for models with text pairs as inputs (e.g., paraphrase identification), existing methods are not sufficient to capture feature interactions between two texts and their simple extension of computing all word-pair interactions between two texts is computationally inefficient. In this work, we propose the Group Mask (GMASK) method to implicitly detect word correlations by grouping correlated words from the input text pair together and measure their contribution to the corresponding NLP tasks as a whole. The proposed method is evaluated with two different model architectures (decomposable attention model and BERT) across four datasets, including natural language inference and paraphrase identification tasks. Experiments show the effectiveness of GMASK in providing faithful explanations to these models.
   Submitted 13 April, 2021; v1 submitted 9 April, 2021; originally announced April 2021.
   Comments: NAACL-HLT 2021
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.04488v2-abstract-full').style.display = 'none'; document.getElementById('2104.04488v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 9 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">NAACL-HLT 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2011.06623">arXiv:2011.06623</a> <span> [<a href="https://arxiv.org/pdf/2011.06623">pdf</a>, <a href="https://arxiv.org/format/2011.06623">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> doc2dial: A Goal-Oriented Document-Grounded Dialogue Dataset </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Feng%2C+S">Song Feng</a>, <a href="/search/cs?searchtype=author&query=Wan%2C+H">Hui Wan</a>, <a href="/search/cs?searchtype=author&query=Gunasekara%2C+C">Chulaka Gunasekara</a>, <a href="/search/cs?searchtype=author&query=Patel%2C+S+S">Siva Sankalp Patel</a>, <a href="/search/cs?searchtype=author&query=Joshi%2C+S">Sachindra Joshi</a>, <a href="/search/cs?searchtype=author&query=Lastras%2C+L+A">Luis A. Lastras</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2011.06623v2-abstract-short" style="display: inline;"> We introduce doc2dial, a new dataset of goal-oriented dialogues that are grounded in the associated documents. Inspired by how the authors compose documents for guiding end users, we first construct dialogue flows based on the content elements that corresponds to higher-level relations across text sections as well as lower-level relations between discourse units within a section. Then we present t… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.06623v2-abstract-full').style.display = 'inline'; document.getElementById('2011.06623v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2011.06623v2-abstract-full" style="display: none;"> We introduce doc2dial, a new dataset of goal-oriented dialogues that are grounded in the associated documents. Inspired by how the authors compose documents for guiding end users, we first construct dialogue flows based on the content elements that corresponds to higher-level relations across text sections as well as lower-level relations between discourse units within a section. Then we present these dialogue flows to crowd contributors to create conversational utterances. The dataset includes about 4800 annotated conversations with an average of 14 turns that are grounded in over 480 documents from four domains. Compared to the prior document-grounded dialogue datasets, this dataset covers a variety of dialogue scenes in information-seeking conversations. 
Submitted 18 November, 2020; v1 submitted 12 November, 2020; originally announced November 2020.
Comments: EMNLP 2020

arXiv:2011.06486 (https://arxiv.org/abs/2011.06486) [pdf, ps, other] cs.CL
Overview of the Ninth Dialog System Technology Challenge: DSTC9
Authors: Chulaka Gunasekara, Seokhwan Kim, Luis Fernando D'Haro, Abhinav Rastogi, Yun-Nung Chen, Mihail Eric, Behnam Hedayatnia, Karthik Gopalakrishnan, Yang Liu, Chao-Wei Huang, Dilek Hakkani-Tür, Jinchao Li, Qi Zhu, Lingxiao Luo, Lars Liden, Kaili Huang, Shahin Shayandeh, Runze Liang, Baolin Peng, Zheng Zhang, Swadheen Shukla, Minlie Huang, Jianfeng Gao, Shikib Mehri, Yulan Feng, et al. (14 additional authors not shown)
Abstract: This paper introduces the Ninth Dialog System Technology Challenge (DSTC-9). This edition of the DSTC focuses on applying end-to-end dialog technologies to four distinct tasks in dialog systems, namely: (1) task-oriented dialog modeling with unstructured knowledge access, (2) multi-domain task-oriented dialog, (3) interactive evaluation of dialog, and (4) situated interactive multi-modal dialog. This paper describes the task definition, provided datasets, baselines and evaluation set-up for each track. We also summarize the results of the submitted systems to highlight the overall trends of the state-of-the-art technologies for the tasks.
Submitted 12 November, 2020; originally announced November 2020.
arXiv:2010.02305 (https://arxiv.org/abs/2010.02305) [pdf, ps, other] cs.CL
Conversational Document Prediction to Assist Customer Care Agents
Authors: Jatin Ganhotra, Haggai Roitman, Doron Cohen, Nathaniel Mills, Chulaka Gunasekara, Yosi Mass, Sachindra Joshi, Luis Lastras, David Konopnicki
Abstract: A frequent pattern in customer care conversations is the agents responding with appropriate webpage URLs that address users' needs. We study the task of predicting the documents that customer care agents can use to facilitate users' needs. We also introduce a new public dataset which supports the aforementioned problem. Using this dataset and two others, we investigate state-of-the-art deep learning (DL) and information retrieval (IR) models for the task. Additionally, we analyze the practicality of such systems in terms of inference time complexity. Our results show that a hybrid IR+DL approach provides the best of both worlds.
Submitted 5 October, 2020; originally announced October 2020.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">EMNLP 2020. The released Twitter dataset is available at: https://github.com/IBM/twitter-customer-care-document-prediction</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1911.06394">arXiv:1911.06394</a> <span> [<a href="https://arxiv.org/pdf/1911.06394">pdf</a>, <a href="https://arxiv.org/format/1911.06394">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> The Eighth Dialog System Technology Challenge </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Kim%2C+S">Seokhwan Kim</a>, <a href="/search/cs?searchtype=author&query=Galley%2C+M">Michel Galley</a>, <a href="/search/cs?searchtype=author&query=Gunasekara%2C+C">Chulaka Gunasekara</a>, <a href="/search/cs?searchtype=author&query=Lee%2C+S">Sungjin Lee</a>, <a href="/search/cs?searchtype=author&query=Atkinson%2C+A">Adam Atkinson</a>, <a href="/search/cs?searchtype=author&query=Peng%2C+B">Baolin Peng</a>, <a href="/search/cs?searchtype=author&query=Schulz%2C+H">Hannes Schulz</a>, <a href="/search/cs?searchtype=author&query=Gao%2C+J">Jianfeng Gao</a>, <a href="/search/cs?searchtype=author&query=Li%2C+J">Jinchao Li</a>, <a href="/search/cs?searchtype=author&query=Adada%2C+M">Mahmoud Adada</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+M">Minlie Huang</a>, <a href="/search/cs?searchtype=author&query=Lastras%2C+L">Luis Lastras</a>, <a href="/search/cs?searchtype=author&query=Kummerfeld%2C+J+K">Jonathan K. Kummerfeld</a>, <a href="/search/cs?searchtype=author&query=Lasecki%2C+W+S">Walter S. Lasecki</a>, <a href="/search/cs?searchtype=author&query=Hori%2C+C">Chiori Hori</a>, <a href="/search/cs?searchtype=author&query=Cherian%2C+A">Anoop Cherian</a>, <a href="/search/cs?searchtype=author&query=Marks%2C+T+K">Tim K. Marks</a>, <a href="/search/cs?searchtype=author&query=Rastogi%2C+A">Abhinav Rastogi</a>, <a href="/search/cs?searchtype=author&query=Zang%2C+X">Xiaoxue Zang</a>, <a href="/search/cs?searchtype=author&query=Sunkara%2C+S">Srinivas Sunkara</a>, <a href="/search/cs?searchtype=author&query=Gupta%2C+R">Raghav Gupta</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1911.06394v1-abstract-short" style="display: inline;"> This paper introduces the Eighth Dialog System Technology Challenge. In line with recent challenges, the eighth edition focuses on applying end-to-end dialog technologies in a pragmatic way for multi-domain task-completion, noetic response selection, audio visual scene-aware dialog, and schema-guided dialog state tracking tasks. This paper describes the task definition, provided datasets, and eval… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1911.06394v1-abstract-full').style.display = 'inline'; document.getElementById('1911.06394v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1911.06394v1-abstract-full" style="display: none;"> This paper introduces the Eighth Dialog System Technology Challenge. 
Abstract: This paper introduces the Eighth Dialog System Technology Challenge. In line with recent challenges, the eighth edition focuses on applying end-to-end dialog technologies in a pragmatic way for multi-domain task-completion, noetic response selection, audio visual scene-aware dialog, and schema-guided dialog state tracking tasks. This paper describes the task definition, provided datasets, and evaluation set-up for each track. We also summarize the results of the submitted systems to highlight the overall trends of the state-of-the-art technologies for the tasks.
Submitted 14 November, 2019; originally announced November 2019.
Comments: Submitted to NeurIPS 2019 3rd Conversational AI Workshop

arXiv:1911.02060 (https://arxiv.org/abs/1911.02060) [pdf, other] cs.CL cs.AI
Infusing Knowledge into the Textual Entailment Task Using Graph Convolutional Networks
Authors: Pavan Kapanipathi, Veronika Thost, Siva Sankalp Patel, Spencer Whitehead, Ibrahim Abdelaziz, Avinash Balakrishnan, Maria Chang, Kshitij Fadnis, Chulaka Gunasekara, Bassem Makni, Nicholas Mattei, Kartik Talamadupula, Achille Fokoue
Abstract: Textual entailment is a fundamental task in natural language processing. Most approaches for solving the problem use only the textual content present in training data. A few approaches have shown that information from external knowledge sources like knowledge graphs (KGs) can add value, in addition to the textual content, by providing background knowledge that may be critical for a task. However, the proposed models do not fully exploit the information in the usually large and noisy KGs, and it is not clear how it can be effectively encoded to be useful for entailment. We present an approach that complements text-based entailment models with information from KGs by (1) using Personalized PageRank to generate contextual subgraphs with reduced noise and (2) encoding these subgraphs using graph convolutional networks to capture KG structure. Our technique extends the capability of text models by exploiting structural and semantic information found in KGs. We evaluate our approach on multiple textual entailment datasets and show that the use of external knowledge helps improve prediction accuracy. This is particularly evident in the challenging BreakingNLI dataset, where we see an absolute improvement of 5-20% over multiple text-based entailment models.
Submitted 21 November, 2019; v1 submitted 5 November, 2019; originally announced November 2019.
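Step (1) in the abstract above, extracting a reduced-noise contextual subgraph around the concepts mentioned in a text pair, can be illustrated with personalized PageRank over a toy graph. The graph, seed concepts, damping factor, and top-k cutoff below are invented for the demonstration; they are not the paper's knowledge graph or hyperparameters.

```python
# Illustrative sketch: personalized PageRank to pull a small "contextual
# subgraph" around concepts mentioned in a premise/hypothesis pair.
# The toy graph, seed concepts, and top-k cutoff are assumptions for the demo.
import networkx as nx

kg = nx.Graph()
kg.add_edges_from([
    ("dog", "animal"), ("cat", "animal"), ("animal", "living_thing"),
    ("dog", "pet"), ("cat", "pet"), ("pet", "household"),
    ("car", "vehicle"), ("vehicle", "machine"),
])

# Concepts found in the text pair act as the personalization (restart) set.
seeds = {"dog": 0.5, "pet": 0.5}

scores = nx.pagerank(kg, alpha=0.85, personalization=seeds)

# Keep only the highest-scoring nodes; the edges among them form the subgraph
# that a graph convolutional network would then encode.
top_nodes = sorted(scores, key=scores.get, reverse=True)[:5]
subgraph = kg.subgraph(top_nodes)
print(sorted(subgraph.nodes()))
print(sorted(subgraph.edges()))
```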
arXiv:1901.03461 (https://arxiv.org/abs/1901.03461) [pdf, ps, other] cs.CL
Dialog System Technology Challenge 7
Authors: Koichiro Yoshino, Chiori Hori, Julien Perez, Luis Fernando D'Haro, Lazaros Polymenakos, Chulaka Gunasekara, Walter S. Lasecki, Jonathan K. Kummerfeld, Michel Galley, Chris Brockett, Jianfeng Gao, Bill Dolan, Xiang Gao, Huda Alamari, Tim K. Marks, Devi Parikh, Dhruv Batra
Abstract: This paper introduces the Seventh Dialog System Technology Challenges (DSTC), which use shared datasets to explore the problem of building dialog systems. Recently, end-to-end dialog modeling approaches have been applied to various dialog tasks. The seventh DSTC (DSTC7) focuses on developing technologies related to end-to-end dialog systems for (1) sentence selection, (2) sentence generation, and (3) audio visual scene aware dialog. This paper summarizes the overall setup and results of DSTC7, including detailed descriptions of the different tracks and provided datasets. We also describe overall trends in the submitted systems and the key results. Each track introduced new datasets and participants achieved impressive results using state-of-the-art end-to-end technologies.
Submitted 10 January, 2019; originally announced January 2019.
Comments: This paper is presented at the NIPS 2018 2nd Conversational AI workshop

arXiv:1812.10356 (https://arxiv.org/abs/1812.10356) [pdf, other] cs.CL
Quantized-Dialog Language Model for Goal-Oriented Conversational Systems
Authors: R. Chulaka Gunasekara, David Nahamoo, Lazaros C. Polymenakos, Jatin Ganhotra, Kshitij P. Fadnis
Abstract: We propose a novel methodology to address dialog learning in the context of goal-oriented conversational systems. The key idea is to quantize the dialog space into clusters and create a language model across the clusters, thus allowing for an accurate choice of the next utterance in the conversation. The language model relies on n-grams associated with clusters of utterances. This quantized-dialog language model methodology has been applied to the end-to-end goal-oriented track of the latest Dialog System Technology Challenges (DSTC6). The objective is to find the correct system utterance from a pool of candidates in order to complete a dialog between a user and an automated restaurant-reservation system. Our results show that the technique proposed in this paper achieves high accuracy regarding selection of the correct candidate utterance, and outperforms other state-of-the-art approaches based on neural networks.
Submitted 26 December, 2018; originally announced December 2018.
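As a minimal sketch of the quantized-dialog idea described above (cluster the utterances, then use an n-gram model over cluster IDs to choose the next utterance), the snippet below clusters utterances with TF-IDF and k-means and scores candidate responses by bigram counts over cluster sequences. The toy dialogs, vectorizer, cluster count, and scoring rule are assumptions, not the authors' DSTC6 system.

```python
# Illustrative sketch of the quantized-dialog idea: cluster utterances, then
# score candidate next utterances with an n-gram model over cluster IDs.
# The toy dialogs, number of clusters, and bigram scoring are assumptions.
from collections import Counter
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer

dialogs = [
    ["hi i need a table for two", "which cuisine would you like", "italian please",
     "booking an italian restaurant for two"],
    ["hello book a table for four", "which cuisine would you like", "thai please",
     "booking a thai restaurant for four"],
]

utterances = [u for d in dialogs for u in d]
vec = TfidfVectorizer().fit(utterances)
km = KMeans(n_clusters=4, n_init=10, random_state=0).fit(vec.transform(utterances))

def cluster_of(utterance):
    """Quantize an utterance to its cluster ID."""
    return int(km.predict(vec.transform([utterance]))[0])

# Bigram counts over the cluster sequences observed in the training dialogs.
bigrams = Counter()
for d in dialogs:
    ids = [cluster_of(u) for u in d]
    bigrams.update(zip(ids, ids[1:]))

def score_candidate(last_turn, candidate):
    """Score a candidate next utterance by the bigram count of its cluster."""
    return bigrams[(cluster_of(last_turn), cluster_of(candidate))]

candidates = ["booking an italian restaurant for two", "hello book a table for four"]
print(max(candidates, key=lambda c: score_candidate("italian please", c)))
```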
arXiv:1810.11118 (https://arxiv.org/abs/1810.11118) [pdf, other] cs.CL
DOI: https://doi.org/10.18653/v1/P19-1374
A Large-Scale Corpus for Conversation Disentanglement
Authors: Jonathan K. Kummerfeld, Sai R. Gouravajhala, Joseph Peper, Vignesh Athreya, Chulaka Gunasekara, Jatin Ganhotra, Siva Sankalp Patel, Lazaros Polymenakos, Walter S. Lasecki
Abstract: Disentangling conversations mixed together in a single stream of messages is a difficult task, made harder by the lack of large manually annotated datasets. We created a new dataset of 77,563 messages manually annotated with reply-structure graphs that both disentangle conversations and define internal conversation structure. Our dataset is 16 times larger than all previously released datasets combined, the first to include adjudication of annotation disagreements, and the first to include context. We use our data to re-examine prior work, in particular, finding that 80% of conversations in a widely used dialogue corpus are either missing messages or contain extra messages. Our manually-annotated data presents an opportunity to develop robust data-driven methods for conversation disentanglement, which will help advance dialogue research.
Submitted 18 July, 2019; v1 submitted 25 October, 2018; originally announced October 2018.
Comments: To appear at ACL
ACM Class: I.2.7
Journal ref: ACL (2019) 3846-3856
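Given reply-structure annotations of the kind described above, the disentangled conversations fall out as connected components of the reply graph, while the edges inside each component give its internal structure. The message IDs and reply edges below are made up for illustration and are not taken from the released corpus.

```python
# Illustrative sketch: once messages carry reply-structure edges, each
# conversation is a connected component of the reply graph.
# The message IDs and reply edges are made-up examples, not corpus data.
import networkx as nx

messages = ["m1", "m2", "m3", "m4", "m5", "m6"]
# (child, parent) pairs: which earlier message each message replies to.
reply_edges = [("m2", "m1"), ("m4", "m2"), ("m5", "m3"), ("m6", "m5")]

g = nx.Graph()
g.add_nodes_from(messages)
g.add_edges_from(reply_edges)

# Each connected component is one disentangled conversation; the reply edges
# inside a component define that conversation's internal structure.
for i, component in enumerate(nx.connected_components(g), 1):
    print(f"conversation {i}: {sorted(component)}")
```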
arXiv:1802.04358 (https://arxiv.org/abs/1802.04358) [pdf, other] cs.CL
A Unified Implicit Dialog Framework for Conversational Search
Authors: Song Feng, R. Chulaka Gunasekara, Sunil Shashidhara, Kshitij P. Fadnis, Lazaros C. Polymenakos
Abstract: We propose a unified Implicit Dialog framework for goal-oriented, information-seeking tasks of Conversational Search applications. It aims to enable dialog interactions with domain data without relying on explicitly encoded rules, instead utilizing the underlying data representation to build the components required for dialog interaction, which we refer to as Implicit Dialog in this work. The proposed framework consists of a pipeline of end-to-end trainable modules. A centralized knowledge representation is used to semantically ground multiple dialog modules. An associated set of tools is integrated with the framework to gather end users' input for continuous improvement of the system. The goal is to facilitate development of conversational systems by identifying the components and the data that can be adapted and reused across many end-user applications. We demonstrate our approach by creating conversational agents for several independent domains.
Submitted 12 February, 2018; originally announced February 2018.
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Appeared as a demo in AAAI-2018</span> </p> </li> </ol> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 