CINXE.COM

Search | arXiv e-print repository

<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. <a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1&ndash;50 of 90 results for author: <span class="mathjax">Geist, M</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&amp;query=Geist%2C+M">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." type="text" value="Geist, M"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Geist%2C+M&amp;terms-0-field=author&amp;size=50&amp;order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Geist, M"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. </div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Geist%2C+M&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Geist%2C+M&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Geist%2C+M&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.11677">arXiv:2410.11677</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.11677">pdf</a>, <a href="https://arxiv.org/format/2410.11677">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Understanding Likelihood Over-optimisation in Direct Alignment Algorithms </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shi%2C+Z">Zhengyan Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Land%2C+S">Sander Land</a>, <a href="/search/cs?searchtype=author&amp;query=Locatelli%2C+A">Acyr Locatelli</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Bartolo%2C+M">Max Bartolo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.11677v2-abstract-short" style="display: inline;"> Direct Alignment Algorithms (DAAs), such as Direct Preference Optimisation (DPO) and Identity Preference Optimisation (IPO), have emerged as alternatives to online Reinforcement Learning from Human Feedback (RLHF) algorithms such as Proximal Policy Optimisation (PPO) for aligning language models to human preferences, without the need for explicit reward modelling. These methods generally aim to in&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11677v2-abstract-full').style.display = 'inline'; document.getElementById('2410.11677v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.11677v2-abstract-full" style="display: none;"> Direct Alignment Algorithms (DAAs), such as Direct Preference Optimisation (DPO) and Identity Preference Optimisation (IPO), have emerged as alternatives to online Reinforcement Learning from Human Feedback (RLHF) algorithms such as Proximal Policy Optimisation (PPO) for aligning language models to human preferences, without the need for explicit reward modelling. These methods generally aim to increase the likelihood of generating better (preferred) completions while discouraging worse (non-preferred) ones, while staying close to the original model&#39;s behaviour. In this work, we explore the relationship between completion likelihood and model performance in state-of-the-art DAAs, and identify a critical issue of likelihood over-optimisation. Contrary to expectations, we find that higher likelihood of better completions and larger margins between better and worse completion likelihoods do not necessarily lead to better performance, and may even degrade it. Our analysis reveals that while higher likelihood correlates with better memorisation of factual knowledge patterns, a slightly lower completion likelihood tends to improve output diversity, thus leading to better generalisation to unseen scenarios. Moreover, we identify two key indicators that signal when over-optimised output diversity begins to harm performance: Decreasing Entropy over Top-k Tokens and Diminishing Top-k Probability Mass. Our experimental results validate that these indicators are reliable signs of declining performance under different regularisations, helping prevent over-optimisation and improve alignment with human preferences. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.11677v2-abstract-full').style.display = 'none'; document.getElementById('2410.11677v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Preprint Version</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2410.06212">arXiv:2410.06212</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2410.06212">pdf</a>, <a href="https://arxiv.org/format/2410.06212">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Solving robust MDPs as a sequence of static RL problems </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zouitine%2C+A">Adil Zouitine</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Rachelson%2C+E">Emmanuel Rachelson</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2410.06212v1-abstract-short" style="display: inline;"> Designing control policies whose performance level is guaranteed to remain above a given threshold in a span of environments is a critical feature for the adoption of reinforcement learning (RL) in real-world applications. The search for such robust policies is a notoriously difficult problem, related to the so-called dynamic model of transition function uncertainty, where the environment dynamics&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.06212v1-abstract-full').style.display = 'inline'; document.getElementById('2410.06212v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2410.06212v1-abstract-full" style="display: none;"> Designing control policies whose performance level is guaranteed to remain above a given threshold in a span of environments is a critical feature for the adoption of reinforcement learning (RL) in real-world applications. The search for such robust policies is a notoriously difficult problem, related to the so-called dynamic model of transition function uncertainty, where the environment dynamics are allowed to change at each time step. But in practical cases, one is rather interested in robustness to a span of static transition models throughout interaction episodes. The static model is known to be harder to solve than the dynamic one, and seminal algorithms, such as robust value iteration, as well as most recent works on deep robust RL, build upon the dynamic model. In this work, we propose to revisit the static model. We suggest an analysis of why solving the static model under some mild hypotheses is a reasonable endeavor, based on an equivalence with the dynamic model, and formalize the general intuition that robust MDPs can be solved by tackling a series of static problems. We introduce a generic meta-algorithm called IWOCS, which incrementally identifies worst-case transition models so as to guide the search for a robust policy. Discussion on IWOCS sheds light on new ways to decouple policy optimization and adversarial transition functions and opens new perspectives for analysis. We derive a deep RL version of IWOCS and demonstrate it is competitive with state-of-the-art algorithms on classical benchmarks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2410.06212v1-abstract-full').style.display = 'none'; document.getElementById('2410.06212v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">12 pages</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2409.01369">arXiv:2409.01369</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2409.01369">pdf</a>, <a href="https://arxiv.org/format/2409.01369">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Imitating Language via Scalable Inverse Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wulfmeier%2C+M">Markus Wulfmeier</a>, <a href="/search/cs?searchtype=author&amp;query=Bloesch%2C+M">Michael Bloesch</a>, <a href="/search/cs?searchtype=author&amp;query=Vieillard%2C+N">Nino Vieillard</a>, <a href="/search/cs?searchtype=author&amp;query=Ahuja%2C+A">Arun Ahuja</a>, <a href="/search/cs?searchtype=author&amp;query=Bornschein%2C+J">Jorg Bornschein</a>, <a href="/search/cs?searchtype=author&amp;query=Huang%2C+S">Sandy Huang</a>, <a href="/search/cs?searchtype=author&amp;query=Sokolov%2C+A">Artem Sokolov</a>, <a href="/search/cs?searchtype=author&amp;query=Barnes%2C+M">Matt Barnes</a>, <a href="/search/cs?searchtype=author&amp;query=Desjardins%2C+G">Guillaume Desjardins</a>, <a href="/search/cs?searchtype=author&amp;query=Bewley%2C+A">Alex Bewley</a>, <a href="/search/cs?searchtype=author&amp;query=Bechtle%2C+S+M+E">Sarah Maria Elisabeth Bechtle</a>, <a href="/search/cs?searchtype=author&amp;query=Springenberg%2C+J+T">Jost Tobias Springenberg</a>, <a href="/search/cs?searchtype=author&amp;query=Momchev%2C+N">Nikola Momchev</a>, <a href="/search/cs?searchtype=author&amp;query=Bachem%2C+O">Olivier Bachem</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Riedmiller%2C+M">Martin Riedmiller</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2409.01369v1-abstract-short" style="display: inline;"> The majority of language model training builds on imitation learning. It covers pretraining, supervised fine-tuning, and affects the starting conditions for reinforcement learning from human feedback (RLHF). The simplicity and scalability of maximum likelihood estimation (MLE) for next token prediction led to its role as predominant paradigm. However, the broader field of imitation learning can mo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.01369v1-abstract-full').style.display = 'inline'; document.getElementById('2409.01369v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2409.01369v1-abstract-full" style="display: none;"> The majority of language model training builds on imitation learning. It covers pretraining, supervised fine-tuning, and affects the starting conditions for reinforcement learning from human feedback (RLHF). The simplicity and scalability of maximum likelihood estimation (MLE) for next token prediction led to its role as predominant paradigm. However, the broader field of imitation learning can more effectively utilize the sequential structure underlying autoregressive generation. We focus on investigating the inverse reinforcement learning (IRL) perspective to imitation, extracting rewards and directly optimizing sequences instead of individual token likelihoods and evaluate its benefits for fine-tuning large language models. We provide a new angle, reformulating inverse soft-Q-learning as a temporal difference regularized extension of MLE. This creates a principled connection between MLE and IRL and allows trading off added complexity with increased performance and diversity of generations in the supervised fine-tuning (SFT) setting. We find clear advantages for IRL-based imitation, in particular for retaining diversity while maximizing task performance, rendering IRL a strong alternative on fixed SFT datasets even without online data generation. Our analysis of IRL-extracted reward functions further indicates benefits for more robust reward functions via tighter integration of supervised and preference-based LLM post-training. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2409.01369v1-abstract-full').style.display = 'none'; document.getElementById('2409.01369v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.06121">arXiv:2407.06121</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2407.06121">pdf</a>, <a href="https://arxiv.org/format/2407.06121">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Periodic agent-state based Q-learning for POMDPs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Sinha%2C+A">Amit Sinha</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Mahajan%2C+A">Aditya Mahajan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.06121v3-abstract-short" style="display: inline;"> The standard approach for Partially Observable Markov Decision Processes (POMDPs) is to convert them to a fully observed belief-state MDP. However, the belief state depends on the system model and is therefore not viable in reinforcement learning (RL) settings. A widely used alternative is to use an agent state, which is a model-free, recursively updateable function of the observation history. Exa&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.06121v3-abstract-full').style.display = 'inline'; document.getElementById('2407.06121v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.06121v3-abstract-full" style="display: none;"> The standard approach for Partially Observable Markov Decision Processes (POMDPs) is to convert them to a fully observed belief-state MDP. However, the belief state depends on the system model and is therefore not viable in reinforcement learning (RL) settings. A widely used alternative is to use an agent state, which is a model-free, recursively updateable function of the observation history. Examples include frame stacking and recurrent neural networks. Since the agent state is model-free, it is used to adapt standard RL algorithms to POMDPs. However, standard RL algorithms like Q-learning learn a stationary policy. Our main thesis that we illustrate via examples is that because the agent state does not satisfy the Markov property, non-stationary agent-state based policies can outperform stationary ones. To leverage this feature, we propose PASQL (periodic agent-state based Q-learning), which is a variant of agent-state-based Q-learning that learns periodic policies. By combining ideas from periodic Markov chains and stochastic approximation, we rigorously establish that PASQL converges to a cyclic limit and characterize the approximation error of the converged periodic policy. Finally, we present a numerical experiment to highlight the salient features of PASQL and demonstrate the benefit of learning periodic policies over stationary policies. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.06121v3-abstract-full').style.display = 'none'; document.getElementById('2407.06121v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted in the 38th Conference on Neural Information Processing Systems (NeurIPS 2024)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.19188">arXiv:2406.19188</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.19188">pdf</a>, <a href="https://arxiv.org/format/2406.19188">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Averaging log-likelihoods in direct alignment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Grinsztajn%2C+N">Nathan Grinsztajn</a>, <a href="/search/cs?searchtype=author&amp;query=Flet-Berliac%2C+Y">Yannis Flet-Berliac</a>, <a href="/search/cs?searchtype=author&amp;query=Azar%2C+M+G">Mohammad Gheshlaghi Azar</a>, <a href="/search/cs?searchtype=author&amp;query=Strub%2C+F">Florian Strub</a>, <a href="/search/cs?searchtype=author&amp;query=Wu%2C+B">Bill Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Choi%2C+E">Eugene Choi</a>, <a href="/search/cs?searchtype=author&amp;query=Cremer%2C+C">Chris Cremer</a>, <a href="/search/cs?searchtype=author&amp;query=Ahmadian%2C+A">Arash Ahmadian</a>, <a href="/search/cs?searchtype=author&amp;query=Chandak%2C+Y">Yash Chandak</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.19188v1-abstract-short" style="display: inline;"> To better align Large Language Models (LLMs) with human judgment, Reinforcement Learning from Human Feedback (RLHF) learns a reward model and then optimizes it using regularized RL. Recently, direct alignment methods were introduced to learn such a fine-tuned model directly from a preference dataset without computing a proxy reward function. These methods are built upon contrastive losses involvin&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.19188v1-abstract-full').style.display = 'inline'; document.getElementById('2406.19188v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.19188v1-abstract-full" style="display: none;"> To better align Large Language Models (LLMs) with human judgment, Reinforcement Learning from Human Feedback (RLHF) learns a reward model and then optimizes it using regularized RL. Recently, direct alignment methods were introduced to learn such a fine-tuned model directly from a preference dataset without computing a proxy reward function. These methods are built upon contrastive losses involving the log-likelihood of (dis)preferred completions according to the trained model. However, completions have various lengths, and the log-likelihood is not length-invariant. On the other side, the cross-entropy loss used in supervised training is length-invariant, as batches are typically averaged token-wise. To reconcile these approaches, we introduce a principled approach for making direct alignment length-invariant. Formally, we introduce a new averaging operator, to be composed with the optimality operator giving the best policy for the underlying RL problem. It translates into averaging the log-likelihood within the loss. We empirically study the effect of such averaging, observing a trade-off between the length of generations and their scores. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.19188v1-abstract-full').style.display = 'none'; document.getElementById('2406.19188v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.19185">arXiv:2406.19185</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.19185">pdf</a>, <a href="https://arxiv.org/format/2406.19185">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Contrastive Policy Gradient: Aligning LLMs on sequence-level scores in a supervised-friendly fashion </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Flet-Berliac%2C+Y">Yannis Flet-Berliac</a>, <a href="/search/cs?searchtype=author&amp;query=Grinsztajn%2C+N">Nathan Grinsztajn</a>, <a href="/search/cs?searchtype=author&amp;query=Strub%2C+F">Florian Strub</a>, <a href="/search/cs?searchtype=author&amp;query=Choi%2C+E">Eugene Choi</a>, <a href="/search/cs?searchtype=author&amp;query=Cremer%2C+C">Chris Cremer</a>, <a href="/search/cs?searchtype=author&amp;query=Ahmadian%2C+A">Arash Ahmadian</a>, <a href="/search/cs?searchtype=author&amp;query=Chandak%2C+Y">Yash Chandak</a>, <a href="/search/cs?searchtype=author&amp;query=Azar%2C+M+G">Mohammad Gheshlaghi Azar</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.19185v1-abstract-short" style="display: inline;"> Reinforcement Learning (RL) has been used to finetune Large Language Models (LLMs) using a reward model trained from preference data, to better align with human judgment. The recently introduced direct alignment methods, which are often simpler, more stable, and computationally lighter, can more directly achieve this. However, these approaches cannot optimize arbitrary rewards, and the preference-&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.19185v1-abstract-full').style.display = 'inline'; document.getElementById('2406.19185v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.19185v1-abstract-full" style="display: none;"> Reinforcement Learning (RL) has been used to finetune Large Language Models (LLMs) using a reward model trained from preference data, to better align with human judgment. The recently introduced direct alignment methods, which are often simpler, more stable, and computationally lighter, can more directly achieve this. However, these approaches cannot optimize arbitrary rewards, and the preference-based ones are not the only rewards of interest for LLMs (eg., unit tests for code generation or textual entailment for summarization, among others). RL-finetuning is usually done with a variation of policy gradient, which calls for on-policy or near-on-policy samples, requiring costly generations. We introduce Contrastive Policy Gradient, or CoPG, a simple and mathematically principled new RL algorithm that can estimate the optimal policy even from off-policy data. It can be seen as an off-policy policy gradient approach that does not rely on important sampling techniques and highlights the importance of using (the right) state baseline. We show this approach to generalize the direct alignment method IPO (identity preference optimization) and classic policy gradient. We experiment with the proposed CoPG on a toy bandit problem to illustrate its properties, as well as for finetuning LLMs on a summarization task, using a learned reward function considered as ground truth for the purpose of the experiments. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.19185v1-abstract-full').style.display = 'none'; document.getElementById('2406.19185v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.08406">arXiv:2406.08406</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.08406">pdf</a>, <a href="https://arxiv.org/format/2406.08406">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> RRLS : Robust Reinforcement Learning Suite </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zouitine%2C+A">Adil Zouitine</a>, <a href="/search/cs?searchtype=author&amp;query=Bertoin%2C+D">David Bertoin</a>, <a href="/search/cs?searchtype=author&amp;query=Clavier%2C+P">Pierre Clavier</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Rachelson%2C+E">Emmanuel Rachelson</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.08406v1-abstract-short" style="display: inline;"> Robust reinforcement learning is the problem of learning control policies that provide optimal worst-case performance against a span of adversarial environments. It is a crucial ingredient for deploying algorithms in real-world scenarios with prevalent environmental uncertainties and has been a long-standing object of attention in the community, without a standardized set of benchmarks. This contr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.08406v1-abstract-full').style.display = 'inline'; document.getElementById('2406.08406v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.08406v1-abstract-full" style="display: none;"> Robust reinforcement learning is the problem of learning control policies that provide optimal worst-case performance against a span of adversarial environments. It is a crucial ingredient for deploying algorithms in real-world scenarios with prevalent environmental uncertainties and has been a long-standing object of attention in the community, without a standardized set of benchmarks. This contribution endeavors to fill this gap. We introduce the Robust Reinforcement Learning Suite (RRLS), a benchmark suite based on Mujoco environments. RRLS provides six continuous control tasks with two types of uncertainty sets for training and evaluation. Our benchmark aims to standardize robust reinforcement learning tasks, facilitating reproducible and comparable experiments, in particular those from recent state-of-the-art contributions, for which we demonstrate the use of RRLS. It is also designed to be easily expandable to new environments. The source code is available at \href{https://github.com/SuReLI/RRLS}{https://github.com/SuReLI/RRLS}. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.08406v1-abstract-full').style.display = 'none'; document.getElementById('2406.08406v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.08395">arXiv:2406.08395</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.08395">pdf</a>, <a href="https://arxiv.org/format/2406.08395">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Time-Constrained Robust MDPs </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Zouitine%2C+A">Adil Zouitine</a>, <a href="/search/cs?searchtype=author&amp;query=Bertoin%2C+D">David Bertoin</a>, <a href="/search/cs?searchtype=author&amp;query=Clavier%2C+P">Pierre Clavier</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Rachelson%2C+E">Emmanuel Rachelson</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.08395v1-abstract-short" style="display: inline;"> Robust reinforcement learning is essential for deploying reinforcement learning algorithms in real-world scenarios where environmental uncertainty predominates. Traditional robust reinforcement learning often depends on rectangularity assumptions, where adverse probability measures of outcome states are assumed to be independent across different states and actions. This assumption, rarely fulfille&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.08395v1-abstract-full').style.display = 'inline'; document.getElementById('2406.08395v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.08395v1-abstract-full" style="display: none;"> Robust reinforcement learning is essential for deploying reinforcement learning algorithms in real-world scenarios where environmental uncertainty predominates. Traditional robust reinforcement learning often depends on rectangularity assumptions, where adverse probability measures of outcome states are assumed to be independent across different states and actions. This assumption, rarely fulfilled in practice, leads to overly conservative policies. To address this problem, we introduce a new time-constrained robust MDP (TC-RMDP) formulation that considers multifactorial, correlated, and time-dependent disturbances, thus more accurately reflecting real-world dynamics. This formulation goes beyond the conventional rectangularity paradigm, offering new perspectives and expanding the analytical framework for robust RL. We propose three distinct algorithms, each using varying levels of environmental information, and evaluate them extensively on continuous control benchmarks. Our results demonstrate that these algorithms yield an efficient tradeoff between performance and robustness, outperforming traditional deep robust RL methods in time-constrained environments while preserving robustness in classical benchmarks. This study revisits the prevailing assumptions in robust RL and opens new avenues for developing more practical and realistic RL applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.08395v1-abstract-full').style.display = 'none'; document.getElementById('2406.08395v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.04081">arXiv:2406.04081</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.04081">pdf</a>, <a href="https://arxiv.org/format/2406.04081">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Bootstrapping Expectiles in Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Clavier%2C+P">Pierre Clavier</a>, <a href="/search/cs?searchtype=author&amp;query=Rachelson%2C+E">Emmanuel Rachelson</a>, <a href="/search/cs?searchtype=author&amp;query=Pennec%2C+E+L">Erwan Le Pennec</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.04081v1-abstract-short" style="display: inline;"> Many classic Reinforcement Learning (RL) algorithms rely on a Bellman operator, which involves an expectation over the next states, leading to the concept of bootstrapping. To introduce a form of pessimism, we propose to replace this expectation with an expectile. In practice, this can be very simply done by replacing the $L_2$ loss with a more general expectile loss for the critic. Introducing pe&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.04081v1-abstract-full').style.display = 'inline'; document.getElementById('2406.04081v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.04081v1-abstract-full" style="display: none;"> Many classic Reinforcement Learning (RL) algorithms rely on a Bellman operator, which involves an expectation over the next states, leading to the concept of bootstrapping. To introduce a form of pessimism, we propose to replace this expectation with an expectile. In practice, this can be very simply done by replacing the $L_2$ loss with a more general expectile loss for the critic. Introducing pessimism in RL is desirable for various reasons, such as tackling the overestimation problem (for which classic solutions are double Q-learning or the twin-critic approach of TD3) or robust RL (where transitions are adversarial). We study empirically these two cases. For the overestimation problem, we show that the proposed approach, ExpectRL, provides better results than a classic twin-critic. On robust RL benchmarks, involving changes of the environment, we show that our approach is more robust than classic RL algorithms. We also introduce a variation of ExpectRL combined with domain randomization which is competitive with state-of-the-art robust RL agents. Eventually, we also extend \ExpectRL with a mechanism for choosing automatically the expectile value, that is the degree of pessimism <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.04081v1-abstract-full').style.display = 'none'; document.getElementById('2406.04081v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2406.01660">arXiv:2406.01660</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2406.01660">pdf</a>, <a href="https://arxiv.org/format/2406.01660">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Self-Improving Robust Preference Optimization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Choi%2C+E">Eugene Choi</a>, <a href="/search/cs?searchtype=author&amp;query=Ahmadian%2C+A">Arash Ahmadian</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Oilvier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Azar%2C+M+G">Mohammad Gheshlaghi Azar</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2406.01660v3-abstract-short" style="display: inline;"> Both online and offline RLHF methods such as PPO and DPO have been extremely successful in aligning AI with human preferences. Despite their success, the existing methods suffer from a fundamental problem that their optimal solution is highly task-dependent (i.e., not robust to out-of-distribution (OOD) tasks). Here we address this challenge by proposing Self-Improving Robust Preference Optimizati&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.01660v3-abstract-full').style.display = 'inline'; document.getElementById('2406.01660v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2406.01660v3-abstract-full" style="display: none;"> Both online and offline RLHF methods such as PPO and DPO have been extremely successful in aligning AI with human preferences. Despite their success, the existing methods suffer from a fundamental problem that their optimal solution is highly task-dependent (i.e., not robust to out-of-distribution (OOD) tasks). Here we address this challenge by proposing Self-Improving Robust Preference Optimization SRPO, a practical and mathematically principled offline RLHF framework that is completely robust to the changes in the task. The key idea of SRPO is to cast the problem of learning from human preferences as a self-improvement process, which can be mathematically expressed in terms of a min-max objective that aims at joint optimization of self-improvement policy and the generative policy in an adversarial fashion. The solution for this optimization problem is independent of the training task and thus it is robust to its changes. We then show that this objective can be re-expressed in the form of a non-adversarial offline loss which can be optimized using standard supervised optimization techniques at scale without any need for reward model and online inference. We show the effectiveness of SRPO in terms of AI Win-Rate (WR) against human (GOLD) completions. In particular, when SRPO is evaluated on the OOD XSUM dataset, it outperforms the celebrated DPO by a clear margin of 15% after 5 self-revisions, achieving WR of 90%. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2406.01660v3-abstract-full').style.display = 'none'; document.getElementById('2406.01660v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 3 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.01134">arXiv:2405.01134</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2405.01134">pdf</a>, <a href="https://arxiv.org/format/2405.01134">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Leveraging Procedural Generation for Learning Autonomous Peg-in-Hole Assembly in Space </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Orsula%2C+A">Andrej Orsula</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Olivares-Mendez%2C+M">Miguel Olivares-Mendez</a>, <a href="/search/cs?searchtype=author&amp;query=Martinez%2C+C">Carol Martinez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.01134v1-abstract-short" style="display: inline;"> The ability to autonomously assemble structures is crucial for the development of future space infrastructure. However, the unpredictable conditions of space pose significant challenges for robotic systems, necessitating the development of advanced learning techniques to enable autonomous assembly. In this study, we present a novel approach for learning autonomous peg-in-hole assembly in the conte&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.01134v1-abstract-full').style.display = 'inline'; document.getElementById('2405.01134v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.01134v1-abstract-full" style="display: none;"> The ability to autonomously assemble structures is crucial for the development of future space infrastructure. However, the unpredictable conditions of space pose significant challenges for robotic systems, necessitating the development of advanced learning techniques to enable autonomous assembly. In this study, we present a novel approach for learning autonomous peg-in-hole assembly in the context of space robotics. Our focus is on enhancing the generalization and adaptability of autonomous systems through deep reinforcement learning. By integrating procedural generation and domain randomization, we train agents in a highly parallelized simulation environment across a spectrum of diverse scenarios with the aim of acquiring a robust policy. The proposed approach is evaluated using three distinct reinforcement learning algorithms to investigate the trade-offs among various paradigms. We demonstrate the adaptability of our agents to novel scenarios and assembly sequences while emphasizing the potential of leveraging advanced simulation techniques for robot learning in space. Our findings set the stage for future advancements in intelligent robotic systems capable of supporting ambitious space missions and infrastructure development beyond Earth. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.01134v1-abstract-full').style.display = 'none'; document.getElementById('2405.01134v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication at the 2024 International Conference on Space Robotics (iSpaRo) | The source code is available at https://github.com/AndrejOrsula/drl_omni_peg</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2403.03552">arXiv:2403.03552</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2403.03552">pdf</a>, <a href="https://arxiv.org/format/2403.03552">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Population-aware Online Mirror Descent for Mean-Field Games by Deep Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Wu%2C+Z">Zida Wu</a>, <a href="/search/cs?searchtype=author&amp;query=Lauriere%2C+M">Mathieu Lauriere</a>, <a href="/search/cs?searchtype=author&amp;query=Chua%2C+S+J+C">Samuel Jia Cong Chua</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Mehta%2C+A">Ankur Mehta</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2403.03552v1-abstract-short" style="display: inline;"> Mean Field Games (MFGs) have the ability to handle large-scale multi-agent systems, but learning Nash equilibria in MFGs remains a challenging task. In this paper, we propose a deep reinforcement learning (DRL) algorithm that achieves population-dependent Nash equilibrium without the need for averaging or sampling from history, inspired by Munchausen RL and Online Mirror Descent. Through the desig&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.03552v1-abstract-full').style.display = 'inline'; document.getElementById('2403.03552v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2403.03552v1-abstract-full" style="display: none;"> Mean Field Games (MFGs) have the ability to handle large-scale multi-agent systems, but learning Nash equilibria in MFGs remains a challenging task. In this paper, we propose a deep reinforcement learning (DRL) algorithm that achieves population-dependent Nash equilibrium without the need for averaging or sampling from history, inspired by Munchausen RL and Online Mirror Descent. Through the design of an additional inner-loop replay buffer, the agents can effectively learn to achieve Nash equilibrium from any distribution, mitigating catastrophic forgetting. The resulting policy can be applied to various initial distributions. Numerical experiments on four canonical examples demonstrate our algorithm has better convergence properties than SOTA algorithms, in particular a DRL version of Fictitious Play for population-dependent policies. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2403.03552v1-abstract-full').style.display = 'none'; document.getElementById('2403.03552v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.04229">arXiv:2402.04229</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2402.04229">pdf</a>, <a href="https://arxiv.org/format/2402.04229">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> </div> </div> <p class="title is-5 mathjax"> MusicRL: Aligning Music Generation to Human Preferences </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cideron%2C+G">Geoffrey Cideron</a>, <a href="/search/cs?searchtype=author&amp;query=Girgin%2C+S">Sertan Girgin</a>, <a href="/search/cs?searchtype=author&amp;query=Verzetti%2C+M">Mauro Verzetti</a>, <a href="/search/cs?searchtype=author&amp;query=Vincent%2C+D">Damien Vincent</a>, <a href="/search/cs?searchtype=author&amp;query=Kastelic%2C+M">Matej Kastelic</a>, <a href="/search/cs?searchtype=author&amp;query=Borsos%2C+Z">Zal谩n Borsos</a>, <a href="/search/cs?searchtype=author&amp;query=McWilliams%2C+B">Brian McWilliams</a>, <a href="/search/cs?searchtype=author&amp;query=Ungureanu%2C+V">Victor Ungureanu</a>, <a href="/search/cs?searchtype=author&amp;query=Bachem%2C+O">Olivier Bachem</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Hussenot%2C+L">L茅onard Hussenot</a>, <a href="/search/cs?searchtype=author&amp;query=Zeghidour%2C+N">Neil Zeghidour</a>, <a href="/search/cs?searchtype=author&amp;query=Agostinelli%2C+A">Andrea Agostinelli</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.04229v1-abstract-short" style="display: inline;"> We propose MusicRL, the first music generation system finetuned from human feedback. Appreciation of text-to-music models is particularly subjective since the concept of musicality as well as the specific intention behind a caption are user-dependent (e.g. a caption such as &#34;upbeat work-out music&#34; can map to a retro guitar solo or a techno pop beat). Not only this makes supervised training of such&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.04229v1-abstract-full').style.display = 'inline'; document.getElementById('2402.04229v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.04229v1-abstract-full" style="display: none;"> We propose MusicRL, the first music generation system finetuned from human feedback. Appreciation of text-to-music models is particularly subjective since the concept of musicality as well as the specific intention behind a caption are user-dependent (e.g. a caption such as &#34;upbeat work-out music&#34; can map to a retro guitar solo or a techno pop beat). Not only this makes supervised training of such models challenging, but it also calls for integrating continuous human feedback in their post-deployment finetuning. MusicRL is a pretrained autoregressive MusicLM (Agostinelli et al., 2023) model of discrete audio tokens finetuned with reinforcement learning to maximise sequence-level rewards. We design reward functions related specifically to text-adherence and audio quality with the help from selected raters, and use those to finetune MusicLM into MusicRL-R. We deploy MusicLM to users and collect a substantial dataset comprising 300,000 pairwise preferences. Using Reinforcement Learning from Human Feedback (RLHF), we train MusicRL-U, the first text-to-music model that incorporates human feedback at scale. Human evaluations show that both MusicRL-R and MusicRL-U are preferred to the baseline. Ultimately, MusicRL-RU combines the two approaches and results in the best model according to human raters. Ablation studies shed light on the musical attributes influencing human preferences, indicating that text adherence and quality only account for a part of it. This underscores the prevalence of subjectivity in musical appreciation and calls for further involvement of human listeners in the finetuning of music generation models. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.04229v1-abstract-full').style.display = 'none'; document.getElementById('2402.04229v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2401.11237">arXiv:2401.11237</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2401.11237">pdf</a>, <a href="https://arxiv.org/format/2401.11237">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Closing the Gap between TD Learning and Supervised Learning -- A Generalisation Point of View </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ghugare%2C+R">Raj Ghugare</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Berseth%2C+G">Glen Berseth</a>, <a href="/search/cs?searchtype=author&amp;query=Eysenbach%2C+B">Benjamin Eysenbach</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2401.11237v2-abstract-short" style="display: inline;"> Some reinforcement learning (RL) algorithms can stitch pieces of experience to solve a task never seen before during training. This oft-sought property is one of the few ways in which RL methods based on dynamic-programming differ from RL methods based on supervised-learning (SL). Yet, certain RL methods based on off-the-shelf SL algorithms achieve excellent results without an explicit mechanism f&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.11237v2-abstract-full').style.display = 'inline'; document.getElementById('2401.11237v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2401.11237v2-abstract-full" style="display: none;"> Some reinforcement learning (RL) algorithms can stitch pieces of experience to solve a task never seen before during training. This oft-sought property is one of the few ways in which RL methods based on dynamic-programming differ from RL methods based on supervised-learning (SL). Yet, certain RL methods based on off-the-shelf SL algorithms achieve excellent results without an explicit mechanism for stitching; it remains unclear whether those methods forgo this important stitching property. This paper studies this question for the problems of achieving a target goal state and achieving a target return value. Our main result is to show that the stitching property corresponds to a form of combinatorial generalization: after training on a distribution of (state, goal) pairs, one would like to evaluate on (state, goal) pairs not seen together in the training data. Our analysis shows that this sort of generalization is different from i.i.d. generalization. This connection between stitching and generalisation reveals why we should not expect SL-based RL methods to perform stitching, even in the limit of large datasets and models. Based on this analysis, we construct new datasets to explicitly test for this property, revealing that SL-based methods lack this stitching property and hence fail to perform combinatorial generalization. Nonetheless, the connection between stitching and combinatorial generalisation also suggests a simple remedy for improving generalisation in SL: data augmentation. We propose a temporal data augmentation and demonstrate that adding it to SL-based methods enables them to successfully complete tasks not seen together during training. On a high level, this connection illustrates the importance of combinatorial generalization for data efficiency in time-series data beyond tasks beyond RL, like audio, video, or text. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2401.11237v2-abstract-full').style.display = 'none'; document.getElementById('2401.11237v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 March, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ICLR 2024, Project code: https://github.com/RajGhugare19/stitching-is-combinatorial-generalisation</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.11805">arXiv:2312.11805</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.11805">pdf</a>, <a href="https://arxiv.org/format/2312.11805">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Gemini: A Family of Highly Capable Multimodal Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Gemini+Team"> Gemini Team</a>, <a href="/search/cs?searchtype=author&amp;query=Anil%2C+R">Rohan Anil</a>, <a href="/search/cs?searchtype=author&amp;query=Borgeaud%2C+S">Sebastian Borgeaud</a>, <a href="/search/cs?searchtype=author&amp;query=Alayrac%2C+J">Jean-Baptiste Alayrac</a>, <a href="/search/cs?searchtype=author&amp;query=Yu%2C+J">Jiahui Yu</a>, <a href="/search/cs?searchtype=author&amp;query=Soricut%2C+R">Radu Soricut</a>, <a href="/search/cs?searchtype=author&amp;query=Schalkwyk%2C+J">Johan Schalkwyk</a>, <a href="/search/cs?searchtype=author&amp;query=Dai%2C+A+M">Andrew M. Dai</a>, <a href="/search/cs?searchtype=author&amp;query=Hauth%2C+A">Anja Hauth</a>, <a href="/search/cs?searchtype=author&amp;query=Millican%2C+K">Katie Millican</a>, <a href="/search/cs?searchtype=author&amp;query=Silver%2C+D">David Silver</a>, <a href="/search/cs?searchtype=author&amp;query=Johnson%2C+M">Melvin Johnson</a>, <a href="/search/cs?searchtype=author&amp;query=Antonoglou%2C+I">Ioannis Antonoglou</a>, <a href="/search/cs?searchtype=author&amp;query=Schrittwieser%2C+J">Julian Schrittwieser</a>, <a href="/search/cs?searchtype=author&amp;query=Glaese%2C+A">Amelia Glaese</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+J">Jilin Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Pitler%2C+E">Emily Pitler</a>, <a href="/search/cs?searchtype=author&amp;query=Lillicrap%2C+T">Timothy Lillicrap</a>, <a href="/search/cs?searchtype=author&amp;query=Lazaridou%2C+A">Angeliki Lazaridou</a>, <a href="/search/cs?searchtype=author&amp;query=Firat%2C+O">Orhan Firat</a>, <a href="/search/cs?searchtype=author&amp;query=Molloy%2C+J">James Molloy</a>, <a href="/search/cs?searchtype=author&amp;query=Isard%2C+M">Michael Isard</a>, <a href="/search/cs?searchtype=author&amp;query=Barham%2C+P+R">Paul R. Barham</a>, <a href="/search/cs?searchtype=author&amp;query=Hennigan%2C+T">Tom Hennigan</a>, <a href="/search/cs?searchtype=author&amp;query=Lee%2C+B">Benjamin Lee</a> , et al. (1325 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.11805v4-abstract-short" style="display: inline;"> This report introduces a new family of multimodal models, Gemini, that exhibit remarkable capabilities across image, audio, video, and text understanding. The Gemini family consists of Ultra, Pro, and Nano sizes, suitable for applications ranging from complex reasoning tasks to on-device memory-constrained use-cases. Evaluation on a broad range of benchmarks shows that our most-capable Gemini Ultr&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.11805v4-abstract-full').style.display = 'inline'; document.getElementById('2312.11805v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.11805v4-abstract-full" style="display: none;"> This report introduces a new family of multimodal models, Gemini, that exhibit remarkable capabilities across image, audio, video, and text understanding. The Gemini family consists of Ultra, Pro, and Nano sizes, suitable for applications ranging from complex reasoning tasks to on-device memory-constrained use-cases. Evaluation on a broad range of benchmarks shows that our most-capable Gemini Ultra model advances the state of the art in 30 of 32 of these benchmarks - notably being the first model to achieve human-expert performance on the well-studied exam benchmark MMLU, and improving the state of the art in every one of the 20 multimodal benchmarks we examined. We believe that the new capabilities of the Gemini family in cross-modal reasoning and language understanding will enable a wide variety of use cases. We discuss our approach toward post-training and deploying Gemini models responsibly to users through services including Gemini, Gemini Advanced, Google AI Studio, and Cloud Vertex AI. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.11805v4-abstract-full').style.display = 'none'; document.getElementById('2312.11805v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.10787">arXiv:2312.10787</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.10787">pdf</a>, <a href="https://arxiv.org/format/2312.10787">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> </div> </div> <p class="title is-5 mathjax"> Learning Discrete-Time Major-Minor Mean Field Games </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cui%2C+K">Kai Cui</a>, <a href="/search/cs?searchtype=author&amp;query=Dayan%C4%B1kl%C4%B1%2C+G">G枚k莽e Dayan谋kl谋</a>, <a href="/search/cs?searchtype=author&amp;query=Lauri%C3%A8re%2C+M">Mathieu Lauri猫re</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Koeppl%2C+H">Heinz Koeppl</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.10787v1-abstract-short" style="display: inline;"> Recent techniques based on Mean Field Games (MFGs) allow the scalable analysis of multi-player games with many similar, rational agents. However, standard MFGs remain limited to homogeneous players that weakly influence each other, and cannot model major players that strongly influence other players, severely limiting the class of problems that can be handled. We propose a novel discrete time vers&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.10787v1-abstract-full').style.display = 'inline'; document.getElementById('2312.10787v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.10787v1-abstract-full" style="display: none;"> Recent techniques based on Mean Field Games (MFGs) allow the scalable analysis of multi-player games with many similar, rational agents. However, standard MFGs remain limited to homogeneous players that weakly influence each other, and cannot model major players that strongly influence other players, severely limiting the class of problems that can be handled. We propose a novel discrete time version of major-minor MFGs (M3FGs), along with a learning algorithm based on fictitious play and partitioning the probability simplex. Importantly, M3FGs generalize MFGs with common noise and can handle not only random exogeneous environment states but also major players. A key challenge is that the mean field is stochastic and not deterministic as in standard MFGs. Our theoretical investigation verifies both the M3FG model and its algorithmic solution, showing firstly the well-posedness of the M3FG model starting from a finite game of interest, and secondly convergence and approximation guarantees of the fictitious play algorithm. Then, we empirically verify the obtained theoretical results, ablating some of the theoretical assumptions made, and show successful equilibrium learning in three example problems. Overall, we establish a learning framework for a novel and broad class of tractable games. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.10787v1-abstract-full').style.display = 'none'; document.getElementById('2312.10787v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to AAAI 2024</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.01072">arXiv:2312.01072</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.01072">pdf</a>, <a href="https://arxiv.org/format/2312.01072">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> A Survey of Temporal Credit Assignment in Deep Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Pignatelli%2C+E">Eduardo Pignatelli</a>, <a href="/search/cs?searchtype=author&amp;query=Ferret%2C+J">Johan Ferret</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Mesnard%2C+T">Thomas Mesnard</a>, <a href="/search/cs?searchtype=author&amp;query=van+Hasselt%2C+H">Hado van Hasselt</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Toni%2C+L">Laura Toni</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.01072v2-abstract-short" style="display: inline;"> The Credit Assignment Problem (CAP) refers to the longstanding challenge of Reinforcement Learning (RL) agents to associate actions with their long-term consequences. Solving the CAP is a crucial step towards the successful deployment of RL in the real world since most decision problems provide feedback that is noisy, delayed, and with little or no information about the causes. These conditions ma&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.01072v2-abstract-full').style.display = 'inline'; document.getElementById('2312.01072v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.01072v2-abstract-full" style="display: none;"> The Credit Assignment Problem (CAP) refers to the longstanding challenge of Reinforcement Learning (RL) agents to associate actions with their long-term consequences. Solving the CAP is a crucial step towards the successful deployment of RL in the real world since most decision problems provide feedback that is noisy, delayed, and with little or no information about the causes. These conditions make it hard to distinguish serendipitous outcomes from those caused by informed decision-making. However, the mathematical nature of credit and the CAP remains poorly understood and defined. In this survey, we review the state of the art of Temporal Credit Assignment (CA) in deep RL. We propose a unifying formalism for credit that enables equitable comparisons of state-of-the-art algorithms and improves our understanding of the trade-offs between the various methods. We cast the CAP as the problem of learning the influence of an action over an outcome from a finite amount of experience. We discuss the challenges posed by delayed effects, transpositions, and a lack of action influence, and analyse how existing methods aim to address them. Finally, we survey the protocols to evaluate a credit assignment method and suggest ways to diagnose the sources of struggle for different methods. Overall, this survey provides an overview of the field for new-entry practitioners and researchers, it offers a coherent perspective for scholars looking to expedite the starting stages of a new study on the CAP, and it suggests potential directions for future research. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.01072v2-abstract-full').style.display = 'none'; document.getElementById('2312.01072v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">56 pages, 2 figures, 4 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.00886">arXiv:2312.00886</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2312.00886">pdf</a>, <a href="https://arxiv.org/format/2312.00886">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> </div> </div> <p class="title is-5 mathjax"> Nash Learning from Human Feedback </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Munos%2C+R">R茅mi Munos</a>, <a href="/search/cs?searchtype=author&amp;query=Valko%2C+M">Michal Valko</a>, <a href="/search/cs?searchtype=author&amp;query=Calandriello%2C+D">Daniele Calandriello</a>, <a href="/search/cs?searchtype=author&amp;query=Azar%2C+M+G">Mohammad Gheshlaghi Azar</a>, <a href="/search/cs?searchtype=author&amp;query=Rowland%2C+M">Mark Rowland</a>, <a href="/search/cs?searchtype=author&amp;query=Guo%2C+Z+D">Zhaohan Daniel Guo</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+Y">Yunhao Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Mesnard%2C+T">Thomas Mesnard</a>, <a href="/search/cs?searchtype=author&amp;query=Michi%2C+A">Andrea Michi</a>, <a href="/search/cs?searchtype=author&amp;query=Selvi%2C+M">Marco Selvi</a>, <a href="/search/cs?searchtype=author&amp;query=Girgin%2C+S">Sertan Girgin</a>, <a href="/search/cs?searchtype=author&amp;query=Momchev%2C+N">Nikola Momchev</a>, <a href="/search/cs?searchtype=author&amp;query=Bachem%2C+O">Olivier Bachem</a>, <a href="/search/cs?searchtype=author&amp;query=Mankowitz%2C+D+J">Daniel J. Mankowitz</a>, <a href="/search/cs?searchtype=author&amp;query=Precup%2C+D">Doina Precup</a>, <a href="/search/cs?searchtype=author&amp;query=Piot%2C+B">Bilal Piot</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.00886v4-abstract-short" style="display: inline;"> Reinforcement learning from human feedback (RLHF) has emerged as the main paradigm for aligning large language models (LLMs) with human preferences. Typically, RLHF involves the initial step of learning a reward model from human feedback, often expressed as preferences between pairs of text generations produced by a pre-trained LLM. Subsequently, the LLM&#39;s policy is fine-tuned by optimizing it to&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.00886v4-abstract-full').style.display = 'inline'; document.getElementById('2312.00886v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.00886v4-abstract-full" style="display: none;"> Reinforcement learning from human feedback (RLHF) has emerged as the main paradigm for aligning large language models (LLMs) with human preferences. Typically, RLHF involves the initial step of learning a reward model from human feedback, often expressed as preferences between pairs of text generations produced by a pre-trained LLM. Subsequently, the LLM&#39;s policy is fine-tuned by optimizing it to maximize the reward model through a reinforcement learning algorithm. However, an inherent limitation of current reward models is their inability to fully represent the richness of human preferences and their dependency on the sampling distribution. In this study, we introduce an alternative pipeline for the fine-tuning of LLMs using pairwise human feedback. Our approach entails the initial learning of a preference model, which is conditioned on two inputs given a prompt, followed by the pursuit of a policy that consistently generates responses preferred over those generated by any competing policy, thus defining the Nash equilibrium of this preference model. We term this approach Nash learning from human feedback (NLHF). In the context of a tabular policy representation, we present a novel algorithmic solution, Nash-MD, founded on the principles of mirror descent. This algorithm produces a sequence of policies, with the last iteration converging to the regularized Nash equilibrium. Additionally, we explore parametric representations of policies and introduce gradient descent algorithms for deep-learning architectures. To demonstrate the effectiveness of our approach, we present experimental results involving the fine-tuning of a LLM for a text summarization task. We believe NLHF offers a compelling avenue for preference learning and policy optimization with the potential of advancing the field of aligning LLMs with human preferences. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.00886v4-abstract-full').style.display = 'none'; document.getElementById('2312.00886v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 1 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.04266">arXiv:2310.04266</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2310.04266">pdf</a>, <a href="https://arxiv.org/format/2310.04266">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> DRIFT: Deep Reinforcement Learning for Intelligent Floating Platforms Trajectories </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=El-Hariry%2C+M">Matteo El-Hariry</a>, <a href="/search/cs?searchtype=author&amp;query=Richard%2C+A">Antoine Richard</a>, <a href="/search/cs?searchtype=author&amp;query=Muralidharan%2C+V">Vivek Muralidharan</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Olivares-Mendez%2C+M">Miguel Olivares-Mendez</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.04266v2-abstract-short" style="display: inline;"> This investigation introduces a novel deep reinforcement learning-based suite to control floating platforms in both simulated and real-world environments. Floating platforms serve as versatile test-beds to emulate micro-gravity environments on Earth, useful to test autonomous navigation systems for space applications. Our approach addresses the system and environmental uncertainties in controlling&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.04266v2-abstract-full').style.display = 'inline'; document.getElementById('2310.04266v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.04266v2-abstract-full" style="display: none;"> This investigation introduces a novel deep reinforcement learning-based suite to control floating platforms in both simulated and real-world environments. Floating platforms serve as versatile test-beds to emulate micro-gravity environments on Earth, useful to test autonomous navigation systems for space applications. Our approach addresses the system and environmental uncertainties in controlling such platforms by training policies capable of precise maneuvers amid dynamic and unpredictable conditions. Leveraging Deep Reinforcement Learning (DRL) techniques, our suite achieves robustness, adaptability, and good transferability from simulation to reality. Our deep reinforcement learning framework provides advantages such as fast training times, large-scale testing capabilities, rich visualization options, and ROS bindings for integration with real-world robotic systems. Being open access, our suite serves as a comprehensive platform for practitioners who want to replicate similar research in their own simulated environments and labs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.04266v2-abstract-full').style.display = 'none'; document.getElementById('2310.04266v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 September, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 6 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Updated to the version accepted at IROS 2024. Minor revisions based on peer review</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.13824">arXiv:2307.13824</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.13824">pdf</a>, <a href="https://arxiv.org/format/2307.13824">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Offline Reinforcement Learning with On-Policy Q-Function Regularization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shi%2C+L">Laixi Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Dadashi%2C+R">Robert Dadashi</a>, <a href="/search/cs?searchtype=author&amp;query=Chi%2C+Y">Yuejie Chi</a>, <a href="/search/cs?searchtype=author&amp;query=Castro%2C+P+S">Pablo Samuel Castro</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.13824v1-abstract-short" style="display: inline;"> The core challenge of offline reinforcement learning (RL) is dealing with the (potentially catastrophic) extrapolation error induced by the distribution shift between the history dataset and the desired policy. A large portion of prior work tackles this challenge by implicitly/explicitly regularizing the learning policy towards the behavior policy, which is hard to estimate reliably in practice. I&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.13824v1-abstract-full').style.display = 'inline'; document.getElementById('2307.13824v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.13824v1-abstract-full" style="display: none;"> The core challenge of offline reinforcement learning (RL) is dealing with the (potentially catastrophic) extrapolation error induced by the distribution shift between the history dataset and the desired policy. A large portion of prior work tackles this challenge by implicitly/explicitly regularizing the learning policy towards the behavior policy, which is hard to estimate reliably in practice. In this work, we propose to regularize towards the Q-function of the behavior policy instead of the behavior policy itself, under the premise that the Q-function can be estimated more reliably and easily by a SARSA-style estimate and handles the extrapolation error more straightforwardly. We propose two algorithms taking advantage of the estimated Q-function through regularizations, and demonstrate they exhibit strong performance on the D4RL benchmarks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.13824v1-abstract-full').style.display = 'none'; document.getElementById('2307.13824v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Published at European Conference on Machine Learning (ECML), 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.12968">arXiv:2307.12968</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2307.12968">pdf</a>, <a href="https://arxiv.org/format/2307.12968">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> A Connection between One-Step Regularization and Critic Regularization in Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Eysenbach%2C+B">Benjamin Eysenbach</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Levine%2C+S">Sergey Levine</a>, <a href="/search/cs?searchtype=author&amp;query=Salakhutdinov%2C+R">Ruslan Salakhutdinov</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.12968v1-abstract-short" style="display: inline;"> As with any machine learning problem with limited data, effective offline RL algorithms require careful regularization to avoid overfitting. One-step methods perform regularization by doing just a single step of policy improvement, while critic regularization methods do many steps of policy improvement with a regularized objective. These methods appear distinct. One-step methods, such as advantage&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.12968v1-abstract-full').style.display = 'inline'; document.getElementById('2307.12968v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.12968v1-abstract-full" style="display: none;"> As with any machine learning problem with limited data, effective offline RL algorithms require careful regularization to avoid overfitting. One-step methods perform regularization by doing just a single step of policy improvement, while critic regularization methods do many steps of policy improvement with a regularized objective. These methods appear distinct. One-step methods, such as advantage-weighted regression and conditional behavioral cloning, truncate policy iteration after just one step. This ``early stopping&#39;&#39; makes one-step RL simple and stable, but can limit its asymptotic performance. Critic regularization typically requires more compute but has appealing lower-bound guarantees. In this paper, we draw a close connection between these methods: applying a multi-step critic regularization method with a regularization coefficient of 1 yields the same policy as one-step RL. While practical implementations violate our assumptions and critic regularization is typically applied with smaller regularization coefficients, our experiments nevertheless show that our analysis makes accurate, testable predictions about practical offline RL methods (CQL and one-step RL) with commonly-used hyperparameters. Our results that every problem can be solved with a single step of policy improvement, but rather that one-step RL might be competitive with critic regularization on RL problems that demand strong regularization. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.12968v1-abstract-full').style.display = 'none'; document.getElementById('2307.12968v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 24 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to ICML 2023. Video (https://www.youtube.com/watch?v=1xlixIHZ0R4) and code (https://github.com/ben-eysenbach/ac-connection)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.14799">arXiv:2306.14799</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.14799">pdf</a>, <a href="https://arxiv.org/format/2306.14799">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> </div> </div> <p class="title is-5 mathjax"> On Imitation in Mean-field Games </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Ramponi%2C+G">Giorgia Ramponi</a>, <a href="/search/cs?searchtype=author&amp;query=Kolev%2C+P">Pavel Kolev</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+N">Niao He</a>, <a href="/search/cs?searchtype=author&amp;query=Lauri%C3%A8re%2C+M">Mathieu Lauri猫re</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.14799v1-abstract-short" style="display: inline;"> We explore the problem of imitation learning (IL) in the context of mean-field games (MFGs), where the goal is to imitate the behavior of a population of agents following a Nash equilibrium policy according to some unknown payoff function. IL in MFGs presents new challenges compared to single-agent IL, particularly when both the reward function and the transition kernel depend on the population di&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.14799v1-abstract-full').style.display = 'inline'; document.getElementById('2306.14799v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.14799v1-abstract-full" style="display: none;"> We explore the problem of imitation learning (IL) in the context of mean-field games (MFGs), where the goal is to imitate the behavior of a population of agents following a Nash equilibrium policy according to some unknown payoff function. IL in MFGs presents new challenges compared to single-agent IL, particularly when both the reward function and the transition kernel depend on the population distribution. In this paper, departing from the existing literature on IL for MFGs, we introduce a new solution concept called the Nash imitation gap. Then we show that when only the reward depends on the population distribution, IL in MFGs can be reduced to single-agent IL with similar guarantees. However, when the dynamics is population-dependent, we provide a novel upper-bound that suggests IL is harder in this setting. To address this issue, we propose a new adversarial formulation where the reinforcement learning problem is replaced by a mean-field control (MFC) problem, suggesting progress in IL within MFGs may have to build upon MFC. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.14799v1-abstract-full').style.display = 'none'; document.getElementById('2306.14799v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.13649">arXiv:2306.13649</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.13649">pdf</a>, <a href="https://arxiv.org/format/2306.13649">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> On-Policy Distillation of Language Models: Learning from Self-Generated Mistakes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Agarwal%2C+R">Rishabh Agarwal</a>, <a href="/search/cs?searchtype=author&amp;query=Vieillard%2C+N">Nino Vieillard</a>, <a href="/search/cs?searchtype=author&amp;query=Zhou%2C+Y">Yongchao Zhou</a>, <a href="/search/cs?searchtype=author&amp;query=Stanczyk%2C+P">Piotr Stanczyk</a>, <a href="/search/cs?searchtype=author&amp;query=Ramos%2C+S">Sabela Ramos</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Bachem%2C+O">Olivier Bachem</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.13649v3-abstract-short" style="display: inline;"> Knowledge distillation (KD) is widely used for compressing a teacher model to reduce its inference cost and memory footprint, by training a smaller student model. However, current KD methods for auto-regressive sequence models suffer from distribution mismatch between output sequences seen during training and those generated by the student during inference. To address this issue, we introduce Gene&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.13649v3-abstract-full').style.display = 'inline'; document.getElementById('2306.13649v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.13649v3-abstract-full" style="display: none;"> Knowledge distillation (KD) is widely used for compressing a teacher model to reduce its inference cost and memory footprint, by training a smaller student model. However, current KD methods for auto-regressive sequence models suffer from distribution mismatch between output sequences seen during training and those generated by the student during inference. To address this issue, we introduce Generalized Knowledge Distillation (GKD). Instead of solely relying on a fixed set of output sequences, GKD trains the student on its self-generated output sequences by leveraging feedback from the teacher on such sequences. Unlike supervised KD approaches, GKD also offers the flexibility to employ alternative loss functions between the student and teacher, which can be useful when the student lacks the expressivity to mimic the teacher&#39;s distribution. Furthermore, GKD facilitates the seamless integration of distillation with RL fine-tuning (RLHF). We demonstrate the efficacy of GKD for distilling auto-regressive language models on summarization, translation, and arithmetic reasoning tasks, and task-agnostic distillation for instruction-tuning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.13649v3-abstract-full').style.display = 'none'; document.getElementById('2306.13649v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted at ICLR 2024. First two authors contributed equally</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2306.00186">arXiv:2306.00186</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2306.00186">pdf</a>, <a href="https://arxiv.org/format/2306.00186">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Factually Consistent Summarization via Reinforcement Learning with Textual Entailment Feedback </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Roit%2C+P">Paul Roit</a>, <a href="/search/cs?searchtype=author&amp;query=Ferret%2C+J">Johan Ferret</a>, <a href="/search/cs?searchtype=author&amp;query=Shani%2C+L">Lior Shani</a>, <a href="/search/cs?searchtype=author&amp;query=Aharoni%2C+R">Roee Aharoni</a>, <a href="/search/cs?searchtype=author&amp;query=Cideron%2C+G">Geoffrey Cideron</a>, <a href="/search/cs?searchtype=author&amp;query=Dadashi%2C+R">Robert Dadashi</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Girgin%2C+S">Sertan Girgin</a>, <a href="/search/cs?searchtype=author&amp;query=Hussenot%2C+L">L茅onard Hussenot</a>, <a href="/search/cs?searchtype=author&amp;query=Keller%2C+O">Orgad Keller</a>, <a href="/search/cs?searchtype=author&amp;query=Momchev%2C+N">Nikola Momchev</a>, <a href="/search/cs?searchtype=author&amp;query=Ramos%2C+S">Sabela Ramos</a>, <a href="/search/cs?searchtype=author&amp;query=Stanczyk%2C+P">Piotr Stanczyk</a>, <a href="/search/cs?searchtype=author&amp;query=Vieillard%2C+N">Nino Vieillard</a>, <a href="/search/cs?searchtype=author&amp;query=Bachem%2C+O">Olivier Bachem</a>, <a href="/search/cs?searchtype=author&amp;query=Elidan%2C+G">Gal Elidan</a>, <a href="/search/cs?searchtype=author&amp;query=Hassidim%2C+A">Avinatan Hassidim</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Szpektor%2C+I">Idan Szpektor</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2306.00186v1-abstract-short" style="display: inline;"> Despite the seeming success of contemporary grounded text generation systems, they often tend to generate factually inconsistent text with respect to their input. This phenomenon is emphasized in tasks like summarization, in which the generated summaries should be corroborated by their source article. In this work, we leverage recent progress on textual entailment models to directly address this p&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.00186v1-abstract-full').style.display = 'inline'; document.getElementById('2306.00186v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2306.00186v1-abstract-full" style="display: none;"> Despite the seeming success of contemporary grounded text generation systems, they often tend to generate factually inconsistent text with respect to their input. This phenomenon is emphasized in tasks like summarization, in which the generated summaries should be corroborated by their source article. In this work, we leverage recent progress on textual entailment models to directly address this problem for abstractive summarization systems. We use reinforcement learning with reference-free, textual entailment rewards to optimize for factual consistency and explore the ensuing trade-offs, as improved consistency may come at the cost of less informative or more extractive summaries. Our results, according to both automatic metrics and human evaluation, show that our method considerably improves the faithfulness, salience, and conciseness of the generated summaries. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2306.00186v1-abstract-full').style.display = 'none'; document.getElementById('2306.00186v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ACL 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.16589">arXiv:2305.16589</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.16589">pdf</a>, <a href="https://arxiv.org/format/2305.16589">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Theory">cs.IT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Statistics Theory">math.ST</span> </div> </div> <p class="title is-5 mathjax"> The Curious Price of Distributional Robustness in Reinforcement Learning with a Generative Model </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Shi%2C+L">Laixi Shi</a>, <a href="/search/cs?searchtype=author&amp;query=Li%2C+G">Gen Li</a>, <a href="/search/cs?searchtype=author&amp;query=Wei%2C+Y">Yuting Wei</a>, <a href="/search/cs?searchtype=author&amp;query=Chen%2C+Y">Yuxin Chen</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Chi%2C+Y">Yuejie Chi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.16589v2-abstract-short" style="display: inline;"> This paper investigates model robustness in reinforcement learning (RL) to reduce the sim-to-real gap in practice. We adopt the framework of distributionally robust Markov decision processes (RMDPs), aimed at learning a policy that optimizes the worst-case performance when the deployed environment falls within a prescribed uncertainty set around the nominal MDP. Despite recent efforts, the sample&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.16589v2-abstract-full').style.display = 'inline'; document.getElementById('2305.16589v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.16589v2-abstract-full" style="display: none;"> This paper investigates model robustness in reinforcement learning (RL) to reduce the sim-to-real gap in practice. We adopt the framework of distributionally robust Markov decision processes (RMDPs), aimed at learning a policy that optimizes the worst-case performance when the deployed environment falls within a prescribed uncertainty set around the nominal MDP. Despite recent efforts, the sample complexity of RMDPs remained mostly unsettled regardless of the uncertainty set in use. It was unclear if distributional robustness bears any statistical consequences when benchmarked against standard RL. Assuming access to a generative model that draws samples based on the nominal MDP, we characterize the sample complexity of RMDPs when the uncertainty set is specified via either the total variation (TV) distance or $蠂^2$ divergence. The algorithm studied here is a model-based method called {\em distributionally robust value iteration}, which is shown to be near-optimal for the full range of uncertainty levels. Somewhat surprisingly, our results uncover that RMDPs are not necessarily easier or harder to learn than standard MDPs. The statistical consequence incurred by the robustness requirement depends heavily on the size and shape of the uncertainty set: in the case w.r.t.~the TV distance, the minimax sample complexity of RMDPs is always smaller than that of standard MDPs; in the case w.r.t.~the $蠂^2$ divergence, the sample complexity of RMDPs can often far exceed the standard MDP counterpart. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.16589v2-abstract-full').style.display = 'none'; document.getElementById('2305.16589v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 April, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Neural Information Processing Systems (2023)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.13185">arXiv:2305.13185</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.13185">pdf</a>, <a href="https://arxiv.org/format/2305.13185">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Regularization and Variance-Weighted Regression Achieves Minimax Optimality in Linear MDPs: Theory and Practice </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kitamura%2C+T">Toshinori Kitamura</a>, <a href="/search/cs?searchtype=author&amp;query=Kozuno%2C+T">Tadashi Kozuno</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+Y">Yunhao Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Vieillard%2C+N">Nino Vieillard</a>, <a href="/search/cs?searchtype=author&amp;query=Valko%2C+M">Michal Valko</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+W">Wenhao Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Mei%2C+J">Jincheng Mei</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%A9nard%2C+P">Pierre M茅nard</a>, <a href="/search/cs?searchtype=author&amp;query=Azar%2C+M+G">Mohammad Gheshlaghi Azar</a>, <a href="/search/cs?searchtype=author&amp;query=Munos%2C+R">R茅mi Munos</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Szepesv%C3%A1ri%2C+C">Csaba Szepesv谩ri</a>, <a href="/search/cs?searchtype=author&amp;query=Kumagai%2C+W">Wataru Kumagai</a>, <a href="/search/cs?searchtype=author&amp;query=Matsuo%2C+Y">Yutaka Matsuo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.13185v1-abstract-short" style="display: inline;"> Mirror descent value iteration (MDVI), an abstraction of Kullback-Leibler (KL) and entropy-regularized reinforcement learning (RL), has served as the basis for recent high-performing practical RL algorithms. However, despite the use of function approximation in practice, the theoretical understanding of MDVI has been limited to tabular Markov decision processes (MDPs). We study MDVI with linear fu&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.13185v1-abstract-full').style.display = 'inline'; document.getElementById('2305.13185v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.13185v1-abstract-full" style="display: none;"> Mirror descent value iteration (MDVI), an abstraction of Kullback-Leibler (KL) and entropy-regularized reinforcement learning (RL), has served as the basis for recent high-performing practical RL algorithms. However, despite the use of function approximation in practice, the theoretical understanding of MDVI has been limited to tabular Markov decision processes (MDPs). We study MDVI with linear function approximation through its sample complexity required to identify an $\varepsilon$-optimal policy with probability $1-未$ under the settings of an infinite-horizon linear MDP, generative model, and G-optimal design. We demonstrate that least-squares regression weighted by the variance of an estimated optimal value function of the next state is crucial to achieving minimax optimality. Based on this observation, we present Variance-Weighted Least-Squares MDVI (VWLS-MDVI), the first theoretical algorithm that achieves nearly minimax optimal sample complexity for infinite-horizon linear MDPs. Furthermore, we propose a practical VWLS algorithm for value-based deep RL, Deep Variance Weighting (DVW). Our experiments demonstrate that DVW improves the performance of popular value-based deep RL algorithms on a set of MinAtar benchmarks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.13185v1-abstract-full').style.display = 'none'; document.getElementById('2305.13185v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ICML 2023 accepted</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.01400">arXiv:2305.01400</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2305.01400">pdf</a>, <a href="https://arxiv.org/format/2305.01400">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Get Back Here: Robust Imitation by Return-to-Distribution Planning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Cideron%2C+G">Geoffrey Cideron</a>, <a href="/search/cs?searchtype=author&amp;query=Tabanpour%2C+B">Baruch Tabanpour</a>, <a href="/search/cs?searchtype=author&amp;query=Curi%2C+S">Sebastian Curi</a>, <a href="/search/cs?searchtype=author&amp;query=Girgin%2C+S">Sertan Girgin</a>, <a href="/search/cs?searchtype=author&amp;query=Hussenot%2C+L">Leonard Hussenot</a>, <a href="/search/cs?searchtype=author&amp;query=Dulac-Arnold%2C+G">Gabriel Dulac-Arnold</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Dadashi%2C+R">Robert Dadashi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.01400v1-abstract-short" style="display: inline;"> We consider the Imitation Learning (IL) setup where expert data are not collected on the actual deployment environment but on a different version. To address the resulting distribution shift, we combine behavior cloning (BC) with a planner that is tasked to bring the agent back to states visited by the expert whenever the agent deviates from the demonstration distribution. The resulting algorithm,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.01400v1-abstract-full').style.display = 'inline'; document.getElementById('2305.01400v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.01400v1-abstract-full" style="display: none;"> We consider the Imitation Learning (IL) setup where expert data are not collected on the actual deployment environment but on a different version. To address the resulting distribution shift, we combine behavior cloning (BC) with a planner that is tasked to bring the agent back to states visited by the expert whenever the agent deviates from the demonstration distribution. The resulting algorithm, POIR, can be trained offline, and leverages online interactions to efficiently fine-tune its planner to improve performance over time. We test POIR on a variety of human-generated manipulation demonstrations in a realistic robotic manipulation simulator and show robustness of the learned policy to different initial state distributions and noisy dynamics. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.01400v1-abstract-full').style.display = 'none'; document.getElementById('2305.01400v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 2 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.06654">arXiv:2303.06654</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2303.06654">pdf</a>, <a href="https://arxiv.org/format/2303.06654">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Twice Regularized Markov Decision Processes: The Equivalence between Robustness and Regularization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Derman%2C+E">Esther Derman</a>, <a href="/search/cs?searchtype=author&amp;query=Men%2C+Y">Yevgeniy Men</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Mannor%2C+S">Shie Mannor</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.06654v1-abstract-short" style="display: inline;"> Robust Markov decision processes (MDPs) aim to handle changing or partially known system dynamics. To solve them, one typically resorts to robust optimization methods. However, this significantly increases computational complexity and limits scalability in both learning and planning. On the other hand, regularized MDPs show more stability in policy learning without impairing time complexity. Yet,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.06654v1-abstract-full').style.display = 'inline'; document.getElementById('2303.06654v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.06654v1-abstract-full" style="display: none;"> Robust Markov decision processes (MDPs) aim to handle changing or partially known system dynamics. To solve them, one typically resorts to robust optimization methods. However, this significantly increases computational complexity and limits scalability in both learning and planning. On the other hand, regularized MDPs show more stability in policy learning without impairing time complexity. Yet, they generally do not encompass uncertainty in the model dynamics. In this work, we aim to learn robust MDPs using regularization. We first show that regularized MDPs are a particular instance of robust MDPs with uncertain reward. We thus establish that policy iteration on reward-robust MDPs can have the same time complexity as on regularized MDPs. We further extend this relationship to MDPs with uncertain transitions: this leads to a regularization term with an additional dependence on the value function. We then generalize regularized MDPs to twice regularized MDPs ($\text{R}^2$ MDPs), i.e., MDPs with $\textit{both}$ value and policy regularization. The corresponding Bellman operators enable us to derive planning and learning schemes with convergence and generalization guarantees, thus reducing robustness to regularization. We numerically show this two-fold advantage on tabular and physical domains, highlighting the fact that $\text{R}^2$ preserves its efficacy in continuous environments. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.06654v1-abstract-full').style.display = 'none'; document.getElementById('2303.06654v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Extended version of NeuIPS paper: arXiv:2110.06267</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2302.05372">arXiv:2302.05372</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2302.05372">pdf</a>, <a href="https://arxiv.org/ps/2302.05372">ps</a>, <a href="https://arxiv.org/format/2302.05372">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Towards Minimax Optimality of Model-based Robust Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Clavier%2C+P">Pierre Clavier</a>, <a href="/search/cs?searchtype=author&amp;query=Pennec%2C+E+L">Erwan Le Pennec</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2302.05372v3-abstract-short" style="display: inline;"> We study the sample complexity of obtaining an $蔚$-optimal policy in \emph{Robust} discounted Markov Decision Processes (RMDPs), given only access to a generative model of the nominal kernel. This problem is widely studied in the non-robust case, and it is known that any planning approach applied to an empirical MDP estimated with $\tilde{\mathcal{O}}(\frac{H^3 \mid S \mid\mid A \mid}{蔚^2})$ sampl&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.05372v3-abstract-full').style.display = 'inline'; document.getElementById('2302.05372v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2302.05372v3-abstract-full" style="display: none;"> We study the sample complexity of obtaining an $蔚$-optimal policy in \emph{Robust} discounted Markov Decision Processes (RMDPs), given only access to a generative model of the nominal kernel. This problem is widely studied in the non-robust case, and it is known that any planning approach applied to an empirical MDP estimated with $\tilde{\mathcal{O}}(\frac{H^3 \mid S \mid\mid A \mid}{蔚^2})$ samples provides an $蔚$-optimal policy, which is minimax optimal. Results in the robust case are much more scarce. For $sa$- (resp $s$-)rectangular uncertainty sets, the best known sample complexity is $\tilde{\mathcal{O}}(\frac{H^4 \mid S \mid^2\mid A \mid}{蔚^2})$ (resp. $\tilde{\mathcal{O}}(\frac{H^4 \mid S \mid^2\mid A \mid^2}{蔚^2})$), for specific algorithms and when the uncertainty set is based on the total variation (TV), the KL or the Chi-square divergences. In this paper, we consider uncertainty sets defined with an $L_p$-ball (recovering the TV case), and study the sample complexity of \emph{any} planning algorithm (with high accuracy guarantee on the solution) applied to an empirical RMDP estimated using the generative model. In the general case, we prove a sample complexity of $\tilde{\mathcal{O}}(\frac{H^4 \mid S \mid\mid A \mid}{蔚^2})$ for both the $sa$- and $s$-rectangular cases (improvements of $\mid S \mid$ and $\mid S \mid\mid A \mid$ respectively). When the size of the uncertainty is small enough, we improve the sample complexity to $\tilde{\mathcal{O}}(\frac{H^3 \mid S \mid\mid A \mid }{蔚^2})$, recovering the lower-bound for the non-robust case for the first time and a robust lower-bound when the size of the uncertainty is small enough. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2302.05372v3-abstract-full').style.display = 'none'; document.getElementById('2302.05372v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2301.13589">arXiv:2301.13589</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2301.13589">pdf</a>, <a href="https://arxiv.org/ps/2301.13589">ps</a>, <a href="https://arxiv.org/format/2301.13589">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Policy Gradient for Rectangular Robust Markov Decision Processes </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kumar%2C+N">Navdeep Kumar</a>, <a href="/search/cs?searchtype=author&amp;query=Derman%2C+E">Esther Derman</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Levy%2C+K">Kfir Levy</a>, <a href="/search/cs?searchtype=author&amp;query=Mannor%2C+S">Shie Mannor</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2301.13589v2-abstract-short" style="display: inline;"> Policy gradient methods have become a standard for training reinforcement learning agents in a scalable and efficient manner. However, they do not account for transition uncertainty, whereas learning robust policies can be computationally expensive. In this paper, we introduce robust policy gradient (RPG), a policy-based method that efficiently solves rectangular robust Markov decision processes (&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.13589v2-abstract-full').style.display = 'inline'; document.getElementById('2301.13589v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2301.13589v2-abstract-full" style="display: none;"> Policy gradient methods have become a standard for training reinforcement learning agents in a scalable and efficient manner. However, they do not account for transition uncertainty, whereas learning robust policies can be computationally expensive. In this paper, we introduce robust policy gradient (RPG), a policy-based method that efficiently solves rectangular robust Markov decision processes (MDPs). We provide a closed-form expression for the worst occupation measure. Incidentally, we find that the worst kernel is a rank-one perturbation of the nominal. Combining the worst occupation measure with a robust Q-value estimation yields an explicit form of the robust gradient. Our resulting RPG can be estimated from data with the same time complexity as its non-robust equivalent. Hence, it relieves the computational burden of convex optimization problems required for training robust policies by current policy gradient approaches. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.13589v2-abstract-full').style.display = 'none'; document.getElementById('2301.13589v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 31 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to NeurIPS 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2301.02328">arXiv:2301.02328</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2301.02328">pdf</a>, <a href="https://arxiv.org/format/2301.02328">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Extreme Q-Learning: MaxEnt RL without Entropy </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Garg%2C+D">Divyansh Garg</a>, <a href="/search/cs?searchtype=author&amp;query=Hejna%2C+J">Joey Hejna</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Ermon%2C+S">Stefano Ermon</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2301.02328v2-abstract-short" style="display: inline;"> Modern Deep Reinforcement Learning (RL) algorithms require estimates of the maximal Q-value, which are difficult to compute in continuous domains with an infinite number of possible actions. In this work, we introduce a new update rule for online and offline RL which directly models the maximal value using Extreme Value Theory (EVT), drawing inspiration from economics. By doing so, we avoid comput&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.02328v2-abstract-full').style.display = 'inline'; document.getElementById('2301.02328v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2301.02328v2-abstract-full" style="display: none;"> Modern Deep Reinforcement Learning (RL) algorithms require estimates of the maximal Q-value, which are difficult to compute in continuous domains with an infinite number of possible actions. In this work, we introduce a new update rule for online and offline RL which directly models the maximal value using Extreme Value Theory (EVT), drawing inspiration from economics. By doing so, we avoid computing Q-values using out-of-distribution actions which is often a substantial source of error. Our key insight is to introduce an objective that directly estimates the optimal soft-value functions (LogSumExp) in the maximum entropy RL setting without needing to sample from a policy. Using EVT, we derive our \emph{Extreme Q-Learning} framework and consequently online and, for the first time, offline MaxEnt Q-learning algorithms, that do not explicitly require access to a policy or its entropy. Our method obtains consistently strong performance in the D4RL benchmark, outperforming prior works by \emph{10+ points} on the challenging Franka Kitchen tasks while offering moderate improvements over SAC and TD3 on online DM Control tasks. Visualizations and code can be found on our website at https://div99.github.io/XQL/. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2301.02328v2-abstract-full').style.display = 'none'; document.getElementById('2301.02328v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 5 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ICLR 2023 Oral</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.14449">arXiv:2212.14449</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2212.14449">pdf</a>, <a href="https://arxiv.org/ps/2212.14449">ps</a>, <a href="https://arxiv.org/format/2212.14449">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Policy Mirror Ascent for Efficient and Independent Learning in Mean Field Games </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Yardim%2C+B">Batuhan Yardim</a>, <a href="/search/cs?searchtype=author&amp;query=Cayci%2C+S">Semih Cayci</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=He%2C+N">Niao He</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.14449v2-abstract-short" style="display: inline;"> Mean-field games have been used as a theoretical tool to obtain an approximate Nash equilibrium for symmetric and anonymous $N$-player games. However, limiting applicability, existing theoretical results assume variations of a &#34;population generative model&#34;, which allows arbitrary modifications of the population distribution by the learning algorithm. Moreover, learning algorithms typically work on&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.14449v2-abstract-full').style.display = 'inline'; document.getElementById('2212.14449v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.14449v2-abstract-full" style="display: none;"> Mean-field games have been used as a theoretical tool to obtain an approximate Nash equilibrium for symmetric and anonymous $N$-player games. However, limiting applicability, existing theoretical results assume variations of a &#34;population generative model&#34;, which allows arbitrary modifications of the population distribution by the learning algorithm. Moreover, learning algorithms typically work on abstract simulators with population instead of the $N$-player game. Instead, we show that $N$ agents running policy mirror ascent converge to the Nash equilibrium of the regularized game within $\widetilde{\mathcal{O}}(\varepsilon^{-2})$ samples from a single sample trajectory without a population generative model, up to a standard $\mathcal{O}(\frac{1}{\sqrt{N}})$ error due to the mean field. Taking a divergent approach from the literature, instead of working with the best-response map we first show that a policy mirror ascent map can be used to construct a contractive operator having the Nash equilibrium as its fixed point. We analyze single-path TD learning for $N$-agent games, proving sample complexity guarantees by only using a sample path from the $N$-agent simulator without a population generative model. Furthermore, we demonstrate that our methodology allows for independent learning by $N$ agents with finite sample guarantees. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.14449v2-abstract-full').style.display = 'none'; document.getElementById('2212.14449v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 June, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 December, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted for publication at ICML 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.03521">arXiv:2211.03521</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2211.03521">pdf</a>, <a href="https://arxiv.org/format/2211.03521">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> On the importance of data collection for training general goal-reaching policies </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jacq%2C+A">Alexis Jacq</a>, <a href="/search/cs?searchtype=author&amp;query=Orsini%2C+M">Manu Orsini</a>, <a href="/search/cs?searchtype=author&amp;query=Dulac-Arnold%2C+G">Gabriel Dulac-Arnold</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Bachem%2C+O">Olivier Bachem</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.03521v2-abstract-short" style="display: inline;"> Recent advances in ML suggest that the quantity of data available to a model is one of the primary bottlenecks to high performance. Although for language-based tasks there exist almost unlimited amounts of reasonably coherent data to train from, this is generally not the case for Reinforcement Learning, especially when dealing with a novel environment. In effect, even a relatively trivial continuo&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.03521v2-abstract-full').style.display = 'inline'; document.getElementById('2211.03521v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.03521v2-abstract-full" style="display: none;"> Recent advances in ML suggest that the quantity of data available to a model is one of the primary bottlenecks to high performance. Although for language-based tasks there exist almost unlimited amounts of reasonably coherent data to train from, this is generally not the case for Reinforcement Learning, especially when dealing with a novel environment. In effect, even a relatively trivial continuous environment has an almost limitless number of states, but simply sampling random states and actions will likely not provide transitions that are interesting or useful for any potential downstream task. How should one generate massive amounts of useful data given only an MDP with no indication of downstream tasks? Are the quantity and quality of data truly transformative to the performance of a general controller? We propose to answer both of these questions. First, we introduce a principled unsupervised exploration method, ChronoGEM, which aims to achieve uniform coverage over the manifold of achievable states, which we believe is the most reasonable goal given no prior task information. Secondly, we investigate the effects of both data quantity and data quality on the training of a downstream goal-achievement policy, and show that both large quantities and high-quality of data are essential to train a general controller: a high-precision pose-achievement policy capable of attaining a large number of poses over numerous continuous control embodiments including humanoid. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.03521v2-abstract-full').style.display = 'none'; document.getElementById('2211.03521v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 February, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2208.10138">arXiv:2208.10138</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2208.10138">pdf</a>, <a href="https://arxiv.org/format/2208.10138">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Learning Correlated Equilibria in Mean-Field Games </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Muller%2C+P">Paul Muller</a>, <a href="/search/cs?searchtype=author&amp;query=Elie%2C+R">Romuald Elie</a>, <a href="/search/cs?searchtype=author&amp;query=Rowland%2C+M">Mark Rowland</a>, <a href="/search/cs?searchtype=author&amp;query=Lauriere%2C+M">Mathieu Lauriere</a>, <a href="/search/cs?searchtype=author&amp;query=Perolat%2C+J">Julien Perolat</a>, <a href="/search/cs?searchtype=author&amp;query=Perrin%2C+S">Sarah Perrin</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Piliouras%2C+G">Georgios Piliouras</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Tuyls%2C+K">Karl Tuyls</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2208.10138v1-abstract-short" style="display: inline;"> The designs of many large-scale systems today, from traffic routing environments to smart grids, rely on game-theoretic equilibrium concepts. However, as the size of an $N$-player game typically grows exponentially with $N$, standard game theoretic analysis becomes effectively infeasible beyond a low number of players. Recent approaches have gone around this limitation by instead considering Mean-&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.10138v1-abstract-full').style.display = 'inline'; document.getElementById('2208.10138v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2208.10138v1-abstract-full" style="display: none;"> The designs of many large-scale systems today, from traffic routing environments to smart grids, rely on game-theoretic equilibrium concepts. However, as the size of an $N$-player game typically grows exponentially with $N$, standard game theoretic analysis becomes effectively infeasible beyond a low number of players. Recent approaches have gone around this limitation by instead considering Mean-Field games, an approximation of anonymous $N$-player games, where the number of players is infinite and the population&#39;s state distribution, instead of every individual player&#39;s state, is the object of interest. The practical computability of Mean-Field Nash equilibria, the most studied Mean-Field equilibrium to date, however, typically depends on beneficial non-generic structural properties such as monotonicity or contraction properties, which are required for known algorithms to converge. In this work, we provide an alternative route for studying Mean-Field games, by developing the concepts of Mean-Field correlated and coarse-correlated equilibria. We show that they can be efficiently learnt in \emph{all games}, without requiring any additional assumption on the structure of the game, using three classical algorithms. Furthermore, we establish correspondences between our notions and those already present in the literature, derive optimality bounds for the Mean-Field - $N$-player transition, and empirically demonstrate the convergence of these algorithms on simple games. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.10138v1-abstract-full').style.display = 'none'; document.getElementById('2208.10138v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.14211">arXiv:2205.14211</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2205.14211">pdf</a>, <a href="https://arxiv.org/format/2205.14211">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> KL-Entropy-Regularized RL with a Generative Model is Minimax Optimal </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Kozuno%2C+T">Tadashi Kozuno</a>, <a href="/search/cs?searchtype=author&amp;query=Yang%2C+W">Wenhao Yang</a>, <a href="/search/cs?searchtype=author&amp;query=Vieillard%2C+N">Nino Vieillard</a>, <a href="/search/cs?searchtype=author&amp;query=Kitamura%2C+T">Toshinori Kitamura</a>, <a href="/search/cs?searchtype=author&amp;query=Tang%2C+Y">Yunhao Tang</a>, <a href="/search/cs?searchtype=author&amp;query=Mei%2C+J">Jincheng Mei</a>, <a href="/search/cs?searchtype=author&amp;query=M%C3%A9nard%2C+P">Pierre M茅nard</a>, <a href="/search/cs?searchtype=author&amp;query=Azar%2C+M+G">Mohammad Gheshlaghi Azar</a>, <a href="/search/cs?searchtype=author&amp;query=Valko%2C+M">Michal Valko</a>, <a href="/search/cs?searchtype=author&amp;query=Munos%2C+R">R茅mi Munos</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Szepesv%C3%A1ri%2C+C">Csaba Szepesv谩ri</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.14211v1-abstract-short" style="display: inline;"> In this work, we consider and analyze the sample complexity of model-free reinforcement learning with a generative model. Particularly, we analyze mirror descent value iteration (MDVI) by Geist et al. (2019) and Vieillard et al. (2020a), which uses the Kullback-Leibler divergence and entropy regularization in its value and policy updates. Our analysis shows that it is nearly minimax-optimal for fi&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.14211v1-abstract-full').style.display = 'inline'; document.getElementById('2205.14211v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.14211v1-abstract-full" style="display: none;"> In this work, we consider and analyze the sample complexity of model-free reinforcement learning with a generative model. Particularly, we analyze mirror descent value iteration (MDVI) by Geist et al. (2019) and Vieillard et al. (2020a), which uses the Kullback-Leibler divergence and entropy regularization in its value and policy updates. Our analysis shows that it is nearly minimax-optimal for finding an $\varepsilon$-optimal policy when $\varepsilon$ is sufficiently small. This is the first theoretical result that demonstrates that a simple model-free algorithm without variance-reduction can be nearly minimax-optimal under the considered setting. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.14211v1-abstract-full').style.display = 'none'; document.getElementById('2205.14211v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 27 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">29 pages, 6 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.12944">arXiv:2205.12944</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2205.12944">pdf</a>, <a href="https://arxiv.org/format/2205.12944">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> </div> </div> <p class="title is-5 mathjax"> Learning in Mean Field Games: A Survey </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lauri%C3%A8re%2C+M">Mathieu Lauri猫re</a>, <a href="/search/cs?searchtype=author&amp;query=Perrin%2C+S">Sarah Perrin</a>, <a href="/search/cs?searchtype=author&amp;query=P%C3%A9rolat%2C+J">Julien P茅rolat</a>, <a href="/search/cs?searchtype=author&amp;query=Girgin%2C+S">Sertan Girgin</a>, <a href="/search/cs?searchtype=author&amp;query=Muller%2C+P">Paul Muller</a>, <a href="/search/cs?searchtype=author&amp;query=%C3%89lie%2C+R">Romuald 脡lie</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.12944v4-abstract-short" style="display: inline;"> Non-cooperative and cooperative games with a very large number of players have many applications but remain generally intractable when the number of players increases. Introduced by Lasry and Lions, and Huang, Caines and Malham茅, Mean Field Games (MFGs) rely on a mean-field approximation to allow the number of players to grow to infinity. Traditional methods for solving these games generally rely&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.12944v4-abstract-full').style.display = 'inline'; document.getElementById('2205.12944v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.12944v4-abstract-full" style="display: none;"> Non-cooperative and cooperative games with a very large number of players have many applications but remain generally intractable when the number of players increases. Introduced by Lasry and Lions, and Huang, Caines and Malham茅, Mean Field Games (MFGs) rely on a mean-field approximation to allow the number of players to grow to infinity. Traditional methods for solving these games generally rely on solving partial or stochastic differential equations with a full knowledge of the model. Recently, Reinforcement Learning (RL) has appeared promising to solve complex problems at scale. The combination of RL and MFGs is promising to solve games at a very large scale both in terms of population size and environment complexity. In this survey, we review the quickly growing recent literature on RL methods to learn equilibria and social optima in MFGs. We first identify the most common settings (static, stationary, and evolutive) of MFGs. We then present a general framework for classical iterative methods (based on best-response computation or policy evaluation) to solve MFGs in an exact way. Building on these algorithms and the connection with Markov Decision Processes, we explain how RL can be used to learn MFG solutions in a model-free way. Last, we present numerical illustrations on a benchmark problem, and conclude with some perspectives. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.12944v4-abstract-full').style.display = 'none'; document.getElementById('2205.12944v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 July, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 25 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2205.09589">arXiv:2205.09589</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2205.09589">pdf</a>, <a href="https://arxiv.org/format/2205.09589">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Learning Energy Networks with Generalized Fenchel-Young Losses </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Blondel%2C+M">Mathieu Blondel</a>, <a href="/search/cs?searchtype=author&amp;query=Llinares-L%C3%B3pez%2C+F">Felipe Llinares-L贸pez</a>, <a href="/search/cs?searchtype=author&amp;query=Dadashi%2C+R">Robert Dadashi</a>, <a href="/search/cs?searchtype=author&amp;query=Hussenot%2C+L">L茅onard Hussenot</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2205.09589v2-abstract-short" style="display: inline;"> Energy-based models, a.k.a. energy networks, perform inference by optimizing an energy function, typically parametrized by a neural network. This allows one to capture potentially complex relationships between inputs and outputs. To learn the parameters of the energy function, the solution to that optimization problem is typically fed into a loss function. The key challenge for training energy net&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.09589v2-abstract-full').style.display = 'inline'; document.getElementById('2205.09589v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2205.09589v2-abstract-full" style="display: none;"> Energy-based models, a.k.a. energy networks, perform inference by optimizing an energy function, typically parametrized by a neural network. This allows one to capture potentially complex relationships between inputs and outputs. To learn the parameters of the energy function, the solution to that optimization problem is typically fed into a loss function. The key challenge for training energy networks lies in computing loss gradients, as this typically requires argmin/argmax differentiation. In this paper, building upon a generalized notion of conjugate function, which replaces the usual bilinear pairing with a general energy function, we propose generalized Fenchel-Young losses, a natural loss construction for learning energy networks. Our losses enjoy many desirable properties and their gradients can be computed efficiently without argmin/argmax differentiation. We also prove the calibration of their excess risk in the case of linear-concave energies. We demonstrate our losses on multilabel classification and imitation learning tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2205.09589v2-abstract-full').style.display = 'none'; document.getElementById('2205.09589v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.11973">arXiv:2203.11973</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2203.11973">pdf</a>, <a href="https://arxiv.org/format/2203.11973">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> Scalable Deep Reinforcement Learning Algorithms for Mean Field Games </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lauri%C3%A8re%2C+M">Mathieu Lauri猫re</a>, <a href="/search/cs?searchtype=author&amp;query=Perrin%2C+S">Sarah Perrin</a>, <a href="/search/cs?searchtype=author&amp;query=Girgin%2C+S">Sertan Girgin</a>, <a href="/search/cs?searchtype=author&amp;query=Muller%2C+P">Paul Muller</a>, <a href="/search/cs?searchtype=author&amp;query=Jain%2C+A">Ayush Jain</a>, <a href="/search/cs?searchtype=author&amp;query=Cabannes%2C+T">Theophile Cabannes</a>, <a href="/search/cs?searchtype=author&amp;query=Piliouras%2C+G">Georgios Piliouras</a>, <a href="/search/cs?searchtype=author&amp;query=P%C3%A9rolat%2C+J">Julien P茅rolat</a>, <a href="/search/cs?searchtype=author&amp;query=%C3%89lie%2C+R">Romuald 脡lie</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.11973v2-abstract-short" style="display: inline;"> Mean Field Games (MFGs) have been introduced to efficiently approximate games with very large populations of strategic agents. Recently, the question of learning equilibria in MFGs has gained momentum, particularly using model-free reinforcement learning (RL) methods. One limiting factor to further scale up using RL is that existing algorithms to solve MFGs require the mixing of approximated quant&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.11973v2-abstract-full').style.display = 'inline'; document.getElementById('2203.11973v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.11973v2-abstract-full" style="display: none;"> Mean Field Games (MFGs) have been introduced to efficiently approximate games with very large populations of strategic agents. Recently, the question of learning equilibria in MFGs has gained momentum, particularly using model-free reinforcement learning (RL) methods. One limiting factor to further scale up using RL is that existing algorithms to solve MFGs require the mixing of approximated quantities such as strategies or $q$-values. This is far from being trivial in the case of non-linear function approximation that enjoy good generalization properties, e.g. neural networks. We propose two methods to address this shortcoming. The first one learns a mixed strategy from distillation of historical data into a neural network and is applied to the Fictitious Play algorithm. The second one is an online mixing method based on regularization that does not require memorizing historical data or previous estimates. It is used to extend Online Mirror Descent. We demonstrate numerically that these methods efficiently enable the use of Deep RL algorithms to solve various MFGs. In addition, we show that these methods outperform SotA baselines from the literature. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.11973v2-abstract-full').style.display = 'none'; document.getElementById('2203.11973v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.08542">arXiv:2203.08542</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2203.08542">pdf</a>, <a href="https://arxiv.org/format/2203.08542">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Lazy-MDPs: Towards Interpretable Reinforcement Learning by Learning When to Act </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Jacq%2C+A">Alexis Jacq</a>, <a href="/search/cs?searchtype=author&amp;query=Ferret%2C+J">Johan Ferret</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.08542v1-abstract-short" style="display: inline;"> Traditionally, Reinforcement Learning (RL) aims at deciding how to act optimally for an artificial agent. We argue that deciding when to act is equally important. As humans, we drift from default, instinctive or memorized behaviors to focused, thought-out behaviors when required by the situation. To enhance RL agents with this aptitude, we propose to augment the standard Markov Decision Process an&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.08542v1-abstract-full').style.display = 'inline'; document.getElementById('2203.08542v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.08542v1-abstract-full" style="display: none;"> Traditionally, Reinforcement Learning (RL) aims at deciding how to act optimally for an artificial agent. We argue that deciding when to act is equally important. As humans, we drift from default, instinctive or memorized behaviors to focused, thought-out behaviors when required by the situation. To enhance RL agents with this aptitude, we propose to augment the standard Markov Decision Process and make a new mode of action available: being lazy, which defers decision-making to a default policy. In addition, we penalize non-lazy actions in order to encourage minimal effort and have agents focus on critical decisions only. We name the resulting formalism lazy-MDPs. We study the theoretical properties of lazy-MDPs, expressing value functions and characterizing optimal solutions. Then we empirically demonstrate that policies learned in lazy-MDPs generally come with a form of interpretability: by construction, they show us the states where the agent takes control over the default policy. We deem those states and corresponding actions important since they explain the difference in performance between the default and the new, lazy policy. With suboptimal policies as default (pretrained or random), we observe that agents are able to get competitive performance in Atari games while only taking control in a limited subset of states. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.08542v1-abstract-full').style.display = 'none'; document.getElementById('2203.08542v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 March, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">AAMAS 2022 (14 pages extended version, added Sec. 7.4 and appendix K)</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Autonomous Agents and Multi-Agent Systems (2022) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.10149">arXiv:2110.10149</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.10149">pdf</a>, <a href="https://arxiv.org/format/2110.10149">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> Continuous Control with Action Quantization from Demonstrations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Dadashi%2C+R">Robert Dadashi</a>, <a href="/search/cs?searchtype=author&amp;query=Hussenot%2C+L">L茅onard Hussenot</a>, <a href="/search/cs?searchtype=author&amp;query=Vincent%2C+D">Damien Vincent</a>, <a href="/search/cs?searchtype=author&amp;query=Girgin%2C+S">Sertan Girgin</a>, <a href="/search/cs?searchtype=author&amp;query=Raichuk%2C+A">Anton Raichuk</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.10149v2-abstract-short" style="display: inline;"> In this paper, we propose a novel Reinforcement Learning (RL) framework for problems with continuous action spaces: Action Quantization from Demonstrations (AQuaDem). The proposed approach consists in learning a discretization of continuous action spaces from human demonstrations. This discretization returns a set of plausible actions (in light of the demonstrations) for each input state, thus cap&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.10149v2-abstract-full').style.display = 'inline'; document.getElementById('2110.10149v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.10149v2-abstract-full" style="display: none;"> In this paper, we propose a novel Reinforcement Learning (RL) framework for problems with continuous action spaces: Action Quantization from Demonstrations (AQuaDem). The proposed approach consists in learning a discretization of continuous action spaces from human demonstrations. This discretization returns a set of plausible actions (in light of the demonstrations) for each input state, thus capturing the priors of the demonstrator and their multimodal behavior. By discretizing the action space, any discrete action deep RL technique can be readily applied to the continuous control problem. Experiments show that the proposed approach outperforms state-of-the-art methods such as SAC in the RL setup, and GAIL in the Imitation Learning setup. We provide a website with interactive videos: https://google-research.github.io/aquadem/ and make the code available: https://github.com/google-research/google-research/tree/master/aquadem. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.10149v2-abstract-full').style.display = 'none'; document.getElementById('2110.10149v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to ICML 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.06267">arXiv:2110.06267</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.06267">pdf</a>, <a href="https://arxiv.org/format/2110.06267">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> </div> </div> <p class="title is-5 mathjax"> Twice regularized MDPs and the equivalence between robustness and regularization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Derman%2C+E">Esther Derman</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Mannor%2C+S">Shie Mannor</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.06267v1-abstract-short" style="display: inline;"> Robust Markov decision processes (MDPs) aim to handle changing or partially known system dynamics. To solve them, one typically resorts to robust optimization methods. However, this significantly increases computational complexity and limits scalability in both learning and planning. On the other hand, regularized MDPs show more stability in policy learning without impairing time complexity. Yet,&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.06267v1-abstract-full').style.display = 'inline'; document.getElementById('2110.06267v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.06267v1-abstract-full" style="display: none;"> Robust Markov decision processes (MDPs) aim to handle changing or partially known system dynamics. To solve them, one typically resorts to robust optimization methods. However, this significantly increases computational complexity and limits scalability in both learning and planning. On the other hand, regularized MDPs show more stability in policy learning without impairing time complexity. Yet, they generally do not encompass uncertainty in the model dynamics. In this work, we aim to learn robust MDPs using regularization. We first show that regularized MDPs are a particular instance of robust MDPs with uncertain reward. We thus establish that policy iteration on reward-robust MDPs can have the same time complexity as on regularized MDPs. We further extend this relationship to MDPs with uncertain transitions: this leads to a regularization term with an additional dependence on the value function. We finally generalize regularized MDPs to twice regularized MDPs (R${}^2$ MDPs), i.e., MDPs with $\textit{both}$ value and policy regularization. The corresponding Bellman operators enable developing policy iteration schemes with convergence and robustness guarantees. It also reduces planning and learning in robust MDPs to regularized MDPs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.06267v1-abstract-full').style.display = 'none'; document.getElementById('2110.06267v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Accepted to NeurIPS 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.01528">arXiv:2110.01528</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2110.01528">pdf</a>, <a href="https://arxiv.org/format/2110.01528">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Large Batch Experience Replay </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Lahire%2C+T">Thibault Lahire</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Rachelson%2C+E">Emmanuel Rachelson</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.01528v2-abstract-short" style="display: inline;"> Several algorithms have been proposed to sample non-uniformly the replay buffer of deep Reinforcement Learning (RL) agents to speed-up learning, but very few theoretical foundations of these sampling schemes have been provided. Among others, Prioritized Experience Replay appears as a hyperparameter sensitive heuristic, even though it can provide good performance. In this work, we cast the replay b&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.01528v2-abstract-full').style.display = 'inline'; document.getElementById('2110.01528v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.01528v2-abstract-full" style="display: none;"> Several algorithms have been proposed to sample non-uniformly the replay buffer of deep Reinforcement Learning (RL) agents to speed-up learning, but very few theoretical foundations of these sampling schemes have been provided. Among others, Prioritized Experience Replay appears as a hyperparameter sensitive heuristic, even though it can provide good performance. In this work, we cast the replay buffer sampling problem as an importance sampling one for estimating the gradient. This allows deriving the theoretically optimal sampling distribution, yielding the best theoretical convergence speed. Elaborating on the knowledge of the ideal sampling scheme, we exhibit new theoretical foundations of Prioritized Experience Replay. The optimal sampling distribution being intractable, we make several approximations providing good results in practice and introduce, among others, LaBER (Large Batch Experience Replay), an easy-to-code and efficient method for sampling the replay buffer. LaBER, which can be combined with Deep Q-Networks, distributional RL agents or actor-critic methods, yields improved performance over a diverse range of Atari games and PyBullet environments, compared to the base agent it is implemented on and to other prioritization schemes. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.01528v2-abstract-full').style.display = 'none'; document.getElementById('2110.01528v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">24 pages, 12 figures, ICML 2022 - long presentation</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.09717">arXiv:2109.09717</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2109.09717">pdf</a>, <a href="https://arxiv.org/format/2109.09717">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Science and Game Theory">cs.GT</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Optimization and Control">math.OC</span> </div> </div> <p class="title is-5 mathjax"> Generalization in Mean Field Games by Learning Master Policies </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Perrin%2C+S">Sarah Perrin</a>, <a href="/search/cs?searchtype=author&amp;query=Lauri%C3%A8re%2C+M">Mathieu Lauri猫re</a>, <a href="/search/cs?searchtype=author&amp;query=P%C3%A9rolat%2C+J">Julien P茅rolat</a>, <a href="/search/cs?searchtype=author&amp;query=%C3%89lie%2C+R">Romuald 脡lie</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.09717v1-abstract-short" style="display: inline;"> Mean Field Games (MFGs) can potentially scale multi-agent systems to extremely large populations of agents. Yet, most of the literature assumes a single initial distribution for the agents, which limits the practical applications of MFGs. Machine Learning has the potential to solve a wider diversity of MFG problems thanks to generalizations capacities. We study how to leverage these generalization&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.09717v1-abstract-full').style.display = 'inline'; document.getElementById('2109.09717v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.09717v1-abstract-full" style="display: none;"> Mean Field Games (MFGs) can potentially scale multi-agent systems to extremely large populations of agents. Yet, most of the literature assumes a single initial distribution for the agents, which limits the practical applications of MFGs. Machine Learning has the potential to solve a wider diversity of MFG problems thanks to generalizations capacities. We study how to leverage these generalization properties to learn policies enabling a typical agent to behave optimally against any population distribution. In reference to the Master equation in MFGs, we coin the term ``Master policies&#39;&#39; to describe them and we prove that a single Master policy provides a Nash equilibrium, whatever the initial distribution. We propose a method to learn such Master policies. Our approach relies on three ingredients: adding the current population distribution as part of the observation, approximating Master policies with neural networks, and training via Reinforcement Learning and Fictitious Play. We illustrate on numerical examples not only the efficiency of the learned Master policy but also its generalization capabilities beyond the distributions used for training. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.09717v1-abstract-full').style.display = 'none'; document.getElementById('2109.09717v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.07041">arXiv:2108.07041</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2108.07041">pdf</a>, <a href="https://arxiv.org/format/2108.07041">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Implicitly Regularized RL with Implicit Q-Values </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Vieillard%2C+N">Nino Vieillard</a>, <a href="/search/cs?searchtype=author&amp;query=Andrychowicz%2C+M">Marcin Andrychowicz</a>, <a href="/search/cs?searchtype=author&amp;query=Raichuk%2C+A">Anton Raichuk</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2108.07041v2-abstract-short" style="display: inline;"> The $Q$-function is a central quantity in many Reinforcement Learning (RL) algorithms for which RL agents behave following a (soft)-greedy policy w.r.t. to $Q$. It is a powerful tool that allows action selection without a model of the environment and even without explicitly modeling the policy. Yet, this scheme can only be used in discrete action tasks, with small numbers of actions, as the softma&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.07041v2-abstract-full').style.display = 'inline'; document.getElementById('2108.07041v2-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2108.07041v2-abstract-full" style="display: none;"> The $Q$-function is a central quantity in many Reinforcement Learning (RL) algorithms for which RL agents behave following a (soft)-greedy policy w.r.t. to $Q$. It is a powerful tool that allows action selection without a model of the environment and even without explicitly modeling the policy. Yet, this scheme can only be used in discrete action tasks, with small numbers of actions, as the softmax cannot be computed exactly otherwise. Especially the usage of function approximation, to deal with continuous action spaces in modern actor-critic architectures, intrinsically prevents the exact computation of a softmax. We propose to alleviate this issue by parametrizing the $Q$-function implicitly, as the sum of a log-policy and of a value function. We use the resulting parametrization to derive a practical off-policy deep RL algorithm, suitable for large action spaces, and that enforces the softmax relation between the policy and the $Q$-value. We provide a theoretical analysis of our algorithm: from an Approximate Dynamic Programming perspective, we show its equivalence to a regularized version of value iteration, accounting for both entropy and Kullback-Leibler regularization, and that enjoys beneficial error propagation results. We then evaluate our algorithm on classic control tasks, where its results compete with state-of-the-art methods. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.07041v2-abstract-full').style.display = 'none'; document.getElementById('2108.07041v2-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 16 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">AISTATS 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2108.05828">arXiv:2108.05828</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2108.05828">pdf</a>, <a href="https://arxiv.org/format/2108.05828">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">stat.ML</span> </div> </div> <p class="title is-5 mathjax"> A general class of surrogate functions for stable and efficient reinforcement learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Vaswani%2C+S">Sharan Vaswani</a>, <a href="/search/cs?searchtype=author&amp;query=Bachem%2C+O">Olivier Bachem</a>, <a href="/search/cs?searchtype=author&amp;query=Totaro%2C+S">Simone Totaro</a>, <a href="/search/cs?searchtype=author&amp;query=Mueller%2C+R">Robert Mueller</a>, <a href="/search/cs?searchtype=author&amp;query=Garg%2C+S">Shivam Garg</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Machado%2C+M+C">Marlos C. Machado</a>, <a href="/search/cs?searchtype=author&amp;query=Castro%2C+P+S">Pablo Samuel Castro</a>, <a href="/search/cs?searchtype=author&amp;query=Roux%2C+N+L">Nicolas Le Roux</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2108.05828v5-abstract-short" style="display: inline;"> Common policy gradient methods rely on the maximization of a sequence of surrogate functions. In recent years, many such surrogate functions have been proposed, most without strong theoretical guarantees, leading to algorithms such as TRPO, PPO or MPO. Rather than design yet another surrogate function, we instead propose a general framework (FMA-PG) based on functional mirror ascent that gives ris&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.05828v5-abstract-full').style.display = 'inline'; document.getElementById('2108.05828v5-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2108.05828v5-abstract-full" style="display: none;"> Common policy gradient methods rely on the maximization of a sequence of surrogate functions. In recent years, many such surrogate functions have been proposed, most without strong theoretical guarantees, leading to algorithms such as TRPO, PPO or MPO. Rather than design yet another surrogate function, we instead propose a general framework (FMA-PG) based on functional mirror ascent that gives rise to an entire family of surrogate functions. We construct surrogate functions that enable policy improvement guarantees, a property not shared by most existing surrogate functions. Crucially, these guarantees hold regardless of the choice of policy parameterization. Moreover, a particular instantiation of FMA-PG recovers important implementation heuristics (e.g., using forward vs reverse KL divergence) resulting in a variant of TRPO with additional desirable properties. Via experiments on simple bandit problems, we evaluate the algorithms instantiated by FMA-PG. The proposed framework also suggests an improved variant of PPO, whose robustness and efficiency we empirically demonstrate on the MuJoCo suite. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2108.05828v5-abstract-full').style.display = 'none'; document.getElementById('2108.05828v5-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 30 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Fixed minor typos</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.12142">arXiv:2106.12142</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2106.12142">pdf</a>, <a href="https://arxiv.org/format/2106.12142">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> IQ-Learn: Inverse soft-Q Learning for Imitation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Garg%2C+D">Divyansh Garg</a>, <a href="/search/cs?searchtype=author&amp;query=Chakraborty%2C+S">Shuvam Chakraborty</a>, <a href="/search/cs?searchtype=author&amp;query=Cundy%2C+C">Chris Cundy</a>, <a href="/search/cs?searchtype=author&amp;query=Song%2C+J">Jiaming Song</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Ermon%2C+S">Stefano Ermon</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2106.12142v4-abstract-short" style="display: inline;"> In many sequential decision-making problems (e.g., robotics control, game playing, sequential prediction), human or expert data is available containing useful information about the task. However, imitation learning (IL) from a small amount of expert data can be challenging in high-dimensional environments with complex dynamics. Behavioral cloning is a simple method that is widely used due to its s&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.12142v4-abstract-full').style.display = 'inline'; document.getElementById('2106.12142v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2106.12142v4-abstract-full" style="display: none;"> In many sequential decision-making problems (e.g., robotics control, game playing, sequential prediction), human or expert data is available containing useful information about the task. However, imitation learning (IL) from a small amount of expert data can be challenging in high-dimensional environments with complex dynamics. Behavioral cloning is a simple method that is widely used due to its simplicity of implementation and stable convergence but doesn&#39;t utilize any information involving the environment&#39;s dynamics. Many existing methods that exploit dynamics information are difficult to train in practice due to an adversarial optimization process over reward and policy approximators or biased, high variance gradient estimators. We introduce a method for dynamics-aware IL which avoids adversarial training by learning a single Q-function, implicitly representing both reward and policy. On standard benchmarks, the implicitly learned rewards show a high positive correlation with the ground-truth rewards, illustrating our method can also be used for inverse reinforcement learning (IRL). Our method, Inverse soft-Q learning (IQ-Learn) obtains state-of-the-art results in offline and online imitation learning settings, significantly outperforming existing methods both in the number of required environment interactions and scalability in high-dimensional spaces, often by more than 3x. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.12142v4-abstract-full').style.display = 'none'; document.getElementById('2106.12142v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 22 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Spotlight in NeurIPS 2021. Winner of &#39;21 MineRL BASALT Challenge. Website: https://div99.github.io/IQ-Learn</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.06431">arXiv:2106.06431</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2106.06431">pdf</a>, <a href="https://arxiv.org/format/2106.06431">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Offline Reinforcement Learning as Anti-Exploration </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Rezaeifar%2C+S">Shideh Rezaeifar</a>, <a href="/search/cs?searchtype=author&amp;query=Dadashi%2C+R">Robert Dadashi</a>, <a href="/search/cs?searchtype=author&amp;query=Vieillard%2C+N">Nino Vieillard</a>, <a href="/search/cs?searchtype=author&amp;query=Hussenot%2C+L">L茅onard Hussenot</a>, <a href="/search/cs?searchtype=author&amp;query=Bachem%2C+O">Olivier Bachem</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2106.06431v1-abstract-short" style="display: inline;"> Offline Reinforcement Learning (RL) aims at learning an optimal control from a fixed dataset, without interactions with the system. An agent in this setting should avoid selecting actions whose consequences cannot be predicted from the data. This is the converse of exploration in RL, which favors such actions. We thus take inspiration from the literature on bonus-based exploration to design a new&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.06431v1-abstract-full').style.display = 'inline'; document.getElementById('2106.06431v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2106.06431v1-abstract-full" style="display: none;"> Offline Reinforcement Learning (RL) aims at learning an optimal control from a fixed dataset, without interactions with the system. An agent in this setting should avoid selecting actions whose consequences cannot be predicted from the data. This is the converse of exploration in RL, which favors such actions. We thus take inspiration from the literature on bonus-based exploration to design a new offline RL agent. The core idea is to subtract a prediction-based exploration bonus from the reward, instead of adding it for exploration. This allows the policy to stay close to the support of the dataset. We connect this approach to a more common regularization of the learned policy towards the data. Instantiated with a bonus based on the prediction error of a variational autoencoder, we show that our agent is competitive with the state of the art on a set of continuous control locomotion and manipulation tasks. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.06431v1-abstract-full').style.display = 'none'; document.getElementById('2106.06431v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.04480">arXiv:2106.04480</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2106.04480">pdf</a>, <a href="https://arxiv.org/format/2106.04480">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> There Is No Turning Back: A Self-Supervised Approach for Reversibility-Aware Reinforcement Learning </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Grinsztajn%2C+N">Nathan Grinsztajn</a>, <a href="/search/cs?searchtype=author&amp;query=Ferret%2C+J">Johan Ferret</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Preux%2C+P">Philippe Preux</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2106.04480v3-abstract-short" style="display: inline;"> We propose to learn to distinguish reversible from irreversible actions for better informed decision-making in Reinforcement Learning (RL). From theoretical considerations, we show that approximate reversibility can be learned through a simple surrogate task: ranking randomly sampled trajectory events in chronological order. Intuitively, pairs of events that are always observed in the same order a&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.04480v3-abstract-full').style.display = 'inline'; document.getElementById('2106.04480v3-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2106.04480v3-abstract-full" style="display: none;"> We propose to learn to distinguish reversible from irreversible actions for better informed decision-making in Reinforcement Learning (RL). From theoretical considerations, we show that approximate reversibility can be learned through a simple surrogate task: ranking randomly sampled trajectory events in chronological order. Intuitively, pairs of events that are always observed in the same order are likely to be separated by an irreversible sequence of actions. Conveniently, learning the temporal order of events can be done in a fully self-supervised way, which we use to estimate the reversibility of actions from experience, without any priors. We propose two different strategies that incorporate reversibility in RL agents, one strategy for exploration (RAE) and one strategy for control (RAC). We demonstrate the potential of reversibility-aware agents in several environments, including the challenging Sokoban game. In synthetic tasks, we show that we can learn control policies that never fail and reduce to zero the side-effects of interactions, even without access to the reward function. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.04480v3-abstract-full').style.display = 'none'; document.getElementById('2106.04480v3-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.03787">arXiv:2106.03787</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2106.03787">pdf</a>, <a href="https://arxiv.org/format/2106.03787">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multiagent Systems">cs.MA</span> </div> </div> <p class="title is-5 mathjax"> Concave Utility Reinforcement Learning: the Mean-Field Game Viewpoint </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=P%C3%A9rolat%2C+J">Julien P茅rolat</a>, <a href="/search/cs?searchtype=author&amp;query=Lauri%C3%A8re%2C+M">Mathieu Lauri猫re</a>, <a href="/search/cs?searchtype=author&amp;query=Elie%2C+R">Romuald Elie</a>, <a href="/search/cs?searchtype=author&amp;query=Perrin%2C+S">Sarah Perrin</a>, <a href="/search/cs?searchtype=author&amp;query=Bachem%2C+O">Olivier Bachem</a>, <a href="/search/cs?searchtype=author&amp;query=Munos%2C+R">R茅mi Munos</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2106.03787v4-abstract-short" style="display: inline;"> Concave Utility Reinforcement Learning (CURL) extends RL from linear to concave utilities in the occupancy measure induced by the agent&#39;s policy. This encompasses not only RL but also imitation learning and exploration, among others. Yet, this more general paradigm invalidates the classical Bellman equations, and calls for new algorithms. Mean-field Games (MFGs) are a continuous approximation of m&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.03787v4-abstract-full').style.display = 'inline'; document.getElementById('2106.03787v4-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2106.03787v4-abstract-full" style="display: none;"> Concave Utility Reinforcement Learning (CURL) extends RL from linear to concave utilities in the occupancy measure induced by the agent&#39;s policy. This encompasses not only RL but also imitation learning and exploration, among others. Yet, this more general paradigm invalidates the classical Bellman equations, and calls for new algorithms. Mean-field Games (MFGs) are a continuous approximation of many-agent RL. They consider the limit case of a continuous distribution of identical agents, anonymous with symmetric interests, and reduce the problem to the study of a single representative agent in interaction with the full population. Our core contribution consists in showing that CURL is a subclass of MFGs. We think this important to bridge together both communities. It also allows to shed light on aspects of both fields: we show the equivalence between concavity in CURL and monotonicity in the associated MFG, between optimality conditions in CURL and Nash equilibrium in MFG, or that Fictitious Play (FP) for this class of MFGs is simply Frank-Wolfe, bringing the first convergence rate for discrete-time FP for MFGs. We also experimentally demonstrate that, using algorithms recently introduced for solving MFGs, we can address the CURL problem more efficiently. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.03787v4-abstract-full').style.display = 'none'; document.getElementById('2106.03787v4-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 7 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">AAMAS 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.00672">arXiv:2106.00672</a> <span>&nbsp;[<a href="https://arxiv.org/pdf/2106.00672">pdf</a>, <a href="https://arxiv.org/format/2106.00672">other</a>]&nbsp;</span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Neural and Evolutionary Computing">cs.NE</span> </div> </div> <p class="title is-5 mathjax"> What Matters for Adversarial Imitation Learning? </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&amp;query=Orsini%2C+M">Manu Orsini</a>, <a href="/search/cs?searchtype=author&amp;query=Raichuk%2C+A">Anton Raichuk</a>, <a href="/search/cs?searchtype=author&amp;query=Hussenot%2C+L">L茅onard Hussenot</a>, <a href="/search/cs?searchtype=author&amp;query=Vincent%2C+D">Damien Vincent</a>, <a href="/search/cs?searchtype=author&amp;query=Dadashi%2C+R">Robert Dadashi</a>, <a href="/search/cs?searchtype=author&amp;query=Girgin%2C+S">Sertan Girgin</a>, <a href="/search/cs?searchtype=author&amp;query=Geist%2C+M">Matthieu Geist</a>, <a href="/search/cs?searchtype=author&amp;query=Bachem%2C+O">Olivier Bachem</a>, <a href="/search/cs?searchtype=author&amp;query=Pietquin%2C+O">Olivier Pietquin</a>, <a href="/search/cs?searchtype=author&amp;query=Andrychowicz%2C+M">Marcin Andrychowicz</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2106.00672v1-abstract-short" style="display: inline;"> Adversarial imitation learning has become a popular framework for imitation in continuous control. Over the years, several variations of its components were proposed to enhance the performance of the learned policies as well as the sample complexity of the algorithm. In practice, these choices are rarely tested all together in rigorous empirical studies. It is therefore difficult to discuss and un&hellip; <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.00672v1-abstract-full').style.display = 'inline'; document.getElementById('2106.00672v1-abstract-short').style.display = 'none';">&#9661; More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2106.00672v1-abstract-full" style="display: none;"> Adversarial imitation learning has become a popular framework for imitation in continuous control. Over the years, several variations of its components were proposed to enhance the performance of the learned policies as well as the sample complexity of the algorithm. In practice, these choices are rarely tested all together in rigorous empirical studies. It is therefore difficult to discuss and understand what choices, among the high-level algorithmic options as well as low-level implementation details, matter. To tackle this issue, we implement more than 50 of these choices in a generic adversarial imitation learning framework and investigate their impacts in a large-scale study (&gt;500k trained agents) with both synthetic and human-generated demonstrations. While many of our findings confirm common practices, some of them are surprising or even contradict prior work. In particular, our results suggest that artificial demonstrations are not a good proxy for human data and that the very common practice of evaluating imitation algorithms only with synthetic demonstrations may lead to algorithms which perform poorly in the more realistic scenarios with human demonstrations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.00672v1-abstract-full').style.display = 'none'; document.getElementById('2106.00672v1-abstract-short').style.display = 'inline';">&#9651; Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&amp;query=Geist%2C+M&amp;start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&amp;query=Geist%2C+M&amp;start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&amp;query=Geist%2C+M&amp;start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a>&nbsp;&nbsp;</span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10