Search | arXiv e-print repository

Showing 1–50 of 82 results for author: Otani, M
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&query=Otani%2C+M&start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&query=Otani%2C+M&start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&query=Otani%2C+M&start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2502.16213">arXiv:2502.16213</a> <span> [<a href="https://arxiv.org/pdf/2502.16213">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Audio and Speech Processing">eess.AS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Sound">cs.SD</span> </div> </div> <p class="title is-5 mathjax"> Multizone sound field reproduction with direction-of-arrival-distribution-based regularization and its application to binaural-centered mode-matching </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Matsuda%2C+R">Ryo Matsuda</a>, <a href="/search/?searchtype=author&query=Otani%2C+M">Makoto Otani</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2502.16213v1-abstract-short" style="display: inline;"> In higher-order Ambisonics, a framework for sound field reproduction, secondary-source driving signals are generally obtained by regularized mode matching. The authors have proposed a regularization technique based on direction-of-arrival (DoA) distribution of wavefronts in the primary sound field. Such DoA-distribution-based regularization enables a suppression of excessively large driving signal… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.16213v1-abstract-full').style.display = 'inline'; document.getElementById('2502.16213v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2502.16213v1-abstract-full" style="display: none;"> In higher-order Ambisonics, a framework for sound field reproduction, secondary-source driving signals are generally obtained by regularized mode matching. The authors have proposed a regularization technique based on direction-of-arrival (DoA) distribution of wavefronts in the primary sound field. 
Such DoA-distribution-based regularization enables a suppression of excessively large driving signal gains for secondary sources that are in the directions far from the primary source direction. This improves the reproduction accuracy at regions away from the reproduction center. First, this study applies the DoA-distribution-based regularization to a multizone sound field reproduction based on the addition theorem. Furthermore, the regularized multizone sound field reproduction is extended to a binaural-centered mode matching (BCMM), which produces two reproduction points, one at each ear, to avoid a degraded reproduction accuracy due to a shrinking sweet spot at higher frequencies. Free-field and binaural simulations were numerically performed to examine the effectiveness of the DoA-distribution-based regularization on the multizone sound field reproduction and the BCMM. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2502.16213v1-abstract-full').style.display = 'none'; document.getElementById('2502.16213v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 February, 2025; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2025. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Presented at International Congress on Acoustics (ICA) 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2501.03628">arXiv:2501.03628</a> <span> [<a href="https://arxiv.org/pdf/2501.03628">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Statistical Mechanics">cond-mat.stat-mech</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cellular Automata and Lattice Gases">nlin.CG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Physics and Society">physics.soc-ph</span> </div> </div> <p class="title is-5 mathjax"> A Novel Approach to Real-Time Short-Term Traffic Prediction based on Distributed Fiber-Optic Sensing and Data Assimilation with a Stochastic Cell-Automata Model </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Yajima%2C+Y">Yoshiyuki Yajima</a>, <a href="/search/?searchtype=author&query=Prasad%2C+H">Hemant Prasad</a>, <a href="/search/?searchtype=author&query=Ikefuji%2C+D">Daisuke Ikefuji</a>, <a href="/search/?searchtype=author&query=Suzuki%2C+T">Takemasa Suzuki</a>, <a href="/search/?searchtype=author&query=Tominaga%2C+S">Shin Tominaga</a>, <a href="/search/?searchtype=author&query=Sakurai%2C+H">Hitoshi Sakurai</a>, <a href="/search/?searchtype=author&query=Otani%2C+M">Manabu Otani</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2501.03628v1-abstract-short" style="display: inline;"> This paper demonstrates real-time short-term traffic flow prediction through distributed fiber-optic sensing (DFOS) and data assimilation with a stochastic cell-automata-based traffic model. 
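   Regularized mode matching has a compact linear-algebra form that makes the role of the regularizer easy to see. Below is a minimal NumPy sketch assuming a generic diagonal weighting matrix in place of the paper's DoA-derived one; the function name, dimensions, and weighting scheme are illustrative, not the authors' implementation.

```python
import numpy as np

def regularized_mode_matching(G, b, W, lam=1e-2):
    """Solve for driving signals d minimizing ||G d - b||^2 + lam ||W d||^2.

    G : (n_modes, n_sources) transfer matrix from secondary sources to modes
    b : (n_modes,) spherical-harmonic coefficients of the target field
    W : (n_sources, n_sources) weighting matrix; a DoA-distribution-based
        scheme would penalize sources far from the primary-source directions
        more heavily (an assumption here, for illustration only)
    """
    A = G.conj().T @ G + lam * (W.conj().T @ W)
    return np.linalg.solve(A, G.conj().T @ b)

# toy example: 25 modes (4th-order Ambisonics) and 16 loudspeakers
rng = np.random.default_rng(0)
G = rng.standard_normal((25, 16)) + 1j * rng.standard_normal((25, 16))
b = rng.standard_normal(25) + 1j * rng.standard_normal(25)
W = np.diag(1.0 + rng.random(16))  # hypothetical direction-based weights
d = regularized_mode_matching(G, b, W)
print(np.abs(d).max())
```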
2. arXiv:2501.03628 [pdf] (cond-mat.stat-mech, eess.SY, nlin.CG, physics.soc-ph)
   A Novel Approach to Real-Time Short-Term Traffic Prediction based on Distributed Fiber-Optic Sensing and Data Assimilation with a Stochastic Cell-Automata Model
   Authors: Yoshiyuki Yajima, Hemant Prasad, Daisuke Ikefuji, Takemasa Suzuki, Shin Tominaga, Hitoshi Sakurai, Manabu Otani
   Abstract: This paper demonstrates real-time short-term traffic flow prediction through distributed fiber-optic sensing (DFOS) and data assimilation with a stochastic cell-automata-based traffic model. Traffic congestion on expressways is a severe issue. To alleviate its negative impacts, traffic flow must be optimized before it develops into serious congestion, and real-time short-term traffic flow prediction is promising for this purpose. However, the conventional traffic-monitoring apparatus used in prediction methods suffers from the sparsity of its traffic flow data. To overcome this issue and realize real-time prediction, this paper employs DFOS, which yields spatially continuous, real-time traffic flow data along the road without dead zones. Using mean velocities derived from DFOS data as extracted features, this paper proposes a real-time data assimilation method for short-term prediction. The stochastic Nishinari-Fukui-Schadschneider model is adopted as the theoretical model. Future traffic flow is simulated with the optimal model parameters estimated from observed mean velocities and with the initial condition estimated as the latest microscopic traffic state. This concept is validated using two congestion scenarios obtained on Japanese expressways. The results show that the mean absolute error of the predicted mean velocities is 10-15 km/h over a prediction horizon of 30 minutes. Furthermore, the prediction error in congestion length and travel time decreases by 40-84%, depending on the congestion scenario, compared with conventional methods based on traffic counters. This paper concludes that real-time data assimilation using DFOS enables accurate short-term traffic prediction.
   Submitted 7 January, 2025; originally announced January 2025.
   Comments: 22 pages, 11 figures.
3. arXiv:2410.11367 [pdf, ps, other] (physics.acc-ph, hep-ex)
   Acceleration of positive muons by a radio-frequency cavity
   Authors: S. Aritome, K. Futatsukawa, H. Hara, K. Hayasaka, Y. Ibaraki, T. Ichikawa, T. Iijima, H. Iinuma, Y. Ikedo, Y. Imai, K. Inami, K. Ishida, S. Kamal, S. Kamioka, N. Kawamura, M. Kimura, A. Koda, S. Koji, K. Kojima, A. Kondo, Y. Kondo, M. Kuzuba, R. Matsushita, T. Mibe, Y. Miyamoto, et al. (29 additional authors not shown)
   Abstract: Acceleration of positive muons from thermal energy to $100~$keV has been demonstrated. Thermal muons were generated by resonant multi-photon ionization of muonium atoms emitted from a sheet of laser-ablated aerogel. The thermal muons were first electrostatically accelerated to $5.7~$keV, followed by further acceleration to $100~$keV using a radio-frequency quadrupole. The transverse normalized emittances of the accelerated muons in the horizontal and vertical planes were $0.85 \pm 0.25~\rm{(stat.)}~^{+0.22}_{-0.13}~\rm{(syst.)}~\pi~$mm$\cdot$mrad and $0.32 \pm 0.03~\rm{(stat.)}~^{+0.05}_{-0.02}~\rm{(syst.)}~\pi~$mm$\cdot$mrad, respectively. The measured emittance values demonstrate phase-space reduction by factors of $2.0\times 10^2$ (horizontal) and $4.1\times 10^2$ (vertical), allowing good acceleration efficiency. These results pave the way to the first-ever muon accelerator for a variety of applications in particle physics, material science, and other fields.
   Submitted 15 October, 2024; originally announced October 2024.

4. arXiv:2410.01366 [pdf, other] (cs.CV, cs.MM)
   Harnessing the Latent Diffusion Model for Training-Free Image Style Transfer
   Authors: Kento Masui, Mayu Otani, Masahiro Nomura, Hideki Nakayama
   Abstract: Diffusion models have recently shown the ability to generate high-quality images. However, controlling their generation process still poses challenges. The image style transfer task is one such challenge: transferring the visual attributes of a style image to another content image. A typical obstacle in this task is the requirement of additional training of a pre-trained model. We propose a training-free style transfer algorithm, Style Tracking Reverse Diffusion Process (STRDP), for a pretrained Latent Diffusion Model (LDM). Our algorithm applies the Adaptive Instance Normalization (AdaIN) function in a distinct manner during the reverse diffusion process of an LDM while tracking the encoding history of the style image. This enables style transfer in the latent space of the LDM for reduced computational cost and provides compatibility with various LDM models. Through a series of experiments and a user study, we show that our method can quickly transfer the style of an image without additional training. The speed, compatibility, and training-free aspects of our algorithm facilitate agile experiments with combinations of styles and LDMs for extensive application.
   Submitted 2 October, 2024; originally announced October 2024.
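   STRDP's core operation is AdaIN applied to latents during the reverse diffusion process. Here is a minimal PyTorch sketch of the AdaIN function itself; the tensor shapes are illustrative stand-ins for LDM latents, and the tracking of the style image's encoding history is omitted.

```python
import torch

def adain(content, style, eps=1e-5):
    """Adaptive Instance Normalization (Huang & Belongie, 2017): align the
    per-channel mean and std of the content features to those of the style
    features, for (batch, channels, height, width) tensors.
    """
    c_mean = content.mean(dim=(2, 3), keepdim=True)
    c_std = content.std(dim=(2, 3), keepdim=True) + eps
    s_mean = style.mean(dim=(2, 3), keepdim=True)
    s_std = style.std(dim=(2, 3), keepdim=True) + eps
    return s_std * (content - c_mean) / c_std + s_mean

# toy latents standing in for LDM encodings (shapes are illustrative)
content = torch.randn(1, 4, 64, 64)
style = torch.randn(1, 4, 64, 64)
stylized = adain(content, style)
print(stylized.shape)
```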
5. arXiv:2409.19051 [pdf, other] (cs.CV, cs.AI, cs.MM)
   Multimodal Markup Document Models for Graphic Design Completion
   Authors: Kotaro Kikuchi, Naoto Inoue, Mayu Otani, Edgar Simo-Serra, Kota Yamaguchi
   Abstract: This paper presents multimodal markup document models (MarkupDM) that can generate both markup language and images within interleaved multimodal documents. Unlike existing vision-and-language multimodal models, our MarkupDM tackles unique challenges critical to graphic design tasks: generating partial images that contribute to the overall appearance, often involving transparency and varying sizes, and understanding the syntax and semantics of markup languages, which play a fundamental role as a representational format of graphic designs. To address these challenges, we design an image quantizer to tokenize images of diverse sizes with transparency, and we modify a code language model to process markup languages and incorporate image modalities. We provide in-depth evaluations of our approach on three graphic design completion tasks: generating missing attribute values, images, and texts in graphic design templates. Results corroborate the effectiveness of our MarkupDM for graphic design tasks. We also discuss its strengths and weaknesses in detail, providing insights for future research on multimodal document generation.
   Submitted 27 September, 2024; originally announced September 2024.
   Comments: Project page: https://cyberagentailab.github.io/MarkupDM/
6. arXiv:2407.12356 [pdf, other] (cs.CV)
   LTSim: Layout Transportation-based Similarity Measure for Evaluating Layout Generation
   Authors: Mayu Otani, Naoto Inoue, Kotaro Kikuchi, Riku Togashi
   Abstract: We introduce a layout similarity measure designed to evaluate the results of layout generation. While several similarity measures have been proposed in prior research, their behaviors have not been comprehensively discussed. Our research uncovers that the majority of these measures cannot handle various layout differences, primarily because they depend on strict element matching, that is, one-by-one matching of elements within the same category. To overcome this limitation, we propose a new similarity measure based on optimal transport, which facilitates a more flexible matching of elements. This approach allows us to quantify the similarity between any two layouts, even those sharing no element categories, making our measure highly applicable to a wide range of layout generation tasks. For tasks such as unconditional layout generation, where FID is commonly used, we also extend our measure to collection-level similarities between groups of layouts. The empirical results suggest that our collection-level measure offers more reliable comparisons than existing ones such as FID and Max.IoU.
   Submitted 17 July, 2024; originally announced July 2024.
   Comments: 26 pages.
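   The key idea, matching layout elements flexibly via optimal transport rather than one-by-one within categories, can be sketched with a few lines of Sinkhorn iteration. This is a simplified stand-in: uniform element masses and a plain squared-distance cost over box coordinates, whereas LTSim's actual cost design also involves element categories.

```python
import numpy as np

def layout_ot_distance(boxes_a, boxes_b, reg=0.1, n_iter=200):
    """Entropy-regularized optimal-transport distance between two layouts,
    each an (n, 4) array of [x, y, w, h] boxes. Element counts may differ,
    so no strict one-by-one matching is required.
    """
    n, m = len(boxes_a), len(boxes_b)
    # pairwise squared distance between box parameters as transport cost
    C = ((boxes_a[:, None, :] - boxes_b[None, :, :]) ** 2).sum(-1)
    a, b = np.full(n, 1.0 / n), np.full(m, 1.0 / m)   # uniform masses
    K = np.exp(-C / reg)
    u = np.ones(n)
    for _ in range(n_iter):                 # Sinkhorn iterations
        v = b / (K.T @ u)
        u = a / (K @ v)
    P = u[:, None] * K * v[None, :]         # transport plan
    return (P * C).sum()

layout1 = np.array([[0.1, 0.1, 0.3, 0.2], [0.5, 0.6, 0.4, 0.3]])
layout2 = np.array([[0.12, 0.1, 0.3, 0.2], [0.5, 0.65, 0.4, 0.25],
                    [0.0, 0.9, 1.0, 0.1]])
print(layout_ot_distance(layout1, layout2))
```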
7. arXiv:2404.03242 [pdf, other] (cs.CV)
   Would Deep Generative Models Amplify Bias in Future Models?
   Authors: Tianwei Chen, Yusuke Hirota, Mayu Otani, Noa Garcia, Yuta Nakashima
   Abstract: We investigate the impact of deep generative models on potential social biases in upcoming computer vision models. As the internet witnesses an increasing influx of AI-generated images, concerns arise regarding inherent biases that may accompany them, potentially leading to the dissemination of harmful content. This paper explores whether a detrimental feedback loop, resulting in bias amplification, would occur if generated images were used as the training data for future models. We conduct simulations by progressively substituting original images in the COCO and CC3M datasets with images generated through Stable Diffusion. The modified datasets are used to train OpenCLIP and image captioning models, which we evaluate in terms of quality and bias. Contrary to expectations, our findings indicate that introducing generated images during training does not uniformly amplify bias. Instead, instances of bias mitigation across specific tasks are observed. We further explore the factors that may influence these phenomena, such as artifacts in image generation (e.g., blurry faces) or pre-existing biases in the original datasets.
   Submitted 4 April, 2024; originally announced April 2024.
   Comments: This paper has been accepted to CVPR 2024.

8. arXiv:2403.18187 [pdf, other] (cs.CV)
   LayoutFlow: Flow Matching for Layout Generation
   Authors: Julian Jorge Andrade Guerreiro, Naoto Inoue, Kento Masui, Mayu Otani, Hideki Nakayama
   Abstract: Finding a suitable layout represents a crucial task for diverse applications in graphic design. Motivated by simpler and smoother sampling trajectories, we explore the use of Flow Matching as an alternative to current diffusion-based layout generation models. Specifically, we propose LayoutFlow, an efficient flow-based model capable of generating high-quality layouts. Instead of progressively denoising the elements of a noisy layout, our method learns to gradually move, or flow, the elements of an initial sample until it reaches its final prediction. In addition, we employ a conditioning scheme that allows us to handle various generation tasks with varying degrees of conditioning with a single model. Empirically, LayoutFlow performs on par with state-of-the-art models while being significantly faster.
   Submitted 12 July, 2024; v1 submitted 26 March, 2024; originally announced March 2024.
   Comments: Accepted to ECCV 2024. Project page: https://julianguerreiro.github.io/layoutflow/
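   Flow matching trains a model to regress the velocity field of a probability path from noise to data, which is what yields the simpler, straighter sampling trajectories mentioned above. Below is a generic conditional-flow-matching training step in PyTorch, with flat box coordinates, a toy MLP, and a linear interpolation path; this is a sketch of the general technique, not LayoutFlow's exact parameterization or conditioning scheme.

```python
import torch

def flow_matching_loss(model, x1):
    """One conditional-flow-matching objective on a batch of layouts x1
    of shape (batch, n_elements * 4): sample noise x0, interpolate
    x_t = (1 - t) x0 + t x1, and regress the model onto the constant
    target velocity x1 - x0 along that straight-line path.
    """
    x0 = torch.randn_like(x1)              # initial noise sample
    t = torch.rand(x1.shape[0], 1)         # one time per example
    xt = (1 - t) * x0 + t * x1             # point on the linear path
    v_target = x1 - x0                     # its constant velocity
    v_pred = model(torch.cat([xt, t], dim=1))
    return ((v_pred - v_target) ** 2).mean()

# toy setup: 5 boxes -> 20 coordinates, plus the time input
model = torch.nn.Sequential(torch.nn.Linear(21, 128), torch.nn.ReLU(),
                            torch.nn.Linear(128, 20))
x1 = torch.rand(8, 20)                     # a batch of "layouts"
loss = flow_matching_loss(model, x1)
loss.backward()
print(loss.item())
```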
9. arXiv:2309.06661 [pdf, ps, other] (eess.AS, cs.LG, cs.SD, eess.SP)
   Sound field decomposition based on two-stage neural networks
   Authors: Ryo Matsuda, Makoto Otani
   Abstract: A method for sound field decomposition based on neural networks is proposed. The method comprises two stages: a sound field separation stage and a single-source localization stage. In the first stage, the sound pressure at the microphones, synthesized by multiple sources, is separated into the component excited by each sound source. In the second stage, the source location is obtained by regression from the sound pressure at the microphones due to a single sound source. The estimated location is not affected by discretization because the second stage is designed as a regression rather than a classification. Datasets are generated by simulation using Green's function, and the neural network is trained for each frequency. Numerical experiments reveal that, compared with conventional methods, the proposed method achieves higher source-localization accuracy and higher sound-field-reconstruction accuracy.
   Submitted 12 September, 2023; originally announced September 2023.
   Comments: 31 pages, 16 figures.
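   The two-stage structure can be read as a pipeline of two small networks: one separating the microphone pressures by source, and one regressing a continuous location from a single-source pressure vector (hence no grid-discretization error). The MLP architectures, sizes, and the real/imaginary encoding below are illustrative placeholders, not the networks from the paper.

```python
import torch
from torch import nn

n_mics, n_srcs = 8, 2   # illustrative sizes, not the paper's configuration

# Stage 1: separate mixed microphone pressures into per-source components.
separator = nn.Sequential(nn.Linear(2 * n_mics, 256), nn.ReLU(),
                          nn.Linear(256, 2 * n_mics * n_srcs))

# Stage 2: regress a 3-D source position from single-source pressures,
# avoiding the discretization error of classification over a grid.
localizer = nn.Sequential(nn.Linear(2 * n_mics, 256), nn.ReLU(),
                          nn.Linear(256, 3))

# complex pressures at one frequency, split into real/imaginary parts
p_mix = torch.randn(1, 2 * n_mics)
p_sep = separator(p_mix).view(1, n_srcs, 2 * n_mics)
positions = torch.stack([localizer(p_sep[:, k]) for k in range(n_srcs)], 1)
print(positions.shape)   # (1, n_srcs, 3): one location per separated source
```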
10. arXiv:2308.04118 [pdf, other] (cs.CV, cs.MM)
    Multimodal Color Recommendation in Vector Graphic Documents
    Authors: Qianru Qiu, Xueting Wang, Mayu Otani
    Abstract: Color selection plays a critical role in graphic document design and requires sufficient consideration of various contexts. However, recommending appropriate colors that harmonize with the other colors and textual contexts in documents is a challenging task, even for experienced designers. In this study, we propose a multimodal masked color model that integrates both color and textual contexts to provide text-aware color recommendation for graphic documents. Our proposed model comprises self-attention networks that capture the relationships between colors in multiple palettes, and cross-attention networks that incorporate both color and CLIP-based text representations. Our method primarily targets color palette completion, which recommends colors based on the given colors and text. It also applies to another color recommendation task, full palette generation, which generates a complete color palette corresponding to the given text. Experimental results demonstrate that our approach surpasses previous color palette completion methods in accuracy, color distribution, and user experience, and surpasses full palette generation methods in color diversity and similarity to the ground-truth palettes.
    Submitted 8 August, 2023; originally announced August 2023.
    Comments: Accepted to ACM MM 2023.
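    A masked color model predicts a hidden palette entry from the remaining colors plus a text representation. The sketch below folds everything into one self-attention encoder for brevity, whereas the paper combines self-attention over palettes with cross-attention to CLIP-based text features; all layer sizes and the CLIP stand-in are assumptions.

```python
import torch
from torch import nn

d = 64                                  # illustrative embedding size

color_proj = nn.Linear(3, d)            # embed RGB palette colors
text_proj = nn.Linear(512, d)           # stand-in for a CLIP text embedding
encoder = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(d_model=d, nhead=4, batch_first=True),
    num_layers=2)
head = nn.Linear(d, 3)                  # regress the masked RGB color

palette = torch.rand(1, 5, 3)           # five palette colors in [0, 1]
tokens = color_proj(palette)
mask_tok = torch.zeros(1, 1, d)         # would be learnable in a real model
tokens = torch.cat([tokens[:, :2], mask_tok, tokens[:, 3:]], dim=1)
text = text_proj(torch.randn(1, 1, 512))
ctx = encoder(torch.cat([tokens, text], dim=1))  # colors and text attend jointly
pred = head(ctx[:, 2])                  # prediction for the masked slot
loss = ((pred - palette[:, 2]) ** 2).mean()
print(loss.item())
```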
However, our survey of 37 recent papers reveals that many works rely solely on automatic measures (e.g., FID) or perform poorly described human evaluations that are not reliable or repeatable. This paper proposes a standard… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.01816v1-abstract-full').style.display = 'inline'; document.getElementById('2304.01816v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2304.01816v1-abstract-full" style="display: none;"> Human evaluation is critical for validating the performance of text-to-image generative models, as this highly cognitive process requires deep comprehension of text and images. However, our survey of 37 recent papers reveals that many works rely solely on automatic measures (e.g., FID) or perform poorly described human evaluations that are not reliable or repeatable. This paper proposes a standardized and well-defined human evaluation protocol to facilitate verifiable and reproducible human evaluation in future works. In our pilot data collection, we experimentally show that the current automatic measures are incompatible with human perception in evaluating the performance of the text-to-image generation results. Furthermore, we provide insights for designing human evaluation experiments reliably and conclusively. Finally, we make several resources publicly available to the community to facilitate easy and fast implementations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2304.01816v1-abstract-full').style.display = 'none'; document.getElementById('2304.01816v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 April, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">CVPR 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.18248">arXiv:2303.18248</a> <span> [<a href="https://arxiv.org/pdf/2303.18248">pdf</a>, <a href="https://arxiv.org/format/2303.18248">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Towards Flexible Multi-modal Document Models </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Inoue%2C+N">Naoto Inoue</a>, <a href="/search/?searchtype=author&query=Kikuchi%2C+K">Kotaro Kikuchi</a>, <a href="/search/?searchtype=author&query=Simo-Serra%2C+E">Edgar Simo-Serra</a>, <a href="/search/?searchtype=author&query=Otani%2C+M">Mayu Otani</a>, <a href="/search/?searchtype=author&query=Yamaguchi%2C+K">Kota Yamaguchi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.18248v1-abstract-short" style="display: inline;"> Creative workflows for generating graphical documents involve complex inter-related tasks, such as aligning elements, choosing appropriate fonts, or employing aesthetically harmonious colors. 
In this work, we attempt at building a holistic model that can jointly solve many different design tasks. Our model, which we denote by FlexDM, treats vector graphic documents as a set of multi-modal elements… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.18248v1-abstract-full').style.display = 'inline'; document.getElementById('2303.18248v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.18248v1-abstract-full" style="display: none;"> Creative workflows for generating graphical documents involve complex inter-related tasks, such as aligning elements, choosing appropriate fonts, or employing aesthetically harmonious colors. In this work, we attempt at building a holistic model that can jointly solve many different design tasks. Our model, which we denote by FlexDM, treats vector graphic documents as a set of multi-modal elements, and learns to predict masked fields such as element type, position, styling attributes, image, or text, using a unified architecture. Through the use of explicit multi-task learning and in-domain pre-training, our model can better capture the multi-modal relationships among the different document fields. Experimental results corroborate that our single FlexDM is able to successfully solve a multitude of different design tasks, while achieving performance that is competitive with task-specific and costly baselines. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.18248v1-abstract-full').style.display = 'none'; document.getElementById('2303.18248v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To be published in CVPR2023 (highlight), project page: https://cyberagentailab.github.io/flex-dm</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.08137">arXiv:2303.08137</a> <span> [<a href="https://arxiv.org/pdf/2303.08137">pdf</a>, <a href="https://arxiv.org/format/2303.08137">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Graphics">cs.GR</span> </div> </div> <p class="title is-5 mathjax"> LayoutDM: Discrete Diffusion Model for Controllable Layout Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Inoue%2C+N">Naoto Inoue</a>, <a href="/search/?searchtype=author&query=Kikuchi%2C+K">Kotaro Kikuchi</a>, <a href="/search/?searchtype=author&query=Simo-Serra%2C+E">Edgar Simo-Serra</a>, <a href="/search/?searchtype=author&query=Otani%2C+M">Mayu Otani</a>, <a href="/search/?searchtype=author&query=Yamaguchi%2C+K">Kota Yamaguchi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.08137v1-abstract-short" style="display: inline;"> Controllable layout generation aims at synthesizing plausible arrangement of element bounding boxes with optional constraints, such as type or position of a specific element. In this work, we try to solve a broad range of layout generation tasks in a single model that is based on discrete state-space diffusion models. Our model, named LayoutDM, naturally handles the structured layout data in the d… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.08137v1-abstract-full').style.display = 'inline'; document.getElementById('2303.08137v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.08137v1-abstract-full" style="display: none;"> Controllable layout generation aims at synthesizing plausible arrangement of element bounding boxes with optional constraints, such as type or position of a specific element. In this work, we try to solve a broad range of layout generation tasks in a single model that is based on discrete state-space diffusion models. Our model, named LayoutDM, naturally handles the structured layout data in the discrete representation and learns to progressively infer a noiseless layout from the initial input, where we model the layout corruption process by modality-wise discrete diffusion. For conditional generation, we propose to inject layout constraints in the form of masking or logit adjustment during inference. We show in the experiments that our LayoutDM successfully generates high-quality layouts and outperforms both task-specific and task-agnostic baselines on several layout tasks. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.08137v1-abstract-full').style.display = 'none'; document.getElementById('2303.08137v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 14 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To be published in CVPR2023, project page: https://cyberagentailab.github.io/layout-dm/</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2212.11541">arXiv:2212.11541</a> <span> [<a href="https://arxiv.org/pdf/2212.11541">pdf</a>, <a href="https://arxiv.org/format/2212.11541">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> </div> <p class="title is-5 mathjax"> Generative Colorization of Structured Mobile Web Pages </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Kikuchi%2C+K">Kotaro Kikuchi</a>, <a href="/search/?searchtype=author&query=Inoue%2C+N">Naoto Inoue</a>, <a href="/search/?searchtype=author&query=Otani%2C+M">Mayu Otani</a>, <a href="/search/?searchtype=author&query=Simo-Serra%2C+E">Edgar Simo-Serra</a>, <a href="/search/?searchtype=author&query=Yamaguchi%2C+K">Kota Yamaguchi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2212.11541v2-abstract-short" style="display: inline;"> Color is a critical design factor for web pages, affecting important factors such as viewer emotions and the overall trust and satisfaction of a website. Effective coloring requires design knowledge and expertise, but if this process could be automated through data-driven modeling, efficient exploration and alternative workflows would be possible. However, this direction remains underexplored due… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2212.11541v2-abstract-full').style.display = 'inline'; document.getElementById('2212.11541v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2212.11541v2-abstract-full" style="display: none;"> Color is a critical design factor for web pages, affecting important factors such as viewer emotions and the overall trust and satisfaction of a website. Effective coloring requires design knowledge and expertise, but if this process could be automated through data-driven modeling, efficient exploration and alternative workflows would be possible. However, this direction remains underexplored due to the lack of a formalization of the web page colorization problem, datasets, and evaluation protocols. In this work, we propose a new dataset consisting of e-commerce mobile web pages in a tractable format, which are created by simplifying the pages and extracting canonical color styles with a common web browser. 
arXiv:2211.15810 [pdf, other] (cs.HC)
Practical Challenges in Indoor Mobile Recommendation
Authors: Leandro Marega Ferreira Otani, Vagner Figueredo de Santana
Abstract: Recommendation systems are present in multiple contexts such as e-commerce, websites, and media streaming services. As scenarios get more complex, techniques and tools have to consider a number of variables. When recommending services or products to mobile users while they are in indoor environments next to the object of the recommendation, variables such as location, interests, route, and interaction logs also need to be taken into account. In this context, this work discusses the practical challenges inherent to indoor mobile recommendation (e.g., in a mall, parking lot, or museum) grounded on a case study and a systematic review. With the presented results, we expect to support practitioners in the task of defining the proper approach, technology, and notification method when recommending services or products to mobile users in indoor environments.
Submitted 28 November, 2022; originally announced November 2022.
Comments: 10 pages, 3 figures, 2 tables
arXiv:2211.10056 [pdf, other] (cs.CV)
Contrastive Losses Are Natural Criteria for Unsupervised Video Summarization
Authors: Zongshang Pang, Yuta Nakashima, Mayu Otani, Hajime Nagahara
Abstract: Video summarization aims to select the most informative subset of frames in a video to facilitate efficient video browsing. Unsupervised methods usually rely on heuristic training objectives such as diversity and representativeness. However, such methods need to bootstrap the online-generated summaries to compute the objectives for importance score regression. We consider such a pipeline inefficient and seek to directly quantify frame-level importance with the help of contrastive losses from the representation learning literature. Leveraging the contrastive losses, we propose three metrics featuring a desirable key frame: local dissimilarity, global consistency, and uniqueness. With features pre-trained on the image classification task, the metrics can already yield high-quality importance scores, demonstrating competitive or better performance than past heavily trained methods. We show that by refining the pre-trained features with a lightweight contrastively learned projection module, the frame-level importance scores can be further improved, and the model can also leverage a large number of random videos and generalize to test videos with decent performance. Code available at https://github.com/pangzss/pytorch-CTVSUM.
Submitted 18 November, 2022; originally announced November 2022.
Comments: To appear in WACV2023
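To give a flavor of scoring frame importance directly from features, here is a sketch in the spirit of the "local dissimilarity" metric: a frame scores high when it differs from its temporal neighbours. The window size and random features are invented stand-ins, and the paper's three metrics are defined precisely there.

    # Local-dissimilarity style importance scores from frame features.
    import numpy as np

    rng = np.random.default_rng(0)
    feats = rng.normal(size=(8, 16))   # 8 frames, 16-d features (stand-ins)
    feats /= np.linalg.norm(feats, axis=1, keepdims=True)

    def local_dissimilarity(feats, window=2):
        """Score each frame by one minus its mean cosine similarity to
        neighbours within the window."""
        n = len(feats)
        scores = np.zeros(n)
        for i in range(n):
            lo, hi = max(0, i - window), min(n, i + window + 1)
            nbrs = [j for j in range(lo, hi) if j != i]
            scores[i] = 1.0 - float(np.mean(feats[nbrs] @ feats[i]))
        return scores

    print(local_dissimilarity(feats).round(3))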
arXiv:2210.11707 [pdf, other] (cs.CV)
DOI: 10.1561/0600000099
Video Summarization Overview
Authors: Mayu Otani, Yale Song, Yang Wang
Abstract: With the broad growth of video capturing devices and applications on the web, it is increasingly demanding to provide desired video content for users efficiently. Video summarization facilitates quickly grasping video content by creating a compact summary of videos. Much effort has been devoted to automatic video summarization, and various problem settings and approaches have been proposed. Our goal is to provide an overview of this field. This survey covers early studies as well as recent approaches which take advantage of deep learning techniques. We describe video summarization approaches and their underlying concepts. We also discuss benchmarks and evaluations. We overview how prior work addressed evaluation and detail the pros and cons of the evaluation protocols. Last but not least, we discuss open challenges in this field.
Submitted 20 October, 2022; originally announced October 2022.
Comments: 53 pages
Journal ref: Foundations and Trends in Computer Graphics and Vision: Vol. 13: No. 4, pp 284-335 (2022)
arXiv:2209.10820 [pdf, other] (cs.CV)
Color Recommendation for Vector Graphic Documents based on Multi-Palette Representation
Authors: Qianru Qiu, Xueting Wang, Mayu Otani, Yuki Iwazaki
Abstract: Vector graphic documents present multiple visual elements, such as images, shapes, and texts. Choosing appropriate colors for multiple visual elements is a difficult but crucial task for both amateurs and professional designers. Instead of creating a single color palette for all elements, we extract multiple color palettes from each visual element in a graphic document, and then combine them into a color sequence. We propose a masked color model for color sequence completion that recommends specified colors with high probability based on the color context in the multi-palette. We train the model and build a color recommendation system on a large-scale dataset of vector graphic documents. The proposed color recommendation method outperformed other state-of-the-art methods in both quantitative and qualitative evaluations on color prediction, and our color recommendation system received positive feedback from professional designers in an interview study.
Submitted 22 September, 2022; originally announced September 2022.
Comments: Accepted to WACV 2023
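Masked color-sequence completion can be mimicked with a toy frequency model: hide one slot of a palette sequence and recommend the color that most often fills that slot in sequences with the same context. The corpus and matching rule below are invented stand-ins for the paper's learned model.

    # Toy masked color completion by context matching.
    from collections import Counter

    corpus = [  # hypothetical palette sequences from documents
        ["#ffffff", "#b31b1b", "#222222"],
        ["#ffffff", "#1b63b3", "#222222"],
        ["#ffffff", "#b31b1b", "#222222"],
    ]

    def complete(sequence, corpus):
        """Fill the single None slot with the most frequent color seen
        at that position among sequences sharing the same context."""
        i = sequence.index(None)
        context = sequence[:i] + sequence[i + 1:]
        votes = Counter(seq[i] for seq in corpus
                        if seq[:i] + seq[i + 1:] == context)
        return votes.most_common(1)[0][0]

    print(complete(["#ffffff", None, "#222222"], corpus))  # prints "#b31b1b"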
arXiv:2209.04728 [pdf, ps, other] (math.AP)
Periodic Solutions of the complex Ginzburg-Landau Equation in bounded domains
Authors: Takanori Kuroda, Mitsuharu Ôtani
Abstract: In this paper, we are concerned with complex Ginzburg-Landau (CGL) equations. There are several results on the global existence and smoothing effects of solutions to the initial boundary value problem for (CGL) in bounded or unbounded domains. In this paper, we study the time periodic problem for (CGL) in bounded domains. The main strategy in this paper is to regard (CGL) as a parabolic equation with monotone and non-monotone perturbations and to apply non-monotone perturbation theory of parabolic equations developed by Otani (1984).
Submitted 10 September, 2022; originally announced September 2022.
MSC Class: 35Q56; 47J35; 39A23
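For orientation, complex Ginzburg-Landau equations are commonly written in a form like the following; the parameter names follow standard usage and are not quoted from the paper:

    \[
      \partial_t u - (\lambda + i\alpha)\,\Delta u
        + (\kappa + i\beta)\,|u|^{q-2}u - \gamma u = f(x,t),
      \qquad \lambda > 0,\ \kappa > 0,
    \]
    % u is complex-valued, \alpha, \beta, \gamma are real constants,
    % and f is a given forcing term.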
arXiv:2209.04727 [pdf, ps, other] (math.AP)
Local well-posedness of the complex Ginzburg-Landau Equation in general domains
Authors: Takanori Kuroda, Mitsuharu Ôtani
Abstract: In this paper, complex Ginzburg-Landau (CGL) equations with superlinear growth terms are studied. We discuss the local well-posedness in the energy space H^1 for the initial-boundary value problem of the equations in general domains. The local well-posedness in H^1 in bounded domains was already examined by the authors (2019). Our approach to CGL equations is based on the theory of parabolic equations governed by subdifferential operators with non-monotone perturbations. By using this method together with the Yosida approximation procedure, we discuss the existence and the uniqueness of local solutions as well as the global existence of solutions with small initial data.
Submitted 10 September, 2022; originally announced September 2022.
MSC Class: 35Q56; 47J35; 35K61
arXiv:2208.10758 [pdf, other] (cs.CV, cs.AI)
Learning More May Not Be Better: Knowledge Transferability in Vision and Language Tasks
Authors: Tianwei Chen, Noa Garcia, Mayu Otani, Chenhui Chu, Yuta Nakashima, Hajime Nagahara
Abstract: Is more data always better to train vision-and-language models? We study knowledge transferability in multi-modal tasks. The current tendency in machine learning is to assume that by joining multiple datasets from different tasks, overall performance will improve. However, we show that not all knowledge transfers well or has a positive impact on related tasks, even when the tasks share a common goal. We conduct an exhaustive analysis based on hundreds of cross-experiments on 12 vision-and-language tasks categorized into 4 groups. Although tasks in the same group are prone to improve each other, results show that this is not always the case. Other factors, such as dataset size or pre-training stage, also have a great impact on how well the knowledge is transferred.
Submitted 23 August, 2022; originally announced August 2022.
arXiv:2205.00231 [pdf, other] (cond-mat.mtrl-sci, cond-mat.mes-hall)
DOI: 10.1021/acsomega.2c03257
Theoretical Analysis on the Stability of 1-Pyrenebutanoic Acid Succinimidyl Ester Adsorbed on Graphene
Authors: Yasuhiro Oishi, Hirotsugu Ogi, Satoshi Hagiwara, Minoru Otani, Koichi Kusakabe
Abstract: The adsorbed structure of 1-pyrenebutanoic acid succinimidyl ester (PASE) on graphene was investigated based on density functional theory. We found two locally stable structures: a straight structure with the chainlike part of butanoic acid succinimidyl ester (BSE) lying down, and a bent structure with the BSE part directed away from graphene, keeping the pyrene (Py) part adsorbed on graphene. To elucidate the adsorption mechanism, we separately estimated the contributions of the Py and BSE parts to the entire PASE adsorption, and the adsorption effect of the BSE part was found to be secondary in comparison to that of the Py part. Next, the mobility of the BSE part at room temperature was confirmed by the activation energy barrier between the straight and bent structures. To take account of the external environment, we considered the presence of amino acids and the hydration effect using a three-dimensional reference interaction site model. Glycine molecules and the solvent environment were found to contribute to stabilizing the bent PASE structure relative to the straight one. Therefore, the effect of the external environment around PASE is important when the standing-up process of the BSE part from graphene is considered.
Submitted 3 September, 2022; v1 submitted 30 April, 2022; originally announced May 2022.
Comments: 21 pages, 5 figures, accepted for publication in ACS Omega
arXiv:2204.03934 [pdf, other] (cs.CV, cs.LG)
Does Robustness on ImageNet Transfer to Downstream Tasks?
Authors: Yutaro Yamada, Mayu Otani
Abstract: As clean ImageNet accuracy nears its ceiling, the research community is increasingly concerned about robust accuracy under distributional shifts. While a variety of methods have been proposed to robustify neural networks, these techniques often target models trained on ImageNet classification. At the same time, it is common practice to use ImageNet-pretrained backbones for downstream tasks such as object detection, semantic segmentation, and image classification from different domains. This raises a question: can these robust image classifiers transfer robustness to downstream tasks? For object detection and semantic segmentation, we find that a vanilla Swin Transformer, a variant of Vision Transformer tailored for dense prediction tasks, transfers robustness better than Convolutional Neural Networks that are trained to be robust to the corrupted version of ImageNet. For CIFAR10 classification, we find that models robustified for ImageNet do not retain robustness when fully fine-tuned. These findings suggest that current robustification techniques tend to emphasize ImageNet evaluations. Moreover, network architecture is a strong source of robustness when we consider transfer learning.
Submitted 8 April, 2022; originally announced April 2022.
Comments: CVPR 2022
</p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Yamada%2C+Y">Yutaro Yamada</a>, <a href="/search/?searchtype=author&query=Otani%2C+M">Mayu Otani</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2204.03934v1-abstract-short" style="display: inline;"> As clean ImageNet accuracy nears its ceiling, the research community is increasingly more concerned about robust accuracy under distributional shifts. While a variety of methods have been proposed to robustify neural networks, these techniques often target models trained on ImageNet classification. At the same time, it is a common practice to use ImageNet pretrained backbones for downstream tasks… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.03934v1-abstract-full').style.display = 'inline'; document.getElementById('2204.03934v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2204.03934v1-abstract-full" style="display: none;"> As clean ImageNet accuracy nears its ceiling, the research community is increasingly more concerned about robust accuracy under distributional shifts. While a variety of methods have been proposed to robustify neural networks, these techniques often target models trained on ImageNet classification. At the same time, it is a common practice to use ImageNet pretrained backbones for downstream tasks such as object detection, semantic segmentation, and image classification from different domains. This raises a question: Can these robust image classifiers transfer robustness to downstream tasks? For object detection and semantic segmentation, we find that a vanilla Swin Transformer, a variant of Vision Transformer tailored for dense prediction tasks, transfers robustness better than Convolutional Neural Networks that are trained to be robust to the corrupted version of ImageNet. For CIFAR10 classification, we find that models that are robustified for ImageNet do not retain robustness when fully fine-tuned. These findings suggest that current robustification techniques tend to emphasize ImageNet evaluations. Moreover, network architecture is a strong source of robustness when we consider transfer learning. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.03934v1-abstract-full').style.display = 'none'; document.getElementById('2204.03934v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">CVPR 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2204.01243">arXiv:2204.01243</a> <span> [<a href="https://arxiv.org/pdf/2204.01243">pdf</a>, <a href="https://arxiv.org/ps/2204.01243">ps</a>, <a href="https://arxiv.org/format/2204.01243">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Accelerator Physics">physics.acc-ph</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1093/ptep/ptac067">10.1093/ptep/ptac067 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> First muon acceleration and muon linear accelerator for measuring the muon anomalous magnetic moment and electric dipole moment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Otani%2C+M">M. Otani</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2204.01243v1-abstract-short" style="display: inline;"> Muon acceleration using a radio-frequency accelerator was recently demonstrated for the first time. Measurement of the muon anomalous magnetic moment and electric dipole moment at Japan Proton Accelerator Research Complex is the first experiment using accelerated muon beams, and construction will begin soon. The radio-frequency accelerator used in the experiment and the first muon acceleration are… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.01243v1-abstract-full').style.display = 'inline'; document.getElementById('2204.01243v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2204.01243v1-abstract-full" style="display: none;"> Muon acceleration using a radio-frequency accelerator was recently demonstrated for the first time. Measurement of the muon anomalous magnetic moment and electric dipole moment at Japan Proton Accelerator Research Complex is the first experiment using accelerated muon beams, and construction will begin soon. The radio-frequency accelerator used in the experiment and the first muon acceleration are described in this paper. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2204.01243v1-abstract-full').style.display = 'none'; document.getElementById('2204.01243v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 April, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Prog Theor Exp Phys (2022) </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2203.16062">arXiv:2203.16062</a> <span> [<a href="https://arxiv.org/pdf/2203.16062">pdf</a>, <a href="https://arxiv.org/format/2203.16062">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> </div> <p class="title is-5 mathjax"> AxIoU: An Axiomatically Justified Measure for Video Moment Retrieval </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Togashi%2C+R">Riku Togashi</a>, <a href="/search/?searchtype=author&query=Otani%2C+M">Mayu Otani</a>, <a href="/search/?searchtype=author&query=Nakashima%2C+Y">Yuta Nakashima</a>, <a href="/search/?searchtype=author&query=Rahtu%2C+E">Esa Rahtu</a>, <a href="/search/?searchtype=author&query=Heikkila%2C+J">Janne Heikkila</a>, <a href="/search/?searchtype=author&query=Sakai%2C+T">Tetsuya Sakai</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2203.16062v1-abstract-short" style="display: inline;"> Evaluation measures have a crucial impact on the direction of research. Therefore, it is of utmost importance to develop appropriate and reliable evaluation measures for new applications where conventional measures are not well suited. Video Moment Retrieval (VMR) is one such application, and the current practice is to use R@$K,胃$ for evaluating VMR systems. However, this measure has two disadvant… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2203.16062v1-abstract-full').style.display = 'inline'; document.getElementById('2203.16062v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2203.16062v1-abstract-full" style="display: none;"> Evaluation measures have a crucial impact on the direction of research. Therefore, it is of utmost importance to develop appropriate and reliable evaluation measures for new applications where conventional measures are not well suited. Video Moment Retrieval (VMR) is one such application, and the current practice is to use R@$K,胃$ for evaluating VMR systems. However, this measure has two disadvantages. First, it is rank-insensitive: It ignores the rank positions of successfully localised moments in the top-$K$ ranked list by treating the list as a set. Second, it binarizes the Intersection over Union (IoU) of each retrieved video moment using the threshold $胃$ and thereby ignoring fine-grained localisation quality of ranked moments. We propose an alternative measure for evaluating VMR, called Average Max IoU (AxIoU), which is free from the above two problems. We show that AxIoU satisfies two important axioms for VMR evaluation, namely, \textbf{Invariance against Redundant Moments} and \textbf{Monotonicity with respect to the Best Moment}, and also that R@$K,胃$ satisfies the first axiom only. 
arXiv:2110.13395 [pdf, other] (cs.CV, cs.AI)
Transferring Domain-Agnostic Knowledge in Video Question Answering
Authors: Tianran Wu, Noa Garcia, Mayu Otani, Chenhui Chu, Yuta Nakashima, Haruo Takemura
Abstract: Video question answering (VideoQA) is designed to answer a given question based on a relevant video clip. The currently available large-scale datasets have made it possible to formulate VideoQA as the joint understanding of visual and language information. However, this training procedure is costly and still falls short of human performance. In this paper, we investigate a transfer learning method via the introduction of domain-agnostic and domain-specific knowledge. First, we develop a novel transfer learning framework, which finetunes the pre-trained model by applying domain-agnostic knowledge as the medium. Second, we construct a new VideoQA dataset with 21,412 human-generated question-answer samples for comparable transfer of knowledge. Our experiments show that: (i) domain-agnostic knowledge is transferable, and (ii) our proposed transfer learning framework can boost VideoQA performance effectively.
Submitted 25 October, 2021; originally announced October 2021.
arXiv:2109.01803 [pdf, ps, other] (math.AP)
On a comparison theorem for parabolic equations with nonlinear boundary conditions
Authors: Kosuke Kita, Mitsuharu Ôtani
Abstract: In this paper, a new type of comparison theorem is presented for some initial-boundary value problems of second-order nonlinear parabolic systems with nonlinear boundary conditions. This comparison theorem has an advantage over the classical ones, since it makes it possible to compare two solutions satisfying different types of boundary conditions. Some applications are given in the last section, where the existence of blow-up solutions is shown for some nonlinear parabolic equations and systems with nonlinear boundary conditions.
Submitted 4 September, 2021; originally announced September 2021.
Comments: 19 pages
MSC Class: 35B51; 35B40; 35K51; 35K57
arXiv:2108.00871 [pdf, other] (cs.CV, cs.MM)
DOI: 10.1145/3474085.3475497
Constrained Graphic Layout Generation via Latent Optimization
Authors: Kotaro Kikuchi, Edgar Simo-Serra, Mayu Otani, Kota Yamaguchi
Abstract: It is common in graphic design that humans visually arrange various elements according to their design intent and semantics. For example, a title text almost always appears on top of other elements in a document. In this work, we generate graphic layouts that can flexibly incorporate such design semantics, either specified implicitly or explicitly by a user. We optimize within the latent space of an off-the-shelf layout generation model, allowing our approach to be complementary to and used with existing layout generation models. Our approach builds on a generative layout model based on a Transformer architecture, and formulates the layout generation as a constrained optimization problem where design constraints are used for element alignment, overlap avoidance, or any other user-specified relationship. We show in the experiments that our approach is capable of generating realistic layouts in both constrained and unconstrained generation tasks with a single model. The code is available at https://github.com/ktrk115/const_layout.
Submitted 2 August, 2021; originally announced August 2021.
Comments: Accepted by ACM Multimedia 2021
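Latent optimization for constrained generation comes down to gradient descent on the latent code of a frozen generator, with constraints expressed as differentiable penalties. The linear "decoder" and alignment penalty below are invented to show the mechanism; the paper optimizes the latent space of a Transformer layout model with richer constraints.

    # Constrained generation by optimizing the latent, not the model.
    import numpy as np

    W = np.array([[0.7, -0.2], [0.1, 0.9]])  # toy linear "decoder": z -> (x1, x2)

    def constraint_loss(z):
        x1, x2 = W @ z
        return (x1 - x2) ** 2                # alignment constraint: x1 == x2

    def grad(z, eps=1e-5):
        """Central-difference gradient keeps the toy dependency-free."""
        g = np.zeros_like(z)
        for i in range(len(z)):
            dz = np.zeros_like(z)
            dz[i] = eps
            g[i] = (constraint_loss(z + dz) - constraint_loss(z - dz)) / (2 * eps)
        return g

    z = np.array([1.0, -1.0])
    for _ in range(200):
        z -= 0.1 * grad(z)                   # gradient steps on the latent code

    print(constraint_loss(z))                # ~0: decoded layout satisfies the constraint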
Some applications are given in the last section, where the existence of blow-up solutions is shown for some nonlinear parabolic equations and systems with nonlinear boundary conditions.
Submitted 4 September, 2021; originally announced September 2021.
Comments: 19 pages
MSC Class: 35B51; 35B40; 35K51; 35K57

arXiv:2108.00871 (https://arxiv.org/abs/2108.00871) [pdf, other] cs.CV cs.MM doi:10.1145/3474085.3475497
Constrained Graphic Layout Generation via Latent Optimization
Authors: Kotaro Kikuchi, Edgar Simo-Serra, Mayu Otani, Kota Yamaguchi
Abstract: It is common in graphic design that humans visually arrange various elements according to their design intent and semantics. For example, a title text almost always appears on top of other elements in a document.
In this work, we generate graphic layouts that can flexibly incorporate such design semantics, either specified implicitly or explicitly by a user. We optimize using the latent space of an off-the-shelf layout generation model, allowing our approach to be complementary to and used with existing layout generation models. Our approach builds on a generative layout model based on a Transformer architecture, and formulates layout generation as a constrained optimization problem where design constraints are used for element alignment, overlap avoidance, or any other user-specified relationship. We show in the experiments that our approach is capable of generating realistic layouts in both constrained and unconstrained generation tasks with a single model. The code is available at https://github.com/ktrk115/const_layout .
Submitted 2 August, 2021; originally announced August 2021.
Comments: Accepted by ACM Multimedia 2021
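
A minimal sketch of the latent-optimization idea, assuming a toy sigmoid decoder in place of the paper's pretrained Transformer generator; the two penalties (overlap avoidance, left alignment) and their weights are illustrative stand-ins for the paper's design constraints:

    import torch

    def decode(z):
        # stand-in for a pretrained generator: z -> boxes (x, y, w, h) in [0, 1]
        return torch.sigmoid(z)

    def overlap(b):
        # sum of pairwise intersection areas, driven toward zero
        x1, y1 = b[:, 0], b[:, 1]
        x2, y2 = b[:, 0] + b[:, 2], b[:, 1] + b[:, 3]
        total = torch.zeros(())
        n = b.shape[0]
        for i in range(n):
            for j in range(i + 1, n):
                iw = (torch.min(x2[i], x2[j]) - torch.max(x1[i], x1[j])).clamp(min=0)
                ih = (torch.min(y2[i], y2[j]) - torch.max(y1[i], y1[j])).clamp(min=0)
                total = total + iw * ih
        return total

    def left_align(b):
        # encourage elements to share a common left edge
        return ((b[:, 0] - b[:, 0].mean()) ** 2).sum()

    z = torch.randn(3, 4, requires_grad=True)   # latent code for 3 elements
    opt = torch.optim.Adam([z], lr=0.05)
    for _ in range(300):
        opt.zero_grad()
        boxes = decode(z)
        loss = overlap(boxes) + 0.1 * left_align(boxes)
        loss.backward()
        opt.step()
    print(decode(z))

Because only the latent code is optimized while the generator stays frozen, the same pretrained model serves both constrained and unconstrained generation, which is the point made in the abstract.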

arXiv:2106.13445 (https://arxiv.org/abs/2106.13445) [pdf, other] cs.CV
A Picture May Be Worth a Hundred Words for Visual Question Answering
Authors: Yusuke Hirota, Noa Garcia, Mayu Otani, Chenhui Chu, Yuta Nakashima, Ittetsu Taniguchi, Takao Onoye
Abstract: How far can we go with textual representations for understanding pictures? In image understanding, it is essential to use concise but detailed image representations. Deep visual features extracted by vision models, such as Faster R-CNN, are prevalently used in multiple tasks, and especially in visual question answering (VQA). However, conventional deep visual features may struggle to convey all the details in an image as we humans do. Meanwhile, with recent language models' progress, descriptive text may be an alternative to this problem. This paper delves into the effectiveness of textual representations for image understanding in the specific context of VQA. We propose to take description-question pairs as input, instead of deep visual features, and feed them into a language-only Transformer model, simplifying the process and reducing the computational cost. We also experiment with data augmentation techniques to increase the diversity in the training set and avoid learning statistical bias. Extensive evaluations show that textual representations require only about a hundred words to compete with deep visual features on both VQA 2.0 and VQA-CP v2.
Submitted 25 June, 2021; originally announced June 2021.

arXiv:2105.04769 (https://arxiv.org/abs/2105.04769) [pdf, other] cs.LG cs.IR doi:10.1145/3404835.3462933
Scalable Personalised Item Ranking through Parametric Density Estimation
Authors: Riku Togashi, Masahiro Kato, Mayu Otani, Tetsuya Sakai, Shin'ichi Satoh
Abstract: Learning from implicit feedback is challenging because of the difficult nature of the one-class problem: we can observe only positive examples. Most conventional methods use a pairwise ranking approach and negative samplers to cope with the one-class problem. However, such methods have two main drawbacks, particularly in large-scale applications: (1) the pairwise approach is severely inefficient due to the quadratic computational cost, and (2) even recent model-based samplers (e.g. IRGAN) cannot achieve practical efficiency due to the training of an extra model. In this paper, we propose a learning-to-rank approach, which achieves convergence speed comparable to the pointwise counterpart while performing similarly to the pairwise counterpart in terms of ranking effectiveness. Our approach estimates the probability densities of positive items for each user within a rich class of distributions, viz. the exponential family. In our formulation, we derive a loss function and the appropriate negative sampling distribution based on maximum likelihood estimation.
We also develop a practical technique for risk approximation and a regularisation scheme. We then show that our single-model approach is equivalent to an IRGAN variant under a certain condition. Experiments on real-world datasets show that our approach outperforms the pointwise and pairwise counterparts in terms of effectiveness and efficiency.
Submitted 10 May, 2021; originally announced May 2021.
Comments: Accepted by SIGIR'21
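
The paper derives both its loss and its negative-sampling distribution from maximum likelihood in a general exponential family. The toy below is only a stand-in for that idea: it fits a diagonal Gaussian (one exponential-family member) to a user's positive item embeddings by MLE, ranks items pointwise by log-density, and draws density-proportional "hard" negatives; the embedding sizes and the sampling rule are illustrative assumptions, not the paper's estimator:

    import numpy as np

    rng = np.random.default_rng(0)

    # toy setup: item embeddings and one user's observed positives
    items = rng.normal(size=(1000, 8))
    pos = rng.choice(1000, size=50, replace=False)

    # MLE for a diagonal Gaussian over the user's positives
    mu = items[pos].mean(axis=0)
    var = items[pos].var(axis=0) + 1e-6

    def log_density(x):
        return -0.5 * (((x - mu) ** 2) / var + np.log(2 * np.pi * var)).sum(axis=1)

    # pointwise scores: rank all items by model density, no pairwise loop,
    # so scoring is linear rather than quadratic in the number of samples
    scores = log_density(items)

    # draw negatives in proportion to model density over unobserved items
    w = np.exp(scores - scores.max())
    w[pos] = 0.0
    negatives = rng.choice(1000, size=50, replace=False, p=w / w.sum())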

arXiv:2101.07481 (https://arxiv.org/abs/2101.07481) [pdf, other] cs.IR
Density-Ratio Based Personalised Ranking from Implicit Feedback
Authors: Riku Togashi, Masahiro Kato, Mayu Otani, Shin'ichi Satoh
Abstract: Learning from implicit user feedback is challenging as we can only observe positive samples but never access negative ones. Most conventional methods cope with this issue by adopting a pairwise ranking approach with negative sampling. However, the pairwise ranking approach has a severe disadvantage in convergence time owing to the quadratically increasing computational cost with respect to the sample size; this is problematic, particularly for large-scale datasets and complex models such as neural networks. By contrast, a pointwise approach does not directly solve a ranking problem, and is therefore inferior to a pairwise counterpart in top-K ranking tasks; however, it is generally advantageous with regard to convergence time. This study aims to establish an approach to learn personalised ranking from implicit feedback that reconciles the training efficiency of the pointwise approach with the ranking effectiveness of the pairwise counterpart. The key idea is to estimate the ranking of items in a pointwise manner; we first reformulate the conventional pointwise approach based on density ratio estimation and then incorporate the essence of ranking-oriented approaches (e.g. the pairwise approach) into our formulation. Through experiments on three real-world datasets, we demonstrate that our approach not only dramatically reduces the convergence time (one to two orders of magnitude faster) but also significantly improves the ranking performance.
Submitted 19 January, 2021; originally announced January 2021.
Comments: Accepted by WWW 2021

arXiv:2012.10090 (https://arxiv.org/abs/2012.10090) [pdf, ps, other] cond-mat.other physics.chem-ph doi:10.1103/PhysRevMaterials.5.065001
Bias-dependent diffusion of H$_2$O molecules on an Al(111) surface
Authors: Satoshi Hagiwara, Chunping Hu, Satomichi Nishihara, Minoru Otani
Abstract: We investigate the process by which a water molecule diffuses on the surface of an Al(111) electrode under constant bias voltage by first-principles density functional theory. To understand the diffusion path of the water on the Al(111), we calculated the minimum energy path (MEP) determined by the nudged elastic band method in combination with constant electron chemical potential (constant-$\mu_{\rm e}$) methods. The simulation shows that the MEP of the water molecule, its adsorption site, and the activation barrier strongly depend on the applied bias voltage. This strong dependence of the water diffusion process on the bias voltage is in good agreement with the result of a previous scanning tunneling microscopy (STM) experiment. The agreement between the theoretical and experimental results implies that an accurate treatment of the bias voltage plays a significant role in understanding the interaction between the electric field and the surface of the material. Comparative studies of the diffusion process with the constant total number of electrons (constant-$N_\mathrm{e}$) scheme show that the absence of strong interaction between the molecular dipole and the electric field leads to a different understanding of how water diffuses on a metal surface. The proposed constant-$\mu_{\rm e}$ scheme is a realistic tool for simulating reactions under bias voltage, not only in STM setups but also at the electrochemical interface.
Submitted 18 December, 2020; originally announced December 2020.
Journal ref: Phys. Rev. Materials 5, 065001 (2021)
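
The nudged elastic band step is easy to illustrate outside DFT. Below is a minimal classical NEB on a toy 2D double-well surface; the potential, spring constant, and step size are all made up for illustration, and nothing here attempts the paper's constant-$\mu_{\rm e}$ first-principles coupling:

    import numpy as np

    def V(p):
        x, y = p
        return (x ** 2 - 1) ** 2 + 5 * y ** 2   # minima at (-1, 0) and (1, 0)

    def gradV(p):
        x, y = p
        return np.array([4 * x * (x ** 2 - 1), 10 * y])

    # band of images between the two minima, bent away from the straight path
    n = 11
    band = np.linspace([-1.0, 0.0], [1.0, 0.0], n)
    band[1:-1, 1] += 0.3

    k, step = 5.0, 0.005
    for _ in range(5000):
        for i in range(1, n - 1):
            tau = band[i + 1] - band[i - 1]
            tau = tau / np.linalg.norm(tau)       # local tangent estimate
            g = gradV(band[i])
            f_perp = -(g - g.dot(tau) * tau)      # true force, perpendicular part
            f_spring = k * (np.linalg.norm(band[i + 1] - band[i])
                            - np.linalg.norm(band[i] - band[i - 1])) * tau
            band[i] = band[i] + step * (f_perp + f_spring)

    # the highest image approximates the saddle point (0, 0), where V = 1
    print(max(V(p) for p in band))

The perpendicular projection is what makes NEB find the minimum energy path rather than letting the images slide into the endpoints; the spring term only keeps them evenly spaced along the band.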

arXiv:2011.13571 (https://arxiv.org/abs/2011.13571) [pdf, ps, other] physics.chem-ph cond-mat.mtrl-sci
Study on the free corrosion potential at an interface between an Al electrode and an acidic aqueous NaCl solution through density functional theory combined with the reference interaction site model
Authors: Koichi Kano, Satoshi Hagiwara, Takahiro Igarashi, Minoru Otani
Abstract: We investigated the free corrosion potential at an interface between an Al electrode and an aqueous NaCl solution (NaCl(aq)) under acidic conditions via density functional theory combined with the effective screening medium and reference interaction site model (ESM-RISM). First, the electrode potentials for the anodic and cathodic corrosion reactions were obtained from the grand-potential profile as a function of the electron chemical potential at the interface. Thereafter, we determined the free corrosion potential using the Tafel extrapolation method. The results for the free corrosion potential were consistent with previous experimental data. By controlling the pH of the NaCl(aq), we determined the pH dependence of the free corrosion potential, and the results agreed well with experiment. Our results indicate that the ESM-RISM method duly describes the environmental effects of an acidic solution and precisely determines the free corrosion potential. Therefore, this method presents an efficient approach toward calculating the free corrosion potential for various reactions.
Submitted 15 March, 2021; v1 submitted 27 November, 2020; originally announced November 2020.
Comments: 17 pages, 7 figures
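
Tafel extrapolation itself is a small calculation: each branch is a straight line in (potential, log current) space, and the free corrosion potential is where the anodic and cathodic rates balance. A sketch with invented parameters (the exchange currents, equilibrium potentials, and slopes below are illustrative, not values from the paper):

    import numpy as np

    # illustrative Tafel parameters: exchange current densities (A/cm^2),
    # equilibrium potentials (V), and Tafel slopes (V/decade)
    i0_a, E_a, b_a = 1e-6, -0.70, 0.06    # anodic branch
    i0_c, E_c, b_c = 1e-5, -0.20, 0.12    # cathodic branch

    # Tafel lines: log10 i_a = log10 i0_a + (E - E_a)/b_a
    #              log10 i_c = log10 i0_c - (E - E_c)/b_c
    # setting the two equal gives the free corrosion potential E_corr
    E_corr = ((np.log10(i0_c) - np.log10(i0_a)) + E_a / b_a + E_c / b_c) \
             / (1 / b_a + 1 / b_c)
    i_corr = 10 ** (np.log10(i0_a) + (E_corr - E_a) / b_a)
    print(E_corr, i_corr)   # about -0.49 V and 2.8e-3 A/cm^2 for these inputs

In the paper the two branches come from ESM-RISM grand-potential profiles rather than measured polarisation curves, but the extrapolation step is the same intersection.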
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.13571v2-abstract-full').style.display = 'none'; document.getElementById('2011.13571v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 March, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 November, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages, 7 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2011.05061">arXiv:2011.05061</a> <span> [<a href="https://arxiv.org/pdf/2011.05061">pdf</a>, <a href="https://arxiv.org/format/2011.05061">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> </div> <p class="title is-5 mathjax"> Alleviating Cold-Start Problems in Recommendation through Pseudo-Labelling over Knowledge Graph </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Togashi%2C+R">Riku Togashi</a>, <a href="/search/?searchtype=author&query=Otani%2C+M">Mayu Otani</a>, <a href="/search/?searchtype=author&query=Satoh%2C+S">Shin'ichi Satoh</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2011.05061v1-abstract-short" style="display: inline;"> Solving cold-start problems is indispensable to provide meaningful recommendation results for new users and items. Under sparsely observed data, unobserved user-item pairs are also a vital source for distilling latent users' information needs. Most present works leverage unobserved samples for extracting negative signals. However, such an optimisation strategy can lead to biased results toward alr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2011.05061v1-abstract-full').style.display = 'inline'; document.getElementById('2011.05061v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2011.05061v1-abstract-full" style="display: none;"> Solving cold-start problems is indispensable to provide meaningful recommendation results for new users and items. Under sparsely observed data, unobserved user-item pairs are also a vital source for distilling latent users' information needs. Most present works leverage unobserved samples for extracting negative signals. However, such an optimisation strategy can lead to biased results toward already popular items by frequently handling new items as negative instances. In this study, we tackle the cold-start problems for new users/items by appropriately leveraging unobserved samples. We propose a knowledge graph (KG)-aware recommender based on graph neural networks, which augments labelled samples through pseudo-labelling. Our approach aggressively employs unobserved samples as positive instances and brings new items into the spotlight. 
To avoid exhaustive label assignments to all possible pairs of users and items, we exploit a KG for selecting probably positive items for each user. We also utilise an improved negative sampling strategy and thereby suppress the exacerbation of popularity biases. Through experiments, we demonstrate that our approach achieves improvements over state-of-the-art KG-aware recommenders in a variety of scenarios; in particular, our methodology successfully improves recommendation performance for cold-start users/items.
Submitted 10 November, 2020; originally announced November 2020.
Comments: WSDM 2021
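
The candidate-selection idea reduces to graph reachability. A deliberately tiny sketch, assuming a hand-made adjacency in place of the paper's graph neural network and real KG: unobserved items one KG hop away from a user's positives become pseudo-positive labels instead of defaulting to negatives.

    # toy KG-derived adjacency: items are linked when they share an entity
    # (e.g. the same director or genre); names are invented for illustration
    kg = {
        "film_a": {"film_b", "film_c"},
        "film_b": {"film_a"},
        "film_c": {"film_a", "film_d"},
        "film_d": {"film_c"},
    }
    observed = {"film_a"}          # the user's interacted items

    # pseudo-label unobserved items reachable in one KG hop from a positive
    pseudo_positives = set()
    for item in observed:
        pseudo_positives |= kg.get(item, set())
    pseudo_positives -= observed
    print(pseudo_positives)        # {'film_b', 'film_c'}

Restricting pseudo-labels to KG neighbours is what keeps the label augmentation from exploding over all user-item pairs.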

arXiv:2010.11570 (https://arxiv.org/abs/2010.11570) [pdf, ps, other] math.AP
Periodic Problem for Doubly Nonlinear Evolution Equation
Authors: Masahiro Koike, Mitsuharu Ôtani, Shun Uchida
Abstract: We are concerned with the time-periodic problem for some doubly nonlinear equations governed by differentials of two convex functionals over uniformly convex Banach spaces. Akagi--Stefanelli (2011) considered the Cauchy problem for the same equation via the so-called WED functional approach. The main purpose of this paper is to show the existence of a time-periodic solution under the same growth conditions on the functionals and differentials as those imposed in Akagi--Stefanelli (2011). Because of the difference in nature between the Cauchy problem and the periodic problem, we cannot apply the WED functional approach directly; instead, we adopt standard compactness methods with suitable approximation procedures.
Submitted 22 October, 2020; originally announced October 2020.
Comments: 42 pages, no figure
MSC Class: Primary 47J35; Secondary 35B10; 35K55; 35K92

arXiv:2009.00325 (https://arxiv.org/abs/2009.00325) [pdf, other] cs.CV
Uncovering Hidden Challenges in Query-Based Video Moment Retrieval
Authors: Mayu Otani, Yuta Nakashima, Esa Rahtu, Janne Heikkilä
Abstract: Query-based moment retrieval is the problem of localising a specific clip from an untrimmed video according to a query sentence. This is a challenging task that requires interpretation of both the natural language query and the video content. As in many other areas of computer vision and machine learning, progress in query-based moment retrieval is heavily driven by benchmark datasets and, therefore, their quality has a significant impact on the field. In this paper, we present a series of experiments assessing how well the benchmark results reflect the true progress in solving the moment retrieval task.
Our results indicate substantial biases in the popular datasets and unexpected behaviour of the state-of-the-art models. Moreover, we present new sanity-check experiments and approaches for visualising the results. Finally, we suggest possible directions for improving temporal sentence grounding in the future. Our code for this paper is available at https://mayu-ot.github.io/hidden-challenges-MR .
Submitted 7 October, 2020; v1 submitted 1 September, 2020; originally announced September 2020.
Comments: British Machine Vision Conference (BMVC), 2020. (v2) added references
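
The kind of sanity check the paper advocates can be reproduced in a few lines: score a "blind" baseline that ignores both the video and the query against the standard R@1 metric at a temporal IoU threshold. The data below is synthetic, constructed only to show how a biased moment distribution lets such a baseline score well:

    import random

    random.seed(0)

    def tiou(a, b):
        inter = max(0.0, min(a[1], b[1]) - max(a[0], b[0]))
        union = max(a[1], b[1]) - min(a[0], b[0])
        return inter / union if union else 0.0

    # synthetic ground-truth moments (start, end), mimicking a biased dataset
    # whose target moments cluster near the beginning of the video
    gts = []
    for _ in range(1000):
        start = random.uniform(0.0, 10.0)
        gts.append((start, start + random.uniform(5.0, 10.0)))

    # blind baseline: one fixed window for every query
    pred = (0.0, 15.0)
    hits = sum(tiou(pred, gt) >= 0.5 for gt in gts)
    print("R@1, IoU>=0.5 for a blind baseline:", hits / len(gts))

If a query-agnostic constant prediction approaches a published model's score, the benchmark number says little about language-video understanding, which is the paper's central caution.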

arXiv:2008.12520 (https://arxiv.org/abs/2008.12520) [pdf, other] cs.CV cs.CL
A Dataset and Baselines for Visual Question Answering on Art
Authors: Noa Garcia, Chentao Ye, Zihua Liu, Qingtao Hu, Mayu Otani, Chenhui Chu, Yuta Nakashima, Teruko Mitamura
Abstract: Answering questions related to art pieces (paintings) is a difficult task, as it implies the understanding of not only the visual information shown in the picture but also the contextual knowledge acquired through the study of the history of art. In this work, we introduce our first attempt towards building a new dataset, coined AQUA (Art QUestion Answering). The question-answer (QA) pairs are automatically generated using state-of-the-art question generation methods based on paintings and comments provided in an existing art understanding dataset. The QA pairs are cleansed by crowdsourcing workers with respect to their grammatical correctness, answerability, and answers' correctness. Our dataset inherently consists of visual (painting-based) and knowledge (comment-based) questions. We also present a two-branch model as a baseline, where the visual and knowledge questions are handled independently. We extensively compare our baseline model against the state-of-the-art models for question answering, and we provide a comprehensive study about the challenges and potential future directions for visual question answering on art.
Submitted 28 August, 2020; originally announced August 2020.

arXiv:2006.01947 (https://arxiv.org/abs/2006.01947) [pdf, other] physics.ins-det hep-ex
Study of muonium emission from laser-ablated silica aerogel
Authors: J. Beare, G. Beer, J. H. Brewer, T. Iijima, K. Ishida, M. Iwasaki, S. Kamal, K. Kanamori, N. Kawamura, R. Kitamura, S. Li, G. M. Luke, G. M. Marshall, T. Mibe, Y. Miyake, Y. Oishi, K. Olchanski, A. Olin, M. Otani, M. A. Rehman, N. Saito, Y. Sato, K. Shimomura, K. Suzuki, M. Tabata, et al. (1 additional author not shown)
Abstract: The emission of muonium ($\mu^+e^-$) atoms into vacuum from silica aerogel with laser ablation on its surface was studied for various ablation structures at room temperature using the subsurface muon beams at TRIUMF and the Japan Proton Accelerator Research Complex (J-PARC). Laser ablation was applied to produce holes or grooves with typical dimensions of a few hundred $\mu$m to a few mm, except for some extreme conditions. The measured emission rate tends to be higher for larger fractions of ablation opening and for shallower depths. More than a few ablation structures reach emission rates similar to the highest achieved in past measurements. The emission rate is found to be stable for at least a couple of days. Measurements of spin precession amplitudes for the produced muonium atoms and remaining muons in a magnetic field determine a muonium formation fraction of $(65.5 \pm 1.8)$%. The precession of the polarized muonium atoms is also clearly observed in vacuum. A projection of the emission rates measured at TRIUMF to the corresponding rates at J-PARC is demonstrated, reasonably taking the different beam conditions into account.
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2006.01947v2-abstract-full').style.display = 'none'; document.getElementById('2006.01947v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 2 June, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">34 pages, 13 figures, submitted to PTEP</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.08385">arXiv:2004.08385</a> <span> [<a href="https://arxiv.org/pdf/2004.08385">pdf</a>, <a href="https://arxiv.org/format/2004.08385">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Knowledge-Based Visual Question Answering in Videos </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Garcia%2C+N">Noa Garcia</a>, <a href="/search/?searchtype=author&query=Otani%2C+M">Mayu Otani</a>, <a href="/search/?searchtype=author&query=Chu%2C+C">Chenhui Chu</a>, <a href="/search/?searchtype=author&query=Nakashima%2C+Y">Yuta Nakashima</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.08385v1-abstract-short" style="display: inline;"> We propose a novel video understanding task by fusing knowledge-based and video question answering. First, we introduce KnowIT VQA, a video dataset with 24,282 human-generated question-answer pairs about a popular sitcom. The dataset combines visual, textual and temporal coherence reasoning together with knowledge-based questions, which need of the experience obtained from the viewing of the serie… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.08385v1-abstract-full').style.display = 'inline'; document.getElementById('2004.08385v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.08385v1-abstract-full" style="display: none;"> We propose a novel video understanding task by fusing knowledge-based and video question answering. First, we introduce KnowIT VQA, a video dataset with 24,282 human-generated question-answer pairs about a popular sitcom. The dataset combines visual, textual and temporal coherence reasoning together with knowledge-based questions, which need of the experience obtained from the viewing of the series to be answered. Second, we propose a video understanding model by combining the visual and textual video content with specific knowledge about the show. 
Our main findings are: (i) the incorporation of knowledge produces outstanding improvements for VQA in video, and (ii) the performance on KnowIT VQA still lags well behind human accuracy, indicating its usefulness for studying current video modelling limitations.
Submitted 16 April, 2020; originally announced April 2020.
Comments: arXiv admin note: substantial text overlap with arXiv:1910.10706

arXiv:1910.10706 (https://arxiv.org/abs/1910.10706) [pdf, other] cs.CV cs.CL
KnowIT VQA: Answering Knowledge-Based Questions about Videos
Authors: Noa Garcia, Mayu Otani, Chenhui Chu, Yuta Nakashima
Abstract: We propose a novel video understanding task by fusing knowledge-based and video question answering. First, we introduce KnowIT VQA, a video dataset with 24,282 human-generated question-answer pairs about a popular sitcom. The dataset combines visual, textual and temporal coherence reasoning together with knowledge-based questions, which require experience obtained from watching the series to be answered. Second, we propose a video understanding model by combining the visual and textual video content with specific knowledge about the show.
Our main findings are: (i) the incorporation of knowledge produces outstanding improvements for VQA in video, and (ii) the performance on KnowIT VQA still lags well behind human accuracy, indicating its usefulness for studying current video modelling limitations.
Submitted 23 December, 2019; v1 submitted 22 October, 2019; originally announced October 2019.

arXiv:1909.07067 (https://arxiv.org/abs/1909.07067) [pdf, ps, other] math.AP math.FA
On spatial Gevrey regularity for some strongly dissipative second order evolution equations
Authors: Alain Haraux, Mitsuharu Otani
Abstract: Let $A$ be a positive self-adjoint linear operator acting on a real Hilbert space $H$ and let $\alpha$, $c$ be positive constants. We show that all solutions of the evolution equation $u'' + Au + cA^{\alpha} u' = 0$ with $u(0) \in D(A^{1/2})$, $u'(0) \in H$ belong for all $t > 0$ to the Gevrey space $G(A, \sigma)$ with $\sigma = \min\{ \frac{1}{\alpha}, \frac{1}{1-\alpha} \}$. This result is optimal in the sense that $\sigma$ cannot be reduced in general. For the damped wave equation (SDW)$_\alpha$, corresponding to the case where $A = -\Delta$ with domain $D(A) = \{ w \in H^1_0(\Omega),\ \Delta w \in L^2(\Omega) \}$ with $\Omega$ any open subset of $\mathbb{R}^N$ and $(u(0), u'(0)) \in H^1_0(\Omega) \times L^2(\Omega)$, the unique solution $u$ of (SDW)$_\alpha$ satisfies $u(t) \in G^s(\Omega)$ for all $t > 0$ with $s = \min\{ \frac{1}{2\alpha}, \frac{1}{2(1-\alpha)} \}$, and this result is also optimal.
Mathematics Subject Classification 2010 (MSC2010): 35L10, 35B65, 47A60.
Submitted 16 September, 2019; originally announced September 2019.
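
A worked instance, using only the exponent formulas stated in the abstract:
\[
\alpha = \tfrac{1}{2}: \quad
\sigma = \min\Bigl\{ \tfrac{1}{\alpha},\ \tfrac{1}{1-\alpha} \Bigr\} = \min\{2, 2\} = 2,
\qquad
s = \min\Bigl\{ \tfrac{1}{2\alpha},\ \tfrac{1}{2(1-\alpha)} \Bigr\} = \min\{1, 1\} = 1,
\]
so for $\alpha = 1/2$ the solution of (SDW)$_{1/2}$ lies in $G^1(\Omega)$, i.e. it is spatially analytic for $t > 0$; as $\alpha$ moves toward $0$ or $1$, one of the two terms in each minimum grows and the Gevrey index worsens.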
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 July, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2019. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1903.11328">arXiv:1903.11328</a> <span> [<a href="https://arxiv.org/pdf/1903.11328">pdf</a>, <a href="https://arxiv.org/format/1903.11328">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Rethinking the Evaluation of Video Summaries </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Otani%2C+M">Mayu Otani</a>, <a href="/search/?searchtype=author&query=Nakashima%2C+Y">Yuta Nakashima</a>, <a href="/search/?searchtype=author&query=Rahtu%2C+E">Esa Rahtu</a>, <a href="/search/?searchtype=author&query=Heikkil%C3%A4%2C+J">Janne Heikkilä</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> Video summarization is a technique to create a short skim of the original video while preserving the main stories/content. There exists substantial interest in automating this process due to the rapid growth of the available material. Recent progress has been facilitated by public benchmark datasets, which enable easy and fair comparison of methods. Currently, the established evaluation protocol is to compare the generated summary with respect to a set of reference summaries provided by the dataset. In this paper, we provide an in-depth assessment of this pipeline using two popular benchmark datasets. Surprisingly, we observe that randomly generated summaries achieve comparable or better performance than the state-of-the-art. In some cases, the random summaries outperform even the human-generated summaries in leave-one-out experiments. Moreover, it turns out that the video segmentation, which is often considered a fixed pre-processing step, has the most significant impact on the performance measure. Based on our observations, we propose alternative approaches for assessing the importance scores as well as an intuitive visualization of the correlation between the estimated scoring and human annotations. </span> </p>
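<p class="is-size-7">A minimal sketch of the protocol under scrutiny (our reading of the standard pipeline, not the authors' code): summaries are binary per-frame selections scored by F1 against reference summaries, and a random selection under the usual ~15% length budget gives the baseline the abstract describes.</p>
<pre><code class="language-python">
# Minimal sketch of the benchmark evaluation protocol (our reading of the
# standard pipeline; the 15% budget and best-F1 aggregation are common
# conventions, assumed here rather than taken from this paper).
import random

def f1(pred: list[int], ref: list[int]) -> float:
    """F1 between two binary frame-selection vectors of equal length."""
    overlap = sum(p * r for p, r in zip(pred, ref))
    if overlap == 0:
        return 0.0
    precision = overlap / sum(pred)
    recall = overlap / sum(ref)
    return 2 * precision * recall / (precision + recall)

def random_summary(n_frames: int, budget: float = 0.15) -> list[int]:
    """Select a random subset of frames within the summary length budget."""
    chosen = set(random.sample(range(n_frames), int(n_frames * budget)))
    return [int(i in chosen) for i in range(n_frames)]

def score(pred: list[int], references: list[list[int]]) -> float:
    """Per-video score: best F1 over that video's reference summaries."""
    return max(f1(pred, ref) for ref in references)
</code></pre>
<p class="is-size-7">Averaging score(random_summary(n), refs) over many draws reproduces the kind of random baseline the abstract compares against.</p>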
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 March, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">CVPR'19 poster</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1901.03047">arXiv:1901.03047</a> <span> [<a href="https://arxiv.org/pdf/1901.03047">pdf</a>, <a href="https://arxiv.org/format/1901.03047">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Instrumentation and Detectors">physics.ins-det</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="High Energy Physics - Experiment">hep-ex</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Accelerator Physics">physics.acc-ph</span> </div> </div> <p class="title is-5 mathjax"> A New Approach for Measuring the Muon Anomalous Magnetic Moment and Electric Dipole Moment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Abe%2C+M">M. Abe</a>, <a href="/search/?searchtype=author&query=Bae%2C+S">S. Bae</a>, <a href="/search/?searchtype=author&query=Beer%2C+G">G. Beer</a>, <a href="/search/?searchtype=author&query=Bunce%2C+G">G. Bunce</a>, <a href="/search/?searchtype=author&query=Choi%2C+H">H. Choi</a>, <a href="/search/?searchtype=author&query=Choi%2C+S">S. Choi</a>, <a href="/search/?searchtype=author&query=Chung%2C+M">M. Chung</a>, <a href="/search/?searchtype=author&query=da+Silva%2C+W">W. da Silva</a>, <a href="/search/?searchtype=author&query=Eidelman%2C+S">S. Eidelman</a>, <a href="/search/?searchtype=author&query=Finger%2C+M">M. Finger</a>, <a href="/search/?searchtype=author&query=Fukao%2C+Y">Y. Fukao</a>, <a href="/search/?searchtype=author&query=Fukuyama%2C+T">T. Fukuyama</a>, <a href="/search/?searchtype=author&query=Haciomeroglu%2C+S">S. Haciomeroglu</a>, <a href="/search/?searchtype=author&query=Hasegawa%2C+K">K. Hasegawa</a>, <a href="/search/?searchtype=author&query=Hayasaka%2C+K">K. Hayasaka</a>, <a href="/search/?searchtype=author&query=Hayashizaki%2C+N">N. Hayashizaki</a>, <a href="/search/?searchtype=author&query=Hisamatsu%2C+H">H. Hisamatsu</a>, <a href="/search/?searchtype=author&query=Iijima%2C+T">T. Iijima</a>, <a href="/search/?searchtype=author&query=Iinuma%2C+H">H. Iinuma</a>, <a href="/search/?searchtype=author&query=Inami%2C+K">K. Inami</a>, <a href="/search/?searchtype=author&query=Ikeda%2C+H">H. Ikeda</a>, <a href="/search/?searchtype=author&query=Ikeno%2C+M">M. Ikeno</a>, <a href="/search/?searchtype=author&query=Ishida%2C+K">K. Ishida</a>, <a href="/search/?searchtype=author&query=Itahashi%2C+T">T. Itahashi</a>, <a href="/search/?searchtype=author&query=Iwasaki%2C+M">M. Iwasaki</a>, et al. (71 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> This paper introduces a new approach to measuring the muon magnetic moment anomaly $a_\mu = (g-2)/2$ and the muon electric dipole moment (EDM) $d_\mu$ at the J-PARC muon facility. The goal of our experiment is to measure $a_\mu$ and $d_\mu$ using an independent method with a factor of 10 lower muon momentum and a factor of 20 smaller storage-ring solenoid diameter than previous and ongoing muon $g-2$ experiments, together with unprecedented quality of the storage magnetic field. Additional significant differences from the present experimental method include a factor of 1,000 smaller transverse emittance of the muon beam (a reaccelerated thermal muon beam), its efficient vertical injection into the solenoid, and tracking each positron from muon decay to obtain its momentum vector. The precision goal for $a_\mu$ is a statistical uncertainty of 450 parts per billion (ppb), similar to the present experimental uncertainty, and a systematic uncertainty of less than 70 ppb. The goal for the EDM is a sensitivity of $1.5\times 10^{-21}~e\cdot\mathrm{cm}$. </span> </p>
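<p class="is-size-7">For context (our gloss using the standard spin-precession relation and the 3 T storage field of the J-PARC design; conventions vary by reference, and none of this is quoted in the abstract): with no electric focusing field, the anomaly appears as a precession of the spin relative to the momentum,</p> <p class="mathjax"> \[ \vec{\omega}_a = -\,a_\mu \frac{e}{m_\mu}\,\vec{B}, \qquad f_a = \frac{a_\mu\, e B}{2\pi\, m_\mu} \approx 0.47\ \mathrm{MHz} \quad (B = 3\ \mathrm{T},\ a_\mu \approx 1.17\times 10^{-3}), \] </p> <p class="is-size-7">so $a_\mu$ follows from a frequency and a field measurement, while a nonzero EDM would tilt the precession plane, which is what the full positron tracking is designed to resolve.</p>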
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 March, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 January, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2019. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">24 pages, 14 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1811.03844">arXiv:1811.03844</a> <span> [<a href="https://arxiv.org/pdf/1811.03844">pdf</a>, <a href="https://arxiv.org/ps/1811.03844">ps</a>, <a href="https://arxiv.org/format/1811.03844">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Analysis of PDEs">math.AP</span> </div> </div> <p class="title is-5 mathjax"> Bounds for global solutions of a reaction diffusion system with the Robin boundary conditions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Kita%2C+K">Kosuke Kita</a>, <a href="/search/?searchtype=author&query=%C3%94tani%2C+M">Mitsuharu Ôtani</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> In this paper, we consider the large-time behavior of solutions of a reaction diffusion system arising from a nuclear reactor model with Robin boundary conditions, consisting of two real-valued unknown functions. In particular, we show that global solutions of this system are uniformly bounded in a suitable norm with respect to time. Since this system has no variational structure, we cannot apply the standard methods relying on the Lyapunov functional in order to obtain a priori estimates of global solutions. To cope with this difficulty, we make use of the weighted $L^1$ norm characterized by the first eigenfunction of the Laplacian with the Robin boundary condition. </span> </p>
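<p class="is-size-7">In our notation (an illustration of the device named in the abstract, not a statement from the paper), the weight is the positive first eigenfunction of the Robin Laplacian:</p> <p class="mathjax"> \[ -\Delta\varphi_1 = \lambda_1 \varphi_1 \ \text{in } \Omega, \qquad \partial_\nu \varphi_1 + \beta\,\varphi_1 = 0 \ \text{on } \partial\Omega, \qquad \|u(t)\|_{L^1_{\varphi_1}} := \int_\Omega |u(x,t)|\,\varphi_1(x)\,dx. \] </p> <p class="is-size-7">Testing the system against $\varphi_1$ turns the diffusion terms, after integration by parts, into multiples of this same weighted norm, which is the kind of identity that can substitute for a missing Lyapunov functional.</p>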
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 November, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 35K57; 35B40; 35B45 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1806.04284">arXiv:1806.04284</a> <span> [<a href="https://arxiv.org/pdf/1806.04284">pdf</a>, <a href="https://arxiv.org/format/1806.04284">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Multimedia">cs.MM</span> </div> </div> <p class="title is-5 mathjax"> iParaphrasing: Extracting Visually Grounded Paraphrases via an Image </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Chu%2C+C">Chenhui Chu</a>, <a href="/search/?searchtype=author&query=Otani%2C+M">Mayu Otani</a>, <a href="/search/?searchtype=author&query=Nakashima%2C+Y">Yuta Nakashima</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> A paraphrase is a restatement of the meaning of a text in other words. Paraphrases have been studied to enhance the performance of many natural language processing tasks. In this paper, we propose a novel task, iParaphrasing, to extract visually grounded paraphrases (VGPs), which are different phrasal expressions describing the same visual concept in an image. These extracted VGPs have the potential to improve language and image multimodal tasks such as visual question answering and image captioning. How to model the similarity between VGPs is the key to iParaphrasing. We apply various existing methods as well as propose a novel neural network-based method with image attention, and report the results of the first attempt toward iParaphrasing. </span> </p>
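<p class="is-size-7">A toy sketch of the kind of similarity model the abstract names (ours, not the authors' released architecture; the embedding sizes and the dot-product attention are illustrative assumptions):</p>
<pre><code class="language-python">
# Toy sketch of phrase-pair similarity with image attention, in the spirit
# of the method the abstract names (our illustration, not the authors'
# model; embedding sizes and dot-product attention are assumptions).
import numpy as np

def softmax(x: np.ndarray) -> np.ndarray:
    e = np.exp(x - x.max())
    return e / e.sum()

def attend(phrase: np.ndarray, regions: np.ndarray) -> np.ndarray:
    """Weight image-region features by their relevance to the phrase."""
    weights = softmax(regions @ phrase)      # one weight per region
    return weights @ regions                 # attended visual context

def vgp_similarity(p1: np.ndarray, p2: np.ndarray,
                   regions: np.ndarray) -> float:
    """Cosine similarity of image-grounded phrase representations."""
    g1 = np.concatenate([p1, attend(p1, regions)])
    g2 = np.concatenate([p2, attend(p2, regions)])
    return float(g1 @ g2 / (np.linalg.norm(g1) * np.linalg.norm(g2)))

# Example with random stand-ins: 300-d phrase embeddings, 36 region features.
rng = np.random.default_rng(0)
p1, p2 = rng.normal(size=300), rng.normal(size=300)
regions = rng.normal(size=(36, 300))
print(vgp_similarity(p1, p2, regions))
</code></pre>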
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 June, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">COLING 2018</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1805.04312">arXiv:1805.04312</a> <span> [<a href="https://arxiv.org/pdf/1805.04312">pdf</a>, <a href="https://arxiv.org/format/1805.04312">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Analysis of PDEs">math.AP</span> </div> </div> <p class="title is-5 mathjax"> Initial-boundary value problems for complex Ginzburg-Landau equations governed by $p$-Laplacian in general domains </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Kuroda%2C+T">Takanori Kuroda</a>, <a href="/search/?searchtype=author&query=%C3%94tani%2C+M">Mitsuharu Ôtani</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> In this paper, complex Ginzburg-Landau (CGL) equations governed by the $p$-Laplacian are studied. We discuss the global existence of solutions for the initial-boundary value problem of the equation in general domains. The global solvability of the initial-boundary value problem for the case $p = 2$ has already been examined by several authors, provided that the parameters appearing in the CGL equations satisfy a suitable condition. Our approach to CGL equations is based on the theory of parabolic equations with non-monotone perturbations. By using this method together with an approximation procedure and a diagonal argument, the global solvability is shown without assuming any growth conditions on the nonlinear terms. </span> </p>
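<p class="is-size-7">One common way to write this class of equations (our normalization, for orientation only; $p = 2$ recovers the classical CGL equation):</p> <p class="mathjax"> \[ \partial_t u - (\lambda + i\alpha)\,\Delta_p u + (\kappa + i\beta)\,|u|^{q-2}u - \gamma u = f, \qquad \Delta_p u := \nabla\cdot\bigl(|\nabla u|^{p-2}\nabla u\bigr), \] </p> <p class="is-size-7">with $\lambda, \kappa > 0$ and real $\alpha, \beta, \gamma$; the $p$-Laplacian makes the principal part nonlinear, which is why growth conditions on the lower-order terms become the delicate point.</p>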
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 May, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">33 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 35Q56; 47J35; 35K61 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1805.04275">arXiv:1805.04275</a> <span> [<a href="https://arxiv.org/pdf/1805.04275">pdf</a>, <a href="https://arxiv.org/format/1805.04275">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Analysis of PDEs">math.AP</span> </div> </div> <p class="title is-5 mathjax"> Local well-posedness of the complex Ginzburg-Landau equation in bounded domains </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Kuroda%2C+T">Takanori Kuroda</a>, <a href="/search/?searchtype=author&query=%C3%94tani%2C+M">Mitsuharu Ôtani</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> In this paper, we are concerned with the local well-posedness of the initial-boundary value problem for complex Ginzburg-Landau (CGL) equations in bounded domains. There are many studies for the case where the real part of the nonlinear term acts as dissipation. This dissipative case has been intensively studied, and it is known that (CGL) admits a global solution when the parameters appearing in (CGL) belong to the so-called CGL region. This paper deals with the non-dissipative case. We regard (CGL) as a parabolic equation perturbed by monotone and non-monotone perturbations and follow the basic strategy developed in Ôtani (1982) to show the local well-posedness of (CGL) and the existence of small global solutions, provided that the nonlinearity is Sobolev subcritical. </span> </p>
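<p class="is-size-7">The shape of that abstract framework, in our notation (the monotone part enters as the subdifferential of a convex functional, the non-monotone part as a perturbation $B$):</p> <p class="mathjax"> \[ \frac{du}{dt}(t) + \partial\varphi(u(t)) + B(u(t)) \ni f(t), \qquad 0 < t < T, \] </p> <p class="is-size-7">where $\varphi$ is a proper lower semicontinuous convex functional on the underlying Hilbert space; roughly, the smoothing effect of $\partial\varphi$ yields local solutions, and smallness of the data keeps the non-monotone term under control globally.</p>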
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 May, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">22 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 35Q56; 47J35; 35K61 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1805.04244">arXiv:1805.04244</a> <span> [<a href="https://arxiv.org/pdf/1805.04244">pdf</a>, <a href="https://arxiv.org/ps/1805.04244">ps</a>, <a href="https://arxiv.org/format/1805.04244">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Analysis of PDEs">math.AP</span> </div> </div> <p class="title is-5 mathjax"> On some parabolic systems arising from a nuclear reactor model with nonlinear boundary conditions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/?searchtype=author&query=Kita%2C+K">Kosuke Kita</a>, <a href="/search/?searchtype=author&query=%C3%94tani%2C+M">Mitsuharu Ôtani</a>, <a href="/search/?searchtype=author&query=Sakamoto%2C+H">Hiroki Sakamoto</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-full has-text-grey-dark mathjax"> In this paper, we are concerned with a reaction diffusion system arising from a nuclear reactor model in bounded domains with nonlinear boundary conditions. We show the existence of a stationary solution and its ordered uniqueness. It is also shown that every positive stationary solution possesses a threshold property that determines blow-up or global existence of solutions of the nonstationary problem. </span> </p>
<p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 May, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2018. </p>
<p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">MSC Class:</span> 35K57; 35K61; 35Q79 </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="/search/?searchtype=author&query=Otani%2C+M&start=50" class="pagination-next">Next</a> <ul class="pagination-list"> <li><a href="/search/?searchtype=author&query=Otani%2C+M&start=0" class="pagination-link is-current" aria-label="Goto page 1">1</a></li> <li><a href="/search/?searchtype=author&query=Otani%2C+M&start=50" class="pagination-link" aria-label="Page 2">2</a></li> </ul> </nav> </div> </main> <footer role="navigation" aria-label="Secondary"> <p><a href="https://info.arxiv.org/about">About</a> | <a href="https://info.arxiv.org/help">Help</a> | <a href="https://info.arxiv.org/help/contact.html">Contact</a> | <a href="https://info.arxiv.org/help/subscribe">Subscribe</a> | <a href="https://info.arxiv.org/help/license/index.html">Copyright</a> | <a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a> | <a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a> | <a href="https://status.arxiv.org">arXiv Operational Status</a></p> </footer>
</body> </html>