<!-- NOTE(review): stray scraper artifact preceded the DOCTYPE and would force
     quirks-mode rendering; preserved here inside a comment (comments are the
     only content allowed before the DOCTYPE in standards mode):
     CINXE.COM
     Sound -->
<!DOCTYPE html> <html lang="en"> <head> <title>Sound </title> <meta name="viewport" content="width=device-width, initial-scale=1"> <link rel="apple-touch-icon" sizes="180x180" href="/static/browse/0.3.4/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="/static/browse/0.3.4/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="/static/browse/0.3.4/images/icons/favicon-16x16.png"> <link rel="manifest" href="/static/browse/0.3.4/images/icons/site.webmanifest"> <link rel="mask-icon" href="/static/browse/0.3.4/images/icons/safari-pinned-tab.svg" color="#5bbad5"> <meta name="msapplication-TileColor" content="#da532c"> <meta name="theme-color" content="#ffffff"> <link rel="stylesheet" type="text/css" media="screen" href="/static/browse/0.3.4/css/arXiv.css?v=20241206" /> <link rel="stylesheet" type="text/css" media="print" href="/static/browse/0.3.4/css/arXiv-print.css?v=20200611" /> <link rel="stylesheet" type="text/css" media="screen" href="/static/browse/0.3.4/css/browse_search.css" /> <script language="javascript" src="/static/browse/0.3.4/js/accordion.js" /></script> <script src="/static/browse/0.3.4/js/mathjaxToggle.min.js" type="text/javascript"></script> <script type="text/javascript" language="javascript">mathjaxToggle();</script> </head> <body class="with-cu-identity"> <div class="flex-wrap-footer"> <header> <a href="#content" class="is-sr-only">Skip to main content</a> <!-- start desktop header --> <div class="columns is-vcentered is-hidden-mobile" id="cu-identity"> <div class="column" id="cu-logo"> <a href="https://www.cornell.edu/"><img src="/static/browse/0.3.4/images/icons/cu/cornell-reduced-white-SMALL.svg" alt="Cornell University" /></a> </div> <!-- /from April 7 at 1:00 AM to May 29 at 21:40 --><!-- /from May 2 at 1:00 AM to May 5 at 9:45 AM --><div class="column" id="support-ack"> <span id="support-ack-url">We gratefully acknowledge support from the Simons Foundation, <a 
href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors.</span> <a href="https://info.arxiv.org/about/donate.html" class="btn-header-donate">Donate</a> </div> </div> <div id="header" class="is-hidden-mobile"> <a aria-hidden="true" tabindex="-1" href="/IgnoreMe"></a> <div class="header-breadcrumbs"> <a href="/"><img src="/static/browse/0.3.4/images/arxiv-logo-one-color-white.svg" alt="arxiv logo" style="height:40px;"/></a> <span>&gt;</span> <a href="/list/cs.SD/recent">cs.SD</a> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div><!-- /end desktop header --> <div class="mobile-header"> <div 
class="columns is-mobile"> <div class="column logo-arxiv"><a href="https://arxiv.org/"><img src="/static/browse/0.3.4/images/arxiv-logomark-small-white.svg" alt="arXiv logo" style="height:60px;" /></a></div> <div class="column logo-cornell"><a href="https://www.cornell.edu/"> <picture> <source media="(min-width: 501px)" srcset="/static/browse/0.3.4/images/icons/cu/cornell-reduced-white-SMALL.svg 400w" sizes="400w" /> <source srcset="/static/browse/0.3.4/images/icons/cu/cornell_seal_simple_black.svg 2x" /> <img src="/static/browse/0.3.4/images/icons/cu/cornell-reduced-white-SMALL.svg" alt="Cornell University Logo" /> </picture> </a></div> <div class="column nav" id="toggle-container" role="menubar"> <button class="toggle-control"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-white"><title>open search</title><path d="M505 442.7L405.3 343c-4.5-4.5-10.6-7-17-7H372c27.6-35.3 44-79.7 44-128C416 93.1 322.9 0 208 0S0 93.1 0 208s93.1 208 208 208c48.3 0 92.7-16.4 128-44v16.3c0 6.4 2.5 12.5 7 17l99.7 99.7c9.4 9.4 24.6 9.4 33.9 0l28.3-28.3c9.4-9.4 9.4-24.6.1-34zM208 336c-70.7 0-128-57.2-128-128 0-70.7 57.2-128 128-128 70.7 0 128 57.2 128 128 0 70.7-57.2 128-128 128z"/></svg></button> <div class="mobile-toggle-block toggle-target"> <form class="mobile-search-form" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <input class="input" type="text" name="query" placeholder="Search..." 
aria-label="Search term or terms" /> <input type="hidden" name="source" value="header"> <input type="hidden" name="searchtype" value="all"> <button class="button">GO</button> </div> </form> </div> <button class="toggle-control"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-white" role="menu"><title>open navigation menu</title><path d="M16 132h416c8.837 0 16-7.163 16-16V76c0-8.837-7.163-16-16-16H16C7.163 60 0 67.163 0 76v40c0 8.837 7.163 16 16 16zm0 160h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm0 160h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16z"/ ></svg></button> <div class="mobile-toggle-block toggle-target"> <nav class="mobile-menu" aria-labelledby="mobilemenulabel"> <h2 id="mobilemenulabel">quick links</h2> <ul> <li><a href="https://arxiv.org/login">Login</a></li> <li><a href="https://info.arxiv.org/help">Help Pages</a></li> <li><a href="https://info.arxiv.org/about">About</a></li> </ul> </nav> </div> </div> </div> </div><!-- /end mobile-header --> </header> <main> <div id="content"> <div id='content-inner'> <div id='dlpage'> <h1>Sound</h1> <h2>Authors and titles for recent submissions</h2> <ul> <li> <a href="/list/cs.SD/recent?skip=0&amp;show=50"> Thu, 31 Jul 2025 </a> </li><li> <a href="/list/cs.SD/recent?skip=6&amp;show=50"> Wed, 30 Jul 2025 </a> </li><li> <a href="/list/cs.SD/recent?skip=15&amp;show=50"> Tue, 29 Jul 2025 </a> </li><li> <a href="/list/cs.SD/recent?skip=34&amp;show=50"> Mon, 28 Jul 2025 </a> </li><li> <a href="/list/cs.SD/recent?skip=47&amp;show=50"> Fri, 25 Jul 2025 </a> </li></ul> <p>See today's <a id="new-cs.SD" aria-labelledby="new-cs.SD" href="/list/cs.SD/new">new</a> changes</p> <div class='paging'>Total of 61 entries : <span>1-50</span> <a href=/list/cs.SD/recent?skip=50&amp;show=50>51-61</a> </div> <div class='morefewer'>Showing up to 50 entries per page: <a 
href=/list/cs.SD/recent?skip=0&amp;show=25 rel="nofollow"> fewer</a> | <span style="color: #454545">more</span> | <a href=/list/cs.SD/recent?skip=0&amp;show=2000 rel="nofollow"> all</a> </div> <dl id='articles'> <h3>Thu, 31 Jul 2025 (showing 6 of 6 entries )</h3> <dt> <a name='item1'>[1]</a> <a href ="/abs/2507.22746" title="Abstract" id="2507.22746"> arXiv:2507.22746 </a> [<a href="/pdf/2507.22746" title="Download PDF" id="pdf-2507.22746" aria-labelledby="pdf-2507.22746">pdf</a>, <a href="https://arxiv.org/html/2507.22746v1" title="View HTML" id="html-2507.22746" aria-labelledby="html-2507.22746" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.22746" title="Other formats" id="oth-2507.22746" aria-labelledby="oth-2507.22746">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Next Tokens Denoising for Speech Synthesis </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Liu,+Y">Yanqing Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Xue,+R">Ruiqing Xue</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Zhang,+C">Chong Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Liu,+Y">Yufei Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wang,+G">Gang Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Li,+B">Bohan Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Qian,+Y">Yao Qian</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=He,+L">Lei He</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Liu,+S">Shujie Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Zhao,+S">Sheng Zhao</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Computation and Language (cs.CL); 
Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item2'>[2]</a> <a href ="/abs/2507.22612" title="Abstract" id="2507.22612"> arXiv:2507.22612 </a> [<a href="/pdf/2507.22612" title="Download PDF" id="pdf-2507.22612" aria-labelledby="pdf-2507.22612">pdf</a>, <a href="https://arxiv.org/html/2507.22612v1" title="View HTML" id="html-2507.22612" aria-labelledby="html-2507.22612" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.22612" title="Other formats" id="oth-2507.22612" aria-labelledby="oth-2507.22612">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Adaptive Duration Model for Text Speech Alignment </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Cao,+J">Junjie Cao</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 4 pages, 3 figures, 2 tables </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Artificial Intelligence (cs.AI); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item3'>[3]</a> <a href ="/abs/2507.22322" title="Abstract" id="2507.22322"> arXiv:2507.22322 </a> [<a href="/pdf/2507.22322" title="Download PDF" id="pdf-2507.22322" aria-labelledby="pdf-2507.22322">pdf</a>, <a href="https://arxiv.org/html/2507.22322v1" title="View HTML" id="html-2507.22322" aria-labelledby="html-2507.22322" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.22322" title="Other formats" id="oth-2507.22322" aria-labelledby="oth-2507.22322">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Two-Step Learning Framework for Enhancing Sound Event Localization and Detection </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yu,+H">Hogeon Yu</a></div> <div 
class='list-comments mathjax'><span class='descriptor'>Comments:</span> 5pages, 2figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item4'>[4]</a> <a href ="/abs/2507.22208" title="Abstract" id="2507.22208"> arXiv:2507.22208 </a> [<a href="/pdf/2507.22208" title="Download PDF" id="pdf-2507.22208" aria-labelledby="pdf-2507.22208">pdf</a>, <a href="https://arxiv.org/html/2507.22208v1" title="View HTML" id="html-2507.22208" aria-labelledby="html-2507.22208" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.22208" title="Other formats" id="oth-2507.22208" aria-labelledby="oth-2507.22208">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Quantum-Inspired Audio Unlearning: Towards Privacy-Preserving Voice Biometrics </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Pathak,+S">Shreyansh Pathak</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Shreshtha,+S">Sonu Shreshtha</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Singh,+R">Richa Singh</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Vatsa,+M">Mayank Vatsa</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 9 pages, 2 figures, 5 tables, Accepted at IJCB 2025 (Osaka, Japan) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Artificial Intelligence (cs.AI); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item5'>[5]</a> <a href ="/abs/2507.22628" title="Abstract" id="2507.22628"> arXiv:2507.22628 </a> (cross-list from eess.AS) [<a href="/pdf/2507.22628" title="Download PDF" id="pdf-2507.22628" aria-labelledby="pdf-2507.22628">pdf</a>, <a 
href="https://arxiv.org/html/2507.22628v1" title="View HTML" id="html-2507.22628" aria-labelledby="html-2507.22628" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.22628" title="Other formats" id="oth-2507.22628" aria-labelledby="oth-2507.22628">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A k-space approach to modeling multi-channel parametric array loudspeaker systems </div> <div class='list-authors'><a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Zhuang,+T">Tao Zhuang</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=He,+L">Longbiao He</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Niu,+F">Feng Niu</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Zhong,+J">Jia-Xin Zhong</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Lu,+J">Jing Lu</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Audio and Speech Processing (eess.AS)</span>; Sound (cs.SD) </div> </div> </dd> <dt> <a name='item6'>[6]</a> <a href ="/abs/2507.22370" title="Abstract" id="2507.22370"> arXiv:2507.22370 </a> (cross-list from cs.LG) [<a href="/pdf/2507.22370" title="Download PDF" id="pdf-2507.22370" aria-labelledby="pdf-2507.22370">pdf</a>, <a href="https://arxiv.org/html/2507.22370v1" title="View HTML" id="html-2507.22370" aria-labelledby="html-2507.22370" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.22370" title="Other formats" id="oth-2507.22370" aria-labelledby="oth-2507.22370">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Prediction of acoustic field in 1-D uniform duct with varying mean flow and temperature using neural networks </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Veerababu,+D">D. 
Veerababu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Ghosh,+P+K">Prasanta K. Ghosh</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 22 pages </div> <div class='list-journal-ref'><span class='descriptor'>Journal-ref:</span> Journal of Theoretical and Computational Acoustics, 33, 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Sound (cs.SD); Audio and Speech Processing (eess.AS) </div> </div> </dd> </dl> <dl id='articles'> <h3>Wed, 30 Jul 2025 (showing 9 of 9 entries )</h3> <dt> <a name='item7'>[7]</a> <a href ="/abs/2507.21642" title="Abstract" id="2507.21642"> arXiv:2507.21642 </a> [<a href="/pdf/2507.21642" title="Download PDF" id="pdf-2507.21642" aria-labelledby="pdf-2507.21642">pdf</a>, <a href="https://arxiv.org/html/2507.21642v1" title="View HTML" id="html-2507.21642" aria-labelledby="html-2507.21642" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.21642" title="Other formats" id="oth-2507.21642" aria-labelledby="oth-2507.21642">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Whilter: A Whisper-based Data Filter for &#34;In-the-Wild&#34; Speech Corpora Using Utterance-level Multi-Task Classification </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Ravenscroft,+W">William Ravenscroft</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Close,+G">George Close</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Bower-Morris,+K">Kit Bower-Morris</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Stacey,+J">Jamie Stacey</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Sityaev,+D">Dmitry Sityaev</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Hong,+K+Y">Kris Y. 
Hong</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted for Interspeech 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Machine Learning (cs.LG); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item8'>[8]</a> <a href ="/abs/2507.21463" title="Abstract" id="2507.21463"> arXiv:2507.21463 </a> [<a href="/pdf/2507.21463" title="Download PDF" id="pdf-2507.21463" aria-labelledby="pdf-2507.21463">pdf</a>, <a href="https://arxiv.org/html/2507.21463v1" title="View HTML" id="html-2507.21463" aria-labelledby="html-2507.21463" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.21463" title="Other formats" id="oth-2507.21463" aria-labelledby="oth-2507.21463">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> SpeechFake: A Large-Scale Multilingual Speech Deepfake Dataset Incorporating Cutting-Edge Generation Methods </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Huang,+W">Wen Huang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Gu,+Y">Yanmei Gu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wang,+Z">Zhiming Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Zhu,+H">Huijia Zhu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Qian,+Y">Yanmin Qian</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Published in ACL 2025. 
Dataset available at: <a href="https://github.com/YMLLG/SpeechFake" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item9'>[9]</a> <a href ="/abs/2507.21426" title="Abstract" id="2507.21426"> arXiv:2507.21426 </a> [<a href="/pdf/2507.21426" title="Download PDF" id="pdf-2507.21426" aria-labelledby="pdf-2507.21426">pdf</a>, <a href="https://arxiv.org/html/2507.21426v1" title="View HTML" id="html-2507.21426" aria-labelledby="html-2507.21426" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.21426" title="Other formats" id="oth-2507.21426" aria-labelledby="oth-2507.21426">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Relationship between objective and subjective perceptual measures of speech in individuals with head and neck cancer </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Halpern,+B+M">Bence Mark Halpern</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Tienkamp,+T">Thomas Tienkamp</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Rebernik,+T">Teja Rebernik</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=van+Son,+R+J">Rob J.J.H. van Son</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wieling,+M">Martijn Wieling</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Abur,+D">Defne Abur</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Toda,+T">Tomoki Toda</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 5 pages, 1 figure, 1 table. 
Accepted at Interspeech 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item10'>[10]</a> <a href ="/abs/2507.21202" title="Abstract" id="2507.21202"> arXiv:2507.21202 </a> [<a href="/pdf/2507.21202" title="Download PDF" id="pdf-2507.21202" aria-labelledby="pdf-2507.21202">pdf</a>, <a href="/format/2507.21202" title="Other formats" id="oth-2507.21202" aria-labelledby="oth-2507.21202">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Combolutional Neural Networks </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Churchwell,+C">Cameron Churchwell</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kim,+M">Minje Kim</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Smaragdis,+P">Paris Smaragdis</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 4 pages, 3 figures, accepted to WASPAA 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Machine Learning (cs.LG); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item11'>[11]</a> <a href ="/abs/2507.21591" title="Abstract" id="2507.21591"> arXiv:2507.21591 </a> (cross-list from cs.CR) [<a href="/pdf/2507.21591" title="Download PDF" id="pdf-2507.21591" aria-labelledby="pdf-2507.21591">pdf</a>, <a href="https://arxiv.org/html/2507.21591v1" title="View HTML" id="html-2507.21591" aria-labelledby="html-2507.21591" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.21591" title="Other formats" id="oth-2507.21591" aria-labelledby="oth-2507.21591">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Hierarchical 
Graph Neural Network for Compressed Speech Steganalysis </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Hemis,+M">Mustapha Hemis</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kheddar,+H">Hamza Kheddar</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Ghanem,+M+C">Mohamed Chahine Ghanem</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Boudraa,+B">Bachir Boudraa</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span>; Artificial Intelligence (cs.AI); Sound (cs.SD); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item12'>[12]</a> <a href ="/abs/2507.21522" title="Abstract" id="2507.21522"> arXiv:2507.21522 </a> (cross-list from cs.CL) [<a href="/pdf/2507.21522" title="Download PDF" id="pdf-2507.21522" aria-labelledby="pdf-2507.21522">pdf</a>, <a href="https://arxiv.org/html/2507.21522v1" title="View HTML" id="html-2507.21522" aria-labelledby="html-2507.21522" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.21522" title="Other formats" id="oth-2507.21522" aria-labelledby="oth-2507.21522">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Model-free Speculative Decoding for Transformer-based ASR with Token Map Drafting </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Ho,+T+V">Tuan Vu Ho</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kokubo,+H">Hiroaki Kokubo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yamamoto,+M">Masaaki Yamamoto</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kawaguchi,+Y">Yohei Kawaguchi</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted at EUSIPCO 2025 </div> <div 
class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Sound (cs.SD); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item13'>[13]</a> <a href ="/abs/2507.21395" title="Abstract" id="2507.21395"> arXiv:2507.21395 </a> (cross-list from cs.MM) [<a href="/pdf/2507.21395" title="Download PDF" id="pdf-2507.21395" aria-labelledby="pdf-2507.21395">pdf</a>, <a href="https://arxiv.org/html/2507.21395v1" title="View HTML" id="html-2507.21395" aria-labelledby="html-2507.21395" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.21395" title="Other formats" id="oth-2507.21395" aria-labelledby="oth-2507.21395">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Sync-TVA: A Graph-Attention Framework for Multimodal Emotion Recognition with Cross-Modal Fusion </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Deng,+Z">Zeyu Deng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Lu,+Y">Yanhui Lu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Liao,+J">Jiashu Liao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wu,+S">Shuang Wu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wei,+C">Chongfeng Wei</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Multimedia (cs.MM)</span>; Artificial Intelligence (cs.AI); Sound (cs.SD); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item14'>[14]</a> <a href ="/abs/2507.21331" title="Abstract" id="2507.21331"> arXiv:2507.21331 </a> (cross-list from cs.CL) [<a href="/pdf/2507.21331" title="Download PDF" id="pdf-2507.21331" aria-labelledby="pdf-2507.21331">pdf</a>, <a href="/format/2507.21331" title="Other formats" id="oth-2507.21331" 
aria-labelledby="oth-2507.21331">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Deep Learning Automatic Speech Recognition Model for Shona Language </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Sirora,+L+W">Leslie Wellington Sirora</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Mutandavari,+M">Mainford Mutandavari</a></div> <div class='list-journal-ref'><span class='descriptor'>Journal-ref:</span> International Journal of Innovative Research in Computer and Communication Engineering, 12(9) (2024) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Sound (cs.SD); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item15'>[15]</a> <a href ="/abs/2507.21138" title="Abstract" id="2507.21138"> arXiv:2507.21138 </a> (cross-list from cs.CL) [<a href="/pdf/2507.21138" title="Download PDF" id="pdf-2507.21138" aria-labelledby="pdf-2507.21138">pdf</a>, <a href="https://arxiv.org/html/2507.21138v1" title="View HTML" id="html-2507.21138" aria-labelledby="html-2507.21138" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.21138" title="Other formats" id="oth-2507.21138" aria-labelledby="oth-2507.21138">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> TTS-1 Technical Report </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Atamanenko,+O">Oleg Atamanenko</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Chalova,+A">Anna Chalova</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Coombes,+J">Joseph Coombes</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Cope,+N">Nikki Cope</a>, <a 
href="https://arxiv.org/search/cs?searchtype=author&amp;query=Dang,+P">Phillip Dang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Deng,+Z">Zhifeng Deng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Du,+J">Jimmy Du</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Ermolenko,+M">Michael Ermolenko</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Fan,+F">Feifan Fan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Feng,+Y">Yufei Feng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Fichter,+C">Cheryl Fichter</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Filimonov,+P">Pavel Filimonov</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Fischer,+L">Louis Fischer</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Gibbs,+K">Kylan Gibbs</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Gusarova,+V">Valeria Gusarova</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Karpik,+P">Pavel Karpik</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kottner,+A+A">Andreas Assad Kottner</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Lee,+I">Ian Lee</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Louie,+O">Oliver Louie</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Mai,+J">Jasmine Mai</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Mamontov,+M">Mikhail Mamontov</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Mao,+S">Suri Mao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Morshed,+N">Nurullah Morshed</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Poletaev,+I">Igor Poletaev</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Radu,+F">Florin Radu</a>, <a 
href="https://arxiv.org/search/cs?searchtype=author&amp;query=Semernia,+D">Dmytro Semernia</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Shingarev,+E">Evgenii Shingarev</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Sivaraja,+V">Vikram Sivaraja</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Skirko,+P">Peter Skirko</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Takhautdinov,+R">Rinat Takhautdinov</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Villahermosa,+R">Robert Villahermosa</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wang,+J">Jean Wang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 20 pages, 10 figures. For associated modeling and training code, see <a href="https://github.com/inworld-ai/tts" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG); Sound (cs.SD); Audio and Speech Processing (eess.AS) </div> </div> </dd> </dl> <dl id='articles'> <h3>Tue, 29 Jul 2025 (showing 19 of 19 entries )</h3> <dt> <a name='item16'>[16]</a> <a href ="/abs/2507.20900" title="Abstract" id="2507.20900"> arXiv:2507.20900 </a> [<a href="/pdf/2507.20900" title="Download PDF" id="pdf-2507.20900" aria-labelledby="pdf-2507.20900">pdf</a>, <a href="https://arxiv.org/html/2507.20900v1" title="View HTML" id="html-2507.20900" aria-labelledby="html-2507.20900" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.20900" title="Other formats" id="oth-2507.20900" aria-labelledby="oth-2507.20900">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Music Arena: Live Evaluation for Text-to-Music </div> 
<div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kim,+Y">Yonghyun Kim</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Chi,+W">Wayne Chi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Angelopoulos,+A+N">Anastasios N. Angelopoulos</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Chiang,+W">Wei-Lin Chiang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Saito,+K">Koichi Saito</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Watanabe,+S">Shinji Watanabe</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Mitsufuji,+Y">Yuki Mitsufuji</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Donahue,+C">Chris Donahue</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Artificial Intelligence (cs.AI); Multimedia (cs.MM) </div> </div> </dd> <dt> <a name='item17'>[17]</a> <a href ="/abs/2507.20880" title="Abstract" id="2507.20880"> arXiv:2507.20880 </a> [<a href="/pdf/2507.20880" title="Download PDF" id="pdf-2507.20880" aria-labelledby="pdf-2507.20880">pdf</a>, <a href="https://arxiv.org/html/2507.20880v1" title="View HTML" id="html-2507.20880" aria-labelledby="html-2507.20880" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.20880" title="Other formats" id="oth-2507.20880" aria-labelledby="oth-2507.20880">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> JAM: A Tiny Flow-based Song Generator with Fine-grained Controllability and Aesthetic Alignment </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Liu,+R">Renhang Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Hung,+C">Chia-Yu Hung</a>, <a 
href="https://arxiv.org/search/cs?searchtype=author&amp;query=Majumder,+N">Navonil Majumder</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Gautreaux,+T">Taylor Gautreaux</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Bagherzadeh,+A+A">Amir Ali Bagherzadeh</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Li,+C">Chuan Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Herremans,+D">Dorien Herremans</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Poria,+S">Soujanya Poria</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> <a href="https://github.com/declare-lab/jamify" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Artificial Intelligence (cs.AI) </div> </div> </dd> <dt> <a name='item18'>[18]</a> <a href ="/abs/2507.20731" title="Abstract" id="2507.20731"> arXiv:2507.20731 </a> [<a href="/pdf/2507.20731" title="Download PDF" id="pdf-2507.20731" aria-labelledby="pdf-2507.20731">pdf</a>, <a href="/format/2507.20731" title="Other formats" id="oth-2507.20731" aria-labelledby="oth-2507.20731">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Learning Neural Vocoder from Range-Null Space Decomposition </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Li,+A">Andong Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Lei,+T">Tong Lei</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Sun,+Z">Zhihang Sun</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Chen,+R">Rilin Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yin,+E">Erwei Yin</a>, <a 
href="https://arxiv.org/search/cs?searchtype=author&amp;query=Li,+X">Xiaodong Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Zheng,+C">Chengshi Zheng</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 10 pages, 7 figures, IJCAI2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span> </div> </div> </dd> <dt> <a name='item19'>[19]</a> <a href ="/abs/2507.20624" title="Abstract" id="2507.20624"> arXiv:2507.20624 </a> [<a href="/pdf/2507.20624" title="Download PDF" id="pdf-2507.20624" aria-labelledby="pdf-2507.20624">pdf</a>, <a href="/format/2507.20624" title="Other formats" id="oth-2507.20624" aria-labelledby="oth-2507.20624">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Hyperbolic Embeddings for Order-Aware Classification of Audio Effect Chains </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wada,+A">Aogu Wada</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Nakamura,+T">Tomohiko Nakamura</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Saruwatari,+H">Hiroshi Saruwatari</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 7 pages, 3 figures, accepted for the 28th International Conference on Digital Audio Effects (DAFx25) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item20'>[20]</a> <a href ="/abs/2507.20485" title="Abstract" id="2507.20485"> arXiv:2507.20485 </a> [<a href="/pdf/2507.20485" title="Download PDF" id="pdf-2507.20485" aria-labelledby="pdf-2507.20485">pdf</a>, <a href="https://arxiv.org/html/2507.20485v1" title="View HTML" id="html-2507.20485" aria-labelledby="html-2507.20485" 
rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.20485" title="Other formats" id="oth-2507.20485" aria-labelledby="oth-2507.20485">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Sound Safeguarding for Acoustic Measurement Using Any Sounds: Tools and Applications </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kawahara,+H">Hideki Kawahara</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yatabe,+K">Kohei Yatabe</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Sakakibara,+K">Ken-Ichi Sakakibara</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 2 pages, 2 figures, IEEE GCCE 2025 Demo session, Accepted </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item21'>[21]</a> <a href ="/abs/2507.20417" title="Abstract" id="2507.20417"> arXiv:2507.20417 </a> [<a href="/pdf/2507.20417" title="Download PDF" id="pdf-2507.20417" aria-labelledby="pdf-2507.20417">pdf</a>, <a href="https://arxiv.org/html/2507.20417v1" title="View HTML" id="html-2507.20417" aria-labelledby="html-2507.20417" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.20417" title="Other formats" id="oth-2507.20417" aria-labelledby="oth-2507.20417">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Two Views, One Truth: Spectral and Self-Supervised Features Fusion for Robust Speech Deepfake Detection </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kheir,+Y+E">Yassine El Kheir</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Das,+A">Arnab Das</a>, <a 
href="https://arxiv.org/search/cs?searchtype=author&amp;query=Erdogan,+E+E">Enes Erdem Erdogan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Ritter-Guttierez,+F">Fabian Ritter-Guttierez</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Polzehl,+T">Tim Polzehl</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=M%C3%B6ller,+S">Sebastian Möller</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> ACCEPTED WASPAA 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Cryptography and Security (cs.CR); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item22'>[22]</a> <a href ="/abs/2507.20169" title="Abstract" id="2507.20169"> arXiv:2507.20169 </a> [<a href="/pdf/2507.20169" title="Download PDF" id="pdf-2507.20169" aria-labelledby="pdf-2507.20169">pdf</a>, <a href="https://arxiv.org/html/2507.20169v1" title="View HTML" id="html-2507.20169" aria-labelledby="html-2507.20169" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.20169" title="Other formats" id="oth-2507.20169" aria-labelledby="oth-2507.20169">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Self-Improvement for Audio Large Language Model using Unlabeled Speech </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wang,+S">Shaowen Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Chen,+X">Xinyuan Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Xu,+Y">Yao Xu</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> To appear in Interspeech 2025. 
6 pages, 1 figure </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item23'>[23]</a> <a href ="/abs/2507.20140" title="Abstract" id="2507.20140"> arXiv:2507.20140 </a> [<a href="/pdf/2507.20140" title="Download PDF" id="pdf-2507.20140" aria-labelledby="pdf-2507.20140">pdf</a>, <a href="https://arxiv.org/html/2507.20140v1" title="View HTML" id="html-2507.20140" aria-labelledby="html-2507.20140" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.20140" title="Other formats" id="oth-2507.20140" aria-labelledby="oth-2507.20140">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Do Not Mimic My Voice: Speaker Identity Unlearning for Zero-Shot Text-to-Speech </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kim,+T">Taesoo Kim</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kim,+J">Jinju Kim</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kim,+D">Dongchan Kim</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Ko,+J+H">Jong Hwan Ko</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Park,+G">Gyeong-Moon Park</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Proceedings of the 42nd International Conference on Machine Learning (ICML 2025), Vancouver, Canada. PMLR 267, 2025. 
Authors Jinju Kim and Taesoo Kim contributed equally </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Artificial Intelligence (cs.AI); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item24'>[24]</a> <a href ="/abs/2507.20128" title="Abstract" id="2507.20128"> arXiv:2507.20128 </a> [<a href="/pdf/2507.20128" title="Download PDF" id="pdf-2507.20128" aria-labelledby="pdf-2507.20128">pdf</a>, <a href="https://arxiv.org/html/2507.20128v1" title="View HTML" id="html-2507.20128" aria-labelledby="html-2507.20128" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.20128" title="Other formats" id="oth-2507.20128" aria-labelledby="oth-2507.20128">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Diffusion-based Symbolic Music Generation with Structured State Space Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yuan,+S">Shenghua Yuan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Tang,+X">Xing Tang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Chen,+J">Jiatao Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Xie,+T">Tianming Xie</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wang,+J">Jing Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Shi,+B">Bing Shi</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 9 pages,3figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span> </div> </div> </dd> <dt> <a name='item25'>[25]</a> <a href ="/abs/2507.20052" title="Abstract" id="2507.20052"> arXiv:2507.20052 </a> [<a href="/pdf/2507.20052" title="Download PDF" id="pdf-2507.20052" 
aria-labelledby="pdf-2507.20052">pdf</a>, <a href="https://arxiv.org/html/2507.20052v1" title="View HTML" id="html-2507.20052" aria-labelledby="html-2507.20052" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.20052" title="Other formats" id="oth-2507.20052" aria-labelledby="oth-2507.20052">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Improving Deep Learning-based Respiratory Sound Analysis with Frequency Selection and Attention Mechanism </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Fraihi,+N">Nouhaila Fraihi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Karrakchou,+O">Ouassim Karrakchou</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Ghogho,+M">Mounir Ghogho</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Machine Learning (cs.LG) </div> </div> </dd> <dt> <a name='item26'>[26]</a> <a href ="/abs/2507.20036" title="Abstract" id="2507.20036"> arXiv:2507.20036 </a> [<a href="/pdf/2507.20036" title="Download PDF" id="pdf-2507.20036" aria-labelledby="pdf-2507.20036">pdf</a>, <a href="https://arxiv.org/html/2507.20036v1" title="View HTML" id="html-2507.20036" aria-labelledby="html-2507.20036" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.20036" title="Other formats" id="oth-2507.20036" aria-labelledby="oth-2507.20036">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Improving Audio Classification by Transitioning from Zero- to Few-Shot </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Taylor,+J">James Taylor</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Mack,+W">Wolfgang Mack</a></div> <div class='list-comments mathjax'><span 
class='descriptor'>Comments:</span> Submitted to Interspeech 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Machine Learning (cs.LG) </div> </div> </dd> <dt> <a name='item27'>[27]</a> <a href ="/abs/2507.19991" title="Abstract" id="2507.19991"> arXiv:2507.19991 </a> [<a href="/pdf/2507.19991" title="Download PDF" id="pdf-2507.19991" aria-labelledby="pdf-2507.19991">pdf</a>, <a href="https://arxiv.org/html/2507.19991v1" title="View HTML" id="html-2507.19991" aria-labelledby="html-2507.19991" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.19991" title="Other formats" id="oth-2507.19991" aria-labelledby="oth-2507.19991">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Efficient Vocal-Conditioned Music Generation via Soft Alignment Attention and Latent Diffusion </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Cheung,+H+S">Hei Shing Cheung</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Zhang,+B">Boya Zhang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 6 page, 3 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Machine Learning (cs.LG) </div> </div> </dd> <dt> <a name='item28'>[28]</a> <a href ="/abs/2507.19835" title="Abstract" id="2507.19835"> arXiv:2507.19835 </a> [<a href="/pdf/2507.19835" title="Download PDF" id="pdf-2507.19835" aria-labelledby="pdf-2507.19835">pdf</a>, <a href="https://arxiv.org/html/2507.19835v1" title="View HTML" id="html-2507.19835" aria-labelledby="html-2507.19835" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.19835" title="Other formats" id="oth-2507.19835" aria-labelledby="oth-2507.19835">other</a>] </dt> <dd> <div class='meta'> <div 
class='list-title mathjax'><span class='descriptor'>Title:</span> SonicGauss: Position-Aware Physical Sound Synthesis for 3D Gaussian Representations </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wang,+C">Chunshi Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Li,+H">Hongxing Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Luo,+Y">Yawei Luo</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted by ACMMM&#39;25 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Multimedia (cs.MM) </div> </div> </dd> <dt> <a name='item29'>[29]</a> <a href ="/abs/2507.19557" title="Abstract" id="2507.19557"> arXiv:2507.19557 </a> [<a href="/pdf/2507.19557" title="Download PDF" id="pdf-2507.19557" aria-labelledby="pdf-2507.19557">pdf</a>, <a href="https://arxiv.org/html/2507.19557v1" title="View HTML" id="html-2507.19557" aria-labelledby="html-2507.19557" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.19557" title="Other formats" id="oth-2507.19557" aria-labelledby="oth-2507.19557">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Joint Feature and Output Distillation for Low-complexity Acoustic Scene Classification </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Li,+H">Haowen Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yang,+Z">Ziyi Yang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wang,+M">Mou Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Tan,+E">Ee-Leng Tan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yeow,+J">Junwei Yeow</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Peksi,+S">Santi Peksi</a>, <a 
href="https://arxiv.org/search/cs?searchtype=author&amp;query=Gan,+W">Woon-Seng Gan</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 4 pages, submitted to DCASE2025 Challenge Task 1 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item30'>[30]</a> <a href ="/abs/2507.20666" title="Abstract" id="2507.20666"> arXiv:2507.20666 </a> (cross-list from eess.AS) [<a href="/pdf/2507.20666" title="Download PDF" id="pdf-2507.20666" aria-labelledby="pdf-2507.20666">pdf</a>, <a href="/format/2507.20666" title="Other formats" id="oth-2507.20666" aria-labelledby="oth-2507.20666">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> MIMII-Agent: Leveraging LLMs with Function Calling for Relative Evaluation of Anomalous Sound Detection </div> <div class='list-authors'><a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Purohit,+H">Harsh Purohit</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Nishida,+T">Tomoya Nishida</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Dohi,+K">Kota Dohi</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Endo,+T">Takashi Endo</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Kawaguchi,+Y">Yohei Kawaguchi</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Audio and Speech Processing (eess.AS)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG); Sound (cs.SD) </div> </div> </dd> <dt> <a name='item31'>[31]</a> <a href ="/abs/2507.20627" title="Abstract" id="2507.20627"> arXiv:2507.20627 </a> (cross-list from cs.MM) [<a href="/pdf/2507.20627" title="Download PDF" id="pdf-2507.20627" aria-labelledby="pdf-2507.20627">pdf</a>, <a 
href="/format/2507.20627" title="Other formats" id="oth-2507.20627" aria-labelledby="oth-2507.20627">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Controllable Video-to-Music Generation with Multiple Time-Varying Conditions </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wu,+J">Junxian Wu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=You,+W">Weitao You</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Zuo,+H">Heda Zuo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Zhang,+D">Dengming Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Chen,+P">Pei Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Sun,+L">Lingyun Sun</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted by the 33rd ACM International Conference on Multimedia (ACMMM 2025). 
The project page is available at <a href="https://kita-wjx.github.io/MCV2M/" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Multimedia (cs.MM)</span>; Artificial Intelligence (cs.AI); Sound (cs.SD); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item32'>[32]</a> <a href ="/abs/2507.20530" title="Abstract" id="2507.20530"> arXiv:2507.20530 </a> (cross-list from eess.AS) [<a href="/pdf/2507.20530" title="Download PDF" id="pdf-2507.20530" aria-labelledby="pdf-2507.20530">pdf</a>, <a href="/format/2507.20530" title="Other formats" id="oth-2507.20530" aria-labelledby="oth-2507.20530">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Binaural Sound Event Localization and Detection based on HRTF Cues for Humanoid Robots </div> <div class='list-authors'><a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Lee,+G">Gyeong-Tae Lee</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Nam,+H">Hyeonuk Nam</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Park,+Y">Yong-Hwa Park</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Submitted to IEEE/ACM TASLP </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Audio and Speech Processing (eess.AS)</span>; Sound (cs.SD) </div> </div> </dd> <dt> <a name='item33'>[33]</a> <a href ="/abs/2507.19836" title="Abstract" id="2507.19836"> arXiv:2507.19836 </a> (cross-list from cs.GR) [<a href="/pdf/2507.19836" title="Download PDF" id="pdf-2507.19836" aria-labelledby="pdf-2507.19836">pdf</a>, <a href="https://arxiv.org/html/2507.19836v1" title="View HTML" id="html-2507.19836" aria-labelledby="html-2507.19836" rel="noopener noreferrer" target="_blank">html</a>, <a 
href="/format/2507.19836" title="Other formats" id="oth-2507.19836" aria-labelledby="oth-2507.19836">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> ChoreoMuse: Robust Music-to-Dance Video Generation with Style Transfer and Beat-Adherent Motion </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wang,+X">Xuanchen Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wang,+H">Heng Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Cai,+W">Weidong Cai</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 10 pages, 5 figures, accepted by the 33rd ACM International Conference on Multimedia (ACM MM 2025), demo page: <a href="https://choreomuse.github.io" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Graphics (cs.GR)</span>; Artificial Intelligence (cs.AI); Computer Vision and Pattern Recognition (cs.CV); Multimedia (cs.MM); Sound (cs.SD) </div> </div> </dd> <dt> <a name='item34'>[34]</a> <a href ="/abs/2507.19634" title="Abstract" id="2507.19634"> arXiv:2507.19634 </a> (cross-list from cs.CL) [<a href="/pdf/2507.19634" title="Download PDF" id="pdf-2507.19634" aria-labelledby="pdf-2507.19634">pdf</a>, <a href="https://arxiv.org/html/2507.19634v1" title="View HTML" id="html-2507.19634" aria-labelledby="html-2507.19634" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.19634" title="Other formats" id="oth-2507.19634" aria-labelledby="oth-2507.19634">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> MCIF: Multimodal Crosslingual Instruction-Following Benchmark from Scientific Talks </div> <div class='list-authors'><a 
href="https://arxiv.org/search/cs?searchtype=author&amp;query=Papi,+S">Sara Papi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Z%C3%BCfle,+M">Maike Züfle</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Gaido,+M">Marco Gaido</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Savoldi,+B">Beatrice Savoldi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Liu,+D">Danni Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Douros,+I">Ioannis Douros</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Bentivogli,+L">Luisa Bentivogli</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Niehues,+J">Jan Niehues</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Work in progress </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Computer Vision and Pattern Recognition (cs.CV); Sound (cs.SD) </div> </div> </dd> </dl> <dl id='articles'> <h3>Mon, 28 Jul 2025 (showing 13 of 13 entries )</h3> <dt> <a name='item35'>[35]</a> <a href ="/abs/2507.19308" title="Abstract" id="2507.19308"> arXiv:2507.19308 </a> [<a href="/pdf/2507.19308" title="Download PDF" id="pdf-2507.19308" aria-labelledby="pdf-2507.19308">pdf</a>, <a href="https://arxiv.org/html/2507.19308v1" title="View HTML" id="html-2507.19308" aria-labelledby="html-2507.19308" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.19308" title="Other formats" id="oth-2507.19308" aria-labelledby="oth-2507.19308">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> The Eloquence team submission for task 1 of MLC-SLM challenge </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Concina,+L">Lorenzo Concina</a>, 
<a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Luque,+J">Jordi Luque</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Brutti,+A">Alessio Brutti</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Matassoni,+M">Marco Matassoni</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Zhang,+Y">Yuchen Zhang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Technical Report for MLC-SLM Challenge of Interspeech2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Computation and Language (cs.CL); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item36'>[36]</a> <a href ="/abs/2507.19225" title="Abstract" id="2507.19225"> arXiv:2507.19225 </a> [<a href="/pdf/2507.19225" title="Download PDF" id="pdf-2507.19225" aria-labelledby="pdf-2507.19225">pdf</a>, <a href="https://arxiv.org/html/2507.19225v1" title="View HTML" id="html-2507.19225" aria-labelledby="html-2507.19225" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.19225" title="Other formats" id="oth-2507.19225" aria-labelledby="oth-2507.19225">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Face2VoiceSync: Lightweight Face-Voice Consistency for Text-Driven Talking Face Generation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kang,+F">Fang Kang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Cao,+Y">Yin Cao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Chen,+H">Haoyu Chen</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Computer Vision and Pattern Recognition (cs.CV); Multimedia (cs.MM); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> 
<a name='item37'>[37]</a> <a href ="/abs/2507.19202" title="Abstract" id="2507.19202"> arXiv:2507.19202 </a> [<a href="/pdf/2507.19202" title="Download PDF" id="pdf-2507.19202" aria-labelledby="pdf-2507.19202">pdf</a>, <a href="https://arxiv.org/html/2507.19202v1" title="View HTML" id="html-2507.19202" aria-labelledby="html-2507.19202" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.19202" title="Other formats" id="oth-2507.19202" aria-labelledby="oth-2507.19202">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Latent Granular Resynthesis using Neural Audio Codecs </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Tokui,+N">Nao Tokui</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Baker,+T">Tom Baker</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted at ISMIR 2025 Late Breaking Demos </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Machine Learning (cs.LG); Audio and Speech Processing (eess.AS); Signal Processing (eess.SP) </div> </div> </dd> <dt> <a name='item38'>[38]</a> <a href ="/abs/2507.19062" title="Abstract" id="2507.19062"> arXiv:2507.19062 </a> [<a href="/pdf/2507.19062" title="Download PDF" id="pdf-2507.19062" aria-labelledby="pdf-2507.19062">pdf</a>, <a href="https://arxiv.org/html/2507.19062v1" title="View HTML" id="html-2507.19062" aria-labelledby="html-2507.19062" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.19062" title="Other formats" id="oth-2507.19062" aria-labelledby="oth-2507.19062">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> From Continuous to Discrete: Cross-Domain Collaborative General Speech Enhancement via Hierarchical Language Models </div> <div 
class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Mu,+Z">Zhaoxi Mu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Chen,+R">Rilin Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Li,+A">Andong Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yu,+M">Meng Yu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yang,+X">Xinyu Yang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yu,+D">Dong Yu</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> ACMMM 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item39'>[39]</a> <a href ="/abs/2507.19037" title="Abstract" id="2507.19037"> arXiv:2507.19037 </a> [<a href="/pdf/2507.19037" title="Download PDF" id="pdf-2507.19037" aria-labelledby="pdf-2507.19037">pdf</a>, <a href="https://arxiv.org/html/2507.19037v1" title="View HTML" id="html-2507.19037" aria-labelledby="html-2507.19037" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.19037" title="Other formats" id="oth-2507.19037" aria-labelledby="oth-2507.19037">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> MLLM-based Speech Recognition: When and How is Multimodality Beneficial? 
</div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Guan,+Y">Yiwen Guan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Trinh,+V+A">Viet Anh Trinh</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Voleti,+V">Vivek Voleti</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Whitehill,+J">Jacob Whitehill</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Computation and Language (cs.CL); Multimedia (cs.MM); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item40'>[40]</a> <a href ="/abs/2507.18897" title="Abstract" id="2507.18897"> arXiv:2507.18897 </a> [<a href="/pdf/2507.18897" title="Download PDF" id="pdf-2507.18897" aria-labelledby="pdf-2507.18897">pdf</a>, <a href="https://arxiv.org/html/2507.18897v1" title="View HTML" id="html-2507.18897" aria-labelledby="html-2507.18897" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.18897" title="Other formats" id="oth-2507.18897" aria-labelledby="oth-2507.18897">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> HH-Codec: High Compression High-fidelity Discrete Neural Codec for Spoken Language Modeling </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Xue,+R">Rongkun Xue</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Niu,+Y">Yazhe Niu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Hu,+S">Shuai Hu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yin,+Z">Zixin Yin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yao,+Y">Yongqiang Yao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yang,+J">Jing Yang</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> 
<span class="primary-subject">Sound (cs.SD)</span>; Artificial Intelligence (cs.AI); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item41'>[41]</a> <a href ="/abs/2507.18723" title="Abstract" id="2507.18723"> arXiv:2507.18723 </a> [<a href="/pdf/2507.18723" title="Download PDF" id="pdf-2507.18723" aria-labelledby="pdf-2507.18723">pdf</a>, <a href="https://arxiv.org/html/2507.18723v1" title="View HTML" id="html-2507.18723" aria-labelledby="html-2507.18723" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.18723" title="Other formats" id="oth-2507.18723" aria-labelledby="oth-2507.18723">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> SCORE-SET: A dataset of GuitarPro files for Music Phrase Generation and Sequence Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Begari,+V">Vishakh Begari</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 6 pages, 6 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Machine Learning (cs.LG); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item42'>[42]</a> <a href ="/abs/2507.19369" title="Abstract" id="2507.19369"> arXiv:2507.19369 </a> (cross-list from eess.AS) [<a href="/pdf/2507.19369" title="Download PDF" id="pdf-2507.19369" aria-labelledby="pdf-2507.19369">pdf</a>, <a href="https://arxiv.org/html/2507.19369v1" title="View HTML" id="html-2507.19369" aria-labelledby="html-2507.19369" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.19369" title="Other formats" id="oth-2507.19369" aria-labelledby="oth-2507.19369">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Binaural Target Speaker Extraction using HRTFs and a Complex-Valued Neural 
Network </div> <div class='list-authors'><a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Ellinson,+Y">Yoav Ellinson</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Gannot,+S">Sharon Gannot</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Audio and Speech Processing (eess.AS)</span>; Sound (cs.SD) </div> </div> </dd> <dt> <a name='item43'>[43]</a> <a href ="/abs/2507.19361" title="Abstract" id="2507.19361"> arXiv:2507.19361 </a> (cross-list from cs.CL) [<a href="/pdf/2507.19361" title="Download PDF" id="pdf-2507.19361" aria-labelledby="pdf-2507.19361">pdf</a>, <a href="https://arxiv.org/html/2507.19361v1" title="View HTML" id="html-2507.19361" aria-labelledby="html-2507.19361" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.19361" title="Other formats" id="oth-2507.19361" aria-labelledby="oth-2507.19361">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> SpeechIQ: Speech Intelligence Quotient Across Cognitive Levels in Voice Understanding Large Language Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wan,+Z">Zhen Wan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yang,+C+H">Chao-Han Huck Yang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Yu,+Y">Yahan Yu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Tian,+J">Jinchuan Tian</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Li,+S">Sheng Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Hu,+K">Ke Hu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Chen,+Z">Zhehuai Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Watanabe,+S">Shinji Watanabe</a>, <a 
href="https://arxiv.org/search/cs?searchtype=author&amp;query=Cheng,+F">Fei Cheng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Chu,+C">Chenhui Chu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kurohashi,+S">Sadao Kurohashi</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Our Speech-IQ leaderboard will be hosted at <a href="http://huggingface.co/spaces/nvidia/Speech-IQ-leaderboard" rel="external noopener nofollow" class="link-external link-http">this http URL</a>. ACL 2025 main </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Symbolic Computation (cs.SC); Sound (cs.SD); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item44'>[44]</a> <a href ="/abs/2507.19204" title="Abstract" id="2507.19204"> arXiv:2507.19204 </a> (cross-list from eess.AS) [<a href="/pdf/2507.19204" title="Download PDF" id="pdf-2507.19204" aria-labelledby="pdf-2507.19204">pdf</a>, <a href="https://arxiv.org/html/2507.19204v2" title="View HTML" id="html-2507.19204" aria-labelledby="html-2507.19204" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.19204" title="Other formats" id="oth-2507.19204" aria-labelledby="oth-2507.19204">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Should Top-Down Clustering Affect Boundaries in Unsupervised Word Discovery? 
</div> <div class='list-authors'><a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Malan,+S">Simon Malan</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=van+Niekerk,+B">Benjamin van Niekerk</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Kamper,+H">Herman Kamper</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Submitted to the IEEE/ACM Transactions on Audio, Speech and Language Processing </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Audio and Speech Processing (eess.AS)</span>; Computation and Language (cs.CL); Sound (cs.SD) </div> </div> </dd> <dt> <a name='item45'>[45]</a> <a href ="/abs/2507.19137" title="Abstract" id="2507.19137"> arXiv:2507.19137 </a> (cross-list from eess.AS) [<a href="/pdf/2507.19137" title="Download PDF" id="pdf-2507.19137" aria-labelledby="pdf-2507.19137">pdf</a>, <a href="https://arxiv.org/html/2507.19137v1" title="View HTML" id="html-2507.19137" aria-labelledby="html-2507.19137" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.19137" title="Other formats" id="oth-2507.19137" aria-labelledby="oth-2507.19137">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Assessment of Personality Dimensions Across Situations Using Conversational Speech </div> <div class='list-authors'><a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Zhang,+A">Alice Zhang</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Muralidhar,+S">Skanda Muralidhar</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Gatica-Perez,+D">Daniel Gatica-Perez</a>, <a href="https://arxiv.org/search/eess?searchtype=author&amp;query=Magimai-Doss,+M">Mathew Magimai-Doss</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Audio and 
Speech Processing (eess.AS)</span>; Artificial Intelligence (cs.AI); Sound (cs.SD) </div> </div> </dd> <dt> <a name='item46'>[46]</a> <a href ="/abs/2507.18750" title="Abstract" id="2507.18750"> arXiv:2507.18750 </a> (cross-list from cs.MM) [<a href="/pdf/2507.18750" title="Download PDF" id="pdf-2507.18750" aria-labelledby="pdf-2507.18750">pdf</a>, <a href="/format/2507.18750" title="Other formats" id="oth-2507.18750" aria-labelledby="oth-2507.18750">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> CatchPhrase: EXPrompt-Guided Encoder Adaptation for Audio-to-Image Generation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Oh,+H">Hyunwoo Oh</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Cha,+S">SeungJu Cha</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Lee,+K">Kwanyoung Lee</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kim,+S">Si-Woo Kim</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kim,+D">Dong-Jin Kim</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Multimedia (cs.MM)</span>; Sound (cs.SD); Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item47'>[47]</a> <a href ="/abs/2507.18741" title="Abstract" id="2507.18741"> arXiv:2507.18741 </a> (cross-list from cs.CV) [<a href="/pdf/2507.18741" title="Download PDF" id="pdf-2507.18741" aria-labelledby="pdf-2507.18741">pdf</a>, <a href="/format/2507.18741" title="Other formats" id="oth-2507.18741" aria-labelledby="oth-2507.18741">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> KuiSCIMA v2.0: Improved Baselines, Calibration, and Cross-Notation Generalization for Historical Chinese Music Notations in Jiang Kui&#39;s Baishidaoren Gequ </div> <div class='list-authors'><a 
href="https://arxiv.org/search/cs?searchtype=author&amp;query=Repolusk,+T">Tristan Repolusk</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Veas,+E">Eduardo Veas</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> International Conference on Document Analysis and Recognition. This preprint has not undergone any post-submission improvements or corrections. The Version of Record of this contribution is published in &#34;19th International Conference on Document Analysis and Recognition (ICDAR 2025), Wuhan, China, September 16-21, 2025, Proceedings&#34;, and is available online at the External DOI field below </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Digital Libraries (cs.DL); Sound (cs.SD); Audio and Speech Processing (eess.AS) </div> </div> </dd> </dl> <dl id='articles'> <h3>Fri, 25 Jul 2025 (showing first 3 of 14 entries )</h3> <dt> <a name='item48'>[48]</a> <a href ="/abs/2507.18452" title="Abstract" id="2507.18452"> arXiv:2507.18452 </a> [<a href="/pdf/2507.18452" title="Download PDF" id="pdf-2507.18452" aria-labelledby="pdf-2507.18452">pdf</a>, <a href="https://arxiv.org/html/2507.18452v1" title="View HTML" id="html-2507.18452" aria-labelledby="html-2507.18452" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.18452" title="Other formats" id="oth-2507.18452" aria-labelledby="oth-2507.18452">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> DIFFA: Large Language Diffusion Models Can Listen and Understand </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Zhou,+J">Jiaming Zhou</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Chen,+H">Hongjie Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Zhao,+S">Shiwan 
Zhao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kang,+J">Jian Kang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Li,+J">Jie Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wang,+E">Enzhi Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Guo,+Y">Yujie Guo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Sun,+H">Haoqin Sun</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Wang,+H">Hui Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Kong,+A">Aobo Kong</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Qin,+Y">Yong Qin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Li,+X">Xuelong Li</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item49'>[49]</a> <a href ="/abs/2507.18051" title="Abstract" id="2507.18051"> arXiv:2507.18051 </a> [<a href="/pdf/2507.18051" title="Download PDF" id="pdf-2507.18051" aria-labelledby="pdf-2507.18051">pdf</a>, <a href="https://arxiv.org/html/2507.18051v1" title="View HTML" id="html-2507.18051" aria-labelledby="html-2507.18051" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.18051" title="Other formats" id="oth-2507.18051" aria-labelledby="oth-2507.18051">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> The TEA-ASLP System for Multilingual Conversational Speech Recognition and Speech Diarization in MLC-SLM 2025 Challenge </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Xue,+H">Hongfei Xue</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Huang,+K">Kaixun Huang</a>, <a 
href="https://arxiv.org/search/cs?searchtype=author&amp;query=Zhou,+Z">Zhikai Zhou</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Huang,+S">Shen Huang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Shang,+S">Shidong Shang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Interspeech 2025 workshop </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Audio and Speech Processing (eess.AS) </div> </div> </dd> <dt> <a name='item50'>[50]</a> <a href ="/abs/2507.17941" title="Abstract" id="2507.17941"> arXiv:2507.17941 </a> [<a href="/pdf/2507.17941" title="Download PDF" id="pdf-2507.17941" aria-labelledby="pdf-2507.17941">pdf</a>, <a href="https://arxiv.org/html/2507.17941v1" title="View HTML" id="html-2507.17941" aria-labelledby="html-2507.17941" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2507.17941" title="Other formats" id="oth-2507.17941" aria-labelledby="oth-2507.17941">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Resnet-conformer network with shared weights and attention mechanism for sound event localization, detection, and distance estimation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Vo,+Q+T">Quoc Thinh Vo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&amp;query=Han,+D">David Han</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> This paper has been submitted as a technical report outlining our approach to Task 3A of the Detection and Classification of Acoustic Scenes and Events (DCASE) 2024 and can be found in DCASE2024 technical reports </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Audio and Speech Processing (eess.AS) </div> 
</div> </dd> </dl> <div class='paging'>Total of 61 entries : <span>1-50</span> <a href=/list/cs.SD/recent?skip=50&amp;show=50>51-61</a> </div> <div class='morefewer'>Showing up to 50 entries per page: <a href=/list/cs.SD/recent?skip=0&amp;show=25 rel="nofollow"> fewer</a> | <span style="color: #454545">more</span> | <a href=/list/cs.SD/recent?skip=0&amp;show=2000 rel="nofollow"> all</a> </div> </div> </div> </div> </main> <footer style="clear: both;"> <div class="columns is-desktop" role="navigation" aria-label="Secondary" style="margin: -0.75em -0.75em 0.75em -0.75em"> <!-- Macro-Column 1 --> <div class="column" style="padding: 0;"> <div class="columns"> <div class="column"> <ul style="list-style: none; line-height: 2;"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul style="list-style: none; line-height: 2;"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 
15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- End Macro-Column 1 --> <!-- Macro-Column 2 --> <div class="column" style="padding: 0;"> <div class="columns"> <div class="column"> <ul style="list-style: none; line-height: 2;"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul style="list-style: none; line-height: 2;"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" 
target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> <!-- End Macro-Column 2 --> </div> </footer> </div> <script src="/static/base/1.0.1/js/member_acknowledgement.js"></script> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10