Machine Learning
aria-label="Search term or terms" /> <input type="hidden" name="source" value="header"> <input type="hidden" name="searchtype" value="all"> <button class="button">GO</button> </div> </form> </div> <button class="toggle-control"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-white" role="menu"><title>open navigation menu</title><path d="M16 132h416c8.837 0 16-7.163 16-16V76c0-8.837-7.163-16-16-16H16C7.163 60 0 67.163 0 76v40c0 8.837 7.163 16 16 16zm0 160h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm0 160h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16z"/ ></svg></button> <div class="mobile-toggle-block toggle-target"> <nav class="mobile-menu" aria-labelledby="mobilemenulabel"> <h2 id="mobilemenulabel">quick links</h2> <ul> <li><a href="https://arxiv.org/login">Login</a></li> <li><a href="https://info.arxiv.org/help">Help Pages</a></li> <li><a href="https://info.arxiv.org/about">About</a></li> </ul> </nav> </div> </div> </div> </div><!-- /end mobile-header --> </header> <main> <div id="content"> <div id='content-inner'> <div id='dlpage'> <h1>Machine Learning</h1> <ul> <li><a href="#item0">New submissions</a></li> <li><a href="#item74">Cross-lists</a></li> <li><a href="#item132">Replacements</a></li> </ul> <p>See <a id="recent-cs.LG" aria-labelledby="recent-cs.LG" href="/list/cs.LG/recent">recent</a> articles</p> <h3>Showing new listings for Wednesday, 9 April 2025</h3> <div class='paging'>Total of 249 entries </div> <div class='morefewer'>Showing up to 2000 entries per page: <a href=/list/cs.LG/new?skip=0&show=1000 rel="nofollow"> fewer</a> | <span style="color: #454545">more</span> | <span style="color: #454545">all</span> </div> <dl id='articles'> <h3>New submissions (showing 73 of 73 entries)</h3> <dt> <a name='item1'>[1]</a> <a href ="/abs/2504.05334" title="Abstract" id="2504.05334"> arXiv:2504.05334 </a> [<a href="/pdf/2504.05334" title="Download PDF" id="pdf-2504.05334" aria-labelledby="pdf-2504.05334">pdf</a>, <a href="https://arxiv.org/html/2504.05334v1" title="View HTML" id="html-2504.05334" aria-labelledby="html-2504.05334" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05334" title="Other formats" id="oth-2504.05334" aria-labelledby="oth-2504.05334">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Level Generation with Constrained Expressive Range </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Bazzaz,+M">Mahsa Bazzaz</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Cooper,+S">Seth Cooper</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Expressive range analysis is a visualization-based technique used to evaluate the performance of generative models, particularly in game level generation. It typically employs two quantifiable metrics to position generated artifacts on a 2D plot, offering insight into how content is distributed within a defined metric space. In this work, we use the expressive range of a generator as the conceptual space of possible creations. Inspired by the quality diversity paradigm, we explore this space to generate levels. 
[2] arXiv:2504.05335 [pdf, html, other]
Title: Impact of Price Inflation on Algorithmic Collusion Through Reinforcement Learning Agents
Authors: Sebastián Tinoco, Andrés Abeliuk, Javier Ruiz del Solar
Subjects: Machine Learning (cs.LG); Computer Science and Game Theory (cs.GT)

Algorithmic pricing is increasingly shaping market competition, raising concerns about its potential to compromise competitive dynamics. While prior work has shown that reinforcement learning (RL)-based pricing algorithms can lead to tacit collusion, less attention has been given to the role of macroeconomic factors in shaping these dynamics. This study examines the role of inflation in influencing algorithmic collusion within competitive markets. By incorporating inflation shocks into an RL-based pricing model, we analyze whether agents adapt their strategies to sustain supra-competitive profits. Our findings indicate that inflation reduces market competitiveness by fostering implicit coordination among agents, even without direct collusion. However, despite achieving sustained higher profitability, agents fail to develop robust punishment mechanisms to deter deviations from equilibrium strategies. The results suggest that inflation amplifies non-competitive dynamics in algorithmic pricing, emphasizing the need for regulatory oversight in markets where AI-driven pricing is prevalent.
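For orientation, here is a deliberately toy Q-learning duopoly with a periodic cost shock; the demand model, price grid, and shock schedule are all illustrative assumptions and not the paper's simulation:

```python
# Toy sketch, not the paper's model: two Q-learning sellers set prices on a
# grid; each agent's state is the rival's last price; a periodic "inflation"
# shock raises marginal cost. Demand is a simple logit split (assumed).
import numpy as np

rng = np.random.default_rng(0)
P = np.linspace(1.0, 2.0, 10)              # discrete price grid
Q = np.zeros((2, len(P), len(P)))          # Q[agent, rival_last_price, own_price]
eps, alpha, gamma, cost = 0.1, 0.15, 0.95, 0.9
last = [0, 0]
for t in range(200_000):
    if t and t % 50_000 == 0:
        cost *= 1.10                       # assumed inflation shock to marginal cost
    a = [int(rng.integers(len(P))) if rng.random() < eps
         else int(Q[k, last[1 - k]].argmax()) for k in range(2)]
    share = np.exp(-3 * P[a]) / np.exp(-3 * P[a]).sum()   # logit demand split
    profit = (P[a] - cost) * share
    for k in range(2):
        best_next = Q[k, a[1 - k]].max()
        Q[k, last[1 - k], a[k]] += alpha * (profit[k] + gamma * best_next
                                            - Q[k, last[1 - k], a[k]])
    last = a
```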
[3] arXiv:2504.05338 [pdf, other]
Title: Improving Early Prediction of Type 2 Diabetes Mellitus with ECG-DiaNet: A Multimodal Neural Network Leveraging Electrocardiogram and Clinical Risk Factors
Authors: Farida Mohsen, Zubair Shah
Subjects: Machine Learning (cs.LG); Artificial Intelligence (cs.AI)

Type 2 Diabetes Mellitus (T2DM) remains a global health challenge, underscoring the need for early and accurate risk prediction. This study presents ECG-DiaNet, a multimodal deep learning model that integrates electrocardiogram (ECG) features with clinical risk factors (CRFs) to enhance T2DM onset prediction. Using data from Qatar Biobank (QBB), we trained and validated models on a development cohort (n=2043) and evaluated performance on a longitudinal test set (n=395) with five-year follow-up. ECG-DiaNet outperformed unimodal ECG-only and CRF-only models, achieving a higher AUROC (0.845 vs 0.8217) than the CRF-only model, with statistical significance (DeLong p<0.001). Reclassification metrics further confirmed improvements: Net Reclassification Improvement (NRI=0.0153) and Integrated Discrimination Improvement (IDI=0.0482). Risk stratification into low-, medium-, and high-risk groups showed ECG-DiaNet achieved superior positive predictive value (PPV) in high-risk individuals. The model's reliance on non-invasive and widely available ECG signals supports its feasibility in clinical and community health settings. By combining cardiac electrophysiology and systemic risk profiles, ECG-DiaNet addresses the multifactorial nature of T2DM and supports precision prevention. These findings highlight the value of multimodal AI in advancing early detection and prevention strategies for T2DM, particularly in underrepresented Middle Eastern populations.
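As a rough architectural sketch only (the abstract does not specify layer configurations), a multimodal fusion network of this kind can be written in a few lines of PyTorch; all sizes below are assumptions:

```python
# Minimal fusion sketch (architecture details assumed, not from the paper):
# an ECG branch and a clinical-risk-factor branch are concatenated before a
# shared head predicting T2DM onset risk.
import torch
import torch.nn as nn

class ECGDiaNetSketch(nn.Module):
    def __init__(self, n_crf=10):
        super().__init__()
        self.ecg = nn.Sequential(                 # 1-D CNN over the raw ECG signal
            nn.Conv1d(1, 16, 7, stride=4), nn.ReLU(),
            nn.Conv1d(16, 32, 7, stride=4), nn.ReLU(),
            nn.AdaptiveAvgPool1d(1), nn.Flatten())
        self.crf = nn.Sequential(nn.Linear(n_crf, 32), nn.ReLU())
        self.head = nn.Sequential(nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, 1))

    def forward(self, ecg, crf):                  # ecg: (B, 1, L), crf: (B, n_crf)
        z = torch.cat([self.ecg(ecg), self.crf(crf)], dim=-1)
        return torch.sigmoid(self.head(z)).squeeze(-1)   # estimated onset risk
```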
[4] arXiv:2504.05342 [pdf, html, other]
Title: MASS: MoErging through Adaptive Subspace Selection
Authors: Donato Crisostomi, Alessandro Zirilli, Antonio Andrea Gargiulo, Maria Sofia Bucarelli, Simone Scardapane, Fabrizio Silvestri, Iacopo Masi, Emanuele Rodolà
Subjects: Machine Learning (cs.LG); Artificial Intelligence (cs.AI); Computer Vision and Pattern Recognition (cs.CV)

Model merging has recently emerged as a lightweight alternative to ensembling, combining multiple fine-tuned models into a single set of parameters with no additional training overhead. Yet, existing merging methods fall short of matching the full accuracy of separately fine-tuned endpoints. We present MASS (MoErging through Adaptive Subspace Selection), a new approach that closes this gap by unifying multiple fine-tuned models while retaining near state-of-the-art performance across tasks. Building on the low-rank decomposition of per-task updates, MASS stores only the most salient singular components for each task and merges them into a shared model. At inference time, a non-parametric, data-free router identifies which subspace (or combination thereof) best explains an input's intermediate features and activates the corresponding task-specific block. This procedure is fully training-free and introduces only a two-pass inference overhead plus a ~2x storage factor compared to a single pretrained model, irrespective of the number of tasks. We evaluate MASS on CLIP-based image classification using ViT-B-16, ViT-B-32 and ViT-L-14 for benchmarks of 8, 14 and 20 tasks respectively, establishing a new state-of-the-art. Most notably, MASS recovers up to ~98% of the average accuracy of individual fine-tuned models, making it a practical alternative to ensembling at a fraction of the storage cost.
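The two mechanisms named above, truncated SVD of per-task updates and data-free routing by subspace fit, can be sketched as follows; the projection-norm routing score is an assumption about what "best explains an input's intermediate features" might mean, not the paper's exact rule:

```python
# Sketch under stated assumptions: (1) keep only the top-k singular components
# of each per-task weight update; (2) route an input to the task whose left
# singular basis reconstructs its feature vector best.
import numpy as np

def compress_task_update(delta_w, k):
    U, s, Vt = np.linalg.svd(delta_w, full_matrices=False)
    return U[:, :k], s[:k], Vt[:k]            # salient rank-k subspace for one task

def route(feature, task_bases):
    # Score each task by the norm of `feature` projected onto its subspace.
    scores = [np.linalg.norm(U @ (U.T @ feature)) for U, _, _ in task_bases]
    return int(np.argmax(scores))             # index of the activated task block
```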
[5] arXiv:2504.05343 [pdf, html, other]
Title: AROMA: Autonomous Rank-one Matrix Adaptation
Authors: Hao Nan Sheng, Zhi-yong Wang, Mingrui Yang, Hing Cheung So
Subjects: Machine Learning (cs.LG); Artificial Intelligence (cs.AI)

As large language models continue to grow in size, parameter-efficient fine-tuning has become increasingly crucial. While low-rank adaptation (LoRA) offers a solution through low-rank updates, its static rank allocation may yield suboptimal results. Adaptive low-rank adaptation (AdaLoRA) improves on this with dynamic allocation but remains sensitive to initial and target rank configurations. We introduce AROMA, a framework that automatically constructs layer-specific updates by iteratively building up rank-one components with very few trainable parameters that gradually diminish to zero. Unlike existing methods that employ rank reduction mechanisms, AROMA introduces a dual-loop architecture for rank growth. The inner loop extracts information from each rank-one subspace, while the outer loop determines the number of rank-one subspaces, i.e., the optimal rank. We reset optimizer states to maintain subspace independence. AROMA significantly reduces parameters compared to LoRA and AdaLoRA while achieving superior performance on natural language understanding and commonsense reasoning tasks, offering new insights into adaptive parameter-efficient fine-tuning. Code is available at https://github.com/ShuDun23/AROMA.
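A schematic of the dual-loop idea, with the inner-loop training left as a user-supplied callback; `train_inner`, the stopping rule, and all hyperparameters are assumptions, not the released implementation:

```python
# Schematic sketch: the outer loop adds rank-one components to a layer's
# update until they stop helping; the inner loop trains only the current
# rank-one pair, then freezes it, and the optimizer is re-created so the
# next subspace is learned with fresh optimizer state.
import torch

def grow_rank_one_update(W, train_inner, max_rank=8, tol=1e-3):
    d_out, d_in = W.shape
    components = []
    for r in range(max_rank):                          # outer loop: rank growth
        u = torch.randn(d_out, 1, requires_grad=True)
        v = torch.zeros(1, d_in, requires_grad=True)   # zero init: update starts as a no-op
        opt = torch.optim.AdamW([u, v], lr=1e-3)       # fresh optimizer state per subspace
        gain = train_inner(W, components, u, v, opt)   # inner loop (user-supplied)
        if gain < tol:                                 # a new subspace adds ~nothing: stop
            break
        components.append((u.detach(), v.detach()))
    return components                                  # effective rank = len(components)
```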
[6] arXiv:2504.05344 [pdf, other]
Title: Divergent Paths: Separating Homophilic and Heterophilic Learning for Enhanced Graph-level Representations
Authors: Han Lei, Jiaxing Xu, Xia Dong, Yiping Ke
Comments: 10 pages, 6 figures
Subjects: Machine Learning (cs.LG); Artificial Intelligence (cs.AI)

Graph Convolutional Networks (GCNs) are predominantly tailored for graphs displaying homophily, where similar nodes connect, but often fail on heterophilic graphs. The strategy of adopting distinct approaches to learn from homophilic and heterophilic components in node-level tasks has been widely discussed and proven effective both theoretically and experimentally. However, in graph-level tasks, research on this topic remains notably scarce. Addressing this gap, our research conducts an analysis on graphs whose nodes' category IDs are available, distinguishing intra-category and inter-category components as embodiments of homophily and heterophily, respectively. We find that while GCNs excel at extracting information within categories, they frequently capture noise from inter-category components. Consequently, it is crucial to employ distinct learning strategies for intra- and inter-category elements. To alleviate this problem, we separately learn the intra- and inter-category parts with a combination of an intra-category convolution (IntraNet) and an inter-category high-pass graph convolution (InterNet). Our IntraNet is supported by sophisticated graph preprocessing steps and a novel category-based graph readout function. For the InterNet, we utilize a high-pass filter to amplify the node disparities, enhancing the recognition of details in the high-frequency components. The proposed approach, DivGNN, combines the IntraNet and InterNet with a gated mechanism and substantially improves classification performance on graph-level tasks, surpassing traditional GNN baselines in effectiveness.
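One common way to realize the low-pass/high-pass split described above uses a normalized adjacency A_n as the smoothing operator and I - A_n as the difference operator; the sketch below assumes that standard form, which may differ from the paper's exact filters:

```python
# Sketch of a gated low-pass / high-pass split (operator forms assumed):
# A_n @ X smooths features within neighborhoods, while (I - A_n) @ X
# amplifies disparities between a node and its neighbors.
import numpy as np

def normalized_adj(A):
    d = A.sum(axis=1)
    D_inv_sqrt = np.diag(1.0 / np.sqrt(np.maximum(d, 1e-12)))
    return D_inv_sqrt @ A @ D_inv_sqrt

def gated_low_high_layer(A, X, W_low, W_high, gate):
    A_n = normalized_adj(A)
    low = A_n @ X @ W_low                          # IntraNet-style smoothing
    high = (np.eye(len(A)) - A_n) @ X @ W_high     # InterNet-style high-pass
    return gate * low + (1.0 - gate) * high        # gated combination
```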
[7] arXiv:2504.05345 [pdf, html, other]
Title: ZeroED: Hybrid Zero-shot Error Detection through Large Language Model Reasoning
Authors: Wei Ni, Kaihang Zhang, Xiaoye Miao, Xiangyu Zhao, Yangyang Wu, Yaoshu Wang, Jianwei Yin
Comments: 12 pages
Subjects: Machine Learning (cs.LG); Databases (cs.DB)

Error detection (ED) in tabular data is crucial yet challenging due to diverse error types and the need for contextual understanding. Traditional ED methods often rely heavily on manual criteria and labels, making them labor-intensive. Large language models (LLMs) can minimize human effort but struggle with errors requiring a comprehensive understanding of data context. In this paper, we propose ZeroED, a novel hybrid zero-shot error detection framework that combines LLM reasoning ability with a manual label-based ED pipeline. ZeroED operates in four steps: feature representation, error labeling, training data construction, and detector training. Initially, to enhance error distinction, ZeroED generates rich data representations using error-reason-aware binary features, pre-trained embeddings, and statistical features. Then, ZeroED employs an LLM to label errors holistically through in-context learning, guided by a two-step reasoning process for detailed error detection guidelines. To reduce token costs, LLMs are applied only to representative data selected via clustering-based sampling. High-quality training data is constructed through in-cluster label propagation and LLM augmentation with verification. Finally, a classifier is trained to detect all errors. Extensive experiments on seven public datasets demonstrate that ZeroED substantially outperforms state-of-the-art methods, with up to a 30% improvement in F1 score and up to 90% token cost reduction.
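The token-saving step (clustering-based sampling plus in-cluster label propagation) can be pictured as follows, with `ask_llm` a stand-in for the in-context labeling call; its interface is an assumption:

```python
# Sketch of budgeted labeling (pipeline details assumed): cluster row
# representations, send only the point nearest each centroid to the LLM,
# then propagate the returned label within its cluster.
import numpy as np
from sklearn.cluster import KMeans

def label_with_budget(features, ask_llm, n_clusters=50):
    km = KMeans(n_clusters=n_clusters, n_init=10).fit(features)
    labels = np.empty(len(features), dtype=int)
    for c in range(n_clusters):
        members = np.where(km.labels_ == c)[0]
        rep = members[np.argmin(np.linalg.norm(
            features[members] - km.cluster_centers_[c], axis=1))]
        labels[members] = ask_llm(rep)   # stub: labels row `rep`, returns 0 (clean) or 1 (error)
    return labels                        # one LLM call per cluster instead of per row
```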
[8] arXiv:2504.05346 [pdf, html, other]
Title: Thanos: A Block-wise Pruning Algorithm for Efficient Large Language Model Compression
Authors: Ivan Ilin, Peter Richtarik
Comments: 8 pages, 3 figures, 3 tables, 2 algorithms, with appendix
Subjects: Machine Learning (cs.LG); Artificial Intelligence (cs.AI); Computation and Language (cs.CL); Performance (cs.PF)

This paper presents Thanos, a novel weight-pruning algorithm designed to reduce the memory footprint and enhance the computational efficiency of large language models (LLMs) by removing redundant weights while maintaining accuracy. Thanos introduces a block-wise pruning strategy with adaptive masks that dynamically adjust to weight importance, enabling flexible sparsity patterns and structured formats, such as n:m sparsity, optimized for hardware acceleration. Experimental evaluations demonstrate that Thanos achieves state-of-the-art performance in structured pruning and outperforms existing methods in unstructured pruning. By providing an efficient and adaptable approach to model compression, Thanos offers a practical solution for deploying large models in resource-constrained environments.
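The structured n:m format mentioned above is easy to demonstrate; the sketch keeps the n largest-magnitude weights in every group of m, whereas the block-wise importance criterion is the paper's actual contribution and is not reproduced here:

```python
# Sketch of n:m structured sparsity (2:4 by default): within each group of m
# consecutive weights, keep n and zero the rest. Magnitude scoring is used
# here only as a simple stand-in importance measure.
import torch

def prune_n_m(W, n=2, m=4):
    assert W.numel() % m == 0
    w = W.reshape(-1, m)                          # groups of m consecutive weights
    idx = w.abs().topk(n, dim=1).indices          # keep the n largest per group
    mask = torch.zeros_like(w).scatter_(1, idx, 1.0)
    return (w * mask).reshape(W.shape)            # n:m sparse weight tensor

W = torch.randn(8, 16)
W_sparse = prune_n_m(W)                           # every row of 4 has exactly 2 zeros
```

The 2:4 pattern is the one commonly accelerated by sparse tensor cores, which is why such formats matter for deployment.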
[9] arXiv:2504.05352 [pdf, html, other]
Title: Achieving binary weight and activation for LLMs using Post-Training Quantization
Authors: Siqing Song, Chuang Wang, Ruiqi Wang, Yi Yang, Xuyao Zhang
Subjects: Machine Learning (cs.LG); Artificial Intelligence (cs.AI)

Quantizing large language models (LLMs) to 1-bit precision significantly reduces computational costs, but existing quantization techniques suffer from noticeable performance degradation when using weight and activation precisions below 4 bits (W4A4). In this paper, we propose a post-training quantization framework with a W(1+1)A(1*4) configuration, where weights are quantized to 1 bit with an additional 1 bit for fine-grained grouping and activations are quantized to 1 bit with a 4-fold increase in the number of channels. For weight quantization, we propose utilizing Hessian-aware fine-grained grouping along with an EM-based quantization scheme. For activation quantization, we decompose INT4-quantized activations into an equivalent 4 * INT1 format and simultaneously smooth the scaling factors based on quantization errors, which further reduces the quantization errors in activations. Our method surpasses state-of-the-art (SOTA) LLM quantization baselines on W2A4 across multiple tasks, pushing the boundaries of existing LLM quantization methods toward fully binarized models.
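The activation-side decomposition is exact and easy to verify: an INT4 value in {0,...,15} equals a weighted sum of its four bit planes, so one 4-bit channel becomes four binary channels. Scale handling in the paper is more involved than this sketch:

```python
# Sketch of the bit-plane decomposition: rewrite an INT4-quantized activation
# exactly as four INT1 channels weighted by powers of two, so a binary-weight
# layer can process activations as four binary matmuls.
import numpy as np

def int4_to_4xint1(x_int4):
    # x_int4: integer array with values in [0, 15]
    planes = [((x_int4 >> b) & 1) for b in range(4)]   # four binary tensors
    scales = [1, 2, 4, 8]
    assert np.all(sum(s * p for s, p in zip(scales, planes)) == x_int4)  # exact
    return planes, scales

x = np.random.randint(0, 16, size=(2, 5))
planes, scales = int4_to_4xint1(x)
# A binary layer W can now compute W @ x as sum_b 2**b * (W @ planes[b]).
```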
[10] arXiv:2504.05355 [pdf, html, other]
Title: Deep Learning for Double Auction
Authors: Jiayin Liu, Chenglong Zhang
Subjects: Machine Learning (cs.LG); Computer Science and Game Theory (cs.GT); Theoretical Economics (econ.TH)

Auctions are important mechanisms extensively implemented in various markets, e.g., search engines' keyword auctions, antique auctions, etc. Finding an optimal auction mechanism is extremely difficult due to the constraints of imperfect information, incentive compatibility (IC), and individual rationality (IR). In addition to the traditional economic methods, some recent work has attempted to find the optimal (single) auction using deep learning methods. Unlike those attempts focusing on single auctions, we develop deep learning methods for double auctions, where imperfect information exists on both the demand and supply sides. The previous approaches for single auctions cannot be applied directly to our context, and they additionally suffer from limited generalizability, inefficiency in enforcing the constraints, and learning fluctuations. We innovate in designing deep learning models that solve this more complex problem while addressing the previous models' three limitations. Specifically, we achieve generalizability by leveraging a transformer-based architecture to model market participants as sequences for varying market sizes; we utilize the numerical features of the constraints and pre-treat them for higher learning efficiency; and we develop a gradient-conflict-elimination scheme to address the problem of learning fluctuation. Extensive experimental evaluations demonstrate the superiority of our approach to classical and machine learning baselines.
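The abstract does not spell out its gradient-conflict-elimination scheme; a widely used form of the general idea, PCGrad-style projection, is sketched here for orientation only and should not be read as the authors' method:

```python
# PCGrad-style conflict elimination (illustrative, not the paper's scheme):
# when two objective gradients conflict (negative dot product), project one
# onto the normal plane of the other before combining them.
import torch

def project_conflict(g1, g2):
    dot = torch.dot(g1, g2)
    if dot < 0:                                            # conflicting objectives
        g1 = g1 - (dot / g2.dot(g2).clamp_min(1e-12)) * g2 # remove conflicting part
    return g1
```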
[11] arXiv:2504.05356 [pdf, html, other]
Title: DyTTP: Trajectory Prediction with Normalization-Free Transformers
Authors: Yunxiang Liu, Hongkuo Niu
Subjects: Machine Learning (cs.LG); Artificial Intelligence (cs.AI)

Accurate trajectory prediction is a cornerstone for the safe operation of autonomous driving systems, where understanding the dynamic behavior of surrounding agents is crucial. Transformer-based architectures have demonstrated significant promise in capturing complex spatio-temporal dependencies. However, their reliance on normalization layers can lead to computational overhead and training instabilities. In this work, we present a two-fold approach to address these challenges. First, we integrate DynamicTanh (DyT), a recently proposed replacement for normalization layers in transformers, into the backbone. This modification simplifies the network architecture and improves inference stability. To our knowledge, this is the first work to apply DyT to the trajectory prediction task. Complementing this, we employ a snapshot ensemble strategy to further boost trajectory prediction performance. Using cyclical learning rate scheduling, multiple model snapshots are captured during a single training run. These snapshots are then aggregated via simple averaging at inference time, allowing the model to benefit from diverse hypotheses without incurring substantial additional computational cost. Extensive experiments on Argoverse datasets demonstrate that our combined approach significantly improves prediction accuracy, inference speed and robustness in diverse driving scenarios. This work underscores the potential of normalization-free transformer designs augmented with lightweight ensemble techniques in advancing trajectory forecasting for autonomous vehicles.
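DynamicTanh follows the general recipe of replacing normalization with a learnable pointwise function; the sketch below reflects that recipe (a learnable scalar inside tanh plus a per-channel affine, with initialization assumed), together with the snapshot-averaging step:

```python
# Sketch of DyT as a drop-in LayerNorm replacement, plus snapshot averaging
# at inference. Hyperparameters and placement are assumptions for this sketch.
import torch
import torch.nn as nn

class DyT(nn.Module):
    def __init__(self, dim, alpha0=0.5):
        super().__init__()
        self.alpha = nn.Parameter(torch.tensor(alpha0))  # learnable scalar
        self.gamma = nn.Parameter(torch.ones(dim))       # per-channel scale
        self.beta = nn.Parameter(torch.zeros(dim))       # per-channel shift

    def forward(self, x):                # no batch statistics are computed
        return self.gamma * torch.tanh(self.alpha * x) + self.beta

def snapshot_predict(models, x):
    # Average predictions of snapshots saved at cyclical-learning-rate minima.
    return torch.stack([m(x) for m in models]).mean(dim=0)
```

Because DyT computes no mean or variance, it removes a reduction step from every layer, which is the source of the claimed inference simplification.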
[12] arXiv:2504.05357 [pdf, html, other]
Title: Find A Winning Sign: Sign Is All We Need to Win the Lottery
Authors: Junghun Oh, Sungyong Baik, Kyoung Mu Lee
Comments: Accepted at ICLR 2025
Subjects: Machine Learning (cs.LG); Artificial Intelligence (cs.AI)

The Lottery Ticket Hypothesis (LTH) posits the existence of a sparse subnetwork (a.k.a. winning ticket) that can generalize comparably to its over-parameterized counterpart when trained from scratch. The common approach to finding a winning ticket is to preserve the original strong generalization through Iterative Pruning (IP) and to transfer information useful for achieving the learned generalization by applying the resulting sparse mask to an untrained network. However, existing IP methods still struggle to generalize their observations beyond ad-hoc initialization and small-scale architectures or datasets, or they bypass these challenges by applying their mask to trained weights instead of initialized ones. In this paper, we demonstrate that the parameter sign configuration plays a crucial role in conveying useful information for generalization to any randomly initialized network. Through linear mode connectivity analysis, we observe that a sparse network trained by an existing IP method can retain its basin of attraction if its parameter signs and normalization layer parameters are preserved. To take a step closer to finding a winning ticket, we alleviate the reliance on normalization layer parameters by preventing high error barriers along the linear path between the sparse network trained by our method and its counterpart with initialized normalization layer parameters. Interestingly, across various architectures and datasets, we observe that any randomly initialized network can be optimized to exhibit low error barriers along the linear path to the sparse network trained by our method by inheriting its sparsity and parameter sign information, potentially achieving performance comparable to the original. The code is available at https://github.com/JungHunOh/AWS_ICLR2025.git.
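The transfer step described above can be pictured as a single tensor operation; this framing is an illustration of the idea (the paper works at the level of whole networks with normalization layers):

```python
# Sketch of sign-and-sparsity inheritance: give a randomly initialized tensor
# the sparse mask and parameter signs of a trained sparse network, keeping
# only the random magnitudes.
import torch

def inherit_sign_and_sparsity(w_random, w_sparse_trained):
    mask = (w_sparse_trained != 0).float()    # trained sparsity pattern
    sign = torch.sign(w_sparse_trained)       # trained sign configuration
    return mask * sign * w_random.abs()       # random magnitude, inherited sign
```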
[13] arXiv:2504.05366 [pdf, html, other]
Title: Handling Weather Uncertainty in Air Traffic Prediction through an Inverse Approach
Authors: G. Lancia, D. Falanga, S. Alam, G. Lulli
Subjects: Machine Learning (cs.LG)

Adverse weather conditions, particularly convective phenomena, pose significant challenges to Air Traffic Management, often requiring real-time rerouting decisions that impact efficiency and safety. This study introduces a 3-D Gaussian Mixture Model to predict long lead-time flight trajectory changes, incorporating comprehensive weather and traffic data. Utilizing high-resolution meteorological datasets, including convective weather maps and wind data, alongside traffic records, the model demonstrates robust performance in forecasting reroutes up to 60 minutes ahead. The 3-D Gaussian Mixture Model framework employs a probabilistic approach to capture uncertainty while providing accurate forecasts of altitude, latitude, and longitude. Extensive evaluation revealed a Mean Absolute Percentage Error below 0.02 across varying lead times, highlighting the model's accuracy and scalability. By integrating explainability techniques such as the Vanilla Gradient algorithm, the study provides insights into feature contributions, supporting Air Traffic Management strategies that mitigate weather-induced disruptions.
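A bare-bones version of fitting a 3-D Gaussian mixture to trajectory deviations, omitting the weather and traffic conditioning that is the paper's substance; the synthetic data and component count are placeholders:

```python
# Sketch: a 3-D Gaussian mixture over (altitude, latitude, longitude)
# trajectory deviations, used both generatively and for density scoring.
import numpy as np
from sklearn.mixture import GaussianMixture

deviations = np.random.randn(1000, 3)           # stand-in for observed reroutes
gmm = GaussianMixture(n_components=5, covariance_type="full").fit(deviations)

samples, _ = gmm.sample(100)                    # probabilistic reroute scenarios
log_density = gmm.score_samples(deviations[:5]) # likelihood of observed cases
```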
[14] arXiv:2504.05405 [pdf, other]
Title: The Role of Environment Access in Agnostic Reinforcement Learning
Authors: Akshay Krishnamurthy, Gene Li, Ayush Sekhari
Comments: comments welcome
Subjects: Machine Learning (cs.LG); Artificial Intelligence (cs.AI); Machine Learning (stat.ML)

We study Reinforcement Learning (RL) in environments with large state spaces, where function approximation is required for sample-efficient learning. Departing from a long history of prior work, we consider the weakest possible form of function approximation, called agnostic policy learning, where the learner seeks to find the best policy in a given class $\Pi$, with no guarantee that $\Pi$ contains an optimal policy for the underlying task. Although it is known that sample-efficient agnostic policy learning is not possible in the standard online RL setting without further assumptions, we investigate the extent to which this can be overcome with stronger forms of access to the environment. Specifically, we show that:
1. Agnostic policy learning remains statistically intractable when given access to a local simulator, from which one can reset to any previously seen state. This result holds even when the policy class is realizable, and stands in contrast to a positive result of [MFR24] showing that value-based learning under realizability is tractable with local simulator access.
2. Agnostic policy learning remains statistically intractable when given online access to a reset distribution with good coverage properties over the state space (the so-called $\mu$-reset setting). We also study stronger forms of function approximation for policy learning, showing that PSDP [BKSN03] and CPI [KL02] provably fail in the absence of policy completeness.
3. On a positive note, agnostic policy learning is statistically tractable for Block MDPs with access to both of the above reset models. We establish this via a new algorithm that carefully constructs a policy emulator: a tabular MDP with a small state space that approximates the value functions of all policies $\pi \in \Pi$. These values are approximated without any explicit value function class.
[15] arXiv:2504.05425 [pdf, html, other]
Title: A Behavior-Based Knowledge Representation Improves Prediction of Players' Moves in Chess by 25%
Authors: Benny Skidanov, Daniel Erbesfeld, Gera Weiss, Achiya Elyasaf
Comments: 8 pages, 2 tables, 2 figures
Subjects: Machine Learning (cs.LG); Artificial Intelligence (cs.AI)

Predicting player behavior in strategic games, especially complex ones like chess, presents a significant challenge. The difficulty arises from several factors. First, the sheer number of potential outcomes stemming from even a single position, starting from the initial setup, makes forecasting a player's next move incredibly complex. Second, and perhaps even more challenging, is the inherent unpredictability of human behavior. Unlike the optimized play of engines, humans introduce a layer of variability due to differing playing styles and decision-making processes. Each player approaches the game with a unique blend of strategic thinking, tactical awareness, and psychological tendencies, leading to diverse and often unexpected actions. This stylistic variation, combined with the capacity for creativity and even irrational moves, makes predicting human play difficult. Chess, a longstanding benchmark of artificial intelligence research, has seen significant advancements in tools and automation. Engines like Deep Blue, AlphaZero, and Stockfish can defeat even the most skilled human players. However, despite their exceptional ability to outplay top-level grandmasters, predicting the moves of non-grandmaster players, who comprise most of the global chess community, remains complicated for these engines. This paper proposes a novel approach combining expert knowledge with machine learning techniques to predict human players' next moves. By applying feature engineering grounded in domain expertise, we seek to uncover the patterns in the moves of intermediate-level chess players, particularly during the opening phase of the game. Our methodology offers a promising framework for anticipating human behavior, advancing both the fields of AI and human-computer interaction.
[16] arXiv:2504.05454 [pdf, html, other]
Title: GraphPINE: Graph Importance Propagation for Interpretable Drug Response Prediction
Authors: Yoshitaka Inoue, Tianfan Fu, Augustin Luna
Subjects: Machine Learning (cs.LG); Artificial Intelligence (cs.AI); Computational Engineering, Finance, and Science (cs.CE); Genomics (q-bio.GN); Quantitative Methods (q-bio.QM)

Explainability is necessary for many tasks in biomedical research. Recent explainability methods have focused on attention, gradient, and Shapley value. These do not handle data with strong associated prior knowledge and fail to constrain explainability results based on known relationships between predictive features.

We propose GraphPINE, a graph neural network (GNN) architecture leveraging domain-specific prior knowledge to initialize node importance that is optimized during training for drug response prediction. Typically, a manual post-prediction step examines the literature (i.e., prior knowledge) to understand returned predictive features. While node importance can be obtained from gradient and attention methods after prediction, such importance lacks complementary prior knowledge; GraphPINE seeks to overcome this limitation. GraphPINE differs from other GNN gating methods by utilizing an LSTM-like sequential format. We introduce an importance propagation layer that unifies 1) updates of the feature matrix and node importance and 2) GNN-based graph propagation of feature values. This initialization and updating mechanism allows for informed feature learning and improved graph representation.

We apply GraphPINE to cancer drug response prediction using drug screening and gene data collected for over 5,000 gene nodes included in a gene-gene graph, with a drug-target interaction (DTI) graph providing initial importance. The gene-gene graph and DTIs were obtained from curated sources and weighted by the number of articles discussing relationships between drugs and genes. GraphPINE achieves a PR-AUC of 0.894 and ROC-AUC of 0.796 across 952 drugs. Code is available at https://anonymous.4open.science/r/GraphPINE-40DE.
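A schematic of what an importance propagation layer with an LSTM-like gate might look like; the specific gating form below is an assumption for illustration, not the published layer:

```python
# Sketch (gating form assumed): node importance, initialized from drug-target
# knowledge, is propagated alongside features, and a learned gate decides how
# much propagated importance replaces the current importance at each layer.
import torch

def importance_propagation(A_norm, H, imp, W, Wg):
    # A_norm: (N, N) normalized adjacency; H: (N, d) features;
    # imp: (N, 1) node importance; W: (d, d') weights; Wg: (2, 1) gate weights.
    H_new = torch.relu(A_norm @ H @ W)                        # feature propagation
    imp_nbr = A_norm @ imp                                    # propagated importance
    gate = torch.sigmoid(torch.cat([imp, imp_nbr], -1) @ Wg)  # LSTM-like gate
    imp_new = gate * imp + (1.0 - gate) * imp_nbr             # gated importance update
    return H_new * imp_new, imp_new                           # importance-weighted features
```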
[17] arXiv:2504.05461 [pdf, html, other]
Title: Intermediate Layer Classifiers for OOD generalization
Authors: Arnas Uselis, Seong Joon Oh
Comments: ICLR 2025
Subjects: Machine Learning (cs.LG)

Deep classifiers are known to be sensitive to data distribution shifts, primarily due to their reliance on spurious correlations in training data. It has been suggested that these classifiers can still find useful features in the network's last layer that hold up under such shifts. In this work, we question the use of last-layer representations for out-of-distribution (OOD) generalisation and explore the utility of intermediate layers. To this end, we introduce Intermediate Layer Classifiers (ILCs). We discover that intermediate layer representations frequently offer substantially better generalisation than those from the penultimate layer. In many cases, zero-shot OOD generalisation using earlier-layer representations approaches the few-shot performance of retraining on penultimate layer representations. This is confirmed across multiple datasets, architectures, and types of distribution shifts. Our analysis suggests that intermediate layers are less sensitive to distribution shifts compared to the penultimate layer. These findings highlight the importance of understanding how information is distributed across network layers and its role in OOD generalisation, while also pointing to the limits of penultimate layer representation utility. Code is available at https://github.com/oshapio/intermediate-layer-generalization.
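An intermediate layer classifier reduces to a linear probe on frozen mid-network features; a minimal sketch, where the layer choice and the pooling step are assumptions (the paper evaluates many layers systematically):

```python
# Sketch: freeze the backbone up to a chosen layer, pool its feature maps,
# and fit a linear probe on top.
import torch
import torch.nn as nn

def fit_ilc(backbone_upto_layer, loader, n_classes, feat_dim, epochs=5):
    probe = nn.Linear(feat_dim, n_classes)
    opt = torch.optim.Adam(probe.parameters(), lr=1e-3)
    for _ in range(epochs):
        for x, y in loader:
            with torch.no_grad():
                f = backbone_upto_layer(x)     # frozen features, e.g. (B, C, H, W)
                f = f.mean(dim=(-2, -1))       # global average pool -> (B, C)
            loss = nn.functional.cross_entropy(probe(f), y)
            opt.zero_grad(); loss.backward(); opt.step()
    return probe
```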
[18] arXiv:2504.05471 [pdf, other]
Title: Graph Neural Networks for Enhancing Ensemble Forecasts of Extreme Rainfall
Authors: Christopher Bülte, Sohir Maskey, Philipp Scholl, Jonas von Berg, Gitta Kutyniok
Comments: Accepted paper at ICLR 2025 - Tackling Climate Change with Machine Learning Workshop (https://www.climatechange.ai/events/iclr2025)
Subjects: Machine Learning (cs.LG)

Climate change is increasing the occurrence of extreme precipitation events, threatening infrastructure, agriculture, and public safety. Ensemble prediction systems provide probabilistic forecasts but exhibit biases and difficulties in capturing extreme weather. While post-processing techniques aim to enhance forecast accuracy, they rarely focus on precipitation, which exhibits complex spatial dependencies and tail behavior. Our novel framework leverages graph neural networks to post-process ensemble forecasts, specifically modeling the extremes of the underlying distribution. This allows the model to capture spatial dependencies and improves forecast accuracy for extreme events, leading to more reliable forecasts and mitigating the risks of extreme precipitation and flooding.
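One plausible shape for such a post-processing model, sketched with assumed inputs (per-station ensemble statistics) and an assumed distributional head; the paper's architecture and loss may differ:

```python
# Sketch (inputs and head assumed): a small message-passing network maps
# per-station ensemble statistics to parameters of a tail-aware predictive
# distribution, e.g. GEV location/scale/shape.
import torch
import torch.nn as nn

class EnsemblePostGNN(nn.Module):
    def __init__(self, n_feats, hidden=32):
        super().__init__()
        self.enc = nn.Linear(n_feats, hidden)
        self.msg = nn.Linear(hidden, hidden)
        self.out = nn.Linear(hidden, 3)            # location, scale, shape

    def forward(self, A_norm, x):                  # x: (stations, ensemble stats)
        h = torch.relu(self.enc(x))
        h = torch.relu(h + A_norm @ self.msg(h))   # one message-passing step
        loc, scale, shape = self.out(h).unbind(-1)
        return loc, nn.functional.softplus(scale), shape   # scale kept positive
```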
</p> </div> </dd> <dt> <a name='item19'>[19]</a> <a href ="/abs/2504.05478" title="Abstract" id="2504.05478"> arXiv:2504.05478 </a> [<a href="/pdf/2504.05478" title="Download PDF" id="pdf-2504.05478" aria-labelledby="pdf-2504.05478">pdf</a>, <a href="https://arxiv.org/html/2504.05478v1" title="View HTML" id="html-2504.05478" aria-labelledby="html-2504.05478" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05478" title="Other formats" id="oth-2504.05478" aria-labelledby="oth-2504.05478">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> GraphRAFT: Retrieval Augmented Fine-Tuning for Knowledge Graphs on Graph Databases </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Clemedtson,+A">Alfred Clemedtson</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shi,+B">Borun Shi</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Computation and Language (cs.CL); Information Retrieval (cs.IR) </div> <p class='mathjax'> Large language models have shown remarkable language processing and reasoning ability but are prone to hallucinate when asked about private data. Retrieval-augmented generation (RAG) retrieves relevant data that fit into an LLM's context window and prompts the LLM for an answer. GraphRAG extends this approach to structured Knowledge Graphs (KGs) and questions regarding entities multiple hops away. The majority of recent GraphRAG methods either overlook the retrieval step or have ad hoc retrieval processes that are abstract or inefficient. This prevents them from being adopted when the KGs are stored in graph databases supporting graph query languages. In this work, we present GraphRAFT, a retrieve-and-reason framework that finetunes LLMs to generate provably correct Cypher queries to retrieve high-quality subgraph contexts and produce accurate answers. Our method is the first such solution that can be taken off-the-shelf and used on KGs stored in native graph DBs. Benchmarks suggest that our method is sample-efficient and scales with the availability of training data. Our method achieves significantly better results than all state-of-the-art models across all four standard metrics on two challenging Q&As on large text-attributed KGs. 
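<p>The retrieve-and-reason loop can be pictured roughly as follows; <code>generate</code> stands in for the finetuned LLM, the prompts are invented for illustration, and the graph database is reached through the standard neo4j Python driver.</p> <pre><code>
# Schematic loop in the spirit of GraphRAFT, not the authors' implementation.
from neo4j import GraphDatabase

driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "secret"))

def answer(question, generate):
    # Step 1: the finetuned model writes a Cypher query for the question.
    cypher = generate(f"Write a Cypher query answering: {question}")
    # Step 2: run the query against the native graph database.
    with driver.session() as session:
        subgraph = session.run(cypher).data()
    # Step 3: the model reasons over the retrieved subgraph context.
    return generate(f"Question: {question}\nContext: {subgraph}\nAnswer:")
</code></pre>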
</p> </div> </dd> <dt> <a name='item20'>[20]</a> <a href ="/abs/2504.05490" title="Abstract" id="2504.05490"> arXiv:2504.05490 </a> [<a href="/pdf/2504.05490" title="Download PDF" id="pdf-2504.05490" aria-labelledby="pdf-2504.05490">pdf</a>, <a href="https://arxiv.org/html/2504.05490v1" title="View HTML" id="html-2504.05490" aria-labelledby="html-2504.05490" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05490" title="Other formats" id="oth-2504.05490" aria-labelledby="oth-2504.05490">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Optimal Bayesian Affine Estimator and Active Learning for the Wiener Model </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Vakili,+S">Sasan Vakili</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mazo,+M">Manuel Mazo Jr.</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Esfahani,+P+M">Peyman Mohajerin Esfahani</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 23 pages, 4 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Systems and Control (eess.SY) </div> <p class='mathjax'> This paper presents a Bayesian estimation framework for Wiener models, focusing on learning nonlinear output functions under known linear state dynamics. We derive a closed-form optimal affine estimator for the unknown parameters, characterized by the so-called "dynamic basis statistics (DBS)." Several features of the proposed estimator are studied, including Bayesian unbiasedness, closed-form posterior statistics, error monotonicity in trajectory length, and consistency condition (also known as persistent excitation). In the special case of Fourier basis functions, we demonstrate that the closed-form description is computationally available, as the Fourier DBS enjoys explicit expression. Furthermore, we identify an inherent inconsistency in single-trajectory measurements, regardless of input excitation. Leveraging the closed-form estimation error, we develop an active learning algorithm synthesizing input signals to minimize estimation error. Numerical experiments validate the efficacy of our approach, showing significant improvements over traditional regularized least-squares methods. 
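<p>For orientation (a general fact, not the paper's DBS-specific expressions), the Bayes-optimal affine estimator of parameters $\theta$ from measurements $y$ takes the standard linear-minimum-mean-square-error form, which the closed-form estimator above presumably specializes:</p> <p class='mathjax'> $$\hat{\theta}(y) \;=\; \mathbb{E}[\theta] + \Sigma_{\theta y}\,\Sigma_{y}^{-1}\bigl(y - \mathbb{E}[y]\bigr), \qquad \Sigma_{\theta y} = \mathrm{Cov}(\theta, y),\;\; \Sigma_{y} = \mathrm{Cov}(y).$$ </p>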
</p> </div> </dd> <dt> <a name='item21'>[21]</a> <a href ="/abs/2504.05520" title="Abstract" id="2504.05520"> arXiv:2504.05520 </a> [<a href="/pdf/2504.05520" title="Download PDF" id="pdf-2504.05520" aria-labelledby="pdf-2504.05520">pdf</a>, <a href="https://arxiv.org/html/2504.05520v1" title="View HTML" id="html-2504.05520" aria-labelledby="html-2504.05520" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05520" title="Other formats" id="oth-2504.05520" aria-labelledby="oth-2504.05520">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Efficient Reinforcement Finetuning via Adaptive Curriculum Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Shi,+T">Taiwei Shi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wu,+Y">Yiyang Wu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Song,+L">Linxin Song</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhou,+T">Tianyi Zhou</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhao,+J">Jieyu Zhao</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 18 pages, 4 figures, 2 tables </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Computation and Language (cs.CL) </div> <p class='mathjax'> Reinforcement finetuning (RFT) has shown great potential for enhancing the mathematical reasoning capabilities of large language models (LLMs), but it is often sample- and compute-inefficient, requiring extensive training. In this work, we introduce AdaRFT (Adaptive Curriculum Reinforcement Finetuning), a method that significantly improves both the efficiency and final accuracy of RFT through adaptive curriculum learning. AdaRFT dynamically adjusts the difficulty of training problems based on the model's recent reward signals, ensuring that the model consistently trains on tasks that are challenging but solvable. This adaptive sampling strategy accelerates learning by maintaining an optimal difficulty range, avoiding wasted computation on problems that are too easy or too hard. AdaRFT requires only a lightweight extension to standard RFT algorithms like Proximal Policy Optimization (PPO), without modifying the reward function or model architecture. Experiments on competition-level math datasets, including AMC, AIME, and IMO-style problems, demonstrate that AdaRFT significantly improves both training efficiency and reasoning performance. We evaluate AdaRFT across multiple data distributions and model sizes, showing that it reduces the number of training steps by up to 2x and improves accuracy by a considerable margin, offering a more scalable and effective RFT framework. 
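<p>A minimal sketch of the curriculum mechanic described above; the proportional target-update rule and the tolerance window are assumptions for illustration, not the paper's exact algorithm.</p> <pre><code>
# Reward-driven curriculum sampling in the spirit of AdaRFT (hypothetical).
import random

def sample_batch(problems, target, k=32, tol=0.1):
    """Pick problems whose difficulty is close to the current target."""
    near = [p for p in problems if abs(p["difficulty"] - target) <= tol]
    pool = near or problems
    return random.sample(pool, min(k, len(pool)))

def update_target(target, mean_reward, goal=0.5, lr=0.05):
    """Raise difficulty when the policy succeeds too often, lower it when
    it fails too often, keeping training challenging but solvable."""
    return min(1.0, max(0.0, target + lr * (mean_reward - goal)))

# Inside the RFT loop (e.g., PPO), after each update:
# batch = sample_batch(problems, target)
# mean_reward = run_ppo_step(batch)   # hypothetical trainer call
# target = update_target(target, mean_reward)
</code></pre>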
</p> </div> </dd> <dt> <a name='item22'>[22]</a> <a href ="/abs/2504.05530" title="Abstract" id="2504.05530"> arXiv:2504.05530 </a> [<a href="/pdf/2504.05530" title="Download PDF" id="pdf-2504.05530" aria-labelledby="pdf-2504.05530">pdf</a>, <a href="https://arxiv.org/html/2504.05530v1" title="View HTML" id="html-2504.05530" aria-labelledby="html-2504.05530" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05530" title="Other formats" id="oth-2504.05530" aria-labelledby="oth-2504.05530">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> FORCE: Feature-Oriented Representation with Clustering and Explanation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Mukherjee,+R">Rishav Mukherjee</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Thompson,+J+A">Jeffrey Ahearn Thompson</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 12 pages, 3 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Applications (stat.AP) </div> <p class='mathjax'> Learning about underlying patterns in data using latent unobserved structures to improve the accuracy of predictive models has become an active avenue of deep learning research. Most approaches cluster the original features to capture certain latent structures. However, the information gained in the process can often be implicitly derived by sufficiently complex models. Thus, such approaches often provide minimal benefits. We propose a SHAP (Shapley Additive exPlanations) based supervised deep learning framework FORCE which relies on a two-stage usage of SHAP values in the neural network architecture: (i) an additional latent feature, derived by clustering SHAP values, to guide model training, and (ii) an attention mechanism within the architecture initiated using the latent information. This approach gives a neural network an indication about the effect of unobserved values that modify feature importance for an observation. The proposed framework is evaluated on three real-life datasets. Our results demonstrate that FORCE led to dramatic improvements in overall performance compared to networks that did not incorporate the latent feature and attention framework (e.g., F1 score for presence of heart disease 0.80 vs 0.72). Using cluster assignments and attention based on SHAP values guides deep learning, enhancing latent pattern learning and overall discriminative capability. 
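<p>The latent-feature step might look roughly like this; computing the per-sample SHAP matrix itself (e.g., with the shap package) is assumed already done, and the one-hot encoding is an illustrative choice rather than the paper's exact design.</p> <pre><code>
# Cluster SHAP attributions and expose the assignment as a latent input.
import numpy as np
from sklearn.cluster import KMeans

def shap_latent_feature(shap_values, n_clusters=8, seed=0):
    """shap_values: (n_samples, n_features) array of SHAP attributions."""
    km = KMeans(n_clusters=n_clusters, n_init=10, random_state=seed)
    cluster_id = km.fit_predict(shap_values)
    # One-hot encode so the assignment can guide training as an extra input.
    return np.eye(n_clusters)[cluster_id], km

# X_aug = np.hstack([X, shap_latent_feature(shap_matrix)[0]])
</code></pre>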
</p> </div> </dd> <dt> <a name='item23'>[23]</a> <a href ="/abs/2504.05553" title="Abstract" id="2504.05553"> arXiv:2504.05553 </a> [<a href="/pdf/2504.05553" title="Download PDF" id="pdf-2504.05553" aria-labelledby="pdf-2504.05553">pdf</a>, <a href="https://arxiv.org/html/2504.05553v1" title="View HTML" id="html-2504.05553" aria-labelledby="html-2504.05553" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05553" title="Other formats" id="oth-2504.05553" aria-labelledby="oth-2504.05553">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Federated Hierarchical Reinforcement Learning for Adaptive Traffic Signal Control </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Fu,+Y">Yongjie Fu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhong,+L">Lingyun Zhong</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+Z">Zifan Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Di,+X">Xuan Di</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Multi-agent reinforcement learning (MARL) has shown promise for adaptive traffic signal control (ATSC), enabling multiple intersections to coordinate signal timings in real time. However, in large-scale settings, MARL faces constraints due to extensive data sharing and communication requirements. Federated learning (FL) mitigates these challenges by training shared models without directly exchanging raw data, yet traditional FL methods such as FedAvg struggle with highly heterogeneous intersections. Different intersections exhibit varying traffic patterns, demands, and road structures, so performing FedAvg across all agents is inefficient. To address this gap, we propose Hierarchical Federated Reinforcement Learning (HFRL) for ATSC. HFRL employs clustering-based or optimization-based techniques to dynamically group intersections and perform FedAvg independently within groups of intersections with similar characteristics, enabling more effective coordination and scalability than standard FedAvg. Our experiments on synthetic and real-world traffic networks demonstrate that HFRL not only outperforms both decentralized and standard federated RL approaches but also identifies suitable grouping patterns based on network structure or traffic demand, resulting in a more robust framework for distributed, heterogeneous systems. 
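<p>The grouped federated-averaging idea can be sketched as below; the trait vectors describing each intersection and the choice of KMeans are assumptions for illustration, standing in for the paper's clustering- or optimization-based grouping.</p> <pre><code>
# Cluster agents by intersection characteristics, then FedAvg within groups.
import numpy as np
from sklearn.cluster import KMeans

def grouped_fedavg(weights, traits, n_groups=3):
    """weights: list of dicts of numpy arrays (one per agent);
    traits: (n_agents, d) array describing each intersection."""
    groups = KMeans(n_clusters=n_groups, n_init=10).fit_predict(traits)
    merged = {}
    for g in range(n_groups):
        members = [w for w, gid in zip(weights, groups) if gid == g]
        merged[g] = {k: np.mean([m[k] for m in members], axis=0)
                     for k in members[0]}
    return merged, groups  # each agent then loads merged[groups[agent]]
</code></pre>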
</p> </div> </dd> <dt> <a name='item24'>[24]</a> <a href ="/abs/2504.05585" title="Abstract" id="2504.05585"> arXiv:2504.05585 </a> [<a href="/pdf/2504.05585" title="Download PDF" id="pdf-2504.05585" aria-labelledby="pdf-2504.05585">pdf</a>, <a href="https://arxiv.org/html/2504.05585v1" title="View HTML" id="html-2504.05585" aria-labelledby="html-2504.05585" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05585" title="Other formats" id="oth-2504.05585" aria-labelledby="oth-2504.05585">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> TW-CRL: Time-Weighted Contrastive Reward Learning for Efficient Inverse Reinforcement Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+Y">Yuxuan Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yang,+N">Ning Yang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xia,+S">Stephen Xia</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Episodic tasks in Reinforcement Learning (RL) often pose challenges due to sparse reward signals and high-dimensional state spaces, which hinder efficient learning. Additionally, these tasks often feature hidden "trap states" -- irreversible failures that prevent task completion but do not provide explicit negative rewards to guide agents away from repeated errors. To address these issues, we propose Time-Weighted Contrastive Reward Learning (TW-CRL), an Inverse Reinforcement Learning (IRL) framework that leverages both successful and failed demonstrations. By incorporating temporal information, TW-CRL learns a dense reward function that identifies critical states associated with success or failure. This approach not only enables agents to avoid trap states but also encourages meaningful exploration beyond simple imitation of expert trajectories. Empirical evaluations on navigation tasks and robotic manipulation benchmarks demonstrate that TW-CRL surpasses state-of-the-art methods, achieving improved efficiency and robustness. 
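<p>One way to picture the time-weighted labeling: states near the end of a failed trajectory (close to the trap) receive strongly negative targets, states near the end of a success strongly positive ones, and a regressor on these targets serves as a dense learned reward. The linear weighting and MLP head below are assumptions, not the paper's exact contrastive losses.</p> <pre><code>
# Illustrative time-weighted reward learning in the spirit of TW-CRL.
import numpy as np
from sklearn.neural_network import MLPRegressor

def time_weighted_targets(trajectory, success):
    T = len(trajectory)
    w = np.arange(1, T + 1) / T          # weight grows toward the outcome
    return w if success else -w

def fit_reward(trajs, outcomes):
    X = np.vstack([np.asarray(t) for t in trajs])
    y = np.concatenate([time_weighted_targets(t, s)
                        for t, s in zip(trajs, outcomes)])
    return MLPRegressor(hidden_layer_sizes=(64, 64), max_iter=500).fit(X, y)
</code></pre>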
</p> </div> </dd> <dt> <a name='item25'>[25]</a> <a href ="/abs/2504.05586" title="Abstract" id="2504.05586"> arXiv:2504.05586 </a> [<a href="/pdf/2504.05586" title="Download PDF" id="pdf-2504.05586" aria-labelledby="pdf-2504.05586">pdf</a>, <a href="https://arxiv.org/html/2504.05586v1" title="View HTML" id="html-2504.05586" aria-labelledby="html-2504.05586" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05586" title="Other formats" id="oth-2504.05586" aria-labelledby="oth-2504.05586">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Finding Fantastic Experts in MoEs: A Unified Study for Expert Dropping Strategies and Observations </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Jaiswal,+A">Ajay Jaiswal</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+J">Jianyu Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+Y">Yixiao Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+P">Pingzhi Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+T">Tianlong Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+Z">Zhangyang Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+C">Chong Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Pang,+R">Ruoming Pang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Du,+X">Xianzhi Du</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Sparsely activated Mixture-of-Experts (SMoE) has shown promise in scaling up the learning capacity of neural networks. However, vanilla SMoEs have issues such as expert redundancy and heavy memory requirements, making them inefficient and non-scalable, especially for resource-constrained scenarios. Expert-level sparsification of SMoEs involves pruning the least important experts to address these limitations. In this work, we aim to address three questions: (1) What is the best recipe to identify the least knowledgeable subset of experts that can be dropped with minimal impact on performance? (2) How should we perform expert dropping (one-shot or iterative), and what correction measures can we undertake to minimize its drastic impact on SMoE subnetwork capabilities? (3) What capabilities of full-SMoEs are severely impacted by the removal of the least dominant experts, and how can we recover them? Firstly, we propose MoE Experts Compression Suite (MC-Suite), which is a collection of some previously explored and multiple novel recipes to provide a comprehensive benchmark for estimating expert importance from diverse perspectives, as well as unveil numerous valuable insights for SMoE experts. Secondly, unlike prior works with a one-shot expert pruning approach, we explore the benefits of iterative pruning with the re-estimation of the MC-Suite criterion. Moreover, we introduce the benefits of task-agnostic fine-tuning as a correction mechanism during iterative expert dropping, which we term MoE Lottery Subnetworks. 
Lastly, we present an experimentally validated conjecture that, during expert dropping, SMoEs' instruction-following capabilities are predominantly hurt, and can be restored to a robust level by externally augmenting instruction-following capabilities using k-shot examples and supervised fine-tuning. </p> </div> </dd> <dt> <a name='item26'>[26]</a> <a href ="/abs/2504.05588" title="Abstract" id="2504.05588"> arXiv:2504.05588 </a> [<a href="/pdf/2504.05588" title="Download PDF" id="pdf-2504.05588" aria-labelledby="pdf-2504.05588">pdf</a>, <a href="https://arxiv.org/html/2504.05588v1" title="View HTML" id="html-2504.05588" aria-labelledby="html-2504.05588" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05588" title="Other formats" id="oth-2504.05588" aria-labelledby="oth-2504.05588">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Multi-fidelity Reinforcement Learning Control for Complex Dynamical Systems </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Sun,+L">Luning Sun</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+X">Xin-Yang Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhao,+S">Siyan Zhao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Grover,+A">Aditya Grover</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+J">Jian-Xun Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Thiagarajan,+J+J">Jayaraman J. Thiagarajan</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Controlling instabilities in complex dynamical systems is challenging in scientific and engineering applications. Deep reinforcement learning (DRL) has shown promising results across different scientific applications. However, the many-query nature of control tasks requires repeated interactions with the real environment of the underlying physics, and such data are usually sparse to collect experimentally or expensive to simulate for complex dynamics. Alternatively, controlling a surrogate model can mitigate the computational cost, but even a fast and accurate offline-trained learning-based model struggles to provide accurate pointwise dynamics when the dynamics are chaotic. To bridge this gap, the current work proposes a multi-fidelity reinforcement learning (MFRL) framework that leverages differentiable hybrid models for control tasks, where a physics-based hybrid model is corrected by limited high-fidelity data. We also propose a spectrum-based reward function for RL training. The effectiveness of the proposed framework is demonstrated on two complex dynamical systems from physics. The statistics of the MFRL control results match those computed from many-query evaluations of the high-fidelity environments and outperform other SOTA baselines. 
</p> </div> </dd> <dt> <a name='item27'>[27]</a> <a href ="/abs/2504.05610" title="Abstract" id="2504.05610"> arXiv:2504.05610 </a> [<a href="/pdf/2504.05610" title="Download PDF" id="pdf-2504.05610" aria-labelledby="pdf-2504.05610">pdf</a>, <a href="https://arxiv.org/html/2504.05610v1" title="View HTML" id="html-2504.05610" aria-labelledby="html-2504.05610" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05610" title="Other formats" id="oth-2504.05610" aria-labelledby="oth-2504.05610">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Fairness in Machine Learning-based Hand Load Estimation: A Case Study on Load Carriage Tasks </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Rahman,+A">Arafat Rahman</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lim,+S">Sol Lim</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chung,+S">Seokhyun Chung</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Predicting external hand load from sensor data is essential for ergonomic exposure assessments, as obtaining this information typically requires direct observation or supplementary data. While machine learning methods have been used to estimate external hand load from worker postures or force exertion data, our findings reveal systematic bias in these predictions due to individual differences such as age and biological sex. To explore this issue, we examined bias in hand load prediction by varying the sex ratio in the training dataset. We found substantial sex disparity in predictive performance, especially when the training dataset is more sex-imbalanced. To address this bias, we developed and evaluated a fair predictive model for hand load estimation that leverages a Variational Autoencoder (VAE) with feature disentanglement. This approach is designed to separate sex-agnostic and sex-specific latent features, minimizing feature overlap. The disentanglement capability enables the model to make predictions based solely on sex-agnostic features of motion patterns, ensuring fair prediction for both biological sexes. Our proposed fair algorithm outperformed conventional machine learning methods (e.g., Random Forests) in both fairness and predictive accuracy, achieving a lower mean absolute error (MAE) difference across male and female sets and improved fairness metrics such as statistical parity (SP) and positive and negative residual differences (PRD and NRD), even when trained on imbalanced sex datasets. These findings emphasize the importance of fairness-aware machine learning algorithms to prevent potential disadvantages in workplace health and safety for certain worker populations. 
</p> </div> </dd> <dt> <a name='item28'>[28]</a> <a href ="/abs/2504.05615" title="Abstract" id="2504.05615"> arXiv:2504.05615 </a> [<a href="/pdf/2504.05615" title="Download PDF" id="pdf-2504.05615" aria-labelledby="pdf-2504.05615">pdf</a>, <a href="https://arxiv.org/html/2504.05615v1" title="View HTML" id="html-2504.05615" aria-labelledby="html-2504.05615" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05615" title="Other formats" id="oth-2504.05615" aria-labelledby="oth-2504.05615">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> FedEFC: Federated Learning Using Enhanced Forward Correction Against Noisy Labels </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Yu,+S">Seunghun Yu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ahn,+J">Jin-Hyun Ahn</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kang,+J">Joonhyuk Kang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 9 pages, 3 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Federated Learning (FL) is a powerful framework for privacy-preserving distributed learning. It enables multiple clients to collaboratively train a global model without sharing raw data. However, handling noisy labels in FL remains a major challenge due to heterogeneous data distributions and communication constraints, which can severely degrade model performance. To address this issue, we propose FedEFC, a novel method designed to tackle the impact of noisy labels in FL. FedEFC mitigates this issue through two key techniques: (1) prestopping, which prevents overfitting to mislabeled data by dynamically halting training at an optimal point, and (2) loss correction, which adjusts model updates to account for label noise. In particular, we develop an effective loss correction tailored to the unique challenges of FL, including data heterogeneity and decentralized training. Furthermore, we provide a theoretical analysis, leveraging the composite proper loss property, to demonstrate that the FL objective function under noisy label distributions can be aligned with the clean label distribution. Extensive experimental results validate the effectiveness of our approach, showing that it consistently outperforms existing FL techniques in mitigating the impact of noisy labels, particularly under heterogeneous data settings (e.g., achieving up to 41.64% relative performance improvement over the existing loss correction method). 
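<p>For intuition, the classical forward loss correction that FedEFC builds on can be written in a few lines; this is the generic centralized form (with the noise-transition matrix assumed given), not the paper's federated tailoring.</p> <pre><code>
# Generic forward loss correction: push clean-class probabilities through an
# estimated noise-transition matrix before comparing with observed labels.
import torch

def forward_corrected_loss(logits, noisy_labels, T):
    """T[i, j] = P(observed label j | true label i), shape (C, C)."""
    clean_probs = torch.softmax(logits, dim=1)
    noisy_probs = clean_probs @ T          # model of the observed label dist.
    log_probs = torch.log(noisy_probs.clamp_min(1e-12))
    return torch.nn.functional.nll_loss(log_probs, noisy_labels)
</code></pre>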
</p> </div> </dd> <dt> <a name='item29'>[29]</a> <a href ="/abs/2504.05618" title="Abstract" id="2504.05618"> arXiv:2504.05618 </a> [<a href="/pdf/2504.05618" title="Download PDF" id="pdf-2504.05618" aria-labelledby="pdf-2504.05618">pdf</a>, <a href="https://arxiv.org/html/2504.05618v1" title="View HTML" id="html-2504.05618" aria-labelledby="html-2504.05618" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05618" title="Other formats" id="oth-2504.05618" aria-labelledby="oth-2504.05618">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Technical Report: Full Version of Analyzing and Optimizing Perturbation of DP-SGD Geometrically </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Duan,+J">Jiawei Duan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hu,+H">Haibo Hu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ye,+Q">Qingqing Ye</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sun,+X">Xinyue Sun</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> This is the full version of our paper "Analyzing and Optimizing Perturbation of DP-SGD Geometrically", which will appear in ICDE 2025 as a regular research paper </div> <div class='list-journal-ref'><span class='descriptor'>Journal-ref:</span> International Conference of Data Engineering (ICDE 2025) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Computer Vision and Pattern Recognition (cs.CV); Databases (cs.DB) </div> <p class='mathjax'> Differential privacy (DP) has become a prevalent privacy model in a wide range of machine learning tasks, especially after the debut of DP-SGD. However, DP-SGD, which directly perturbs gradients in the training iterations, fails to mitigate the negative impacts of noise on gradient direction. As a result, DP-SGD is often inefficient. Although various solutions (e.g., clipping to reduce the sensitivity of gradients and amplifying privacy bounds to save privacy budgets) are proposed to trade privacy for model efficiency, the root cause of its inefficiency has yet to be unveiled. <br>In this work, we first generalize DP-SGD and theoretically derive the impact of DP noise on the training process. Our analysis reveals that, in terms of a perturbed gradient, only the noise on the direction has a prominent impact on model efficiency, while the noise on the magnitude can be mitigated by optimization techniques, i.e., fine-tuning gradient clipping and learning rate. Besides, we confirm that traditional DP introduces biased noise on the direction when adding unbiased noise to the gradient itself. Overall, the perturbation of DP-SGD is actually sub-optimal from a geometric perspective. Motivated by this, we design a geometric perturbation strategy GeoDP within the DP framework, which perturbs the direction and the magnitude of a gradient separately. By directly reducing the noise on the direction, GeoDP mitigates the negative impact of DP noise on model efficiency with the same DP guarantee. Extensive experiments on two public datasets (i.e., MNIST and CIFAR-10), one synthetic dataset and three prevalent models (i.e., Logistic Regression, CNN and ResNet) confirm the effectiveness and generality of our strategy. 
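<p>The geometric decomposition can be sketched as below; the noise scales are placeholders for illustration, whereas GeoDP calibrates them to a DP budget, so this conveys only the direction/magnitude split.</p> <pre><code>
# Schematic separation of a gradient into direction and magnitude, each
# perturbed with its own noise (placeholder scales, not DP-calibrated).
import numpy as np

def geo_perturb(g, sigma_dir=0.1, sigma_mag=0.1, rng=None):
    rng = rng if rng is not None else np.random.default_rng()
    mag = np.linalg.norm(g)
    if mag == 0.0:
        return g
    direction = g / mag
    # Perturb the direction, then renormalize back onto the unit sphere.
    noisy_dir = direction + rng.normal(0.0, sigma_dir, size=g.shape)
    noisy_dir /= np.linalg.norm(noisy_dir)
    # Perturb the magnitude with its own (typically smaller) noise scale.
    noisy_mag = mag + rng.normal(0.0, sigma_mag)
    return noisy_mag * noisy_dir
</code></pre>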
</p> </div> </dd> <dt> <a name='item30'>[30]</a> <a href ="/abs/2504.05625" title="Abstract" id="2504.05625"> arXiv:2504.05625 </a> [<a href="/pdf/2504.05625" title="Download PDF" id="pdf-2504.05625" aria-labelledby="pdf-2504.05625">pdf</a>, <a href="https://arxiv.org/html/2504.05625v1" title="View HTML" id="html-2504.05625" aria-labelledby="html-2504.05625" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05625" title="Other formats" id="oth-2504.05625" aria-labelledby="oth-2504.05625">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Model-Agnostic Policy Explanations with Large Language Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Xi-Jia,+Z">Zhang Xi-Jia</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Guo,+Y">Yue Guo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+S">Shufei Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Stepputtis,+S">Simon Stepputtis</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gombolay,+M">Matthew Gombolay</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sycara,+K">Katia Sycara</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Campbell,+J">Joseph Campbell</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> This paper significantly extends our prior preprint [<a href="https://arxiv.org/abs/2311.18062" data-arxiv-id="2311.18062" class="link-https">arXiv:2311.18062</a>], which was not peer-reviewed and has since been substantially revised in methods, results, and authorship </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Intelligent agents, such as robots, are increasingly deployed in real-world, human-centric environments. To foster appropriate human trust and meet legal and ethical standards, these agents must be able to explain their behavior. However, state-of-the-art agents are typically driven by black-box models like deep neural networks, limiting their interpretability. We propose a method for generating natural language explanations of agent behavior based only on observed states and actions -- without access to the agent's underlying model. Our approach learns a locally interpretable surrogate model of the agent's behavior from observations, which then guides a large language model to generate plausible explanations with minimal hallucination. Empirical results show that our method produces explanations that are more comprehensible and correct than those from baselines, as judged by both language models and human evaluators. Furthermore, we find that participants in a user study more accurately predicted the agent's future actions when given our explanations, suggesting improved understanding of agent behavior. 
</p> </div> </dd> <dt> <a name='item31'>[31]</a> <a href ="/abs/2504.05627" title="Abstract" id="2504.05627"> arXiv:2504.05627 </a> [<a href="/pdf/2504.05627" title="Download PDF" id="pdf-2504.05627" aria-labelledby="pdf-2504.05627">pdf</a>, <a href="https://arxiv.org/html/2504.05627v1" title="View HTML" id="html-2504.05627" aria-labelledby="html-2504.05627" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05627" title="Other formats" id="oth-2504.05627" aria-labelledby="oth-2504.05627">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Maternal and Fetal Health Status Assessment by Using Machine Learning on Optical 3D Body Scans </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Cheng,+R">Ruting Cheng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zheng,+Y">Yijiang Zheng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Feng,+B">Boyuan Feng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Qiu,+C">Chuhui Qiu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Long,+Z">Zhuoxin Long</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Calderon,+J+A">Joaquin A. Calderon</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+X">Xiaoke Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=M.Phillips,+J">Jaclyn M. Phillips</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hahn,+J+K">James K. Hahn</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Monitoring maternal and fetal health during pregnancy is crucial for preventing adverse outcomes. While tests such as ultrasound scans offer high accuracy, they can be costly and inconvenient. Telehealth and more accessible body shape information provide pregnant women with a convenient way to monitor their health. This study explores the potential of 3D body scan data, captured during gestational weeks 18-24, to predict adverse pregnancy outcomes and estimate clinical parameters. We developed a novel algorithm with two parallel streams for extracting body shape features: one using supervised learning to extract sequential abdominal circumference information, and another using unsupervised learning to extract global shape descriptors, alongside a branch for demographic data. <br>Our results indicate that 3D body shape can assist in predicting preterm labor, gestational diabetes mellitus (GDM), gestational hypertension (GH), and in estimating fetal weight. Compared to other machine learning models, our algorithm achieved the best performance, with prediction accuracies exceeding 88% and fetal weight estimation accuracy of 76.74% within a 10% error margin, outperforming conventional anthropometric methods by 22.22%. 
</p> </div> </dd> <dt> <a name='item32'>[32]</a> <a href ="/abs/2504.05633" title="Abstract" id="2504.05633"> arXiv:2504.05633 </a> [<a href="/pdf/2504.05633" title="Download PDF" id="pdf-2504.05633" aria-labelledby="pdf-2504.05633">pdf</a>, <a href="https://arxiv.org/html/2504.05633v1" title="View HTML" id="html-2504.05633" aria-labelledby="html-2504.05633" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05633" title="Other formats" id="oth-2504.05633" aria-labelledby="oth-2504.05633">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> To Start Up a Start-Up$-$Embedding Strategic Demand Development in Operational On-Demand Fulfillment via Reinforcement Learning with Information Shaping </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+X">Xinwei Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ulmer,+M+W">Marlin W. Ulmer</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Thomas,+B+W">Barrett W. Thomas</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> The last few years have witnessed rapid growth in the on-demand delivery market, with many start-ups entering the field. However, not all of these start-ups have succeeded due to various reasons, among others, not being able to establish a large enough customer base. In this paper, we address this problem that many on-demand transportation start-ups face: how to establish themselves in a new market. When starting, such companies often have limited fleet resources to serve demand across a city. Depending on the use of the fleet, varying service quality is observed in different areas of the city, and in turn, the service quality impacts the respective growth of demand in each area. Thus, operational fulfillment decisions drive the longer-term demand development. To integrate strategic demand development into real-time fulfillment operations, we propose a two-step approach. First, we derive analytical insights into optimal allocation decisions for a stylized problem. Second, we use these insights to shape the training data of a reinforcement learning strategy for operational real-time fulfillment. Our experiments demonstrate that combining operational efficiency with long-term strategic planning is highly advantageous. Further, we show that the careful shaping of training data is essential for the successful development of demand. 
</p> </div> </dd> <dt> <a name='item33'>[33]</a> <a href ="/abs/2504.05638" title="Abstract" id="2504.05638"> arXiv:2504.05638 </a> [<a href="/pdf/2504.05638" title="Download PDF" id="pdf-2504.05638" aria-labelledby="pdf-2504.05638">pdf</a>, <a href="https://arxiv.org/html/2504.05638v1" title="View HTML" id="html-2504.05638" aria-labelledby="html-2504.05638" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05638" title="Other formats" id="oth-2504.05638" aria-labelledby="oth-2504.05638">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> TAGC: Optimizing Gradient Communication in Distributed Transformer Training </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Polyakov,+I">Igor Polyakov</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dukhanov,+A">Alexey Dukhanov</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Spirin,+E">Egor Spirin</a></div> <div class='list-journal-ref'><span class='descriptor'>Journal-ref:</span> EuroMLSys '25: Proceedings of the 5th Workshop on Machine Learning and Systems. 2025. 254-260 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Distributed, Parallel, and Cluster Computing (cs.DC) </div> <p class='mathjax'> The increasing complexity of large language models (LLMs) necessitates efficient training strategies to mitigate the high computational costs associated with distributed training. A significant bottleneck in this process is gradient synchronization across multiple GPUs, particularly in the zero-redundancy parallelism mode. In this paper, we introduce Transformer-Aware Gradient Compression (TAGC), an optimized gradient compression algorithm designed specifically for transformer-based models. TAGC extends the lossless homomorphic compression method by adapting it for sharded models and incorporating transformer-specific optimizations, such as layer-selective compression and dynamic sparsification. Our experimental results demonstrate that TAGC accelerates training by up to 15% compared to the standard Fully Sharded Data Parallel (FSDP) approach, with minimal impact on model quality. We integrate TAGC into the PyTorch FSDP framework; the implementation is publicly available at <a href="https://github.com/ipolyakov/TAGC" rel="external noopener nofollow" class="link-external link-https">this https URL</a>. 
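<p>To convey only the layer-selective flavor (TAGC itself builds on lossless homomorphic compression, which this sketch does not implement), a naive top-k variant might look like:</p> <pre><code>
# Illustrative layer-selective gradient sparsification (not TAGC itself).
import torch

def sparsify_grad(grad, keep_ratio):
    k = max(1, int(grad.numel() * keep_ratio))
    flat = grad.flatten()
    _, idx = flat.abs().topk(k)
    out = torch.zeros_like(flat)
    out[idx] = flat[idx]            # keep only the largest-magnitude entries
    return out.view_as(grad)

def compress_model_grads(model, attn_ratio=0.5, mlp_ratio=0.1):
    # Compress attention layers less aggressively than MLP layers.
    for name, p in model.named_parameters():
        if p.grad is not None:
            ratio = attn_ratio if "attn" in name else mlp_ratio
            p.grad = sparsify_grad(p.grad, ratio)
</code></pre>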
</p> </div> </dd> <dt> <a name='item34'>[34]</a> <a href ="/abs/2504.05646" title="Abstract" id="2504.05646"> arXiv:2504.05646 </a> [<a href="/pdf/2504.05646" title="Download PDF" id="pdf-2504.05646" aria-labelledby="pdf-2504.05646">pdf</a>, <a href="https://arxiv.org/html/2504.05646v1" title="View HTML" id="html-2504.05646" aria-labelledby="html-2504.05646" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05646" title="Other formats" id="oth-2504.05646" aria-labelledby="oth-2504.05646">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Lattice: Learning to Efficiently Compress the Memory </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Karami,+M">Mahdi Karami</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mirrokni,+V">Vahab Mirrokni</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Attention mechanisms have revolutionized sequence learning but suffer from quadratic computational complexity. This paper introduces Lattice, a novel recurrent neural network (RNN) mechanism that leverages the inherent low-rank structure of K-V matrices to efficiently compress the cache into a fixed number of memory slots, achieving sub-quadratic complexity. We formulate this compression as an online optimization problem and derive a dynamic memory update rule based on a single gradient descent step. The resulting recurrence features a state- and input-dependent gating mechanism, offering an interpretable memory update process. The core innovation is the orthogonal update: each memory slot is updated exclusively with information orthogonal to its current state, so that only novel, non-redundant data is incorporated, minimizing interference with previously stored information. The experimental results show that Lattice achieves the best perplexity compared to all baselines across diverse context lengths, with performance improvement becoming more pronounced as the context length increases. 
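<p>The orthogonal update is easy to picture in isolation: write only the component of the incoming vector that the slot does not already hold. The fixed gating scalar below is a placeholder for Lattice's learned state- and input-dependent gates.</p> <pre><code>
# Minimal sketch of an orthogonal memory-slot update.
import numpy as np

def orthogonal_update(slot, v, gate=0.1, eps=1e-8):
    """slot, v: 1-D arrays; returns the updated slot."""
    unit = slot / (np.linalg.norm(slot) + eps)
    v_orth = v - np.dot(v, unit) * unit   # remove what the slot already holds
    return slot + gate * v_orth

m = np.array([1.0, 0.0])
print(orthogonal_update(m, np.array([3.0, 4.0])))  # only [0, 4] is written
</code></pre>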
</p> </div> </dd> <dt> <a name='item35'>[35]</a> <a href ="/abs/2504.05651" title="Abstract" id="2504.05651"> arXiv:2504.05651 </a> [<a href="/pdf/2504.05651" title="Download PDF" id="pdf-2504.05651" aria-labelledby="pdf-2504.05651">pdf</a>, <a href="https://arxiv.org/html/2504.05651v1" title="View HTML" id="html-2504.05651" aria-labelledby="html-2504.05651" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05651" title="Other formats" id="oth-2504.05651" aria-labelledby="oth-2504.05651">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Measuring Déjà vu Memorization Efficiently </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Kokhlikyan,+N">Narine Kokhlikyan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jayaraman,+B">Bargav Jayaraman</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bordes,+F">Florian Bordes</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Guo,+C">Chuan Guo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chaudhuri,+K">Kamalika Chaudhuri</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Computer Vision and Pattern Recognition (cs.CV) </div> <p class='mathjax'> Recent research has shown that representation learning models may accidentally memorize their training data. For example, the déjà vu method shows that for certain representation learning models and training images, it is sometimes possible to correctly predict the foreground label given only the representation of the background - better than through dataset-level correlations. However, their measurement method requires training two models - one to estimate dataset-level correlations and the other to estimate memorization. This multiple model setup becomes infeasible for large open-source models. In this work, we propose alternative simple methods to estimate dataset-level correlations, and show that these can be used to approximate an off-the-shelf model's memorization ability without any retraining. This enables, for the first time, the measurement of memorization in pre-trained open-source image representation and vision-language representation models. Our results show that different ways of measuring memorization yield very similar aggregate results. We also find that open-source models typically have lower aggregate memorization than similar models trained on a subset of the data. The code is available both for vision and vision language models. 
</p> </div> </dd> <dt> <a name='item36'>[36]</a> <a href ="/abs/2504.05670" title="Abstract" id="2504.05670"> arXiv:2504.05670 </a> [<a href="/pdf/2504.05670" title="Download PDF" id="pdf-2504.05670" aria-labelledby="pdf-2504.05670">pdf</a>, <a href="https://arxiv.org/html/2504.05670v1" title="View HTML" id="html-2504.05670" aria-labelledby="html-2504.05670" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05670" title="Other formats" id="oth-2504.05670" aria-labelledby="oth-2504.05670">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Dual Boost-Driven Graph-Level Clustering Network </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Smith,+J">John Smith</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Tu,+W">Wenxuan Tu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wu,+J">Junlong Wu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+W">Wenxin Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+J">Jingxin Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+H">Haotian Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Cheng,+J">Jieren Cheng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lei,+H">Huajie Lei</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yao,+G">Guangzhen Yao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+L">Lingren Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+M">Mengfei Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Han,+R">Renda Han</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+Y">Yu Li</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Graph-level clustering remains a pivotal yet formidable challenge in graph learning. Recently, the integration of deep learning with representation learning has demonstrated notable advancements, yielding performance enhancements to a certain degree. However, existing methods suffer from at least one of the following issues: (1) the original graph structure has noise, and (2) during feature propagation and pooling, noise is gradually aggregated into the graph-level embeddings. Consequently, these two limitations mask clustering-friendly information, leading to suboptimal graph-level clustering performance. To this end, we propose a novel Dual Boost-Driven Graph-Level Clustering Network (DBGCN) to alternately promote graph-level clustering and filtering out interference information in a unified framework. Specifically, in the pooling step, we evaluate the contribution of features at the global level and optimize them using a learnable transformation matrix to obtain high-quality graph-level representations, such that the model's reasoning capability can be improved. Moreover, to enable reliable graph-level clustering, we first identify and suppress information detrimental to clustering by evaluating similarities between graph-level representations, providing more accurate guidance for multi-view fusion. Extensive experiments demonstrate that DBGCN outperforms the state-of-the-art graph-level clustering methods on six benchmark datasets. 
</p> </div> </dd> <dt> <a name='item37'>[37]</a> <a href ="/abs/2504.05695" title="Abstract" id="2504.05695"> arXiv:2504.05695 </a> [<a href="/pdf/2504.05695" title="Download PDF" id="pdf-2504.05695" aria-labelledby="pdf-2504.05695">pdf</a>, <a href="https://arxiv.org/html/2504.05695v1" title="View HTML" id="html-2504.05695" aria-labelledby="html-2504.05695" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05695" title="Other formats" id="oth-2504.05695" aria-labelledby="oth-2504.05695">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Architecture independent generalization bounds for overparametrized deep ReLU networks </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+T">Thomas Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chien,+C+K">Chun-Kai Kevin Chien</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ewald,+P+M">Patricia Muñoz Ewald</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Moore,+A+G">Andrew G. Moore</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> AMS Latex, 12 pages </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Analysis of PDEs (math.AP); Optimization and Control (math.OC); Machine Learning (stat.ML) </div> <p class='mathjax'> We prove that overparametrized neural networks are able to generalize with a test error that is independent of the level of overparametrization, and independent of the Vapnik-Chervonenkis (VC) dimension. We prove explicit bounds that only depend on the metric geometry of the test and training sets, on the regularity properties of the activation function, and on the operator norms of the weights and norms of biases. For overparametrized deep ReLU networks with a training sample size bounded by the input space dimension, we explicitly construct zero loss minimizers without use of gradient descent, and prove that the generalization error is independent of the network architecture. </p> </div> </dd> <dt> <a name='item38'>[38]</a> <a href ="/abs/2504.05716" title="Abstract" id="2504.05716"> arXiv:2504.05716 </a> [<a href="/pdf/2504.05716" title="Download PDF" id="pdf-2504.05716" aria-labelledby="pdf-2504.05716">pdf</a>, <a href="https://arxiv.org/html/2504.05716v1" title="View HTML" id="html-2504.05716" aria-labelledby="html-2504.05716" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05716" title="Other formats" id="oth-2504.05716" aria-labelledby="oth-2504.05716">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Single-Agent vs. 
Multi-Agent LLM Strategies for Automated Student Reflection Assessment </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+G">Gen Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+L">Li Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Tang,+C">Cheng Tang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=%C5%A0v%C3%A1bensk%C3%BD,+V">Valdemar Švábenský</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Deguchi,+D">Daisuke Deguchi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yamashita,+T">Takayoshi Yamashita</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shimada,+A">Atsushi Shimada</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> To be published in Proceedings of the 29th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD 2025) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Computers and Society (cs.CY) </div> <p class='mathjax'> We explore the use of Large Language Models (LLMs) for automated assessment of open-text student reflections and prediction of academic performance. Traditional methods for evaluating reflections are time-consuming and may not scale effectively in educational settings. In this work, we employ LLMs to transform student reflections into quantitative scores using two assessment strategies (single-agent and multi-agent) and two prompting techniques (zero-shot and few-shot). Our experiments, conducted on a dataset of 5,278 reflections from 377 students over three academic terms, demonstrate that the single-agent with few-shot strategy achieves the highest match rate with human evaluations. Furthermore, models utilizing LLM-assessed reflection scores outperform baselines in both at-risk student identification and grade prediction tasks. These findings suggest that LLMs can effectively automate reflection assessment, reduce educators' workload, and enable timely support for students who may need additional assistance. Our work emphasizes the potential of integrating advanced generative AI technologies into educational practices to enhance student engagement and academic success. 
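<p>A single-agent, few-shot scoring call of the kind compared above might be wired up as follows; <code>llm</code> is a placeholder for any chat-completion function, and the rubric and example reflections are invented for illustration.</p> <pre><code>
# Hypothetical few-shot reflection scoring (single-agent strategy).
FEW_SHOT = """Score the student reflection from 1 (superficial) to 5 (deep).
Reflection: "I attended the lecture." Score: 1
Reflection: "Comparing today's proof with last week's, I now see why the
induction hypothesis must be strengthened." Score: 5
"""

def score_reflection(text, llm):
    prompt = FEW_SHOT + f'Reflection: "{text}" Score:'
    reply = llm(prompt)
    digits = [c for c in reply if c.isdigit()]
    return int(digits[0]) if digits else None  # parse the first digit
</code></pre>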
</p> </div> </dd> <dt> <a name='item39'>[39]</a> <a href ="/abs/2504.05756" title="Abstract" id="2504.05756"> arXiv:2504.05756 </a> [<a href="/pdf/2504.05756" title="Download PDF" id="pdf-2504.05756" aria-labelledby="pdf-2504.05756">pdf</a>, <a href="https://arxiv.org/html/2504.05756v1" title="View HTML" id="html-2504.05756" aria-labelledby="html-2504.05756" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05756" title="Other formats" id="oth-2504.05756" aria-labelledby="oth-2504.05756">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Interpretable Non-linear Survival Analysis with Evolutionary Symbolic Regression </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Rovito,+L">Luigi Rovito</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Virgolin,+M">Marco Virgolin</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Neural and Evolutionary Computing (cs.NE) </div> <p class='mathjax'> Survival Regression (SuR) is a key technique for modeling time to event in important applications such as clinical trials and semiconductor manufacturing. Currently, SuR algorithms belong to one of three classes: non-linear black-box -- allowing adaptability to many datasets but offering limited interpretability (e.g., tree ensembles); linear glass-box -- being easier to interpret but limited to modeling only linear interactions (e.g., Cox proportional hazards); and non-linear glass-box -- allowing adaptability and interpretability, but empirically found to have several limitations (e.g., explainable boosting machines, survival trees). In this work, we investigate whether Symbolic Regression (SR), i.e., the automated search of mathematical expressions from data, can lead to non-linear glass-box survival models that are interpretable and accurate. We propose an evolutionary, multi-objective, and multi-expression implementation of SR adapted to SuR. Our empirical results on five real-world datasets show that SR consistently outperforms traditional glass-box methods for SuR in terms of accuracy per number of dimensions in the model, while exhibiting comparable accuracy with black-box methods. Furthermore, we offer qualitative examples to assess the interpretability potential of SR models for SuR. Code at: <a href="https://github.com/lurovi/SurvivalMultiTree-pyNSGP" rel="external noopener nofollow" class="link-external link-https">this https URL</a>. 
</p> </div> </dd> <dt> <a name='item40'>[40]</a> <a href ="/abs/2504.05758" title="Abstract" id="2504.05758"> arXiv:2504.05758 </a> [<a href="/pdf/2504.05758" title="Download PDF" id="pdf-2504.05758" aria-labelledby="pdf-2504.05758">pdf</a>, <a href="/format/2504.05758" title="Other formats" id="oth-2504.05758" aria-labelledby="oth-2504.05758">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Addressing Class Imbalance with Probabilistic Graphical Models and Variational Inference </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Lou,+Y">Yujia Lou</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+J">Jie Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sheng,+Y">Yuan Sheng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+J">Jiawei Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+Y">Yiwei Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ren,+Y">Yaokun Ren</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> This study proposes a method for imbalanced data classification based on deep probabilistic graphical models (DPGMs) to address the insufficient ability of traditional methods to learn from minority-class samples. To counter the classification bias caused by class imbalance, we introduce variational inference to optimize the probabilistic model, enabling it to adaptively adjust its representation of minority classes, and combine this with a class-aware weight adjustment strategy to enhance the classifier's sensitivity to minority classes. In addition, we use an adversarial learning mechanism to generate minority-class samples in the latent space so that the model can better characterize the category boundary in the high-dimensional feature space. The method is evaluated on the Kaggle "Credit Card Fraud Detection" dataset and compared with a variety of advanced imbalanced classification methods (such as GAN-based sampling, BRF, XGBoost-Cost Sensitive, SAAD, HAN). The results show that the proposed method achieves the best performance on AUC, Precision, Recall, and F1-score, effectively improving the recognition rate of minority classes and reducing the false alarm rate. The method can be widely applied to imbalanced classification tasks such as financial fraud detection, medical diagnosis, and anomaly detection, providing a new solution for related research. 
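A minimal sketch of the class-aware weighting ingredient, assuming inverse-frequency weights in PyTorch; the deep probabilistic graphical model, variational inference, and adversarial latent sampling are beyond this fragment. <pre>
# Class-aware weighting: inverse-frequency weights make the loss more
# sensitive to minority classes.
import torch
import torch.nn as nn

y = torch.tensor([0] * 950 + [1] * 50)               # 95/5 imbalance
counts = torch.bincount(y).float()
weights = counts.sum() / (len(counts) * counts)      # inverse frequency
print("class weights:", weights)                     # ~[0.526, 10.0]

model = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 2))
loss_fn = nn.CrossEntropyLoss(weight=weights)        # class-aware loss

X = torch.randn(1000, 20)                            # stand-in features
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
for _ in range(5):                                   # a few toy epochs
    opt.zero_grad()
    loss = loss_fn(model(X), y)
    loss.backward()
    opt.step()
print("final loss:", float(loss))
</pre>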
</p> </div> </dd> <dt> <a name='item41'>[41]</a> <a href ="/abs/2504.05761" title="Abstract" id="2504.05761"> arXiv:2504.05761 </a> [<a href="/pdf/2504.05761" title="Download PDF" id="pdf-2504.05761" aria-labelledby="pdf-2504.05761">pdf</a>, <a href="https://arxiv.org/html/2504.05761v1" title="View HTML" id="html-2504.05761" aria-labelledby="html-2504.05761" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05761" title="Other formats" id="oth-2504.05761" aria-labelledby="oth-2504.05761">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> AiGAS-dEVL-RC: An Adaptive Growing Neural Gas Model for Recurrently Drifting Unsupervised Data Streams </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Arostegi,+M">Maria Arostegi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bilbao,+M+N">Miren Nekane Bilbao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lobo,+J+L">Jesus L. Lobo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Del+Ser,+J">Javier Del Ser</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Copyright 2025 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Neural and Evolutionary Computing (cs.NE) </div> <p class='mathjax'> Concept drift and extreme verification latency pose significant challenges in data stream learning, particularly when dealing with recurring concept changes in dynamic environments. This work introduces a novel method based on the Growing Neural Gas (GNG) algorithm, designed to effectively handle abrupt recurrent drifts while adapting to incrementally evolving data distributions (incremental drifts). Leveraging the self-organizing and topological adaptability of GNG, the proposed approach maintains a compact yet informative memory structure, allowing it to efficiently store and retrieve knowledge of past or recurring concepts, even under conditions of delayed or sparse stream supervision. Our experiments highlight the superiority of our approach over existing data stream learning methods designed to cope with incremental non-stationarities and verification latency, demonstrating its ability to quickly adapt to new drifts, robustly manage recurring patterns, and maintain high predictive accuracy with a minimal memory footprint. Unlike other techniques that fail to leverage recurring knowledge, our proposed approach is shown to be a robust and efficient online learning solution for unsupervised drifting data streams. 
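A GNG-inspired toy follows: an online prototype memory that grows in novel regions and prunes stale nodes under a recurring drift; real GNG additionally maintains an edge topology and error-driven insertion, so this is an illustration only. <pre>
# GNG-inspired online prototype memory under recurring drift.
import numpy as np

rng = np.random.default_rng(1)
protos = rng.normal(size=(2, 2))          # start with two prototypes
util = np.ones(2)                         # utility score per prototype

def update(x, eps_b=0.1, eps_n=0.01, max_nodes=30, decay=0.995):
    global protos, util
    d = np.linalg.norm(protos - x, axis=1)
    b, s = np.argsort(d)[:2]              # best and second-best match
    protos[b] += eps_b * (x - protos[b])  # winner moves toward sample
    protos[s] += eps_n * (x - protos[s])  # neighbour moves a little
    util *= decay
    util[b] += 1.0
    if d[b] > 1.5 and len(protos) < max_nodes:   # novel region: grow
        protos = np.vstack([protos, x])
        util = np.append(util, 1.0)
    elif len(protos) > 2 and util.min() < 0.05:  # stale node: prune
        k = util.argmin()
        protos = np.delete(protos, k, axis=0)
        util = np.delete(util, k)

# stream with a recurring drift: cluster A -> cluster B -> back to A
for t in range(3000):
    centre = np.array([0., 0.]) if (t // 1000) % 2 == 0 else np.array([5., 5.])
    update(centre + 0.3 * rng.normal(size=2))
print(len(protos), "prototypes retained")
</pre>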
</p> </div> </dd> <dt> <a name='item42'>[42]</a> <a href ="/abs/2504.05768" title="Abstract" id="2504.05768"> arXiv:2504.05768 </a> [<a href="/pdf/2504.05768" title="Download PDF" id="pdf-2504.05768" aria-labelledby="pdf-2504.05768">pdf</a>, <a href="https://arxiv.org/html/2504.05768v1" title="View HTML" id="html-2504.05768" aria-labelledby="html-2504.05768" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05768" title="Other formats" id="oth-2504.05768" aria-labelledby="oth-2504.05768">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Temporal Dynamic Embedding for Irregularly Sampled Time Series </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Kim,+M">Mincheol Kim</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shin,+S">Soo-Yong Shin</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> In several practical applications, particularly healthcare, clinical data of each patient is individually recorded in a database at irregular intervals as required. This produces sparse and irregularly sampled time series, which are difficult to cast into the fixed, structured representation that neural network models require. We therefore propose temporal dynamic embedding (TDE), which enables neural network models to receive data whose number of variables changes over time. TDE regards each time series variable as an embedding vector evolving over time, instead of a conventional fixed structured representation, which suffers from a critical missingness problem. For each time step, TDE allows for the selective adoption and aggregation of only the observed variable subsets and represents the current status of the patient based on current observations. The experiment was conducted on three clinical datasets: PhysioNet 2012, MIMIC-III, and PhysioNet 2019. The TDE model performed competitively or better than the imputation-based baseline and several recent state-of-the-art methods with reduced training runtime. 
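A minimal sketch of the core mechanism, assuming learned per-variable embeddings and mean aggregation: only the variables observed at a time step are embedded and pooled, so the input dimensionality may change over time. <pre>
# TDE-style step: embed (variable id, value) pairs for observed variables
# only, then pool into a fixed-size patient state.
import torch
import torch.nn as nn

class TDEStep(nn.Module):
    def __init__(self, n_vars: int, dim: int = 32):
        super().__init__()
        self.var_emb = nn.Embedding(n_vars, dim)   # identity of variable
        self.val_proj = nn.Linear(1, dim)          # measured value

    def forward(self, var_ids: torch.Tensor, values: torch.Tensor):
        # var_ids: (k,) indices of observed variables; values: (k,)
        e = self.var_emb(var_ids) + self.val_proj(values.unsqueeze(-1))
        return e.mean(dim=0)                       # pooled patient state

step = TDEStep(n_vars=100)
# time step 1: only two variables observed
h1 = step(torch.tensor([3, 42]), torch.tensor([0.7, -1.2]))
# time step 2: a different, larger subset observed
h2 = step(torch.tensor([3, 42, 7, 99]), torch.tensor([0.8, -1.0, 0.1, 2.3]))
print(h1.shape, h2.shape)   # same (32,) state despite different input sizes
</pre>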
</p> </div> </dd> <dt> <a name='item43'>[43]</a> <a href ="/abs/2504.05812" title="Abstract" id="2504.05812"> arXiv:2504.05812 </a> [<a href="/pdf/2504.05812" title="Download PDF" id="pdf-2504.05812" aria-labelledby="pdf-2504.05812">pdf</a>, <a href="https://arxiv.org/html/2504.05812v1" title="View HTML" id="html-2504.05812" aria-labelledby="html-2504.05812" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05812" title="Other formats" id="oth-2504.05812" aria-labelledby="oth-2504.05812">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Right Question is Already Half the Answer: Fully Unsupervised LLM Reasoning Incentivization </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+Q">Qingyang Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wu,+H">Haitao Wu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+C">Changqing Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhao,+P">Peilin Zhao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bian,+Y">Yatao Bian</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Ongoing work </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> While large language models (LLMs) have demonstrated exceptional capabilities in challenging tasks such as mathematical reasoning, existing methods to enhance reasoning ability predominantly rely on supervised fine-tuning (SFT) followed by reinforcement learning (RL) on reasoning-specific data after pre-training. However, these approaches critically depend on external supervision--such as human-labelled reasoning traces, verified golden answers, or pre-trained reward models--which limits scalability and practical applicability. In this work, we propose Entropy Minimized Policy Optimization (EMPO), which makes an early attempt at fully unsupervised LLM reasoning incentivization. EMPO does not require any supervised information for incentivizing reasoning capabilities (i.e., neither verifiable reasoning traces, problems with golden answers, nor additional pre-trained reward models). By continuously minimizing the predictive entropy of LLMs on unlabeled user queries in a latent semantic space, EMPO enables purely self-supervised evolution of reasoning capabilities with strong flexibility and practicality. Our experiments demonstrate competitive performance of EMPO on both mathematical reasoning and free-form commonsense reasoning tasks. Specifically, without any supervised signals, EMPO boosts the accuracy of Qwen2.5-Math-7B Base from 30.7\% to 48.1\% on mathematical benchmarks and improves truthfulness accuracy of Qwen2.5-7B Instruct from 87.16\% to 97.25\% on TruthfulQA. 
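A toy rendering of the entropy-minimization signal: sample several answers per query, cluster them (exact string match here stands in for the paper's latent semantic space), and use low predictive entropy as the reward; the sampler below is a hypothetical stub. <pre>
# Entropy-based self-supervised reward over sampled answers.
import math
from collections import Counter

def sample_llm(query: str, n: int = 8) -> list[str]:
    # hypothetical stand-in for n stochastic LLM decodes of the final answer
    return ["42", "42", "42", "41", "42", "42", "7", "42"][:n]

def entropy_reward(query: str, n: int = 8) -> float:
    answers = sample_llm(query, n)
    counts = Counter(answers)                    # crude semantic clusters
    probs = [c / n for c in counts.values()]
    H = -sum(p * math.log(p) for p in probs)     # predictive entropy
    return -H                                    # minimize entropy = maximize reward

print(entropy_reward("What is 6 * 7?"))          # higher when answers agree
</pre>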
</p> </div> </dd> <dt> <a name='item44'>[44]</a> <a href ="/abs/2504.05822" title="Abstract" id="2504.05822"> arXiv:2504.05822 </a> [<a href="/pdf/2504.05822" title="Download PDF" id="pdf-2504.05822" aria-labelledby="pdf-2504.05822">pdf</a>, <a href="https://arxiv.org/html/2504.05822v1" title="View HTML" id="html-2504.05822" aria-labelledby="html-2504.05822" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05822" title="Other formats" id="oth-2504.05822" aria-labelledby="oth-2504.05822">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Federated Unlearning Made Practical: Seamless Integration via Negated Pseudo-Gradients </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Mora,+A">Alessio Mora</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mazzocca,+C">Carlo Mazzocca</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Montanari,+R">Rebecca Montanari</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bellavista,+P">Paolo Bellavista</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> The right to be forgotten is a fundamental principle of privacy-preserving regulations and extends to Machine Learning (ML) paradigms such as Federated Learning (FL). While FL enhances privacy by enabling collaborative model training without sharing private data, trained models still retain the influence of training data. Recently proposed Federated Unlearning (FU) methods often rely on impractical assumptions for real-world FL deployments, such as storing client update histories or requiring access to a publicly available dataset. To address these constraints, this paper introduces a novel method that leverages negated Pseudo-gradients Updates for Federated Unlearning (PUF). Our approach uses only standard client model updates, which are already produced during regular FL rounds, and interprets them as pseudo-gradients. When a client needs to be forgotten, we apply the negation of their pseudo-gradients, appropriately scaled, to the global model. Unlike state-of-the-art mechanisms, PUF seamlessly integrates with FL workflows, incurs no additional computational and communication overhead beyond standard FL rounds, and supports concurrent unlearning requests. We extensively evaluated the proposed method on two well-known benchmark image classification datasets (CIFAR-10 and CIFAR-100) and a real-world medical imaging dataset for segmentation (ProstateMRI), using three different neural architectures: two residual networks and a vision transformer. The experimental results across various settings demonstrate that PUF achieves state-of-the-art forgetting effectiveness and recovery time, without relying on any additional assumptions, thus underscoring its practical applicability. 
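The core update is simple enough to sketch directly; here the stored client delta and the scaling factor are illustrative assumptions, not PUF's exact rule. <pre>
# Negated pseudo-gradient sketch: in FedAvg-style training the server can
# treat a client's delta (local_model - global_model) as a pseudo-gradient;
# to forget that client, apply the negated, scaled delta to the global model.
import torch

global_w = {"fc.weight": torch.randn(4, 4)}
client_updates = {                              # last delta per client
    "client_3": {"fc.weight": 0.1 * torch.randn(4, 4)},
}

def unlearn(weights, delta, scale=1.0):
    # new = old + scale * (-delta)  ==  old - scale * delta
    return {k: v - scale * delta[k] for k, v in weights.items()}

# forget client_3 by pushing the global model opposite to its contribution
global_w = unlearn(global_w, client_updates["client_3"], scale=0.5)
print(global_w["fc.weight"].shape)
</pre>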
</p> </div> </dd> <dt> <a name='item45'>[45]</a> <a href ="/abs/2504.05840" title="Abstract" id="2504.05840"> arXiv:2504.05840 </a> [<a href="/pdf/2504.05840" title="Download PDF" id="pdf-2504.05840" aria-labelledby="pdf-2504.05840">pdf</a>, <a href="https://arxiv.org/html/2504.05840v1" title="View HTML" id="html-2504.05840" aria-labelledby="html-2504.05840" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05840" title="Other formats" id="oth-2504.05840" aria-labelledby="oth-2504.05840">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Momentum Boosted Episodic Memory for Improving Learning in Long-Tailed RL Environments </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Fernandes,+D">Dolton Fernandes</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kaushik,+P">Pramod Kaushik</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shukla,+H">Harsh Shukla</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Surampudi,+B+R">Bapi Raju Surampudi</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Traditional Reinforcement Learning (RL) algorithms assume the distribution of the data to be uniform or mostly uniform. However, this is not the case with most real-world applications like autonomous driving or in nature where animals roam. Some experiences are encountered frequently, and most of the remaining experiences occur rarely; the resulting distribution is called Zipfian. Taking inspiration from the theory of complementary learning systems, an architecture for learning from Zipfian distributions is proposed where important long-tail trajectories are discovered in an unsupervised manner. The proposal comprises an episodic memory buffer containing a prioritised memory module that ensures important rare trajectories are kept longer, addressing the Zipfian problem, where credit assignment needs to happen in a sample-efficient manner. The experiences are then reinstated from episodic memory and given weighted importance, forming the trajectory to be executed. Notably, the proposed architecture is modular, can be incorporated in any RL architecture and yields improved performance in multiple Zipfian tasks over traditional architectures. 
Our method outperforms IMPALA by a significant margin on all three tasks and all three evaluation metrics (Zipfian, Uniform, and Rare Accuracy) and also gives improvements on most Atari environments that are considered challenging. </p> </div> </dd> <dt> <a name='item46'>[46]</a> <a href ="/abs/2504.05844" title="Abstract" id="2504.05844"> arXiv:2504.05844 </a> [<a href="/pdf/2504.05844" title="Download PDF" id="pdf-2504.05844" aria-labelledby="pdf-2504.05844">pdf</a>, <a href="https://arxiv.org/html/2504.05844v1" title="View HTML" id="html-2504.05844" aria-labelledby="html-2504.05844" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05844" title="Other formats" id="oth-2504.05844" aria-labelledby="oth-2504.05844">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Adaptive Substructure-Aware Expert Model for Molecular Property Prediction </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Jiang,+T">Tianyi Jiang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+Z">Zeyu Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yu,+S">Shanqing Yu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xuan,+Q">Qi Xuan</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Molecular property prediction is essential for applications such as drug discovery and toxicity assessment. While Graph Neural Networks (GNNs) have shown promising results by modeling molecules as molecular graphs, their reliance on data-driven learning limits their ability to generalize, particularly in the presence of data imbalance and diverse molecular substructures. Existing methods often overlook the varying contributions of different substructures to molecular properties, treating them uniformly. To address these challenges, we propose ASE-Mol, a novel GNN-based framework that leverages a Mixture-of-Experts (MoE) approach for molecular property prediction. ASE-Mol incorporates BRICS decomposition and significant substructure awareness to dynamically identify positive and negative substructures. By integrating a MoE architecture, it reduces the adverse impact of negative motifs while improving adaptability to positive motifs. Experimental results on eight benchmark datasets demonstrate that ASE-Mol achieves state-of-the-art performance, with significant improvements in both accuracy and interpretability. 
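As a concrete illustration of the two ingredients, the sketch below runs RDKit's real BRICS decomposition on one molecule and routes stand-in fragment descriptors through a toy mixture-of-experts layer; it is not the ASE-Mol architecture. <pre>
# BRICS decomposition (RDKit) plus a toy mixture-of-experts layer.
import torch
import torch.nn as nn
from rdkit import Chem
from rdkit.Chem import BRICS

frags = sorted(BRICS.BRICSDecompose(Chem.MolFromSmiles("CCOC(=O)c1ccccc1N")))
print(frags)                     # BRICS fragment SMILES of the molecule

class TinyMoE(nn.Module):
    def __init__(self, in_dim=8, n_experts=2):
        super().__init__()
        self.gate = nn.Linear(in_dim, n_experts)
        self.experts = nn.ModuleList(nn.Linear(in_dim, 1) for _ in range(n_experts))

    def forward(self, x):                          # x: (batch, in_dim)
        w = torch.softmax(self.gate(x), dim=-1)    # per-sample expert routing
        outs = torch.stack([e(x) for e in self.experts], dim=-1)  # (b, 1, E)
        return (outs * w.unsqueeze(1)).sum(-1)     # weighted expert mixture

x = torch.randn(4, 8)            # stand-in fragment descriptors
print(TinyMoE()(x).shape)        # (4, 1) property prediction
</pre>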
</p> </div> </dd> <dt> <a name='item47'>[47]</a> <a href ="/abs/2504.05868" title="Abstract" id="2504.05868"> arXiv:2504.05868 </a> [<a href="/pdf/2504.05868" title="Download PDF" id="pdf-2504.05868" aria-labelledby="pdf-2504.05868">pdf</a>, <a href="https://arxiv.org/html/2504.05868v1" title="View HTML" id="html-2504.05868" aria-labelledby="html-2504.05868" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05868" title="Other formats" id="oth-2504.05868" aria-labelledby="oth-2504.05868">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Energy-Conserving Neural Network Closure Model for Long-Time Accurate and Stable LES </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=van+Gastelen,+T">Toby van Gastelen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Edeling,+W">Wouter Edeling</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sanderse,+B">Benjamin Sanderse</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 40 pages, 11 figures, source code can be found at <a href="https://github.com/tobyvg/LES_ML.jl" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Numerical Analysis (math.NA) </div> <p class='mathjax'> Machine learning-based closure models for LES have shown promise in capturing complex turbulence dynamics but often suffer from instabilities and physical inconsistencies. In this work, we develop a novel skew-symmetric neural architecture as a closure model that enforces stability while preserving key physical conservation laws. Our approach leverages a discretization that ensures mass, momentum, and energy conservation, along with a face-averaging filter to maintain mass conservation in coarse-grained velocity fields. We compare our model against several conventional data-driven closures (including unconstrained convolutional neural networks), and the physics-based Smagorinsky model. Performance is evaluated on decaying turbulence and Kolmogorov flow for multiple coarse-graining factors. In these test cases we observe that unconstrained machine learning models suffer from numerical instabilities. In contrast, our skew-symmetric model remains stable across all tests, though at the cost of increased dissipation. Despite this trade-off, we demonstrate that our model still outperforms the Smagorinsky model in unseen scenarios. These findings highlight the potential of structure-preserving machine learning closures for reliable long-time LES. </p> </div> </dd> <dt> <a name='item48'>[48]</a> <a href ="/abs/2504.05894" title="Abstract" id="2504.05894"> arXiv:2504.05894 </a> [<a href="/pdf/2504.05894" title="Download PDF" id="pdf-2504.05894" aria-labelledby="pdf-2504.05894">pdf</a>, <a href="https://arxiv.org/html/2504.05894v1" title="View HTML" id="html-2504.05894" aria-labelledby="html-2504.05894" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05894" title="Other formats" id="oth-2504.05894" aria-labelledby="oth-2504.05894">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Why do zeroes happen? 
A model-based approach for demand classification </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Svetunkov,+I">Ivan Svetunkov</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sroginis,+A">Anna Sroginis</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 39 pages, 11 figures, 3 tables </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Methodology (stat.ME) </div> <p class='mathjax'> Effective demand forecasting is critical for inventory management, production planning, and decision making across industries. Selecting the appropriate model and suitable features to efficiently capture patterns in the data is one of the main challenges in demand forecasting. In reality, this becomes even more complicated when the recorded sales have zeroes, which can happen naturally or due to some anomalies, such as stockouts and recording errors. Mistreating the zeroes can lead to the application of inappropriate forecasting methods, thus leading to poor decision making. Furthermore, the demand itself can have different fundamental characteristics, and being able to distinguish one type from another might bring substantial benefits in terms of accuracy and thus decision making. We propose a two-stage model-based classification framework that first identifies artificially occurring zeroes and then classifies demand into one of the possible types: regular/intermittent, intermittent smooth/lumpy, fractional/count. The framework utilises statistical modelling and information criteria to detect anomalous zeroes and then classify demand into those categories. We then argue that different types of demand need different features, and show empirically that these features tend to increase the accuracy of forecasting methods compared to applying them directly to the dataset without the generated features and the two-stage framework. Our general practical recommendation based on this is to use the mixture approach for intermittent demand, capturing the demand sizes and demand probability separately, as it seems to improve the accuracy of different forecasting approaches. 
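For intuition, the sketch below applies the classical ADI/CV&#178; categorization rule as a crude stand-in for the paper's model-based, information-criteria framework. <pre>
# Demand-type sketch: ADI > 1.32 marks intermittent demand,
# CV^2 > 0.49 marks erratic demand sizes (classical thresholds).
import numpy as np

def classify(demand: np.ndarray) -> str:
    nz = np.flatnonzero(demand)
    if len(nz) < 2:
        return "insufficient history"
    adi = len(demand) / len(nz)                  # avg inter-demand interval
    sizes = demand[nz]
    cv2 = (sizes.std() / sizes.mean()) ** 2      # demand-size variability
    if adi <= 1.32:
        return "smooth" if cv2 <= 0.49 else "erratic"
    return "intermittent" if cv2 <= 0.49 else "lumpy"

print(classify(np.array([0, 0, 3, 0, 0, 0, 2, 0, 4, 0])))   # intermittent
print(classify(np.array([5, 6, 5, 7, 6, 5, 6, 7, 6, 5])))   # smooth
</pre>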
</p> </div> </dd> <dt> <a name='item49'>[49]</a> <a href ="/abs/2504.05897" title="Abstract" id="2504.05897"> arXiv:2504.05897 </a> [<a href="/pdf/2504.05897" title="Download PDF" id="pdf-2504.05897" aria-labelledby="pdf-2504.05897">pdf</a>, <a href="/format/2504.05897" title="Other formats" id="oth-2504.05897" aria-labelledby="oth-2504.05897">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> HybriMoE: Hybrid CPU-GPU Scheduling and Cache Management for Efficient MoE Inference </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zhong,+S">Shuzhang Zhong</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sun,+Y">Yanfan Sun</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liang,+L">Ling Liang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+R">Runsheng Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Huang,+R">Ru Huang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+M">Meng Li</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted by DAC 25 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Distributed, Parallel, and Cluster Computing (cs.DC) </div> <p class='mathjax'> The Mixture of Experts (MoE) architecture has demonstrated significant advantages, as it makes it possible to increase model capacity without a proportional increase in computation. However, the large MoE model size still introduces substantial memory demands, which usually requires expert offloading on resource-constrained platforms and incurs significant overhead. Hybrid CPU-GPU inference has been proposed to leverage CPU computation to reduce expert loading overhead but faces major challenges: on one hand, the expert activation patterns of MoE models are highly unstable, rendering the fixed mapping strategies in existing works inefficient; on the other hand, the hybrid CPU-GPU schedule for MoE is inherently complex due to the diverse expert sizes, structures, uneven workload distribution, etc. To address these challenges, in this paper, we propose HybriMoE, a hybrid CPU-GPU inference framework that improves resource utilization through a novel CPU-GPU scheduling and cache management system. HybriMoE introduces (i) a dynamic intra-layer scheduling strategy to balance workloads across CPU and GPU, (ii) an impact-driven inter-layer prefetching algorithm, and (iii) a score-based caching algorithm to mitigate expert activation instability. We implement HybriMoE on top of the kTransformers framework and evaluate it on three widely used MoE-based LLMs. Experimental results demonstrate that HybriMoE achieves an average speedup of 1.33$\times$ in the prefill stage and 1.70$\times$ in the decode stage compared to state-of-the-art hybrid MoE inference frameworks. Our code is available at: <a href="https://github.com/PKU-SEC-Lab/HybriMoE" rel="external noopener nofollow" class="link-external link-https">this https URL</a>. 
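A toy score-based cache in the spirit of (iii), assuming decayed activation counts and evict-the-coldest on a miss; HybriMoE's actual policy and CPU-GPU scheduling are more involved. <pre>
# Score-based expert cache: keep the experts with the highest decayed
# activation scores resident, evicting the lowest-scored one on a miss.
class ExpertCache:
    def __init__(self, capacity: int, decay: float = 0.9):
        self.capacity, self.decay = capacity, decay
        self.scores: dict[int, float] = {}        # expert_id -> score

    def access(self, expert_id: int) -> bool:
        for k in self.scores:                     # decay all scores
            self.scores[k] *= self.decay
        hit = expert_id in self.scores
        if not hit and len(self.scores) >= self.capacity:
            victim = min(self.scores, key=self.scores.get)
            del self.scores[victim]               # evict coldest expert
        self.scores[expert_id] = self.scores.get(expert_id, 0.0) + 1.0
        return hit

cache = ExpertCache(capacity=4)
trace = [1, 2, 3, 1, 1, 4, 5, 1, 2, 1]            # unstable activations
hits = sum(cache.access(e) for e in trace)
print(f"hit rate: {hits / len(trace):.2f}")
</pre>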
</p> </div> </dd> <dt> <a name='item50'>[50]</a> <a href ="/abs/2504.05923" title="Abstract" id="2504.05923"> arXiv:2504.05923 </a> [<a href="/pdf/2504.05923" title="Download PDF" id="pdf-2504.05923" aria-labelledby="pdf-2504.05923">pdf</a>, <a href="https://arxiv.org/html/2504.05923v1" title="View HTML" id="html-2504.05923" aria-labelledby="html-2504.05923" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05923" title="Other formats" id="oth-2504.05923" aria-labelledby="oth-2504.05923">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Uncovering Fairness through Data Complexity as an Early Indicator </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Ferreira,+J+S">Juliett Suárez Ferreira</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Slavkovik,+M">Marija Slavkovik</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Casillas,+J">Jorge Casillas</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Data Structures and Algorithms (cs.DS) </div> <p class='mathjax'> Fairness is a central concern in machine learning (ML) applications. To date, no study has examined how disparities in classification complexity between privileged and unprivileged groups could influence the fairness of solutions, even though such disparities can serve as a preliminary indicator of potential unfairness. In this work, we investigate this gap: specifically, we focus on synthetic datasets designed to capture a variety of biases, ranging from historical bias to measurement and representational bias, and evaluate how differences in various complexity metrics correlate with group fairness metrics. We then apply association rule mining to identify patterns that link disproportionate complexity differences between groups with fairness-related outcomes, offering data-centric indicators to guide bias mitigation. Our findings are also validated by their application in real-world problems, providing evidence that quantifying group-wise classification complexity can uncover early indicators of potential fairness challenges. This investigation helps practitioners to proactively address bias in classification tasks. 
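A minimal sketch of group-wise complexity measurement, using Fisher's discriminant ratio (the classical F1 complexity measure) on synthetic groups; the association-rule mining step is omitted. <pre>
# Group-wise complexity: a large gap between groups is the kind of early
# fairness signal the paper studies. Data here is synthetic.
import numpy as np

rng = np.random.default_rng(0)

def fisher_ratio(X, y):
    """Max over features of between-class vs within-class separation."""
    X0, X1 = X[y == 0], X[y == 1]
    num = (X0.mean(0) - X1.mean(0)) ** 2
    den = X0.var(0) + X1.var(0) + 1e-12
    return (num / den).max()

# privileged group: well-separated classes; unprivileged: heavy overlap
Xp = np.vstack([rng.normal(0, 1, (200, 5)), rng.normal(3, 1, (200, 5))])
Xu = np.vstack([rng.normal(0, 1, (200, 5)), rng.normal(0.5, 1, (200, 5))])
y = np.array([0] * 200 + [1] * 200)

print("privileged F1:  ", round(fisher_ratio(Xp, y), 2))   # easy problem
print("unprivileged F1:", round(fisher_ratio(Xu, y), 2))   # hard problem
</pre>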
</p> </div> </dd> <dt> <a name='item51'>[51]</a> <a href ="/abs/2504.05928" title="Abstract" id="2504.05928"> arXiv:2504.05928 </a> [<a href="/pdf/2504.05928" title="Download PDF" id="pdf-2504.05928" aria-labelledby="pdf-2504.05928">pdf</a>, <a href="/format/2504.05928" title="Other formats" id="oth-2504.05928" aria-labelledby="oth-2504.05928">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Evaluation of the impact of expert knowledge: How decision support scores impact the effectiveness of automatic knowledge-driven feature engineering (aKDFE) </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Bj%C3%B6rneld,+O">Olof Björneld</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hammar,+T">Tora Hammar</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Nilsson,+D">Daniel Nilsson</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lincke,+A">Alisa Lincke</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=L%C3%B6we,+W">Welf Löwe</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 43 pages, including the Appendix, 19 tables and 13 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Adverse Drug Events (ADEs), harmful medication effects, pose significant healthcare challenges, impacting patient safety and costs. This study evaluates automatic Knowledge-Driven Feature Engineering (aKDFE) for improved ADE prediction from Electronic Health Record (EHR) data, comparing it with automated event-based Knowledge Discovery in Databases (KDD). We investigated how incorporating domain-specific ADE risk scores for prolonged heart QT interval, extracted from the Janusmed Riskprofile (Janusmed) Clinical Decision Support System (CDSS), affects prediction performance using EHR data and medication handling events. Results indicate that, while aKDFE step 1 (event-based feature generation) alone did not significantly improve ADE prediction performance, aKDFE step 2 (patient-centric transformation) enhances the prediction performance. High Area Under the Receiver Operating Characteristic curve (AUROC) values suggest strong feature correlations to the outcome, aligning with the predictive power of patients' prior healthcare history for ADEs. Statistical analysis did not confirm that incorporating the Janusmed information (i) risk scores and (ii) medication route of administration into the model's feature set enhanced predictive performance. However, the patient-centric transformation applied by aKDFE proved to be a highly effective feature engineering approach. Limitations include a single-project focus, potential bias from machine learning pipeline methods, and reliance on AUROC. In conclusion, aKDFE, particularly with patient-centric transformation, improves ADE prediction from EHR data. Future work will explore attention-based models, event feature sequences, and automatic methods for incorporating domain knowledge into the aKDFE framework. 
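The patient-centric transformation can be sketched as a simple pivot from event-level rows to one feature vector per patient; column names and aggregates here are illustrative, not the aKDFE schema. <pre>
# Event-level EHR rows -> one row per patient (event counts plus an
# aggregated decision-support score).
import pandas as pd

events = pd.DataFrame({
    "patient_id": [1, 1, 1, 2, 2],
    "event":      ["dispense", "dose_change", "dispense", "dispense", "admit"],
    "qt_risk":    [2, 3, 2, 1, 0],     # stand-in decision-support score
})

per_patient = (
    events.pivot_table(index="patient_id", columns="event",
                       values="qt_risk", aggfunc="count", fill_value=0)
    .add_prefix("n_")
    .join(events.groupby("patient_id")["qt_risk"].max().rename("max_qt_risk"))
)
print(per_patient)
</pre>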
</p> </div> </dd> <dt> <a name='item52'>[52]</a> <a href ="/abs/2504.05945" title="Abstract" id="2504.05945"> arXiv:2504.05945 </a> [<a href="/pdf/2504.05945" title="Download PDF" id="pdf-2504.05945" aria-labelledby="pdf-2504.05945">pdf</a>, <a href="https://arxiv.org/html/2504.05945v1" title="View HTML" id="html-2504.05945" aria-labelledby="html-2504.05945" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05945" title="Other formats" id="oth-2504.05945" aria-labelledby="oth-2504.05945">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> CKGAN: Training Generative Adversarial Networks Using Characteristic Kernel Integral Probability Metrics </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+K">Kuntian Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yu,+S">Simin Yu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+Y">Yaoshu Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Onizuka,+M">Makoto Onizuka</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xiao,+C">Chuan Xiao</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Source codes are available at <a href="https://github.com/chuanxiao1983/CKGAN/" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Computer Vision and Pattern Recognition (cs.CV) </div> <p class='mathjax'> In this paper, we propose CKGAN, a novel generative adversarial network (GAN) variant based on an integral probability metrics framework with characteristic kernel (CKIPM). CKIPM, as a distance between two probability distributions, is designed to optimize the lower bound of the maximum mean discrepancy (MMD) in a reproducing kernel Hilbert space, and thus can be used to train GANs. CKGAN mitigates the notorious problem of mode collapse by mapping the generated images back to random noise. To save the effort of selecting the kernel function manually, we propose a soft selection method to automatically learn a characteristic kernel function. The experimental evaluation conducted on a set of synthetic and real image benchmarks (MNIST, CelebA, etc.) demonstrates that CKGAN generally outperforms other MMD-based GANs. The results also show that, at the cost of moderately more training time, the automatically selected kernel function delivers performance very close to the best manually fine-tuned kernel on real image benchmarks and is able to improve the performance of other MMD-based GANs. 
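A sketch of the MMD estimator with a soft mixture over RBF bandwidths, echoing the automatic kernel selection idea; in CKGAN the mixture would be trained adversarially, whereas here the logits are merely parameters, and the estimator below is the simple biased variant. <pre>
# MMD^2 with a softmax-weighted mixture of RBF kernels.
import torch

def rbf(x, y, sigma):
    d2 = torch.cdist(x, y) ** 2
    return torch.exp(-d2 / (2 * sigma ** 2))

def mmd2(x, y, log_w, sigmas=(0.5, 1.0, 2.0, 4.0)):
    w = torch.softmax(log_w, dim=0)              # soft kernel selection
    z = torch.cat([x, y])
    k = sum(wi * rbf(z, z, s) for wi, s in zip(w, sigmas))
    n = x.shape[0]
    kxx, kyy, kxy = k[:n, :n], k[n:, n:], k[:n, n:]
    return kxx.mean() + kyy.mean() - 2 * kxy.mean()

log_w = torch.zeros(4, requires_grad=True)       # learnable mixture logits
real = torch.randn(64, 2)
fake = torch.randn(64, 2) + 1.0                  # shifted "generated" batch
print(float(mmd2(real, fake, log_w)))            # biased MMD^2 estimate > 0
</pre>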
</p> </div> </dd> <dt> <a name='item53'>[53]</a> <a href ="/abs/2504.05957" title="Abstract" id="2504.05957"> arXiv:2504.05957 </a> [<a href="/pdf/2504.05957" title="Download PDF" id="pdf-2504.05957" aria-labelledby="pdf-2504.05957">pdf</a>, <a href="https://arxiv.org/html/2504.05957v1" title="View HTML" id="html-2504.05957" aria-labelledby="html-2504.05957" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05957" title="Other formats" id="oth-2504.05957" aria-labelledby="oth-2504.05957">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Drought forecasting using a hybrid neural architecture for integrating time series and static data </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Agudelo,+J">Julian Agudelo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Guigue,+V">Vincent Guigue</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Manfredotti,+C">Cristina Manfredotti</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Piot,+H">Hadrien Piot</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 10 pages, 3 figures, published as a workshop paper at Tackling Climate Change with Machine Learning at ICLR 2025, Tackling Climate Change with Machine Learning is a non-archival workshop </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Reliable forecasting is critical for early warning systems and adaptive drought management. Most previous deep learning approaches focus solely on homogeneous regions and rely on single-structured data. This paper presents a hybrid neural architecture that integrates time series and static data, achieving state-of-the-art performance on the DroughtED dataset. Our results illustrate the potential of designing neural models for the treatment of heterogeneous data in climate related tasks and present reliable prediction of USDM categories, an expert-informed drought metric. Furthermore, this work validates the potential of DroughtED for enabling location-agnostic training of deep learning models. 
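A minimal hybrid architecture sketch, assuming an LSTM branch for the time series and an MLP branch for static attributes; layer sizes and the six USDM-style classes are assumptions. <pre>
# Hybrid model: temporal branch (LSTM) + static branch (MLP), concatenated
# before the classification head.
import torch
import torch.nn as nn

class HybridDroughtNet(nn.Module):
    def __init__(self, ts_dim=21, static_dim=30, hidden=64, n_classes=6):
        super().__init__()
        self.lstm = nn.LSTM(ts_dim, hidden, batch_first=True)
        self.static_mlp = nn.Sequential(nn.Linear(static_dim, hidden), nn.ReLU())
        self.head = nn.Linear(2 * hidden, n_classes)

    def forward(self, ts, static):
        _, (h, _) = self.lstm(ts)                  # final hidden state
        z = torch.cat([h[-1], self.static_mlp(static)], dim=-1)
        return self.head(z)

model = HybridDroughtNet()
ts = torch.randn(8, 180, 21)      # 8 locations, 180 days, 21 indicators
static = torch.randn(8, 30)       # soil, land-cover, elevation features
print(model(ts, static).shape)    # (8, 6) drought-category logits
</pre>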
</p> </div> </dd> <dt> <a name='item54'>[54]</a> <a href ="/abs/2504.05962" title="Abstract" id="2504.05962"> arXiv:2504.05962 </a> [<a href="/pdf/2504.05962" title="Download PDF" id="pdf-2504.05962" aria-labelledby="pdf-2504.05962">pdf</a>, <a href="/format/2504.05962" title="Other formats" id="oth-2504.05962" aria-labelledby="oth-2504.05962">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Autoencoder-Based Detection of Anomalous Stokes V Spectra in the Flare-Producing Active Region 13663 Using Hinode/SP Observations </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Batmunkh,+J">Jargalmaa Batmunkh</a> (1), <a href="https://arxiv.org/search/cs?searchtype=author&query=Iida,+Y">Yusuke Iida</a> (1), <a href="https://arxiv.org/search/cs?searchtype=author&query=Oba,+T">Takayoshi Oba</a> (2) ((1) Niigata University, (2) Max Planck Institute for Solar System Research)</div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Instrumentation and Methods for Astrophysics (astro-ph.IM); Solar and Stellar Astrophysics (astro-ph.SR) </div> <p class='mathjax'> Detecting unusual signals in observational solar spectra is crucial for understanding the features associated with impactful solar events, such as solar flares. However, existing spectral analysis techniques face challenges, particularly when relying on pre-defined, physics-based calculations to process large volumes of noisy and complex observational data. To address these limitations, we applied deep learning to detect anomalies in the Stokes V spectra from the Hinode/SP instrument. Specifically, we developed an autoencoder model for spectral compression, which serves as an anomaly detection method. Our model effectively identifies anomalous spectra within spectro-polarimetric maps captured prior to the onset of the X1.3 flare on May 5, 2024, in NOAA AR 13663. These atypical spectral points exhibit highly complex profiles and spatially align with polarity inversion lines in magnetogram images, indicating their potential as sites of magnetic energy storage and possible triggers for flares. Notably, the detected anomalies are highly localized, making them particularly challenging to identify in magnetogram images using current manual methods. </p> </div> </dd> <dt> <a name='item55'>[55]</a> <a href ="/abs/2504.05978" title="Abstract" id="2504.05978"> arXiv:2504.05978 </a> [<a href="/pdf/2504.05978" title="Download PDF" id="pdf-2504.05978" aria-labelledby="pdf-2504.05978">pdf</a>, <a href="https://arxiv.org/html/2504.05978v1" title="View HTML" id="html-2504.05978" aria-labelledby="html-2504.05978" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05978" title="Other formats" id="oth-2504.05978" aria-labelledby="oth-2504.05978">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Smart Exploration in Reinforcement Learning using Bounded Uncertainty Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=van+Hulst,+J">J.S. van Hulst</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Heemels,+W">W.P.M.H. Heemels</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Antunes,+D">D.J. 
Antunes</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Submitted for publication </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Systems and Control (eess.SY) </div> <p class='mathjax'> Reinforcement learning (RL) is a powerful tool for decision-making in uncertain environments, but it often requires large amounts of data to learn an optimal policy. We propose using prior model knowledge to guide the exploration process to speed up this learning process. This model knowledge comes in the form of a model set to which the true transition kernel and reward function belong. We optimize over this model set to obtain upper and lower bounds on the Q-function, which are then used to guide the exploration of the agent. We provide theoretical guarantees on the convergence of the Q-function to the optimal Q-function under the proposed class of exploring policies. Furthermore, we also introduce a data-driven regularized version of the model set optimization problem that ensures the convergence of the class of exploring policies to the optimal policy. Lastly, we show that when the model set has a specific structure, namely the bounded-parameter MDP (BMDP) framework, the regularized model set optimization problem becomes convex and simple to implement. In this setting, we also show that we obtain finite-time convergence to the optimal policy under additional assumptions. We demonstrate the effectiveness of the proposed exploration strategy in a simulation study. The results indicate that the proposed method can significantly speed up the learning process in reinforcement learning. </p> </div> </dd> <dt> <a name='item56'>[56]</a> <a href ="/abs/2504.06006" title="Abstract" id="2504.06006"> arXiv:2504.06006 </a> [<a href="/pdf/2504.06006" title="Download PDF" id="pdf-2504.06006" aria-labelledby="pdf-2504.06006">pdf</a>, <a href="https://arxiv.org/html/2504.06006v1" title="View HTML" id="html-2504.06006" aria-labelledby="html-2504.06006" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06006" title="Other formats" id="oth-2504.06006" aria-labelledby="oth-2504.06006">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Optuna vs Code Llama: Are LLMs a New Paradigm for Hyperparameter Tuning? </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Kochnev,+R">Roman Kochnev</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Goodarzi,+A+T">Arash Torabi Goodarzi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bentyn,+Z+A">Zofia Antonina Bentyn</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ignatov,+D">Dmitry Ignatov</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Timofte,+R">Radu Timofte</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Neural and Evolutionary Computing (cs.NE) </div> <p class='mathjax'> Optimal hyperparameter selection is critical for maximizing neural network performance, especially as models grow in complexity. This work investigates the viability of using large language models (LLMs) for hyperparameter optimization by employing a fine-tuned version of Code Llama. 
Through parameter-efficient fine-tuning using LoRA, we adapt the LLM to generate accurate and efficient hyperparameter recommendations tailored to diverse neural network architectures. Unlike traditional methods such as Optuna, which rely on exhaustive trials, the proposed approach achieves competitive or superior results in terms of Root Mean Square Error (RMSE) while significantly reducing computational overhead. Our approach highlights that LLM-based optimization not only matches state-of-the-art methods like Tree-structured Parzen Estimators but also accelerates the tuning process. This positions LLMs as a promising alternative to conventional optimization techniques, particularly for rapid experimentation. Furthermore, the ability to generate hyperparameters in a single inference step makes this method particularly well-suited for resource-constrained environments such as edge devices and mobile applications, where computational efficiency is paramount. The results confirm that LLMs, beyond their efficiency, offer substantial time savings and comparable stability, underscoring their value in advancing machine learning workflows. All generated hyperparameters are included in the LEMUR Neural Network (NN) Dataset, which is publicly available and serves as an open-source benchmark for hyperparameter optimization research. </p> </div> </dd> <dt> <a name='item57'>[57]</a> <a href ="/abs/2504.06048" title="Abstract" id="2504.06048"> arXiv:2504.06048 </a> [<a href="/pdf/2504.06048" title="Download PDF" id="pdf-2504.06048" aria-labelledby="pdf-2504.06048">pdf</a>, <a href="https://arxiv.org/html/2504.06048v1" title="View HTML" id="html-2504.06048" aria-labelledby="html-2504.06048" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06048" title="Other formats" id="oth-2504.06048" aria-labelledby="oth-2504.06048">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Trust-Region Twisted Policy Improvement </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=de+Vries,+J+A">Joery A. de Vries</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=He,+J">Jinke He</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Oren,+Y">Yaniv Oren</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Spaan,+M+T">Matthijs T.J. Spaan</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Monte-Carlo tree search (MCTS) has driven many recent breakthroughs in deep reinforcement learning (RL). However, scaling MCTS to parallel compute has proven challenging in practice which has motivated alternative planners like sequential Monte-Carlo (SMC). Many of these SMC methods adopt particle filters for smoothing through a reformulation of RL as a policy inference problem. Yet, persisting design choices of these particle filters often conflict with the aim of online planning in RL, which is to obtain a policy improvement at the start of planning. Drawing inspiration from MCTS, we tailor SMC planners specifically for RL by improving data generation within the planner through constrained action sampling and explicit terminal state handling, as well as improving policy and value target estimation. 
This leads to our Trust-Region Twisted SMC (TRT-SMC), which shows improved runtime and sample-efficiency over baseline MCTS and SMC methods in both discrete and continuous domains. </p> </div> </dd> <dt> <a name='item58'>[58]</a> <a href ="/abs/2504.06055" title="Abstract" id="2504.06055"> arXiv:2504.06055 </a> [<a href="/pdf/2504.06055" title="Download PDF" id="pdf-2504.06055" aria-labelledby="pdf-2504.06055">pdf</a>, <a href="/format/2504.06055" title="Other formats" id="oth-2504.06055" aria-labelledby="oth-2504.06055">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Explainable AI for building energy retrofitting under data scarcity </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Rempi,+P">Panagiota Rempi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Pelekis,+S">Sotiris Pelekis</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Tzortzis,+A+M">Alexandros Menelaos Tzortzis</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Karakolis,+E">Evangelos Karakolis</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ntanos,+C">Christos Ntanos</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Askounis,+D">Dimitris Askounis</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Enhancing energy efficiency in residential buildings is a crucial step toward mitigating climate change and reducing greenhouse gas emissions. Retrofitting existing buildings, which account for a significant portion of energy consumption, is particularly critical in regions with outdated and inefficient building stocks. This study presents an Artificial Intelligence (AI) and Machine Learning (ML)-based framework to recommend energy efficiency measures for residential buildings, leveraging accessible building characteristics to achieve energy class targets. Using Latvia as a case study, the methodology addresses challenges associated with limited datasets, class imbalance and data scarcity. The proposed approach integrates Conditional Tabular Generative Adversarial Networks (CTGAN) to generate synthetic data, enriching and balancing the dataset. A Multi-Layer Perceptron (MLP) model serves as the predictive model, performing multi-label classification to predict appropriate retrofit strategies. Explainable Artificial Intelligence (XAI), specifically SHapley Additive exPlanations (SHAP), ensures transparency and trust by identifying key features that influence recommendations and guiding feature engineering choices for improved reliability and performance. The evaluation of the approach shows that it notably overcomes data limitations, achieving improvements of up to 54% in precision, recall and F1 score. Although this study focuses on Latvia, the methodology is adaptable to other regions, underscoring the potential of AI in reducing the complexity and cost of building energy retrofitting while overcoming data limitations. By facilitating decision-making processes and promoting stakeholder engagement, this work supports the global transition toward sustainable energy use in the residential building sector. 
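A compressed pipeline sketch, assuming the `ctgan` package and scikit-learn: synthetic rows augment the table, then an MLP performs multi-label prediction; the columns and the toy labels are invented for illustration. <pre>
# CTGAN augmentation followed by a multi-label MLP.
import pandas as pd
from ctgan import CTGAN
from sklearn.neural_network import MLPClassifier

df = pd.DataFrame({
    "floor_area": [120, 85, 200, 60] * 25,
    "build_year": [1965, 1978, 1990, 2005] * 25,
    "wall_type":  ["brick", "panel", "brick", "wood"] * 25,
})

synth = CTGAN(epochs=5)                       # tiny run for illustration
synth.fit(df, discrete_columns=["wall_type"])
augmented = pd.concat([df, synth.sample(100)], ignore_index=True)

X = pd.get_dummies(augmented, columns=["wall_type"])
# toy 2-label target derived from the features (stand-in for retrofit labels)
y = (X[["floor_area", "build_year"]]
     > X[["floor_area", "build_year"]].median()).astype(int).values
clf = MLPClassifier(hidden_layer_sizes=(32,), max_iter=300).fit(X, y)
print(clf.predict(X[:3]))                     # rows of 0/1 label vectors
</pre>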
</p> </div> </dd> <dt> <a name='item59'>[59]</a> <a href ="/abs/2504.06070" title="Abstract" id="2504.06070"> arXiv:2504.06070 </a> [<a href="/pdf/2504.06070" title="Download PDF" id="pdf-2504.06070" aria-labelledby="pdf-2504.06070">pdf</a>, <a href="https://arxiv.org/html/2504.06070v1" title="View HTML" id="html-2504.06070" aria-labelledby="html-2504.06070" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06070" title="Other formats" id="oth-2504.06070" aria-labelledby="oth-2504.06070">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> PINP: Physics-Informed Neural Predictor with latent estimation of fluid flows </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+H">Huaguan Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+Y">Yang Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sun,+H">Hao Sun</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Accurately predicting fluid dynamics and evolution has been a long-standing challenge in physical sciences. Conventional deep learning methods often rely on the nonlinear modeling capabilities of neural networks to establish mappings between past and future states, overlooking the fluid dynamics, or only modeling the velocity field, neglecting the coupling of multiple physical quantities. In this paper, we propose a new physics-informed learning approach that incorporates coupled physical quantities into the prediction process to assist with forecasting. Central to our method is the discretization of physical equations, which are directly integrated into the model architecture and loss function. This integration enables the model to provide robust, long-term future predictions. By incorporating physical equations, our model demonstrates temporal extrapolation and spatial generalization capabilities. Experimental results show that our approach achieves state-of-the-art performance in spatiotemporal prediction across both numerical simulations and real-world extreme-precipitation nowcasting benchmarks. 
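The general pattern of embedding a discretized equation in the loss can be sketched with a 1-D advection residual; the grid, wave speed, and loss weighting are assumptions, not the paper's equations. <pre>
# Physics-informed loss: penalize the residual of the discretized
# advection equation u_t + c * u_x = 0 alongside the data term.
import torch

def physics_residual(u_prev, u_next, c=1.0, dt=0.01, dx=0.1):
    u_t = (u_next - u_prev) / dt                            # forward difference
    u_x = (torch.roll(u_prev, -1, -1) - torch.roll(u_prev, 1, -1)) / (2 * dx)
    return ((u_t + c * u_x) ** 2).mean()                    # PDE residual

u_prev = torch.sin(torch.linspace(0, 6.28, 64)).unsqueeze(0)
pred = u_prev.clone()                # stand-in for the network's prediction
target = torch.roll(u_prev, -1, -1)  # advected field as "ground truth"

data_loss = ((pred - target) ** 2).mean()
loss = data_loss + 0.1 * physics_residual(u_prev, pred)     # combined loss
print(float(loss))
</pre>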
</p> </div> </dd> <dt> <a name='item60'>[60]</a> <a href ="/abs/2504.06075" title="Abstract" id="2504.06075"> arXiv:2504.06075 </a> [<a href="/pdf/2504.06075" title="Download PDF" id="pdf-2504.06075" aria-labelledby="pdf-2504.06075">pdf</a>, <a href="/format/2504.06075" title="Other formats" id="oth-2504.06075" aria-labelledby="oth-2504.06075">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Collaborative Prediction: Tractable Information Aggregation via Agreement </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Collina,+N">Natalie Collina</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Globus-Harris,+I">Ira Globus-Harris</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Goel,+S">Surbhi Goel</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gupta,+V">Varun Gupta</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Roth,+A">Aaron Roth</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shi,+M">Mirah Shi</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Data Structures and Algorithms (cs.DS); Computer Science and Game Theory (cs.GT) </div> <p class='mathjax'> We give efficient "collaboration protocols" through which two parties, who observe different features about the same instances, can interact to arrive at predictions that are more accurate than either could have obtained on their own. The parties only need to iteratively share and update their own label predictions, without either party ever having to share the actual features that they observe. Our protocols are efficient reductions to the problem of learning on each party's feature space alone, and so can be used even in settings in which each party's feature space is illegible to the other, which arises in models of human/AI interaction and in multi-modal learning. The communication requirements of our protocols are independent of the dimensionality of the data. In an online adversarial setting we show how to give regret bounds on the predictions that the parties arrive at with respect to a class of benchmark policies defined on the joint feature space of the two parties, despite the fact that neither party has access to this joint feature space. We also give simpler algorithms for the same task in the batch setting in which we assume that there is a fixed but unknown data distribution. We generalize our protocols to a decision theoretic setting with high dimensional outcome spaces, where parties communicate only "best response actions." <br>Our theorems give a computationally and statistically tractable generalization of past work on information aggregation amongst Bayesians who share a common and correct prior, as part of a literature studying "agreement" in the style of Aumann's agreement theorem. Our results require no knowledge of (or even the existence of) a prior distribution and are computationally efficient. Nevertheless we show how to lift our theorems back to this classical Bayesian setting, and in doing so, give new information aggregation theorems for Bayesian agreement. 
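A toy version of the protocol, with ridge regression standing in for each party's learner: the parties exchange only predictions, never features, and iterate until they approximately agree. <pre>
# Two parties with disjoint feature blocks iteratively share predictions.
import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.default_rng(0)
Xa, Xb = rng.normal(size=(500, 3)), rng.normal(size=(500, 3))   # disjoint views
y = Xa @ [1, -2, 0.5] + Xb @ [0.5, 1, -1] + 0.1 * rng.normal(size=500)

pa, pb = np.zeros(500), np.zeros(500)
for round_ in range(10):
    # each party refits using its own features plus the other's prediction
    Fa = np.column_stack([Xa, pb])
    pa = Ridge().fit(Fa, y).predict(Fa)
    Fb = np.column_stack([Xb, pa])
    pb = Ridge().fit(Fb, y).predict(Fb)
    gap = np.abs(pa - pb).mean()
    print(f"round {round_}: disagreement {gap:.4f}")
    if gap < 1e-3:                     # approximate agreement reached
        break
</pre>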
</p> </div> </dd> <dt> <a name='item61'>[61]</a> <a href ="/abs/2504.06111" title="Abstract" id="2504.06111"> arXiv:2504.06111 </a> [<a href="/pdf/2504.06111" title="Download PDF" id="pdf-2504.06111" aria-labelledby="pdf-2504.06111">pdf</a>, <a href="https://arxiv.org/html/2504.06111v1" title="View HTML" id="html-2504.06111" aria-labelledby="html-2504.06111" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06111" title="Other formats" id="oth-2504.06111" aria-labelledby="oth-2504.06111">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Leveraging Axis-Aligned Subspaces for High-Dimensional Bayesian Optimization with Group Testing </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Hellsten,+E">Erik Hellsten</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hvarfner,+C">Carl Hvarfner</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Papenmeier,+L">Leonard Papenmeier</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Nardi,+L">Luigi Nardi</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Bayesian optimization (BO) is an effective method for optimizing expensive-to-evaluate black-box functions. While high-dimensional problems can be particularly challenging due to the multitude of parameter choices and the potentially high number of data points required to fit the model, this limitation can be addressed if the problem satisfies simplifying assumptions. The axis-aligned subspace assumption, under which only a few dimensions have a significant impact on the objective, has motivated several algorithms for high-dimensional BO. However, the validity of this assumption is rarely verified, and the assumption is rarely exploited to its full extent. We propose a group testing (GT) approach to identify active variables to facilitate efficient optimization in these domains. The proposed algorithm, Group Testing Bayesian Optimization (GTBO), first runs a testing phase where groups of variables are systematically selected and tested for whether they influence the objective, then terminates once active dimensions are identified. To that end, we extend the well-established GT theory to functions over continuous domains. In the second phase, GTBO guides optimization by placing more importance on the active dimensions. By leveraging the axis-aligned subspace assumption, GTBO outperforms state-of-the-art methods on benchmarks satisfying the assumption of axis-aligned subspaces, while offering improved interpretability. 
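</p> <p class='mathjax'> A minimal sketch of a group-testing phase on a synthetic black-box function: perturb groups of coordinates jointly, and only test individual variables inside groups whose perturbation moves the objective. The grouping, step size, and threshold are illustrative; GTBO's actual tests are statistical and operate over continuous domains. </p> <pre><code class="language-python">
import numpy as np

# Sketch of a group-testing phase for finding active dimensions of a
# black-box function: perturb a group of coordinates jointly and skip the
# whole group if the objective does not move (illustrative only).
def objective(x):                       # only dims 3 and 17 are active
    return x[3] ** 2 + 2.0 * x[17]

def group_test(f, dim, group_size=5, eps=1e-8):
    base = np.zeros(dim)
    f0 = f(base)
    active = []
    for start in range(0, dim, group_size):
        group = list(range(start, min(start + group_size, dim)))
        x = base.copy()
        x[group] += 1.0                 # perturb the whole group at once
        if abs(f(x) - f0) > eps:        # group contains an active variable
            for i in group:             # fall back to individual tests
                xi = base.copy()
                xi[i] += 1.0
                if abs(f(xi) - f0) > eps:
                    active.append(i)
    return active

print(group_test(objective, dim=20))    # -> [3, 17]
</code></pre> <p class='mathjax'>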
</p> </div> </dd> <dt> <a name='item62'>[62]</a> <a href ="/abs/2504.06125" title="Abstract" id="2504.06125"> arXiv:2504.06125 </a> [<a href="/pdf/2504.06125" title="Download PDF" id="pdf-2504.06125" aria-labelledby="pdf-2504.06125">pdf</a>, <a href="https://arxiv.org/html/2504.06125v1" title="View HTML" id="html-2504.06125" aria-labelledby="html-2504.06125" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06125" title="Other formats" id="oth-2504.06125" aria-labelledby="oth-2504.06125">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Robo-taxi Fleet Coordination at Scale via Reinforcement Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Tresca,+L">Luigi Tresca</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Schmidt,+C">Carolin Schmidt</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Harrison,+J">James Harrison</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Rodrigues,+F">Filipe Rodrigues</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zardini,+G">Gioele Zardini</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gammelli,+D">Daniele Gammelli</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Pavone,+M">Marco Pavone</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 12 pages, 6 figures, 6 tables </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Systems and Control (eess.SY) </div> <p class='mathjax'> Fleets of robo-taxis offering on-demand transportation services, commonly known as Autonomous Mobility-on-Demand (AMoD) systems, hold significant promise for societal benefits, such as reducing pollution, energy consumption, and urban congestion. However, orchestrating these systems at scale remains a critical challenge, with existing coordination algorithms often failing to exploit the systems' full potential. This work introduces a novel decision-making framework that unites mathematical modeling with data-driven techniques. In particular, we present the AMoD coordination problem through the lens of reinforcement learning and propose a graph network-based framework that exploits the main strengths of graph representation learning, reinforcement learning, and classical operations research tools. Extensive evaluations across diverse simulation fidelities and scenarios demonstrate the flexibility of our approach, achieving superior system performance, computational efficiency, and generalizability compared to prior methods. Finally, motivated by the need to democratize research efforts in this area, we release publicly available benchmarks, datasets, and simulators for network-level coordination alongside an open-source codebase designed to provide accessible simulation platforms and establish a standardized validation process for comparing methodologies. 
Code available at: <a href="https://github.com/StanfordASL/RL4AMOD" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </p> </div> </dd> <dt> <a name='item63'>[63]</a> <a href ="/abs/2504.06126" title="Abstract" id="2504.06126"> arXiv:2504.06126 </a> [<a href="/pdf/2504.06126" title="Download PDF" id="pdf-2504.06126" aria-labelledby="pdf-2504.06126">pdf</a>, <a href="/format/2504.06126" title="Other formats" id="oth-2504.06126" aria-labelledby="oth-2504.06126">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Accelerating Vehicle Routing via AI-Initialized Genetic Algorithms </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Greenberg,+I">Ido Greenberg</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sielski,+P">Piotr Sielski</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Linsenmaier,+H">Hugo Linsenmaier</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gandham,+R">Rajesh Gandham</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mannor,+S">Shie Mannor</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Fender,+A">Alex Fender</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chechik,+G">Gal Chechik</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Meirom,+E">Eli Meirom</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Neural and Evolutionary Computing (cs.NE) </div> <p class='mathjax'> Vehicle Routing Problems (VRP) are an extension of the Traveling Salesperson Problem and constitute a fundamental NP-hard challenge in combinatorial optimization. Solving VRP in real-time at large scale has become critical in numerous applications, from growing markets like last-mile delivery to emerging use-cases like interactive logistics planning. Such applications involve solving similar problem instances repeatedly, yet current state-of-the-art solvers treat each instance on its own without leveraging previous examples. We introduce a novel optimization framework that uses a reinforcement learning agent - trained on prior instances - to quickly generate initial solutions, which are then further optimized by genetic algorithms. Our framework, Evolutionary Algorithm with Reinforcement Learning Initialization (EARLI), consistently outperforms current state-of-the-art solvers across various time scales. For example, EARLI handles vehicle routing with 500 locations within 1s, 10x faster than current solvers for the same solution quality, enabling applications like real-time and interactive routing. EARLI can generalize to new data, as demonstrated on real e-commerce delivery data of a previously unseen city. Our hybrid framework presents a new way to combine reinforcement learning and genetic algorithms, paving the way for closer interdisciplinary collaboration between AI and optimization communities towards real-time optimization in diverse domains. 
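</p> <p class='mathjax'> The initialize-then-evolve structure can be conveyed with a small TSP-like toy: a nearest-neighbour heuristic stands in for the trained RL initializer, and a simple mutation-only GA refines the seeded population. Instance, operators, and budget below are all illustrative, not the EARLI implementation. </p> <pre><code class="language-python">
import random

# Sketch of heuristic-seeded evolutionary search: seed the population with a
# constructive heuristic (stand-in for an RL policy), then refine with a GA.
random.seed(1)
pts = [(random.random(), random.random()) for _ in range(30)]

def dist(a, b):
    return ((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2) ** 0.5

def tour_len(tour):
    return sum(dist(pts[tour[i]], pts[tour[(i + 1) % len(tour)]])
               for i in range(len(tour)))

def nn_tour():                                 # stand-in "policy" rollout
    left, tour = set(range(1, len(pts))), [0]
    while left:
        nxt = min(left, key=lambda j: dist(pts[tour[-1]], pts[j]))
        left.remove(nxt)
        tour.append(nxt)
    return tour

def mutate(tour):                              # 2-opt style segment reversal
    i, j = sorted(random.sample(range(len(tour)), 2))
    return tour[:i] + tour[i:j + 1][::-1] + tour[j + 1:]

pop = [mutate(nn_tour()) for _ in range(20)]   # seeded initial population
for _ in range(200):                           # simple (mu + lambda) GA loop
    pop += [mutate(random.choice(pop)) for _ in range(20)]
    pop = sorted(pop, key=tour_len)[:20]
print(round(tour_len(pop[0]), 3))
</code></pre> <p class='mathjax'>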
</p> </div> </dd> <dt> <a name='item64'>[64]</a> <a href ="/abs/2504.06141" title="Abstract" id="2504.06141"> arXiv:2504.06141 </a> [<a href="/pdf/2504.06141" title="Download PDF" id="pdf-2504.06141" aria-labelledby="pdf-2504.06141">pdf</a>, <a href="https://arxiv.org/html/2504.06141v1" title="View HTML" id="html-2504.06141" aria-labelledby="html-2504.06141" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06141" title="Other formats" id="oth-2504.06141" aria-labelledby="oth-2504.06141">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Adversarial Training of Reward Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Bukharin,+A">Alexander Bukharin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Qian,+H">Haifeng Qian</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sun,+S">Shengyang Sun</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Renduchintala,+A">Adithya Renduchintala</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Singhal,+S">Soumye Singhal</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+Z">Zhilin Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kuchaiev,+O">Oleksii Kuchaiev</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Delalleau,+O">Olivier Delalleau</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhao,+T">Tuo Zhao</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 16 pages, 7 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Reward modeling has emerged as a promising approach for the scalable alignment of language models. However, contemporary reward models (RMs) often lack robustness, awarding high rewards to low-quality, out-of-distribution (OOD) samples. This can lead to reward hacking, where policies exploit unintended shortcuts to maximize rewards, undermining alignment. To address this challenge, we introduce Adv-RM, a novel adversarial training framework that automatically identifies adversarial examples -- responses that receive high rewards from the target RM but are OOD and of low quality. By leveraging reinforcement learning, Adv-RM trains a policy to generate adversarial examples that reliably expose vulnerabilities in large state-of-the-art reward models such as Nemotron 340B RM. Incorporating these adversarial examples into the reward training process improves the robustness of RMs, mitigating reward hacking and enhancing downstream performance in RLHF. We demonstrate that Adv-RM significantly outperforms conventional RM training, increasing stability and enabling more effective RLHF training in both synthetic and real-data settings. 
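</p> <p class='mathjax'> As a conceptual sketch of the adversarial loop, the code below uses random search standing in for the RL attacker and a toy linear model standing in for both the reward model and the quality proxy: it finds the most over-rewarded candidate and pushes its reward down. Every component here is a stand-in, not Adv-RM. </p> <pre><code class="language-python">
import numpy as np

# Conceptual sketch of adversarial reward-model hardening: search for inputs
# the reward model scores highly even though a held-out quality proxy rates
# them poorly, then update the reward model to score them lower. Adv-RM
# trains an RL policy over text; random search is used here for brevity.
rng = np.random.default_rng(0)
w = rng.normal(size=8)                     # toy linear "reward model"

def reward(x):
    return float(w @ x)

def true_quality(x):                       # proxy: small-norm inputs are good
    return float(-np.linalg.norm(x))

for step in range(5):
    cands = rng.normal(scale=3.0, size=(256, 8))
    gap = np.array([reward(c) - true_quality(c) for c in cands])
    adv = cands[gap.argmax()]              # most over-rewarded candidate
    w -= 0.05 * adv                        # push the RM's score for it down
    print(step, round(reward(adv), 2))
</code></pre> <p class='mathjax'>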
</p> </div> </dd> <dt> <a name='item65'>[65]</a> <a href ="/abs/2504.06157" title="Abstract" id="2504.06157"> arXiv:2504.06157 </a> [<a href="/pdf/2504.06157" title="Download PDF" id="pdf-2504.06157" aria-labelledby="pdf-2504.06157">pdf</a>, <a href="https://arxiv.org/html/2504.06157v1" title="View HTML" id="html-2504.06157" aria-labelledby="html-2504.06157" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06157" title="Other formats" id="oth-2504.06157" aria-labelledby="oth-2504.06157">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Hall Effect Thruster Forecasting using a Topological Approach for Data Assimilation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Chumley,+M+M">Max M. Chumley</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Khasawneh,+F+A">Firas A. Khasawneh</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 12 pages, 13 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Hall Effect Thrusters (HETs) are electric thrusters that eject heavy ionized gas particles from the spacecraft to generate thrust. Although traditionally they were used for station keeping, recently they have been used for interplanetary space missions due to their high delta-V potential and their operational longevity in contrast to other thrusters, e.g., chemical ones. However, the operation of HETs involves complex processes such as ionization of gases, strong magnetic fields, and complicated solar panel power supply interactions. Therefore, their operation is extremely difficult to model, thus necessitating Data Assimilation (DA) approaches for estimating and predicting their operational states. Because a HET's operating environment is often noisy, with non-Gaussian noise sources, the applicable DA tools are significantly limited. We describe a topological approach for data assimilation that bypasses these limitations because it does not depend on the noise model, and we utilize it to forecast spatiotemporal plume field states of HETs. Our approach is a generalization of the Topological Approach for Data Assimilation (TADA) method that allows different forecast functions to be included. We show how TADA can be combined with the Long Short-Term Memory network for accurate forecasting. We then apply our approach to high-fidelity Hall Effect Thruster (HET) simulation data from the Air Force Research Laboratory (AFRL) rocket propulsion division, where we demonstrate the forecast resiliency of TADA on noise-contaminated, high-dimensional data. 
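</p> <p class='mathjax'> The forecast-function ingredient can be pictured with a small LSTM that maps a sliding window of past states to the next state, of the kind that could be plugged into a data assimilation loop. The architecture, synthetic data, and training budget below are illustrative only, not the paper's setup. </p> <pre><code class="language-python">
import torch
import torch.nn as nn

# Minimal sketch of an LSTM "forecast function": a window of past states in,
# the next state out. Shapes and training details are illustrative.
torch.manual_seed(0)

class LSTMForecaster(nn.Module):
    def __init__(self, dim, hidden=32):
        super().__init__()
        self.lstm = nn.LSTM(dim, hidden, batch_first=True)
        self.head = nn.Linear(hidden, dim)

    def forward(self, window):             # window: (batch, time, dim)
        out, _ = self.lstm(window)
        return self.head(out[:, -1])       # predict the next state

model = LSTMForecaster(dim=4)
opt = torch.optim.Adam(model.parameters(), lr=1e-2)
series = torch.sin(torch.linspace(0, 20, 200)).view(-1, 1).repeat(1, 4)
x = torch.stack([series[i:i + 10] for i in range(180)])   # sliding windows
y = torch.stack([series[i + 10] for i in range(180)])     # next states
for _ in range(100):
    opt.zero_grad()
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    opt.step()
print(float(loss))
</code></pre> <p class='mathjax'>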
</p> </div> </dd> <dt> <a name='item66'>[66]</a> <a href ="/abs/2504.06176" title="Abstract" id="2504.06176"> arXiv:2504.06176 </a> [<a href="/pdf/2504.06176" title="Download PDF" id="pdf-2504.06176" aria-labelledby="pdf-2504.06176">pdf</a>, <a href="https://arxiv.org/html/2504.06176v1" title="View HTML" id="html-2504.06176" aria-labelledby="html-2504.06176" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06176" title="Other formats" id="oth-2504.06176" aria-labelledby="oth-2504.06176">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Self-Supervised Framework for Space Object Behaviour Characterisation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Groves,+I">Ian Groves</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Campbell,+A">Andrew Campbell</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Fernandes,+J">James Fernandes</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Rodriguez,+D">Diego Rodriguez</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Murray,+P">Paul Murray</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Vasile,+M">Massimiliano Vasile</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Nockles,+V">Victoria Nockles</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 15 pages, 10 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Space Physics (physics.space-ph) </div> <p class='mathjax'> Foundation Models, pre-trained on large unlabelled datasets before task-specific fine-tuning, are increasingly being applied to specialised domains. Recent examples include ClimaX for climate and Clay for satellite Earth observation, but a Foundation Model for Space Object Behavioural Analysis has not yet been developed. As orbital populations grow, automated methods for characterising space object behaviour are crucial for space safety. We present a Space Safety and Sustainability Foundation Model focusing on space object behavioural analysis using light curves (LCs). We implemented a Perceiver-Variational Autoencoder (VAE) architecture, pre-trained with self-supervised reconstruction and masked reconstruction on 227,000 LCs from the MMT-9 observatory. The VAE enables anomaly detection, motion prediction, and LC generation. We fine-tuned the model for anomaly detection and motion prediction using two independent LC simulators (CASSANDRA and GRIAL, respectively), with CAD models of boxwing, Sentinel-3, SMOS, and Starlink platforms. Our pre-trained model achieved a reconstruction error of 0.01%, identifying potentially anomalous light curves through reconstruction difficulty. After fine-tuning, the model achieved 88% and 82% accuracy, with ROC AUC scores of 0.90 and 0.95, on anomaly detection and motion mode prediction (sun-pointing, spin, etc.), respectively. Analysis of high-confidence anomaly predictions on real data revealed distinct patterns, including characteristic object profiles and satellite glinting. Here, we demonstrate how self-supervised learning can simultaneously enable anomaly detection, motion prediction, and synthetic data generation from rich representations learned in pre-training. 
Our work therefore supports space safety and sustainability through automated monitoring and simulation capabilities. </p> </div> </dd> <dt> <a name='item67'>[67]</a> <a href ="/abs/2504.06193" title="Abstract" id="2504.06193"> arXiv:2504.06193 </a> [<a href="/pdf/2504.06193" title="Download PDF" id="pdf-2504.06193" aria-labelledby="pdf-2504.06193">pdf</a>, <a href="https://arxiv.org/html/2504.06193v1" title="View HTML" id="html-2504.06193" aria-labelledby="html-2504.06193" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06193" title="Other formats" id="oth-2504.06193" aria-labelledby="oth-2504.06193">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Heuristic Methods are Good Teachers to Distill MLPs for Graph Link Prediction </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Qin,+Z">Zongyue Qin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+S">Shichang Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ju,+M">Mingxuan Ju</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhao,+T">Tong Zhao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shah,+N">Neil Shah</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sun,+Y">Yizhou Sun</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Link prediction is a crucial graph-learning task with applications including citation prediction and product recommendation. Distilling Graph Neural Network (GNN) teachers into Multi-Layer Perceptron (MLP) students has emerged as an effective approach to achieve strong performance while reducing computational cost by removing the graph dependency. However, existing distillation methods only use standard GNNs and overlook alternative teachers such as specialized models for link prediction (GNN4LP) and heuristic methods (e.g., common neighbors). This paper first explores the impact of different teachers in GNN-to-MLP distillation. Surprisingly, we find that stronger teachers do not always produce stronger students: MLPs distilled from GNN4LP can underperform those distilled from simpler GNNs, while weaker heuristic methods can bring MLPs to near-GNN performance with drastically reduced training costs. Building on these insights, we propose Ensemble Heuristic-Distilled MLPs (EHDM), which eliminates graph dependencies while effectively integrating complementary signals via a gating mechanism. Experiments on ten datasets show an average 7.93% improvement over previous GNN-to-MLP approaches with 1.95-3.32 times less training time, indicating that EHDM is an efficient and effective link prediction method. 
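</p> <p class='mathjax'> A minimal sketch of the heuristic-as-teacher idea: common-neighbour counts on a toy graph produce the soft targets an MLP student would be trained to regress, after which inference needs no graph access. The graph and labels below are made up for illustration. </p> <pre><code class="language-python">
import itertools

# Sketch of a link-prediction heuristic as distillation teacher: the
# common-neighbours count becomes the soft target a student model regresses.
edges = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4)]
adj = {}
for u, v in edges:
    adj.setdefault(u, set()).add(v)
    adj.setdefault(v, set()).add(u)

def common_neighbors(u, v):
    return len(adj.get(u, set()) & adj.get(v, set()))

# Teacher scores for every non-edge: these labels would supervise the student.
non_edges = [(u, v) for u, v in itertools.combinations(sorted(adj), 2)
             if v not in adj[u]]
targets = {(u, v): common_neighbors(u, v) for u, v in non_edges}
print(targets)   # e.g. (0, 3) shares neighbours 1 and 2 -> score 2
</code></pre> <p class='mathjax'>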
</p> </div> </dd> <dt> <a name='item68'>[68]</a> <a href ="/abs/2504.06207" title="Abstract" id="2504.06207"> arXiv:2504.06207 </a> [<a href="/pdf/2504.06207" title="Download PDF" id="pdf-2504.06207" aria-labelledby="pdf-2504.06207">pdf</a>, <a href="https://arxiv.org/html/2504.06207v1" title="View HTML" id="html-2504.06207" aria-labelledby="html-2504.06207" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06207" title="Other formats" id="oth-2504.06207" aria-labelledby="oth-2504.06207">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> An experimental survey and Perspective View on Meta-Learning for Automated Algorithms Selection and Parametrization </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Garouani,+M">Moncef Garouani</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Considerable progress has been made in the recent literature to tackle the Algorithms Selection and Parametrization (ASP) problem, which spans multiple meta-learning setups. Yet there is a lack of surveys and comparative evaluations that critically analyze, summarize and assess the performance of existing methods. In this paper, we provide an overview of the state of the art in this continuously evolving field. The survey sheds light on the motivational reasons for pursuing classifier selection through meta-learning. In this regard, Automated Machine Learning (AutoML) is usually treated as an ASP problem under the umbrella of the democratization of machine learning. Accordingly, AutoML makes machine learning techniques accessible to domain scientists who are interested in applying advanced analytics but lack the required expertise. It can ease the task of manually selecting ML algorithms and tuning related hyperparameters. We comprehensively discuss the different phases of classifier selection based on a generic framework that is formed as an outcome of reviewing prior works. Subsequently, we propose a benchmark knowledge base of 4 million previously learned models and present extensive comparative evaluations of the prominent methods for classifier selection based on 8 classification algorithms and 400 benchmark datasets. The comparative study quantitatively assesses the performance of algorithm selection methods while emphasizing the strengths and limitations of existing studies. </p> </div> </dd> <dt> <a name='item69'>[69]</a> <a href ="/abs/2504.06209" title="Abstract" id="2504.06209"> arXiv:2504.06209 </a> [<a href="/pdf/2504.06209" title="Download PDF" id="pdf-2504.06209" aria-labelledby="pdf-2504.06209">pdf</a>, <a href="/format/2504.06209" title="Other formats" id="oth-2504.06209" aria-labelledby="oth-2504.06209">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> The Work Capacity of Channels with Memory: Maximum Extractable Work in Percept-Action Loops </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Fiderer,+L+J">Lukas J. Fiderer</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Barth,+P+C">Paul C. Barth</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Smith,+I+D">Isaac D. 
Smith</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Briegel,+H+J">Hans J. Briegel</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 10+32 pages; 6+19 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Statistical Mechanics (cond-mat.stat-mech); Information Theory (cs.IT); Adaptation and Self-Organizing Systems (nlin.AO); Chaotic Dynamics (nlin.CD); Quantum Physics (quant-ph) </div> <p class='mathjax'> Predicting future observations plays a central role in machine learning, biology, economics, and many other fields. It lies at the heart of organizational principles such as the variational free energy principle and has even been shown -- based on the second law of thermodynamics -- to be necessary for reaching the fundamental energetic limits of sequential information processing. While the usefulness of the predictive paradigm is undisputed, complex adaptive systems that interact with their environment are more than just predictive machines: they have the power to act upon their environment and cause change. In this work, we develop a framework to analyze the thermodynamics of information processing in percept-action loops -- a model of agent-environment interaction -- allowing us to investigate the thermodynamic implications of actions and percepts on equal footing. To this end, we introduce the concept of work capacity -- the maximum rate at which an agent can expect to extract work from its environment. Our results reveal that neither of two previously established design principles for work-efficient agents -- maximizing predictive power and forgetting past actions -- remains optimal in environments where actions have observable consequences. Instead, a trade-off emerges: work-efficient agents must balance prediction and forgetting, as remembering past actions can reduce the available free energy. This highlights a fundamental departure from the thermodynamics of passive observation, suggesting that prediction and energy efficiency may be at odds in active learning systems. 
</p> </div> </dd> <dt> <a name='item70'>[70]</a> <a href ="/abs/2504.06212" title="Abstract" id="2504.06212"> arXiv:2504.06212 </a> [<a href="/pdf/2504.06212" title="Download PDF" id="pdf-2504.06212" aria-labelledby="pdf-2504.06212">pdf</a>, <a href="https://arxiv.org/html/2504.06212v1" title="View HTML" id="html-2504.06212" aria-labelledby="html-2504.06212" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06212" title="Other formats" id="oth-2504.06212" aria-labelledby="oth-2504.06212">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> NNN: Next-Generation Neural Networks for Marketing Mix Modeling </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Mulc,+T">Thomas Mulc</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Anderson,+M">Mike Anderson</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Cubre,+P">Paul Cubre</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+H">Huikun Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+I">Ivy Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kumar,+S">Saket Kumar</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Applications (stat.AP) </div> <p class='mathjax'> We present NNN, a Transformer-based neural network approach to Marketing Mix Modeling (MMM) designed to address key limitations of traditional methods. Unlike conventional MMMs which rely on scalar inputs and parametric decay functions, NNN uses rich embeddings to capture both quantitative and qualitative aspects of marketing and organic channels (e.g., search queries, ad creatives). This, combined with its attention mechanism, enables NNN to model complex interactions, capture long-term effects, and potentially improve sales attribution accuracy. We show that L1 regularization permits the use of such expressive models in typical data-constrained settings. Evaluating NNN on simulated and real-world data demonstrates its efficacy, particularly through considerable improvement in predictive power. Beyond attribution, NNN provides valuable, complementary insights through model probing, such as evaluating keyword or creative effectiveness, enhancing model interpretability. 
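</p> <p class='mathjax'> A toy sketch of the role of L1 regularization in a data-constrained setting: with many candidate channels and only a few that matter, an L1 penalty drives most coefficients to (near) zero. A linear model stands in for the Transformer here; the penalty weight and data are arbitrary, purely for illustration. </p> <pre><code class="language-python">
import torch

# Sketch of L1 regularization selecting a sparse set of "channels":
# only 3 of 30 synthetic channels carry signal, and the L1 term in the
# loss suppresses the rest. Toy stand-in, not the NNN architecture.
torch.manual_seed(0)
n, channels = 200, 30
X = torch.randn(n, channels)
true_w = torch.zeros(channels)
true_w[:3] = torch.tensor([2.0, -1.0, 0.5])
y = X @ true_w + 0.1 * torch.randn(n)      # only 3 channels actually matter

w = torch.zeros(channels, requires_grad=True)
opt = torch.optim.SGD([w], lr=0.05)
for _ in range(500):
    opt.zero_grad()
    loss = ((X @ w - y) ** 2).mean() + 0.05 * w.abs().sum()  # L1 penalty
    loss.backward()
    opt.step()
print((w.abs() > 0.1).sum().item())        # roughly 3 channels survive
</code></pre> <p class='mathjax'>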
</p> </div> </dd> <dt> <a name='item71'>[71]</a> <a href ="/abs/2504.06235" title="Abstract" id="2504.06235"> arXiv:2504.06235 </a> [<a href="/pdf/2504.06235" title="Download PDF" id="pdf-2504.06235" aria-labelledby="pdf-2504.06235">pdf</a>, <a href="https://arxiv.org/html/2504.06235v1" title="View HTML" id="html-2504.06235" aria-labelledby="html-2504.06235" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06235" title="Other formats" id="oth-2504.06235" aria-labelledby="oth-2504.06235">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Decentralized Federated Domain Generalization with Style Sharing: A Formal Modeling and Convergence Analysis </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zehtabi,+S">Shahryar Zehtabi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Han,+D">Dong-Jun Han</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hosseinalipour,+S">Seyyedali Hosseinalipour</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Brinton,+C+G">Christopher G. Brinton</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Much of the federated learning (FL) literature focuses on settings where local dataset statistics remain the same between training and testing time. Recent advances in domain generalization (DG) aim to use data from source (training) domains to train a model that generalizes well to data from unseen target (testing) domains. In this paper, we are motivated by two major gaps in existing work on FL and DG: (1) the lack of formal mathematical analysis of DG objectives and training processes; and (2) DG research in FL being limited to the conventional star-topology architecture. Addressing the second gap, we develop $\textit{Decentralized Federated Domain Generalization with Style Sharing}$ ($\texttt{StyleDDG}$), a fully decentralized DG algorithm designed to allow devices in a peer-to-peer network to achieve DG based on sharing style information inferred from their datasets. Additionally, we fill the first gap by providing the first systematic approach to mathematically analyzing style-based DG training optimization. We cast existing centralized DG algorithms within our framework, and employ their formalisms to model $\texttt{StyleDDG}$. Based on this, we obtain analytical conditions under which a sub-linear convergence rate of $\texttt{StyleDDG}$ can be obtained. Through experiments on two popular DG datasets, we demonstrate that $\texttt{StyleDDG}$ can obtain significant improvements in accuracy across target domains with minimal added communication overhead compared to decentralized gradient methods that do not employ style sharing. 
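</p> <p class='mathjax'> The style-sharing primitive can be sketched as an AdaIN-style transform: a "style" is a set of per-feature means and standard deviations that a device can share, and a peer can re-stylize its own features with those statistics without any raw data changing hands. Purely illustrative; StyleDDG's actual style statistics and updates follow the paper's formalism. </p> <pre><code class="language-python">
import numpy as np

# Sketch of style sharing: a client's "style" is the per-feature mean and
# standard deviation, and a peer applies it to its own normalized features.
rng = np.random.default_rng(0)
feats_a = rng.normal(loc=2.0, scale=3.0, size=(100, 8))    # client A features
feats_b = rng.normal(loc=-1.0, scale=0.5, size=(100, 8))   # client B features

def style(f):                        # what a peer would actually share
    return f.mean(axis=0), f.std(axis=0)

def restylize(f, peer_style):        # normalize, then apply peer statistics
    mu, sigma = style(f)
    peer_mu, peer_sigma = peer_style
    return (f - mu) / sigma * peer_sigma + peer_mu

mixed = restylize(feats_a, style(feats_b))
print(mixed.mean().round(2), mixed.std().round(2))   # close to B's style
</code></pre> <p class='mathjax'>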
</p> </div> </dd> <dt> <a name='item72'>[72]</a> <a href ="/abs/2504.06261" title="Abstract" id="2504.06261"> arXiv:2504.06261 </a> [<a href="/pdf/2504.06261" title="Download PDF" id="pdf-2504.06261" aria-labelledby="pdf-2504.06261">pdf</a>, <a href="https://arxiv.org/html/2504.06261v1" title="View HTML" id="html-2504.06261" aria-labelledby="html-2504.06261" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06261" title="Other formats" id="oth-2504.06261" aria-labelledby="oth-2504.06261">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Hogwild! Inference: Parallel LLM Generation via Concurrent Attention </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Rodionov,+G">Gleb Rodionov</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Garipov,+R">Roman Garipov</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shutova,+A">Alina Shutova</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yakushev,+G">George Yakushev</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Egiazarian,+V">Vage Egiazarian</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sinitsin,+A">Anton Sinitsin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kuznedelev,+D">Denis Kuznedelev</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Alistarh,+D">Dan Alistarh</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Preprint, work in progress </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Computation and Language (cs.CL) </div> <p class='mathjax'> Large Language Models (LLMs) have demonstrated the ability to tackle increasingly complex tasks through advanced reasoning, long-form content generation, and tool use. Solving these tasks often involves long inference-time computations. In human problem solving, a common strategy to expedite work is collaboration: by dividing the problem into sub-tasks, exploring different strategies concurrently, etc. Recent research has shown that LLMs can also operate in parallel by implementing explicit cooperation frameworks, such as voting mechanisms or the explicit creation of independent sub-tasks that can be executed in parallel. However, each of these frameworks may not be suitable for all types of tasks, which can hinder their applicability. In this work, we propose a different design approach: we run LLM "workers" in parallel, allowing them to synchronize via a concurrently-updated attention cache and prompt these workers to decide how best to collaborate. Our approach allows the instances to come up with their own collaboration strategy for the problem at hand, all the while "seeing" each other's partial progress in the concurrent cache. We implement this approach via Hogwild! Inference: a parallel LLM inference engine where multiple instances of the same LLM run in parallel with the same attention cache, with "instant" access to each other's generated tokens. Hogwild! Inference takes advantage of Rotary Position Embeddings (RoPE) to avoid recomputation while improving parallel hardware utilization. We find that modern reasoning-capable LLMs can perform inference with shared Key-Value cache out of the box, without additional fine-tuning. 
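</p> <p class='mathjax'> A loose analogue of the shared-cache idea using plain Python threads: each "worker" appends tokens to a common buffer and can see everything the other workers have produced so far. Real Hogwild! Inference shares a Key-Value cache inside the model; this toy only illustrates the concurrent-visibility idea. </p> <pre><code class="language-python">
import threading
import time

# Toy analogue of workers generating into a shared cache: each thread
# appends its next "token" to a common list and can read everything the
# other workers have produced so far. Illustration only.
shared, lock = [], threading.Lock()

def worker(name, steps=5):
    for i in range(steps):
        with lock:
            seen = len(shared)                  # progress visible so far
            shared.append(f"{name}:tok{i} (saw {seen})")
        time.sleep(0.001)

threads = [threading.Thread(target=worker, args=(w,)) for w in ("A", "B")]
for t in threads:
    t.start()
for t in threads:
    t.join()
print("\n".join(shared))
</code></pre> <p class='mathjax'>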
</p> </div> </dd> <dt> <a name='item73'>[73]</a> <a href ="/abs/2504.06265" title="Abstract" id="2504.06265"> arXiv:2504.06265 </a> [<a href="/pdf/2504.06265" title="Download PDF" id="pdf-2504.06265" aria-labelledby="pdf-2504.06265">pdf</a>, <a href="https://arxiv.org/html/2504.06265v1" title="View HTML" id="html-2504.06265" aria-labelledby="html-2504.06265" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06265" title="Other formats" id="oth-2504.06265" aria-labelledby="oth-2504.06265">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> GOLLuM: Gaussian Process Optimized LLMs -- Reframing LLM Finetuning through Bayesian Optimization </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Rankovi%C4%87,+B">Bojana Ranković</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Schwaller,+P">Philippe Schwaller</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Large Language Models (LLMs) can encode complex relationships in their latent spaces, yet harnessing them for optimization under uncertainty remains challenging. We address this gap with a novel architecture that reframes LLM finetuning as Gaussian process (GP) marginal likelihood optimization via deep kernel methods. We introduce LLM-based deep kernels, jointly optimized with GPs to preserve the benefits of both: LLMs provide a rich and flexible input space for Bayesian optimization, and GPs model this space with predictive uncertainty for more efficient sampling. Applied to Buchwald-Hartwig reaction optimization, our method nearly doubles the discovery rate of high-performing reactions compared to static LLM embeddings (from 24% to 43% coverage of the top 5% reactions in just 50 optimization iterations). We also observe a 14% improvement over domain-specific representations without requiring specialized features. Extensive empirical evaluation across 19 benchmarks, ranging from general chemistry to reaction and molecular property optimization, demonstrates our method's robustness, generality, and consistent improvements across: (1) tasks, (2) LLM architectures (encoder, decoder, encoder-decoder), (3) pretraining domains (chemistry-related or general-purpose) and (4) hyperparameter settings (tuned once on a single dataset). Finally, we explain these improvements: joint LLM-GP optimization through marginal likelihood implicitly performs contrastive learning, aligning representations to produce (1) better-structured embedding spaces, (2) improved uncertainty calibration, and (3) more efficient sampling, without requiring any external loss. This work provides both practical advances in sample-efficient optimization and insights into what makes Bayesian optimization effective. 
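</p> <p class='mathjax'> A pure-numpy sketch of the deep-kernel ingredient: inputs pass through a frozen feature map (standing in for LLM embeddings), an RBF kernel is placed on top, and kernel hyperparameters are scored by the GP log marginal likelihood. In the paper the feature map is the finetuned LLM and everything is optimized jointly; here only a lengthscale is selected, for illustration. </p> <pre><code class="language-python">
import numpy as np

# Sketch of deep-kernel GP model selection: embed inputs, build an RBF
# kernel on the embeddings, and compare hyperparameters by the GP log
# marginal likelihood (constant term omitted). Toy only.
rng = np.random.default_rng(0)
X = rng.uniform(-2, 2, size=(40, 1))
y = np.sin(3 * X[:, 0]) + 0.1 * rng.normal(size=40)

def embed(x):                       # frozen "embedding" stand-in
    return np.hstack([x, np.tanh(x), x ** 2])

def log_marginal(lengthscale, noise=0.01):
    Z = embed(X) / lengthscale
    sq = ((Z[:, None, :] - Z[None, :, :]) ** 2).sum(-1)
    K = np.exp(-0.5 * sq) + noise * np.eye(len(X))
    L = np.linalg.cholesky(K)
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, y))   # K^{-1} y
    return -0.5 * y @ alpha - np.log(np.diag(L)).sum()

best = max([0.1, 0.3, 1.0, 3.0], key=log_marginal)
print("selected lengthscale:", best)
</code></pre> <p class='mathjax'>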
</p> </div> </dd> </dl> <dl id='articles'> <h3>Cross submissions (showing 58 of 58 entries)</h3> <dt> <a name='item74'>[74]</a> <a href ="/abs/2504.05313" title="Abstract" id="2504.05313"> arXiv:2504.05313 </a> (cross-list from cs.IR) [<a href="/pdf/2504.05313" title="Download PDF" id="pdf-2504.05313" aria-labelledby="pdf-2504.05313">pdf</a>, <a href="https://arxiv.org/html/2504.05313v1" title="View HTML" id="html-2504.05313" aria-labelledby="html-2504.05313" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05313" title="Other formats" id="oth-2504.05313" aria-labelledby="oth-2504.05313">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Systematic Survey on Federated Sequential Recommendation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+Y">Yichen Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Qin,+Q">Qiyu Qin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhu,+G">Gaoyang Zhu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xu,+W">Wenchao Xu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+H">Haozhao Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+Y">Yuhua Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+R">Rui Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+R">Ruixuan Li</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Information Retrieval (cs.IR)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Sequential recommendation is an advanced recommendation technique that utilizes the sequence of user behaviors to generate personalized suggestions by modeling the temporal dependencies and patterns in user preferences. However, it requires a server to centrally collect users' data, which poses a threat to the data privacy of different users. In recent years, federated learning has emerged as a distributed architecture that allows participants to train a global model while keeping their private data locally. This survey pioneers Federated Sequential Recommendation (FedSR), where each user joins as a participant in federated training to achieve a recommendation service that balances data privacy and model performance. We begin with an introduction to the background and unique challenges of FedSR. Then, we review existing solutions from two levels, each of which includes two specific techniques. Additionally, we discuss the critical challenges and future research directions in FedSR. 
</p> </div> </dd> <dt> <a name='item75'>[75]</a> <a href ="/abs/2504.05317" title="Abstract" id="2504.05317"> arXiv:2504.05317 </a> (cross-list from cs.IR) [<a href="/pdf/2504.05317" title="Download PDF" id="pdf-2504.05317" aria-labelledby="pdf-2504.05317">pdf</a>, <a href="/format/2504.05317" title="Other formats" id="oth-2504.05317" aria-labelledby="oth-2504.05317">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> On Synthesizing Data for Context Attribution in Question Answering </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Radevski,+G">Gorjan Radevski</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gashteovski,+K">Kiril Gashteovski</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Syed,+S">Shahbaz Syed</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Malon,+C">Christopher Malon</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Nicolas,+S">Sebastien Nicolas</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hung,+C">Chia-Chien Hung</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sztyler,+T">Timo Sztyler</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Heu%C3%9Fer,+V">Verena Heußer</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Rim,+W+B">Wiem Ben Rim</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Enomoto,+M">Masafumi Enomoto</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Takeoka,+K">Kunihiro Takeoka</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Oyamada,+M">Masafumi Oyamada</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Glava%C5%A1,+G">Goran Glavaš</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lawrence,+C">Carolin Lawrence</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Information Retrieval (cs.IR)</span>; Artificial Intelligence (cs.AI); Computation and Language (cs.CL); Machine Learning (cs.LG) </div> <p class='mathjax'> Question Answering (QA) accounts for a significant portion of LLM usage "in the wild". However, LLMs sometimes produce false or misleading responses, also known as "hallucinations". Therefore, grounding the generated answers in contextually provided information -- i.e., providing evidence for the generated text -- is paramount for LLMs' trustworthiness. Providing this information is the task of context attribution. In this paper, we systematically study LLM-based approaches for this task, namely we investigate (i) zero-shot inference, (ii) LLM ensembling, and (iii) fine-tuning of small LMs on synthetic data generated by larger LLMs. Our key contribution is SynQA: a novel generative strategy for synthesizing context attribution data. Given selected context sentences, an LLM generates QA pairs that are supported by these sentences. This leverages LLMs' natural strengths in text generation while ensuring clear attribution paths in the synthetic training data. We show that the attribution data synthesized via SynQA is highly effective for fine-tuning small LMs for context attribution in different QA tasks and domains. Finally, with a user study, we validate the usefulness of small LMs (fine-tuned on synthetic data from SynQA) in context attribution for QA. 
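</p> <p class='mathjax'> The generation step can be pictured as a prompt constructed from the selected context sentences, which a larger LLM would complete into a QA pair plus supporting sentence ids, yielding attribution-labelled training data. The prompt wording below is our guess for illustration, not the paper's template. </p> <pre><code class="language-python">
# Sketch of a SynQA-style generation step: given selected context sentences,
# ask an LLM to produce a QA pair supported by exactly those sentences.
# The prompt format is a placeholder, not the paper's.
def build_synqa_prompt(context_sentences):
    numbered = "\n".join(f"[{i}] {s}" for i, s in enumerate(context_sentences))
    return (
        "Context sentences:\n" + numbered + "\n\n"
        "Write one question and its answer such that the answer is fully "
        "supported by the sentences above, then list the supporting "
        "sentence ids.\nFormat: QUESTION: ... ANSWER: ... SUPPORT: [ids]"
    )

print(build_synqa_prompt([
    "The Amazon is the largest rainforest on Earth.",
    "It spans nine countries in South America.",
]))
# A completion like ("Which continent's countries does the Amazon span?",
# "South America", SUPPORT: [1]) would become one fine-tuning example
# for a small context-attribution model.
</code></pre> <p class='mathjax'>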
</p> </div> </dd> <dt> <a name='item76'>[76]</a> <a href ="/abs/2504.05320" title="Abstract" id="2504.05320"> arXiv:2504.05320 </a> (cross-list from cs.IR) [<a href="/pdf/2504.05320" title="Download PDF" id="pdf-2504.05320" aria-labelledby="pdf-2504.05320">pdf</a>, <a href="/format/2504.05320" title="Other formats" id="oth-2504.05320" aria-labelledby="oth-2504.05320">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Document clustering with evolved multiword search queries </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Hirsch,+L">Laurence Hirsch</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hirsch,+R">Robin Hirsch</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ogunleye,+B">Bayode Ogunleye</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 15 pages </div> <div class='list-journal-ref'><span class='descriptor'>Journal-ref:</span> Evol. Intel. 18, 37. (2025) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Information Retrieval (cs.IR)</span>; Machine Learning (cs.LG); Neural and Evolutionary Computing (cs.NE) </div> <p class='mathjax'> Text clustering holds significant value across various domains due to its ability to identify patterns and group related information. Current approaches which rely heavily on a computed similarity measure between documents are often limited in accuracy and interpretability. We present a novel approach to the problem based on a set of evolved search queries. Clusters are formed as the set of documents matched by a single search query in the set of queries. The queries are optimized to maximize the number of documents returned and to minimize the overlap between clusters (documents returned by more than one query). Where queries contain more than one word, they are interpreted disjunctively. We have found it useful to assign one word to be the root and constrain the query construction such that the set of documents returned by any additional query words intersect with the set returned by the root word. Not all documents in a collection are returned by any of the search queries in a set, so once the search query evolution is completed a second stage is performed whereby a KNN algorithm is applied to assign all unassigned documents to their nearest cluster. We describe the method and present results using 8 text datasets, comparing effectiveness with well-known existing algorithms. We note that, as well as achieving the highest accuracy on these datasets, the search query format provides the qualitative benefits of being interpretable and modifiable whilst providing a causal explanation of cluster construction. 
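</p> <p class='mathjax'> A small sketch of the cluster-by-query mechanism with hand-fixed queries (the paper evolves them): each disjunctive word query defines a cluster, and a second stage attaches unmatched documents to the cluster with the largest word overlap, a crude stand-in for the KNN step. </p> <pre><code class="language-python">
# Sketch of clustering via search queries: each cluster is the set of
# documents matching a disjunctive multiword query; leftovers are attached
# to the nearest cluster afterwards. Queries here are fixed by hand.
docs = [
    "the striker scored a goal in the football match",
    "the midfielder passed and the team scored",
    "the court ruled on the new privacy law",
    "the judge delayed the trial verdict",
    "analysts said the verdict rattled markets",
]
queries = [{"goal", "scored", "football"}, {"court", "judge", "law"}]

def matches(doc, query):                    # disjunctive interpretation
    return bool(set(doc.split()) & query)

clusters = [[d for d in docs if matches(d, q)] for q in queries]
assigned = set(d for c in clusters for d in c)
for d in docs:                              # second stage: nearest cluster
    if d not in assigned:
        overlap = [len(set(d.split()) & set(" ".join(c).split()))
                   for c in clusters]
        clusters[overlap.index(max(overlap))].append(d)
print(clusters)
</code></pre> <p class='mathjax'>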
</p> </div> </dd> <dt> <a name='item77'>[77]</a> <a href ="/abs/2504.05321" title="Abstract" id="2504.05321"> arXiv:2504.05321 </a> (cross-list from cs.IR) [<a href="/pdf/2504.05321" title="Download PDF" id="pdf-2504.05321" aria-labelledby="pdf-2504.05321">pdf</a>, <a href="https://arxiv.org/html/2504.05321v1" title="View HTML" id="html-2504.05321" aria-labelledby="html-2504.05321" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05321" title="Other formats" id="oth-2504.05321" aria-labelledby="oth-2504.05321">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> VALUE: Value-Aware Large Language Model for Query Rewriting via Weighted Trie in Sponsored Search </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zuo,+B">Boyang Zuo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+X">Xiao Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+F">Feng Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+P">Pengjie Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xu,+J">Jian Xu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zheng,+B">Bo Zheng</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Information Retrieval (cs.IR)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> In the realm of sponsored search advertising, matching advertisements with the search intent of a user's query is crucial. Query-to-bidwords (i.e., bidding keywords) rewriting is a vital technique that has garnered significant attention. Recently, with the prevalence of LLMs, generative retrieval methods have proven effective in producing high-relevance rewrites. However, we have identified a significant limitation in existing approaches: While fine-tuning LLMs for specific domains enhances semantic relevance, these models have no perception of the intrinsic value of their generated outputs, such as commercial value. Therefore, after SFT, an RLHF phase is often employed to address this issue. Nevertheless, traditional preference alignment methods often face challenges in aligning fine-grained values and are susceptible to overfitting, which diminishes the effectiveness and quality of the generated results. To address these challenges, we propose VALUE (Value-Aware Large language model for qUery rewriting via wEighted trie), the first framework that ensures the generation of high-value and highly relevant bidwords. Our approach utilizes a weighted trie, an innovative modification of the traditional trie data structure. By modulating the LLM's output probability distribution with value information from the trie during the decoding process, we constrain the generation space and guide the trajectory of text production. Offline experiments demonstrate the effectiveness of our method in semantic matching and preference alignment, showing a more than fivefold improvement in the value attribute. Online A/B tests further revealed that our Revenue Per Mille (RPM) metric increased by 1.64%. VALUE has been deployed on our advertising system since October 2024 and served the Double Eleven promotions, the biggest shopping carnival in China. 
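</p> <p class='mathjax'> A toy sketch of value-aware constrained decoding: a trie over permitted bidword continuations stores a value per branch, and the model's next-token distribution is pruned to the trie and re-weighted by those values. The vocabulary, values, and exact rescoring rule below are illustrative, not the production system. </p> <pre><code class="language-python">
# Sketch of value-aware constrained decoding with a weighted trie: only
# continuations present in the trie are allowed, and their probabilities
# are re-weighted by the value stored on each branch. Toy example.
trie = {
    "red": {"shoes": {"_value": 5.0}, "hat": {"_value": 1.0}},
    "blue": {"shoes": {"_value": 2.0}},
}

def rescore(prefix, model_probs):
    node = trie
    for tok in prefix:                      # walk the trie along the prefix
        node = node[tok]
    scores = {}
    for tok, p in model_probs.items():
        if tok in node:                     # prune tokens leaving the trie
            scores[tok] = p * node[tok]["_value"]
    total = sum(scores.values())
    return {t: s / total for t, s in scores.items()}

# the LM slightly prefers "hat", but the value weights flip the choice
print(rescore(["red"], {"shoes": 0.45, "hat": 0.55, "sock": 0.0}))
</code></pre> <p class='mathjax'>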
</p> </div> </dd> <dt> <a name='item78'>[78]</a> <a href ="/abs/2504.05336" title="Abstract" id="2504.05336"> arXiv:2504.05336 </a> (cross-list from quant-ph) [<a href="/pdf/2504.05336" title="Download PDF" id="pdf-2504.05336" aria-labelledby="pdf-2504.05336">pdf</a>, <a href="https://arxiv.org/html/2504.05336v1" title="View HTML" id="html-2504.05336" aria-labelledby="html-2504.05336" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05336" title="Other formats" id="oth-2504.05336" aria-labelledby="oth-2504.05336">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Quantum Adaptive Self-Attention for Quantum Transformer Models </div> <div class='list-authors'><a href="https://arxiv.org/search/quant-ph?searchtype=author&query=Chen,+C">Chi-Sheng Chen</a>, <a href="https://arxiv.org/search/quant-ph?searchtype=author&query=Kuo,+E">En-Jui Kuo</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Quantum Physics (quant-ph)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Transformer models have revolutionized sequential learning across various domains, yet their self-attention mechanism incurs quadratic computational cost, posing limitations for real-time and resource-constrained tasks. To address this, we propose Quantum Adaptive Self-Attention (QASA), a novel hybrid architecture that enhances classical Transformer models with a quantum attention mechanism. QASA replaces dot-product attention with a parameterized quantum circuit (PQC) that adaptively captures inter-token relationships in the quantum Hilbert space. Additionally, a residual quantum projection module is introduced before the feedforward network to further refine temporal features. Our design retains classical efficiency in earlier layers while injecting quantum expressiveness in the final encoder block, ensuring compatibility with current NISQ hardware. Experiments on synthetic time-series tasks demonstrate that QASA achieves faster convergence and superior generalization compared to both standard Transformers and reduced classical variants. Preliminary complexity analysis suggests potential quantum advantages in gradient computation, opening new avenues for efficient quantum deep learning models. </p> </div> </dd> <dt> <a name='item79'>[79]</a> <a href ="/abs/2504.05341" title="Abstract" id="2504.05341"> arXiv:2504.05341 </a> (cross-list from cs.NE) [<a href="/pdf/2504.05341" title="Download PDF" id="pdf-2504.05341" aria-labelledby="pdf-2504.05341">pdf</a>, <a href="https://arxiv.org/html/2504.05341v1" title="View HTML" id="html-2504.05341" aria-labelledby="html-2504.05341" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05341" title="Other formats" id="oth-2504.05341" aria-labelledby="oth-2504.05341">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Three-Factor Learning in Spiking Neural Networks: An Overview of Methods and Trends from a Machine Learning Perspective </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Mazurek,+S">Szymon Mazurek</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Caputa,+J">Jakub Caputa</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Argasi%C5%84ski,+J+K">Jan K. 
Argasiński</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wielgosz,+M">Maciej Wielgosz</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Pre-print </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Neural and Evolutionary Computing (cs.NE)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Three-factor learning rules in Spiking Neural Networks (SNNs) have emerged as a crucial extension to traditional Hebbian learning and Spike-Timing-Dependent Plasticity (STDP), incorporating neuromodulatory signals to improve adaptation and learning efficiency. These mechanisms enhance biological plausibility and facilitate improved credit assignment in artificial neural systems. This paper takes a machine learning perspective on this topic, providing an overview of recent advances in three-factor learning and discussing theoretical foundations, algorithmic implementations, and their relevance to reinforcement learning and neuromorphic computing. In addition, we explore interdisciplinary approaches, scalability challenges, and potential applications in robotics, cognitive modeling, and AI systems. Finally, we highlight key research gaps and propose future directions for bridging the gap between neuroscience and artificial intelligence. </p> </div> </dd> <dt> <a name='item80'>[80]</a> <a href ="/abs/2504.05347" title="Abstract" id="2504.05347"> arXiv:2504.05347 </a> (cross-list from cs.NE) [<a href="/pdf/2504.05347" title="Download PDF" id="pdf-2504.05347" aria-labelledby="pdf-2504.05347">pdf</a>, <a href="https://arxiv.org/html/2504.05347v1" title="View HTML" id="html-2504.05347" aria-labelledby="html-2504.05347" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05347" title="Other formats" id="oth-2504.05347" aria-labelledby="oth-2504.05347">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Structuring Multiple Simple Cycle Reservoirs with Particle Swarm Optimization </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+Z">Ziqiang Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Fong,+R+S">Robert Simon Fong</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Fujiwara,+K">Kantaro Fujiwara</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Aihara,+K">Kazuyuki Aihara</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Tanaka,+G">Gouhei Tanaka</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Neural and Evolutionary Computing (cs.NE)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Reservoir Computing (RC) is a time-efficient computational paradigm derived from Recurrent Neural Networks (RNNs). The Simple Cycle Reservoir (SCR) is an RC model that stands out for its minimalistic design, offering extremely low construction complexity and proven capability of universally approximating time-invariant causal fading memory filters, even in the linear dynamics regime. This paper introduces Multiple Simple Cycle Reservoirs (MSCRs), a multi-reservoir framework that extends Echo State Networks (ESNs) by replacing a single large reservoir with multiple interconnected SCRs. 
We demonstrate that optimizing MSCR using Particle Swarm Optimization (PSO) outperforms existing multi-reservoir models, achieving competitive predictive performance with a lower-dimensional state space. By modeling interconnections as a weighted Directed Acyclic Graph (DAG), our approach enables flexible, task-specific network topology adaptation. Numerical simulations on three benchmark time-series prediction tasks confirm these advantages over rival algorithms. These findings highlight the potential of MSCR-PSO as a promising framework for optimizing multi-reservoir systems, providing a foundation for further advancements and applications of interconnected SCRs for developing efficient AI devices. </p> </div> </dd> <dt> <a name='item81'>[81]</a> <a href ="/abs/2504.05349" title="Abstract" id="2504.05349"> arXiv:2504.05349 </a> (cross-list from stat.ML) [<a href="/pdf/2504.05349" title="Download PDF" id="pdf-2504.05349" aria-labelledby="pdf-2504.05349">pdf</a>, <a href="https://arxiv.org/html/2504.05349v1" title="View HTML" id="html-2504.05349" aria-labelledby="html-2504.05349" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05349" title="Other formats" id="oth-2504.05349" aria-labelledby="oth-2504.05349">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Hyperflows: Pruning Reveals the Importance of Weights </div> <div class='list-authors'><a href="https://arxiv.org/search/stat?searchtype=author&query=Barbulescu,+E">Eugen Barbulescu</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Alexoaie,+A">Antonio Alexoaie</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (stat.ML)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Network pruning is used to reduce inference latency and power consumption in large neural networks. However, most existing methods struggle to accurately assess the importance of individual weights due to their inherent interrelatedness, leading to poor performance, especially at extreme sparsity levels. We introduce Hyperflows, a dynamic pruning approach that estimates each weight's importance by observing the network's gradient response to the weight's removal. A global pressure term continuously drives all weights toward pruning, with those critical for accuracy being automatically regrown based on their flow, the aggregated gradient signal when they are absent. We explore the relationship between final sparsity and pressure, deriving power-law equations similar to those found in neural scaling laws. Empirically, we demonstrate state-of-the-art results with ResNet-50 and VGG-19 on CIFAR-10 and CIFAR-100. 
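</p> <p class='mathjax'> The prune-and-regrow mechanic described in this abstract can be sketched compactly. In the sketch below, a weight's "flow" is approximated by the gradient magnitude it receives while masked out, and a global pressure term prunes the smallest-magnitude active weights; the function names, magnitude-based scores, and hyperparameters are illustrative assumptions, not the paper's implementation. </p> <pre>
import torch

def prune_and_regrow(weight, grad, mask, pressure=0.01, regrow_frac=0.5):
    """One illustrative dynamic-pruning step (not the paper's exact rule).
    weight, grad, mask: same-shape contiguous tensors; mask holds 0./1."""
    w, g, m = weight.view(-1), grad.view(-1), mask.view(-1)
    active = m.bool()

    # Global pressure: push the smallest-magnitude active weights out.
    n_prune = max(1, int(pressure * int(active.sum())))
    prune_scores = torch.where(active, w.abs(), torch.full_like(w, float("inf")))
    m[torch.topk(prune_scores, n_prune, largest=False).indices] = 0.0

    # "Flow": the gradient signal a weight receives while absent;
    # regrow the weights with the strongest flow.
    absent = ~m.bool()
    flow = torch.where(absent, g.abs(), torch.full_like(g, float("-inf")))
    n_regrow = max(1, int(regrow_frac * n_prune))
    m[torch.topk(flow, n_regrow).indices] = 1.0
    return mask

mask = torch.ones(64)
weight, grad = torch.randn(64), torch.randn(64)  # grad computed with mask applied
mask = prune_and_regrow(weight, grad, mask)
</pre> <p class='mathjax'>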
</p> </div> </dd> <dt> <a name='item82'>[82]</a> <a href ="/abs/2504.05350" title="Abstract" id="2504.05350"> arXiv:2504.05350 </a> (cross-list from econ.EM) [<a href="/pdf/2504.05350" title="Download PDF" id="pdf-2504.05350" aria-labelledby="pdf-2504.05350">pdf</a>, <a href="https://arxiv.org/html/2504.05350v1" title="View HTML" id="html-2504.05350" aria-labelledby="html-2504.05350" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05350" title="Other formats" id="oth-2504.05350" aria-labelledby="oth-2504.05350">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Non-linear Phillips Curve for India: Evidence from Explainable Machine Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/econ?searchtype=author&query=Sengupta,+S">Shovon Sengupta</a>, <a href="https://arxiv.org/search/econ?searchtype=author&query=Pratap,+B">Bhanu Pratap</a>, <a href="https://arxiv.org/search/econ?searchtype=author&query=Pawar,+A">Amit Pawar</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Econometrics (econ.EM)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> The conventional linear Phillips curve model, while widely used in policymaking, often struggles to deliver accurate forecasts in the presence of structural breaks and inherent nonlinearities. This paper addresses these limitations by leveraging machine learning methods within a New Keynesian Phillips Curve framework to forecast and explain headline inflation in India, a major emerging economy. Our analysis demonstrates that machine learning-based approaches significantly outperform standard linear models in forecasting accuracy. Moreover, by employing explainable machine learning techniques, we reveal that the Phillips curve relationship in India is highly nonlinear, characterized by thresholds and interaction effects among key variables. Headline inflation is primarily driven by inflation expectations, followed by past inflation and the output gap, while supply shocks, except rainfall, exert only a marginal influence. These findings highlight the ability of machine learning models to improve forecast accuracy and uncover complex, nonlinear dynamics in inflation data, offering valuable insights for policymakers. 
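</p> <p class='mathjax'> The nonlinear, explainable pipeline described in this abstract can be illustrated generically: fit a nonlinear learner, then inspect it with permutation importance. The toy data below, including a thresholded output-gap effect, is a stand-in under stated assumptions and is not the paper's data or model. </p> <pre>
# Hedged sketch: nonlinear forecast plus post-hoc feature importance.
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.inspection import permutation_importance

rng = np.random.default_rng(0)
names = ["expectations", "lagged_cpi", "output_gap", "rainfall"]  # placeholders
X = rng.normal(size=(400, 4))
# Synthetic "inflation" with a threshold nonlinearity in the output gap.
y = 0.6 * X[:, 0] + 0.3 * X[:, 1] + 0.2 * np.maximum(X[:, 2], 0.0) \
    + 0.05 * rng.normal(size=400)

model = GradientBoostingRegressor().fit(X[:300], y[:300])
imp = permutation_importance(model, X[300:], y[300:], n_repeats=20, random_state=0)
for name, score in zip(names, imp.importances_mean):
    print(f"{name:>12}: {score:.3f}")   # expectations should dominate
</pre> <p class='mathjax'>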
</p> </div> </dd> <dt> <a name='item83'>[83]</a> <a href ="/abs/2504.05364" title="Abstract" id="2504.05364"> arXiv:2504.05364 </a> (cross-list from cs.SD) [<a href="/pdf/2504.05364" title="Download PDF" id="pdf-2504.05364" aria-labelledby="pdf-2504.05364">pdf</a>, <a href="/format/2504.05364" title="Other formats" id="oth-2504.05364" aria-labelledby="oth-2504.05364">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Of All StrIPEs: Investigating Structure-informed Positional Encoding for Efficient Music Generation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Agarwal,+M">Manvi Agarwal</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+C">Changhong Wang</a> (LTCI), <a href="https://arxiv.org/search/cs?searchtype=author&query=Richard,+G">Gael Richard</a> (S2A, IDS)</div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG); Machine Learning (stat.ML) </div> <p class='mathjax'> While music remains a challenging domain for generative models like Transformers, a two-pronged approach has recently proved successful: inserting musically-relevant structural information into the positional encoding (PE) module and using kernel approximation techniques based on Random Fourier Features (RFF) to lower the computational cost from quadratic to linear. Yet, it is not clear how such RFF-based efficient PEs compare with those based on rotation matrices, such as Rotary Positional Encoding (RoPE). In this paper, we present a unified framework based on kernel methods to analyze both families of efficient PEs. We use this framework to develop a novel PE method called RoPEPool, capable of extracting causal relationships from temporal sequences. Using RFF-based PEs and rotation-based PEs, we demonstrate how seemingly disparate PEs can be jointly studied by considering the content-context interactions they induce. For empirical validation, we use a symbolic music generation task, namely, melody harmonization. We show that RoPEPool, combined with highly-informative structural priors, outperforms all methods. 
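</p> <p class='mathjax'> For context on the rotation-based family discussed above, here is a minimal NumPy sketch of standard RoPE: attention scores between rotated queries and keys then depend on relative token offsets. RoPEPool's pooling mechanism is not reproduced here. </p> <pre>
import numpy as np

def rope(x):
    """Rotate pairs of feature dims by position-dependent angles.
    x: (seq_len, dim) with even dim."""
    seq_len, dim = x.shape
    half = dim // 2
    freqs = 1.0 / (10000 ** (np.arange(half) / half))   # per-pair frequencies
    angles = np.outer(np.arange(seq_len), freqs)        # (seq_len, half)
    cos, sin = np.cos(angles), np.sin(angles)
    x1, x2 = x[:, :half], x[:, half:]
    return np.concatenate([x1 * cos - x2 * sin, x1 * sin + x2 * cos], axis=-1)

q = np.random.randn(16, 8)
k = np.random.randn(16, 8)
# Relative-position property: these scores depend on token offsets.
scores = rope(q) @ rope(k).T
</pre> <p class='mathjax'>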
</p> </div> </dd> <dt> <a name='item84'>[84]</a> <a href ="/abs/2504.05365" title="Abstract" id="2504.05365"> arXiv:2504.05365 </a> (cross-list from cs.NE) [<a href="/pdf/2504.05365" title="Download PDF" id="pdf-2504.05365" aria-labelledby="pdf-2504.05365">pdf</a>, <a href="https://arxiv.org/html/2504.05365v1" title="View HTML" id="html-2504.05365" aria-labelledby="html-2504.05365" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05365" title="Other formats" id="oth-2504.05365" aria-labelledby="oth-2504.05365">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Nature-Inspired Colony of Artificial Intelligence System with Fast, Detailed, and Organized Learner Agents for Enhancing Diversity and Quality </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Suthaharan,+S">Shan Suthaharan</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 12 pages, 8 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Neural and Evolutionary Computing (cs.NE)</span>; Artificial Intelligence (cs.AI); Computer Vision and Pattern Recognition (cs.CV); Machine Learning (cs.LG); Multiagent Systems (cs.MA) </div> <p class='mathjax'> The concepts of convolutional neural networks (CNNs) and multi-agent systems are two important areas of research in artificial intelligence (AI). In this paper, we present an approach that builds a CNN-based colony of AI agents to serve as a single system and perform multiple tasks (e.g., predictions or classifications) in an environment. The proposed system emulates the natural environment of a biological system, such as an ant colony or a human colony. The proposed colony of AI, defined as a role-based system, uniquely contributes to accomplishing tasks in an environment by incorporating AI agents that are fast learners, detailed learners, and organized learners. These learners can enhance their localized learning and their collective decisions as a single system, a colony of AI agents. This approach also enhances the diversity and quality of the colony of AI with the help of Genetic Algorithms and their crossover and mutation mechanisms. The evolution of fast, detailed, and organized learners in the colony of AI is achieved by introducing a unique one-to-one mapping between these learners and the pretrained VGG16, VGG19, and ResNet50 models, respectively. This role-based approach creates two parent-AI agents using the AI models through processes called the intra- and inter-marriage of AI, so that they can share their learned knowledge (weights and biases) based on a probabilistic rule and produce diversified child-AI agents to perform new tasks. This process will form a colony of AI that consists of families of multi-model and mixture-model AI agents to improve diversity and quality. Simulations show that the colony of AI, built using the VGG16, VGG19, and ResNet50 models, can provide a single system that generates child-AI agents of excellent predictive performance, ranging between 82% and 95% in F1-score, to make diversified collective and quality decisions on a task.
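</p> <p class='mathjax'> A hedged sketch of the crossover-and-mutation step behind the "marriage of AI" idea above: two parent networks exchange layer weights with some probability and the child receives small Gaussian mutations. The layer lists stand in for pretrained VGG16/VGG19/ResNet50 weight tensors, and the particular probabilistic rule shown is an illustrative assumption. </p> <pre>
import numpy as np

def crossover(parent_a, parent_b, p_swap=0.5, mutation_std=0.01, rng=None):
    """Produce a child network from two parents, layer by layer."""
    rng = rng or np.random.default_rng()
    child = []
    for wa, wb in zip(parent_a, parent_b):
        w = wb.copy() if rng.random() < p_swap else wa.copy()  # crossover
        w += rng.normal(0.0, mutation_std, size=w.shape)       # mutation
        child.append(w)
    return child

# Placeholder "networks": lists of weight arrays per layer.
parent_a = [np.random.randn(8, 4), np.random.randn(4)]
parent_b = [np.random.randn(8, 4), np.random.randn(4)]
child = crossover(parent_a, parent_b)
</pre> <p class='mathjax'>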
</p> </div> </dd> <dt> <a name='item85'>[85]</a> <a href ="/abs/2504.05407" title="Abstract" id="2504.05407"> arXiv:2504.05407 </a> (cross-list from cs.RO) [<a href="/pdf/2504.05407" title="Download PDF" id="pdf-2504.05407" aria-labelledby="pdf-2504.05407">pdf</a>, <a href="https://arxiv.org/html/2504.05407v1" title="View HTML" id="html-2504.05407" aria-labelledby="html-2504.05407" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05407" title="Other formats" id="oth-2504.05407" aria-labelledby="oth-2504.05407">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> TRATSS: Transformer-Based Task Scheduling System for Autonomous Vehicles </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Youssef,+Y">Yazan Youssef</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=de+Araujo,+P+R+M">Paulo Ricardo Marques de Araujo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Noureldin,+A">Aboelmagd Noureldin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Givigi,+S">Sidney Givigi</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 9 pages </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Robotics (cs.RO)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Efficient scheduling remains a critical challenge in various domains, requiring solutions to complex NP-hard optimization problems to achieve optimal resource allocation and maximize productivity. In this paper, we introduce a framework called Transformer-Based Task Scheduling System (TRATSS), designed to address the intricacies of single agent scheduling in graph-based environments. By integrating the latest advancements in reinforcement learning and transformer architecture, TRATSS provides a novel system that outputs optimized task scheduling decisions while dynamically adapting to evolving task requirements and resource availability. Leveraging the self-attention mechanism in transformers, TRATSS effectively captures complex task dependencies, thereby providing solutions with enhanced resource utilization and task completion efficiency. Experimental evaluations on benchmark datasets demonstrate TRATSS's effectiveness in providing high-quality solutions to scheduling problems that involve multiple action profiles. 
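</p> <p class='mathjax'> As a generic illustration only (TRATSS's actual policy is learned with reinforcement learning and operates on graph-based environments), the sketch below scores candidate tasks with a single self-attention pass over task embeddings and schedules greedily; all names and dimensions are invented. </p> <pre>
import numpy as np

def softmax(z):
    e = np.exp(z - z.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

def attention_scores(task_emb):
    """Self-attention over task embeddings to capture task dependencies,
    then one scalar score per task."""
    d = task_emb.shape[-1]
    attn = softmax(task_emb @ task_emb.T / np.sqrt(d))
    return (attn @ task_emb).sum(axis=-1)

tasks = np.random.randn(6, 16)     # 6 candidate tasks, 16-dim features
order = np.argsort(-attention_scores(tasks))
print("greedy schedule:", order)
</pre> <p class='mathjax'>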
</p> </div> </dd> <dt> <a name='item86'>[86]</a> <a href ="/abs/2504.05410" title="Abstract" id="2504.05410"> arXiv:2504.05410 </a> (cross-list from cs.CL) [<a href="/pdf/2504.05410" title="Download PDF" id="pdf-2504.05410" aria-labelledby="pdf-2504.05410">pdf</a>, <a href="https://arxiv.org/html/2504.05410v1" title="View HTML" id="html-2504.05410" aria-labelledby="html-2504.05410" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05410" title="Other formats" id="oth-2504.05410" aria-labelledby="oth-2504.05410">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Fast Controlled Generation from Language Models with Adaptive Weighted Rejection Sampling </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Lipkin,+B">Benjamin Lipkin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=LeBrun,+B">Benjamin LeBrun</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Vigly,+J+H">Jacob Hoover Vigly</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Loula,+J">João Loula</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=MacIver,+D+R">David R. MacIver</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Du,+L">Li Du</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Eisner,+J">Jason Eisner</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Cotterell,+R">Ryan Cotterell</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mansinghka,+V">Vikash Mansinghka</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=O'Donnell,+T+J">Timothy J. O'Donnell</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lew,+A+K">Alexander K. Lew</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Vieira,+T">Tim Vieira</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> The dominant approach to generating from language models subject to some constraint is locally constrained decoding (LCD), incrementally sampling tokens at each time step such that the constraint is never violated. Typically, this is achieved through token masking: looping over the vocabulary and excluding non-conforming tokens. There are two important problems with this approach. (i) Evaluating the constraint on every token can be prohibitively expensive -- LM vocabularies often exceed $100,000$ tokens. (ii) LCD can distort the global distribution over strings, sampling tokens based only on local information, even if they lead down dead-end paths. This work introduces a new algorithm that addresses both these problems. First, to avoid evaluating a constraint on the full vocabulary at each step of generation, we propose an adaptive rejection sampling algorithm that typically requires orders of magnitude fewer constraint evaluations. Second, we show how this algorithm can be extended to produce low-variance, unbiased estimates of importance weights at a very small additional cost -- estimates that can be soundly used within previously proposed sequential Monte Carlo algorithms to correct for the myopic behavior of local constraint enforcement.
Through extensive empirical evaluation in text-to-SQL, molecular synthesis, goal inference, pattern matching, and JSON domains, we show that our approach is superior to state-of-the-art baselines, supporting a broader class of constraints and improving both runtime and performance. Additional theoretical and empirical analyses show that our method's runtime efficiency is driven by its dynamic use of computation, scaling with the divergence between the unconstrained and constrained LM, and as a consequence, runtime improvements are greater for better models. </p> </div> </dd> <dt> <a name='item87'>[87]</a> <a href ="/abs/2504.05411" title="Abstract" id="2504.05411"> arXiv:2504.05411 </a> (cross-list from cs.CL) [<a href="/pdf/2504.05411" title="Download PDF" id="pdf-2504.05411" aria-labelledby="pdf-2504.05411">pdf</a>, <a href="https://arxiv.org/html/2504.05411v1" title="View HTML" id="html-2504.05411" aria-labelledby="html-2504.05411" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05411" title="Other formats" id="oth-2504.05411" aria-labelledby="oth-2504.05411">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Less but Better: Parameter-Efficient Fine-Tuning of Large Language Models for Personality Detection </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Shen,+L">Lingzhi Shen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Long,+Y">Yunfei Long</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Cai,+X">Xiaohao Cai</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+G">Guanming Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Razzak,+I">Imran Razzak</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jameel,+S">Shoaib Jameel</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Personality detection automatically identifies an individual's personality from various data sources, such as social media texts. However, as the parameter scale of language models continues to grow, the computational cost becomes increasingly difficult to manage. Fine-tuning also grows more complex, making it harder to justify the effort and reliably predict outcomes. We introduce a novel parameter-efficient fine-tuning framework, PersLLM, to address these challenges. In PersLLM, a large language model (LLM) extracts high-dimensional representations from raw data and stores them in a dynamic memory layer. PersLLM then updates the downstream layers with a replaceable output network, enabling flexible adaptation to various personality detection scenarios. By storing the features in the memory layer, we eliminate the need for repeated complex computations by the LLM. Meanwhile, the lightweight output network serves as a proxy for evaluating the overall effectiveness of the framework, improving the predictability of results. Experimental results on key benchmark datasets like Kaggle and Pandora show that PersLLM significantly reduces computational cost while maintaining competitive performance and strong adaptability. 
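</p> <p class='mathjax'> A minimal sketch of the parameter-efficient pattern described above, under stated assumptions: a frozen encoder stands in for the LLM, features are cached in a memory store so each input is encoded only once, and only a lightweight, replaceable output head is trained. All names are illustrative; PersLLM's dynamic memory layer is more involved. </p> <pre>
import numpy as np

class FrozenEncoder:
    """Stand-in for a frozen LLM feature extractor."""
    def __init__(self, dim=64):
        self.dim = dim
    def encode(self, text):
        # Deterministic pseudo-embedding keyed on the text (placeholder).
        seed = abs(hash(text)) % (2**32)
        return np.random.default_rng(seed).normal(size=self.dim)

memory = {}                      # feature store: avoids repeated LLM passes
encoder = FrozenEncoder()

def features(text):
    if text not in memory:       # encode each input only once
        memory[text] = encoder.encode(text)
    return memory[text]

# Lightweight, replaceable output head: logistic regression on cached features.
W = np.zeros(encoder.dim)
def train_step(text, label, lr=0.1):
    global W
    x = features(text)
    p = 1.0 / (1.0 + np.exp(-W @ x))
    W += lr * (label - p) * x    # only the head is updated
</pre> <p class='mathjax'>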
</p> </div> </dd> <dt> <a name='item88'>[88]</a> <a href ="/abs/2504.05422" title="Abstract" id="2504.05422"> arXiv:2504.05422 </a> (cross-list from cs.CV) [<a href="/pdf/2504.05422" title="Download PDF" id="pdf-2504.05422" aria-labelledby="pdf-2504.05422">pdf</a>, <a href="https://arxiv.org/html/2504.05422v1" title="View HTML" id="html-2504.05422" aria-labelledby="html-2504.05422" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05422" title="Other formats" id="oth-2504.05422" aria-labelledby="oth-2504.05422">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> EP-Diffuser: An Efficient Diffusion Model for Traffic Scene Generation and Prediction via Polynomial Representations </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Yao,+Y">Yue Yao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bouzidi,+M">Mohamed-Khalil Bouzidi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Goehring,+D">Daniel Goehring</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Reichardt,+J">Joerg Reichardt</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Machine Learning (cs.LG); Robotics (cs.RO) </div> <p class='mathjax'> As the prediction horizon increases, predicting the future evolution of traffic scenes becomes increasingly difficult due to the multi-modal nature of agent motion. Most state-of-the-art (SotA) prediction models primarily focus on forecasting the most likely future. However, for the safe operation of autonomous vehicles, it is equally important to cover the distribution for plausible motion alternatives. To address this, we introduce EP-Diffuser, a novel parameter-efficient diffusion-based generative model designed to capture the distribution of possible traffic scene evolutions. Conditioned on road layout and agent history, our model acts as a predictor and generates diverse, plausible scene continuations. We benchmark EP-Diffuser against two SotA models in terms of accuracy and plausibility of predictions on the Argoverse 2 dataset. Despite its significantly smaller model size, our approach achieves both highly accurate and plausible traffic scene predictions. We further evaluate model generalization ability in an out-of-distribution (OoD) test setting using the Waymo Open dataset and show the superior robustness of our approach. The code and model checkpoints can be found here: <a href="https://github.com/continental/EP-Diffuser" rel="external noopener nofollow" class="link-external link-https">this https URL</a>.
</p> </div> </dd> <dt> <a name='item89'>[89]</a> <a href ="/abs/2504.05426" title="Abstract" id="2504.05426"> arXiv:2504.05426 </a> (cross-list from stat.ML) [<a href="/pdf/2504.05426" title="Download PDF" id="pdf-2504.05426" aria-labelledby="pdf-2504.05426">pdf</a>, <a href="https://arxiv.org/html/2504.05426v1" title="View HTML" id="html-2504.05426" aria-labelledby="html-2504.05426" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05426" title="Other formats" id="oth-2504.05426" aria-labelledby="oth-2504.05426">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Survey on Algorithms for multi-index models </div> <div class='list-authors'><a href="https://arxiv.org/search/stat?searchtype=author&query=Bruna,+J">Joan Bruna</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Hsu,+D">Daniel Hsu</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (stat.ML)</span>; Machine Learning (cs.LG); Methodology (stat.ME) </div> <p class='mathjax'> We review the literature on algorithms for estimating the index space in a multi-index model. The primary focus is on computationally efficient (polynomial-time) algorithms in Gaussian space, the assumptions under which consistency is guaranteed by these methods, and their sample complexity. In many cases, a gap is observed between the sample complexity of the best known computationally efficient methods and the information-theoretical minimum. We also review algorithms based on estimating the span of gradients using nonparametric methods, and algorithms based on fitting neural networks using gradient descent. </p> </div> </dd> <dt> <a name='item90'>[90]</a> <a href ="/abs/2504.05455" title="Abstract" id="2504.05455"> arXiv:2504.05455 </a> (cross-list from eess.SP) [<a href="/pdf/2504.05455" title="Download PDF" id="pdf-2504.05455" aria-labelledby="pdf-2504.05455">pdf</a>, <a href="https://arxiv.org/html/2504.05455v1" title="View HTML" id="html-2504.05455" aria-labelledby="html-2504.05455" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05455" title="Other formats" id="oth-2504.05455" aria-labelledby="oth-2504.05455">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Large-Scale Classification of Shortwave Communication Signals with Machine Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/eess?searchtype=author&query=Scholl,+S">Stefan Scholl</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Signal Processing (eess.SP)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> This paper presents a deep learning approach to the classification of 160 shortwave radio signals. It addresses the typical challenges of the shortwave spectrum, which are the large number of different signal types, the presence of various analog modulations and ionospheric propagation. As a classifier, a deep convolutional neural network is used that is trained to recognize 160 typical shortwave signal classes. The approach is blind and therefore requires neither prior knowledge nor special preprocessing of the signal, and no manual design of discriminative features for each signal class. The network is trained on a large number of synthetically generated signals and high quality recordings.
Finally, the network is evaluated on real-world radio signals obtained from globally deployed receiver hardware and achieves up to 90% accuracy for an observation time of only 1 second. </p> </div> </dd> <dt> <a name='item91'>[91]</a> <a href ="/abs/2504.05462" title="Abstract" id="2504.05462"> arXiv:2504.05462 </a> (cross-list from hep-th) [<a href="/pdf/2504.05462" title="Download PDF" id="pdf-2504.05462" aria-labelledby="pdf-2504.05462">pdf</a>, <a href="https://arxiv.org/html/2504.05462v1" title="View HTML" id="html-2504.05462" aria-labelledby="html-2504.05462" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05462" title="Other formats" id="oth-2504.05462" aria-labelledby="oth-2504.05462">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Quantum Mechanics and Neural Networks </div> <div class='list-authors'><a href="https://arxiv.org/search/hep-th?searchtype=author&query=Ferko,+C">Christian Ferko</a>, <a href="https://arxiv.org/search/hep-th?searchtype=author&query=Halverson,+J">James Halverson</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 67 pages, 8 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">High Energy Physics - Theory (hep-th)</span>; Machine Learning (cs.LG); Probability (math.PR); Quantum Physics (quant-ph) </div> <p class='mathjax'> We demonstrate that any Euclidean-time quantum mechanical theory may be represented as a neural network, ensured by the Kosambi-Karhunen-Loève theorem, mean-square path continuity, and finite two-point functions. The additional constraint of reflection positivity, which is related to unitarity, may be achieved by a number of mechanisms, such as imposing neural network parameter space splitting or the Markov property. Non-differentiability of the networks is related to the appearance of non-trivial commutators. Neural networks acting on Markov processes are no longer Markov, but still reflection positive, which facilitates the definition of deep neural network quantum systems. We illustrate these principles in several examples using numerical implementations, recovering classic quantum mechanical results such as Heisenberg uncertainty, non-trivial commutators, and the spectrum.
</p> </div> </dd> <dt> <a name='item92'>[92]</a> <a href ="/abs/2504.05493" title="Abstract" id="2504.05493"> arXiv:2504.05493 </a> (cross-list from math.NA) [<a href="/pdf/2504.05493" title="Download PDF" id="pdf-2504.05493" aria-labelledby="pdf-2504.05493">pdf</a>, <a href="https://arxiv.org/html/2504.05493v1" title="View HTML" id="html-2504.05493" aria-labelledby="html-2504.05493" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05493" title="Other formats" id="oth-2504.05493" aria-labelledby="oth-2504.05493">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Neural network-enhanced integrators for simulating ordinary differential equations </div> <div class='list-authors'><a href="https://arxiv.org/search/math?searchtype=author&query=Othmane,+A">Amine Othmane</a>, <a href="https://arxiv.org/search/math?searchtype=author&query=Fla%C3%9Fkamp,+K">Kathrin Flaßkamp</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Numerical Analysis (math.NA)</span>; Machine Learning (cs.LG); Systems and Control (eess.SY) </div> <p class='mathjax'> Numerous applications necessitate the computation of numerical solutions to differential equations across a wide range of initial conditions and system parameters, which feeds the demand for efficient yet accurate numerical integration methods. This study proposes a neural network (NN) enhancement of classical numerical integrators. NNs are trained to learn integration errors, which are then used as additive correction terms in numerical schemes. The performance of these enhanced integrators is compared with well-established methods through numerical studies, with a particular emphasis on computational efficiency. Analytical properties are examined in terms of local errors and backward error analysis. Embedded Runge-Kutta schemes are then employed to develop enhanced integrators that mitigate generalization risk, ensuring that the neural network's evaluation in previously unseen regions of the state space does not destabilize the integrator. It is guaranteed that the enhanced integrators perform at least as well as the desired classical Runge-Kutta schemes. The effectiveness of the proposed approaches is demonstrated through extensive numerical studies using a realistic model of a wind turbine, with parameters derived from the established simulation framework OpenFast.
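</p> <p class='mathjax'> The enhancement described above amounts to a classical step plus a learned additive correction for the local integration error; with a zero correction it reduces exactly to the base scheme, consistent with the at-least-as-good guarantee. A minimal sketch, with a placeholder standing in for a trained network: </p> <pre>
import numpy as np

def rk4_step(f, t, y, h):
    """One classical Runge-Kutta-4 step."""
    k1 = f(t, y)
    k2 = f(t + h / 2, y + h / 2 * k1)
    k3 = f(t + h / 2, y + h / 2 * k2)
    k4 = f(t + h, y + h * k3)
    return y + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4)

def enhanced_step(f, t, y, h, correction):
    # correction(t, y, h) approximates the local error of the base scheme;
    # with correction == 0 this reduces to plain RK4.
    return rk4_step(f, t, y, h) + correction(t, y, h)

f = lambda t, y: -y                 # toy linear ODE y' = -y
zero = lambda t, y, h: 0.0          # placeholder for a trained NN
y_next = enhanced_step(f, 0.0, np.array([1.0]), 0.1, zero)
</pre> <p class='mathjax'>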
</p> </div> </dd> <dt> <a name='item93'>[93]</a> <a href ="/abs/2504.05500" title="Abstract" id="2504.05500"> arXiv:2504.05500 </a> (cross-list from cs.AI) [<a href="/pdf/2504.05500" title="Download PDF" id="pdf-2504.05500" aria-labelledby="pdf-2504.05500">pdf</a>, <a href="https://arxiv.org/html/2504.05500v1" title="View HTML" id="html-2504.05500" aria-labelledby="html-2504.05500" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05500" title="Other formats" id="oth-2504.05500" aria-labelledby="oth-2504.05500">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Prism: Dynamic and Flexible Benchmarking of LLMs Code Generation with Monte Carlo Tree Search </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Majdinasab,+V">Vahid Majdinasab</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Nikanjam,+A">Amin Nikanjam</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Khomh,+F">Foutse Khomh</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Artificial Intelligence (cs.AI)</span>; Machine Learning (cs.LG); Software Engineering (cs.SE) </div> <p class='mathjax'> The rapid advancement of Large Language Models (LLMs) has outpaced traditional evaluation methods. Static benchmarks fail to capture the depth and breadth of LLM capabilities and eventually become obsolete, while most dynamic approaches either rely too heavily on LLM-based evaluation or remain constrained by predefined test sets. We introduce Prism, a flexible, dynamic benchmarking framework designed for comprehensive LLM assessment. Prism builds on three key components: (1) a tree-based state representation that models evaluation as a Markov Decision Process, (2) a Monte Carlo Tree Search algorithm adapted to uncover challenging evaluation scenarios, and (3) a multi-agent evaluation pipeline that enables simultaneous assessment of diverse capabilities. To ensure robust evaluation, Prism integrates structural measurements of tree exploration patterns with performance metrics across difficulty levels, providing detailed diagnostics of error patterns, test coverage, and solution approaches. Through extensive experiments on five state-of-the-art LLMs, we analyze how model architecture and scale influence code generation performance across varying task difficulties. Our results demonstrate Prism's effectiveness as a dynamic benchmark that evolves with model advancements while offering deeper insights into their limitations. 
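</p> <p class='mathjax'> For reference, a sketch of the UCT selection rule at the heart of Monte Carlo Tree Search, which Prism adapts to steer exploration toward challenging evaluation states; the visit statistics below are placeholders, not Prism's reward model. </p> <pre>
import math

def uct_select(children, c=1.4):
    """children: list of dicts with visit count 'n' and total reward 'w'."""
    total = sum(ch["n"] for ch in children) or 1
    def score(ch):
        if ch["n"] == 0:
            return float("inf")     # explore unvisited states first
        # Exploitation (mean reward) plus exploration bonus.
        return ch["w"] / ch["n"] + c * math.sqrt(math.log(total) / ch["n"])
    return max(children, key=score)

children = [{"n": 10, "w": 6.0}, {"n": 3, "w": 2.5}, {"n": 0, "w": 0.0}]
best = uct_select(children)          # picks the unvisited child
</pre> <p class='mathjax'>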
</p> </div> </dd> <dt> <a name='item94'>[94]</a> <a href ="/abs/2504.05517" title="Abstract" id="2504.05517"> arXiv:2504.05517 </a> (cross-list from cs.GR) [<a href="/pdf/2504.05517" title="Download PDF" id="pdf-2504.05517" aria-labelledby="pdf-2504.05517">pdf</a>, <a href="https://arxiv.org/html/2504.05517v1" title="View HTML" id="html-2504.05517" aria-labelledby="html-2504.05517" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05517" title="Other formats" id="oth-2504.05517" aria-labelledby="oth-2504.05517">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> L3GS: Layered 3D Gaussian Splats for Efficient 3D Scene Delivery </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Tsai,+Y">Yi-Zhen Tsai</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+X">Xuechen Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+Z">Zheng Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+J">Jiasi Chen</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Graphics (cs.GR)</span>; Machine Learning (cs.LG); Multimedia (cs.MM) </div> <p class='mathjax'> Traditional 3D content representations include dense point clouds that consume large amounts of data and hence network bandwidth, while newer representations such as neural radiance fields suffer from poor frame rates due to their non-standard volumetric rendering pipeline. 3D Gaussian splats (3DGS) can be seen as a generalization of point clouds that meets the best of both worlds, with high visual quality and efficient rendering for real-time frame rates. However, delivering 3DGS scenes from a hosting server to client devices is still challenging due to high network data consumption (e.g., 1.5 GB for a single scene). The goal of this work is to create an efficient 3D content delivery framework that allows users to view high quality 3D scenes with 3DGS as the underlying data representation. The main contributions of the paper are: (1) Creating new layered 3DGS scenes for efficient delivery, (2) Scheduling algorithms to choose what splats to download at what time, and (3) Trace-driven experiments from users wearing virtual reality headsets to evaluate the visual quality and latency. Our system for Layered 3D Gaussian Splats delivery, L3GS, demonstrates high visual quality, achieving 16.9% higher average SSIM compared to baselines, and also works with other compressed 3DGS representations.
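</p> <p class='mathjax'> One plausible reading of the scheduling contribution, sketched under stated assumptions: greedily download the splat layers with the best visual-utility-per-byte until the bandwidth budget of the current time slot is exhausted. The layer names, sizes, and utilities are invented placeholders, not the paper's algorithm. </p> <pre>
def schedule(layers, budget_bytes):
    """layers: list of (name, size_bytes, utility). Returns download order."""
    ranked = sorted(layers, key=lambda l: l[2] / l[1], reverse=True)
    chosen, used = [], 0
    for name, size, _ in ranked:
        if used + size <= budget_bytes:   # fits in this slot's budget
            chosen.append(name)
            used += size
    return chosen

layers = [("base", 200e6, 0.80), ("detail1", 400e6, 0.10), ("detail2", 900e6, 0.05)]
print(schedule(layers, budget_bytes=650e6))   # -> ['base', 'detail1']
</pre> <p class='mathjax'>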
</p> </div> </dd> <dt> <a name='item95'>[95]</a> <a href ="/abs/2504.05518" title="Abstract" id="2504.05518"> arXiv:2504.05518 </a> (cross-list from cs.SE) [<a href="/pdf/2504.05518" title="Download PDF" id="pdf-2504.05518" aria-labelledby="pdf-2504.05518">pdf</a>, <a href="https://arxiv.org/html/2504.05518v1" title="View HTML" id="html-2504.05518" aria-labelledby="html-2504.05518" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05518" title="Other formats" id="oth-2504.05518" aria-labelledby="oth-2504.05518">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Evaluating the Generalization Capabilities of Large Language Models on Code Reasoning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Yang,+R">Rem Yang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dai,+J">Julian Dai</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Vasilakis,+N">Nikos Vasilakis</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Rinard,+M">Martin Rinard</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Software Engineering (cs.SE)</span>; Computation and Language (cs.CL); Machine Learning (cs.LG) </div> <p class='mathjax'> We assess how the code reasoning abilities of large language models (LLMs) generalize to different kinds of programs. We present techniques for obtaining in- and out-of-distribution programs with different characteristics: code sampled from a domain-specific language, code automatically generated by an LLM, code collected from competitive programming contests, and mutated versions of these programs. We also present an experimental methodology for evaluating LLM generalization by comparing their performance on these programs. We perform an extensive evaluation across 10 state-of-the-art models from the past year, obtaining insights into their generalization capabilities over time and across different classes of programs. Our results highlight that while earlier models exhibit behavior consistent with pattern matching, the latest models exhibit strong generalization abilities on code reasoning. 
</p> </div> </dd> <dt> <a name='item96'>[96]</a> <a href ="/abs/2504.05534" title="Abstract" id="2504.05534"> arXiv:2504.05534 </a> (cross-list from q-bio.NC) [<a href="/pdf/2504.05534" title="Download PDF" id="pdf-2504.05534" aria-labelledby="pdf-2504.05534">pdf</a>, <a href="/format/2504.05534" title="Other formats" id="oth-2504.05534" aria-labelledby="oth-2504.05534">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Riemannian Geometry for the classification of brain states with intracortical brain-computer interfaces </div> <div class='list-authors'><a href="https://arxiv.org/search/q-bio?searchtype=author&query=Marin-Llobet,+A">Arnau Marin-Llobet</a>, <a href="https://arxiv.org/search/q-bio?searchtype=author&query=Manasanch,+A">Arnau Manasanch</a>, <a href="https://arxiv.org/search/q-bio?searchtype=author&query=Sanchez-Manso,+S">Sergio Sanchez-Manso</a>, <a href="https://arxiv.org/search/q-bio?searchtype=author&query=Tresserras,+L">Lluc Tresserras</a>, <a href="https://arxiv.org/search/q-bio?searchtype=author&query=Zhang,+X">Xinhe Zhang</a>, <a href="https://arxiv.org/search/q-bio?searchtype=author&query=Hua,+Y">Yining Hua</a>, <a href="https://arxiv.org/search/q-bio?searchtype=author&query=Zhao,+H">Hao Zhao</a>, <a href="https://arxiv.org/search/q-bio?searchtype=author&query=Torao-Angosto,+M">Melody Torao-Angosto</a>, <a href="https://arxiv.org/search/q-bio?searchtype=author&query=Sanchez-Vives,+M+V">Maria V Sanchez-Vives</a>, <a href="https://arxiv.org/search/q-bio?searchtype=author&query=Porta,+L+D">Leonardo Dalla Porta</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Preprint </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Neurons and Cognition (q-bio.NC)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> This study investigates the application of Riemannian geometry-based methods for brain decoding using invasive electrophysiological recordings. Although previously employed in non-invasive settings, the utility of Riemannian geometry for invasive datasets, which are typically smaller and scarcer, remains less explored. Here, we propose a Minimum Distance to Mean (MDM) classifier using a Riemannian geometry approach based on covariance matrices extracted from intracortical Local Field Potential (LFP) recordings across various regions during different brain state dynamics. For benchmarking, we evaluated the performance of our approach against Convolutional Neural Networks (CNNs) and Euclidean MDM classifiers. Our results indicate that the Riemannian geometry-based classification not only achieves a superior mean F1 macro-averaged score across different channel configurations but also requires up to two orders of magnitude less computational training time. Additionally, the geometric framework reveals distinct spatial contributions of brain regions across varying brain states, suggesting a state-dependent organization that traditional time series-based methods often fail to capture. Our findings align with previous studies supporting the efficacy of geometry-based methods and extending their application to invasive brain recordings, highlighting their potential for broader clinical use, such as brain-computer interface applications.
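</p> <p class='mathjax'> A minimal sketch of a Riemannian minimum-distance-to-mean classifier over covariance matrices, using the affine-invariant distance and a log-Euclidean surrogate for the class means (the Fréchet mean is typically refined iteratively). The brain-state labels and random covariances below are placeholders, not the study's data. </p> <pre>
import numpy as np
from scipy.linalg import expm, inv, logm, sqrtm

def airm_distance(A, B):
    """Affine-invariant Riemannian distance between SPD matrices."""
    s = inv(sqrtm(A))
    return float(np.linalg.norm(logm(s @ B @ s), "fro"))

def log_euclidean_mean(mats):
    """Cheap surrogate for the Fréchet mean (usually refined iteratively)."""
    return expm(np.real(sum(logm(M) for M in mats)) / len(mats))

def mdm_predict(cov, class_means):
    dists = {c: airm_distance(cov, m) for c, m in class_means.items()}
    return min(dists, key=dists.get)

def random_spd(d, rng):
    a = rng.normal(size=(d, d))
    return a @ a.T + d * np.eye(d)   # symmetric positive definite

rng = np.random.default_rng(0)
means = {"wake": log_euclidean_mean([random_spd(4, rng) for _ in range(5)]),
         "sleep": log_euclidean_mean([random_spd(4, rng) for _ in range(5)])}
print(mdm_predict(random_spd(4, rng), means))
</pre> <p class='mathjax'>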
</p> </div> </dd> <dt> <a name='item97'>[97]</a> <a href ="/abs/2504.05563" title="Abstract" id="2504.05563"> arXiv:2504.05563 </a> (cross-list from cs.GT) [<a href="/pdf/2504.05563" title="Download PDF" id="pdf-2504.05563" aria-labelledby="pdf-2504.05563">pdf</a>, <a href="https://arxiv.org/html/2504.05563v1" title="View HTML" id="html-2504.05563" aria-labelledby="html-2504.05563" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05563" title="Other formats" id="oth-2504.05563" aria-labelledby="oth-2504.05563">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> From Fairness to Truthfulness: Rethinking Data Valuation Design </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Fan,+D">Dongyang Fan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Rotello,+T+J">Tyler J. Rotello</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Karimireddy,+S+P">Sai Praneeth Karimireddy</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Science and Game Theory (cs.GT)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> As large language models increasingly rely on external data sources, fairly compensating data contributors has become a central concern. In this paper, we revisit the design of data markets through a game-theoretic lens, where data owners face private, heterogeneous costs for data sharing. We show that commonly used valuation methods--such as Leave-One-Out and Data Shapley--fail to ensure truthful reporting of these costs, leading to inefficient market outcomes. To address this, we adapt well-established payment rules from mechanism design, namely Myerson and Vickrey-Clarke-Groves (VCG), to the data market setting. We demonstrate that the Myerson payment is the minimal truthful payment mechanism, optimal from the buyer's perspective, and that VCG and Myerson payments coincide in unconstrained allocation settings. Our findings highlight the importance of incorporating incentive compatibility into data valuation, paving the way for more robust and efficient data markets. </p> </div> </dd> <dt> <a name='item98'>[98]</a> <a href ="/abs/2504.05565" title="Abstract" id="2504.05565"> arXiv:2504.05565 </a> (cross-list from cond-mat.mtrl-sci) [<a href="/pdf/2504.05565" title="Download PDF" id="pdf-2504.05565" aria-labelledby="pdf-2504.05565">pdf</a>, <a href="https://arxiv.org/html/2504.05565v1" title="View HTML" id="html-2504.05565" aria-labelledby="html-2504.05565" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05565" title="Other formats" id="oth-2504.05565" aria-labelledby="oth-2504.05565">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Cross-functional transferability in universal machine learning interatomic potentials </div> <div class='list-authors'><a href="https://arxiv.org/search/cond-mat?searchtype=author&query=Huang,+X">Xu Huang</a>, <a href="https://arxiv.org/search/cond-mat?searchtype=author&query=Deng,+B">Bowen Deng</a>, <a href="https://arxiv.org/search/cond-mat?searchtype=author&query=Zhong,+P">Peichen Zhong</a>, <a href="https://arxiv.org/search/cond-mat?searchtype=author&query=Kaplan,+A+D">Aaron D. Kaplan</a>, <a href="https://arxiv.org/search/cond-mat?searchtype=author&query=Persson,+K+A">Kristin A. 
Persson</a>, <a href="https://arxiv.org/search/cond-mat?searchtype=author&query=Ceder,+G">Gerbrand Ceder</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Materials Science (cond-mat.mtrl-sci)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> The rapid development of universal machine learning interatomic potentials (uMLIPs) has demonstrated the possibility for generalizable learning of the universal potential energy surface. In principle, the accuracy of uMLIPs can be further improved by bridging the model from lower-fidelity datasets to high-fidelity ones. In this work, we analyze the challenge of this transfer learning problem within the CHGNet framework. We show that significant energy scale shifts and poor correlations between GGA and r$^2$SCAN pose challenges to cross-functional data transferability in uMLIPs. By benchmarking different transfer learning approaches on the MP-r$^2$SCAN dataset of 0.24 million structures, we demonstrate the importance of elemental energy referencing in the transfer learning of uMLIPs. By comparing the scaling law with and without the pre-training on a low-fidelity dataset, we show that significant data efficiency can still be achieved through transfer learning, even with a target dataset of sub-million structures. We highlight the importance of proper transfer learning and multi-fidelity learning in creating next-generation uMLIPs on high-fidelity data. </p> </div> </dd> <dt> <a name='item99'>[99]</a> <a href ="/abs/2504.05575" title="Abstract" id="2504.05575"> arXiv:2504.05575 </a> (cross-list from cs.CV) [<a href="/pdf/2504.05575" title="Download PDF" id="pdf-2504.05575" aria-labelledby="pdf-2504.05575">pdf</a>, <a href="https://arxiv.org/html/2504.05575v1" title="View HTML" id="html-2504.05575" aria-labelledby="html-2504.05575" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05575" title="Other formats" id="oth-2504.05575" aria-labelledby="oth-2504.05575">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Lightweight Large Vision-language Model for Multimodal Medical Images </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Alsinglawi,+B">Belal Alsinglawi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=McCarthy,+C">Chris McCarthy</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Webb,+S">Sara Webb</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Fluke,+C">Christopher Fluke</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Saidy,+N+T">Navid Toosy Saidy</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 10 pages, 4 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Medical Visual Question Answering (VQA) enhances clinical decision-making by enabling systems to interpret medical images and answer clinical queries. However, developing efficient, high-performance VQA models is challenging due to the complexity of medical imagery and diverse modalities. In this paper, we introduce a lightweight, multimodal VQA model integrating BiomedCLIP for image feature extraction and LLaMA-3 for text processing. 
Designed for medical VQA tasks, our model achieves state-of-the-art performance on the OmniMedVQA dataset. With approximately 8 billion parameters, it requires only two NVIDIA 40 GB A100 GPUs, demonstrating superior efficiency over larger models. Our results show 73.4% accuracy for open-ended questions, surpassing existing models and validating its potential for real-world medical applications. Key contributions include a specialized multimodal VQA model, a resource-efficient architecture, and strong performance in answering open-ended clinical questions. </p> </div> </dd> <dt> <a name='item100'>[100]</a> <a href ="/abs/2504.05598" title="Abstract" id="2504.05598"> arXiv:2504.05598 </a> (cross-list from cs.CL) [<a href="/pdf/2504.05598" title="Download PDF" id="pdf-2504.05598" aria-labelledby="pdf-2504.05598">pdf</a>, <a href="https://arxiv.org/html/2504.05598v1" title="View HTML" id="html-2504.05598" aria-labelledby="html-2504.05598" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05598" title="Other formats" id="oth-2504.05598" aria-labelledby="oth-2504.05598">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> DEL: Context-Aware Dynamic Exit Layer for Efficient Self-Speculative Decoding </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zarch,+H+E">Hossein Entezari Zarch</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gao,+L">Lei Gao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jiang,+C">Chaoyi Jiang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Annavaram,+M">Murali Annavaram</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Speculative Decoding (SD) is a widely used approach to accelerate the inference of large language models (LLMs) without reducing generation quality. It operates by first using a compact model to draft multiple tokens efficiently, followed by parallel verification using the target LLM. This approach leads to faster inference compared to auto-regressive decoding. While there are multiple approaches to create a draft model, one promising approach is to use early-exit methods. These methods draft candidate tokens by using a subset of layers of the primary model and applying the remaining layers for verification, allowing a single model to handle both drafting and verification. While this technique reduces memory usage and computational cost, its performance relies on the choice of the exit layer for drafting and the number of tokens drafted (speculation length) in each SD round. Prior works use hyperparameter exploration to statically select these values. However, our evaluations show that these hyperparameter values are task-specific, and even within a task they are dependent on the current sequence context. We introduce DEL, a plug-and-play method that adaptively selects the exit layer and speculation length during inference. DEL dynamically tracks the acceptance rate of tokens drafted at each layer of the LLM and uses that knowledge to heuristically select the optimal exit layer and speculation length.
Our experiments across a broad range of models and downstream tasks show that DEL achieves overall speedups of $2.16\times$$\sim$$2.50\times$ over vanilla auto-regressive decoding and improves upon the state-of-the-art SD methods by up to $0.27\times$. </p> </div> </dd> <dt> <a name='item101'>[101]</a> <a href ="/abs/2504.05603" title="Abstract" id="2504.05603"> arXiv:2504.05603 </a> (cross-list from cs.CL) [<a href="/pdf/2504.05603" title="Download PDF" id="pdf-2504.05603" aria-labelledby="pdf-2504.05603">pdf</a>, <a href="https://arxiv.org/html/2504.05603v1" title="View HTML" id="html-2504.05603" aria-labelledby="html-2504.05603" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05603" title="Other formats" id="oth-2504.05603" aria-labelledby="oth-2504.05603">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> On the Impact of Language Nuances on Sentiment Analysis with Large Language Models: Paraphrasing, Sarcasm, and Emojis </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Bhargava,+N">Naman Bhargava</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Radaideh,+M+I">Mohammed I. Radaideh</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kwon,+O+H">O Hwang Kwon</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Verma,+A">Aditi Verma</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Radaideh,+M+I">Majdi I. Radaideh</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 21 pages, 10 Tables, 5 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Large Language Models (LLMs) have demonstrated impressive performance across various tasks, including sentiment analysis. However, data quality--particularly when sourced from social media--can significantly impact their accuracy. This research explores how textual nuances, including emojis and sarcasm, affect sentiment analysis, with a particular focus on improving data quality through text paraphrasing techniques. To address the lack of labeled sarcasm data, the authors created a human-labeled dataset of 5929 tweets that enabled the assessment of LLMs in various sarcasm contexts. The results show that when topic-specific datasets, such as those related to nuclear power, are used to fine-tune LLMs, these models are unable to identify the correct sentiment in the presence of sarcasm due to the limited diversity of the text, requiring external interventions like sarcasm removal to boost model accuracy. Sarcasm removal led to up to a 21% improvement in sentiment accuracy, as LLMs trained on nuclear power-related content struggled with sarcastic tweets, achieving only 30% accuracy. In contrast, LLMs trained on general tweet datasets, covering a broader range of topics, showed considerable improvements in predicting sentiment for sarcastic tweets (60% accuracy), indicating that incorporating general text data can enhance sarcasm detection. The study also utilized adversarial text augmentation, showing that creating synthetic text variants by making minor changes significantly increased model robustness and accuracy for sarcastic tweets (approximately 85%).
Additionally, text paraphrasing of tweets with fragmented language transformed around 40% of the tweets with low-confidence labels into high-confidence ones, improving LLM sentiment analysis accuracy by 6%. </p> </div> </dd> <dt> <a name='item102'>[102]</a> <a href ="/abs/2504.05632" title="Abstract" id="2504.05632"> arXiv:2504.05632 </a> (cross-list from cs.CL) [<a href="/pdf/2504.05632" title="Download PDF" id="pdf-2504.05632" aria-labelledby="pdf-2504.05632">pdf</a>, <a href="https://arxiv.org/html/2504.05632v1" title="View HTML" id="html-2504.05632" aria-labelledby="html-2504.05632" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05632" title="Other formats" id="oth-2504.05632" aria-labelledby="oth-2504.05632">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Reasoning Towards Fairness: Mitigating Bias in Language Models through Reasoning-Guided Fine-Tuning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Kabra,+S">Sanchit Kabra</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jha,+A">Akshita Jha</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Reddy,+C">Chandan Reddy</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 17 pages </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Recent advances in large-scale generative language models have shown that reasoning capabilities can significantly improve model performance across a variety of tasks. However, the impact of reasoning on a model's ability to mitigate stereotypical responses remains largely underexplored. In this work, we investigate the crucial relationship between a model's reasoning ability and fairness, and ask whether improved reasoning capabilities can mitigate harmful stereotypical responses, especially those arising due to shallow or flawed reasoning. We conduct a comprehensive evaluation of multiple open-source LLMs, and find that larger models with stronger reasoning abilities exhibit substantially lower stereotypical bias on existing fairness benchmarks. Building on this insight, we introduce ReGiFT -- Reasoning Guided Fine-Tuning, a novel approach that extracts structured reasoning traces from advanced reasoning models and infuses them into models that lack such capabilities. We use only general-purpose reasoning and do not require any fairness-specific supervision for bias mitigation. Notably, we see that models fine-tuned using ReGiFT not only improve fairness relative to their non-reasoning counterparts but also outperform advanced reasoning models on fairness benchmarks. We also analyze how variations in the correctness of the reasoning traces and their length influence model fairness and overall performance. Our findings highlight that enhancing reasoning capabilities is an effective, fairness-agnostic strategy for mitigating stereotypical bias caused by reasoning flaws.
</p> </div> </dd> <dt> <a name='item103'>[103]</a> <a href ="/abs/2504.05636" title="Abstract" id="2504.05636"> arXiv:2504.05636 </a> (cross-list from eess.IV) [<a href="/pdf/2504.05636" title="Download PDF" id="pdf-2504.05636" aria-labelledby="pdf-2504.05636">pdf</a>, <a href="https://arxiv.org/html/2504.05636v1" title="View HTML" id="html-2504.05636" aria-labelledby="html-2504.05636" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05636" title="Other formats" id="oth-2504.05636" aria-labelledby="oth-2504.05636">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Multi-Modal AI System for Screening Mammography: Integrating 2D and 3D Imaging to Improve Breast Cancer Detection in a Prospective Clinical Study </div> <div class='list-authors'><a href="https://arxiv.org/search/eess?searchtype=author&query=Park,+J">Jungkyu Park</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Witowski,+J">Jan Witowski</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Xu,+Y">Yanqi Xu</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Trivedi,+H">Hari Trivedi</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Gichoya,+J">Judy Gichoya</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Brown-Mulry,+B">Beatrice Brown-Mulry</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Westerhoff,+M">Malte Westerhoff</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Moy,+L">Linda Moy</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Heacock,+L">Laura Heacock</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Lewin,+A">Alana Lewin</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Geras,+K+J">Krzysztof J. Geras</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Image and Video Processing (eess.IV)</span>; Computer Vision and Pattern Recognition (cs.CV); Machine Learning (cs.LG) </div> <p class='mathjax'> Although digital breast tomosynthesis (DBT) improves diagnostic performance over full-field digital mammography (FFDM), false-positive recalls remain a concern in breast cancer screening. We developed a multi-modal artificial intelligence system integrating FFDM, synthetic mammography, and DBT to provide breast-level predictions and bounding-box localizations of suspicious findings. Our AI system, trained on approximately 500,000 mammography exams, achieved 0.945 AUROC on an internal test set. It demonstrated capacity to reduce recalls by 31.7% and radiologist workload by 43.8% while maintaining 100% sensitivity, underscoring its potential to improve clinical workflows. External validation confirmed strong generalizability, reducing the gap to a perfect AUROC by 35.31%-69.14% relative to strong baselines. In prospective deployment across 18 sites, the system reduced recall rates for low-risk cases. An improved version, trained on over 750,000 exams with additional labels, further reduced the gap by 18.86%-56.62% across large external datasets. Overall, these results underscore the importance of utilizing all available imaging modalities, demonstrate the potential for clinical impact, and indicate the feasibility of further reducing the test error with larger training sets when using large-capacity neural networks.
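For intuition, the following hedged Python sketch (not the authors' pipeline; the toy scores and the per-negative normalization are assumptions) shows one way a recall-reduction figure at 100% sensitivity can be computed: keep the operating threshold at the lowest-scoring cancer case and count the negatives that fall below it:
<pre><code>
# Hedged sketch: recall reduction at 100% sensitivity on toy data.
import numpy as np

def recall_reduction_at_full_sensitivity(scores, labels):
    scores, labels = np.asarray(scores), np.asarray(labels)
    thr = scores[labels == 1].min()          # keep every positive above threshold
    recalled = scores >= thr                 # exams the system would still flag
    assert recalled[labels == 1].all()       # 100% sensitivity by construction
    avoided = (~recalled) & (labels == 0)    # negatives safely not recalled
    return avoided.sum() / (labels == 0).sum()

rng = np.random.default_rng(0)
labels = rng.integers(0, 2, 1000)
scores = rng.random(1000) * 0.7 + 0.3 * labels   # toy, imperfectly separated
print(f"recall reduction: {recall_reduction_at_full_sensitivity(scores, labels):.1%}")
</code></pre>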
</p> </div> </dd> <dt> <a name='item104'>[104]</a> <a href ="/abs/2504.05643" title="Abstract" id="2504.05643"> arXiv:2504.05643 </a> (cross-list from stat.ML) [<a href="/pdf/2504.05643" title="Download PDF" id="pdf-2504.05643" aria-labelledby="pdf-2504.05643">pdf</a>, <a href="https://arxiv.org/html/2504.05643v1" title="View HTML" id="html-2504.05643" aria-labelledby="html-2504.05643" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05643" title="Other formats" id="oth-2504.05643" aria-labelledby="oth-2504.05643">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Improved Inference of Inverse Ising Problems under Missing Observations in Restricted Boltzmann Machines </div> <div class='list-authors'><a href="https://arxiv.org/search/stat?searchtype=author&query=Sekimoto,+K">Kaiji Sekimoto</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Yasuda,+M">Muneki Yasuda</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (stat.ML)</span>; Disordered Systems and Neural Networks (cond-mat.dis-nn); Machine Learning (cs.LG); Data Analysis, Statistics and Probability (physics.data-an) </div> <p class='mathjax'> Restricted Boltzmann machines (RBMs) are energy-based models analogous to the Ising model and are widely applied in statistical machine learning. The standard inverse Ising problem with a complete dataset requires computing both data and model expectations and is computationally challenging because model expectations have a combinatorial explosion. Furthermore, in many applications, the available datasets are partially incomplete, making it difficult to compute even data expectations. In this study, we propose an approximation framework for these expectations in practical inverse Ising problems that integrates mean-field approximation or persistent contrastive divergence to generate refined initial points and spatial Monte Carlo integration to enhance estimator accuracy. We demonstrate that the proposed method effectively and accurately tunes the model parameters in comparison to the conventional method.
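A minimal NumPy sketch of a persistent contrastive divergence (PCD) update for a binary RBM may help make the data and model expectations concrete; biases are omitted for brevity, and the paper's mean-field initialization and spatial Monte Carlo refinements are not reproduced:
<pre><code>
# Minimal PCD step for a binary RBM (sketch; biases omitted).
import numpy as np

rng = np.random.default_rng(0)
n_vis, n_hid, n_chains, lr = 20, 10, 64, 0.05
W = 0.01 * rng.standard_normal((n_vis, n_hid))
persistent_v = rng.integers(0, 2, (n_chains, n_vis)).astype(float)

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def pcd_step(v_data, v_pers):
    # Data expectation E_data[v h^T], with h inferred from the clamped data.
    h_data = sigmoid(v_data @ W)
    pos = v_data.T @ h_data / len(v_data)
    # Model expectation from one Gibbs sweep on persistent fantasy chains.
    h = (rng.random((n_chains, n_hid)) < sigmoid(v_pers @ W)).astype(float)
    v_pers = (rng.random((n_chains, n_vis)) < sigmoid(h @ W.T)).astype(float)
    neg = v_pers.T @ sigmoid(v_pers @ W) / n_chains
    return pos - neg, v_pers

data = rng.integers(0, 2, (32, n_vis)).astype(float)
grad, persistent_v = pcd_step(data, persistent_v)
W += lr * grad   # gradient ascent on the log-likelihood
</code></pre>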
</p> </div> </dd> <dt> <a name='item105'>[105]</a> <a href ="/abs/2504.05654" title="Abstract" id="2504.05654"> arXiv:2504.05654 </a> (cross-list from cs.IT) [<a href="/pdf/2504.05654" title="Download PDF" id="pdf-2504.05654" aria-labelledby="pdf-2504.05654">pdf</a>, <a href="https://arxiv.org/html/2504.05654v1" title="View HTML" id="html-2504.05654" aria-labelledby="html-2504.05654" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05654" title="Other formats" id="oth-2504.05654" aria-labelledby="oth-2504.05654">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Curved representational Bregman divergences and their applications </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Nielsen,+F">Frank Nielsen</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 9 pages </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Information Theory (cs.IT)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> By analogy to curved exponential families, we define curved Bregman divergences as restrictions of Bregman divergences to sub-dimensional parameter subspaces, and prove that the barycenter of a finite weighted parameter set with respect to a curved Bregman divergence amounts to the Bregman projection onto the subspace induced by the constraint of the barycenter with respect to the unconstrained full Bregman divergence. We demonstrate the significance of curved Bregman divergences with two examples: (1) symmetrized Bregman divergences and (2) the Kullback-Leibler divergence between circular complex normal distributions. We then consider monotonic embeddings to define representational curved Bregman divergences and show that the $\alpha$-divergences are representational curved Bregman divergences with respect to $\alpha$-embeddings of the probability simplex into the positive measure cone. As an application, we report an efficient method to calculate the intersection of a finite set of $\alpha$-divergence spheres. 
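A small worked example may help: with the negative-entropy generator $F(p)=\sum_i p_i\log p_i$ on the probability simplex, the Bregman divergence $B_F(\theta:\theta') = F(\theta)-F(\theta')-\langle\theta-\theta',\nabla F(\theta')\rangle$ reduces to the Kullback-Leibler divergence, which the following NumPy snippet verifies numerically:
<pre><code>
# Bregman divergence with the negative-entropy generator equals KL
# on the simplex; a numerical check of the textbook identity.
import numpy as np

def bregman(F, gradF, theta, theta2):
    # B_F(theta : theta2) = F(theta) - F(theta2) - <theta - theta2, grad F(theta2)>
    return F(theta) - F(theta2) - (theta - theta2) @ gradF(theta2)

F = lambda p: np.sum(p * np.log(p))      # negative entropy
gradF = lambda p: np.log(p) + 1.0

p = np.array([0.2, 0.3, 0.5])
q = np.array([0.1, 0.6, 0.3])
kl = np.sum(p * np.log(p / q))
print(bregman(F, gradF, p, q), kl)       # the two values agree
</code></pre>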
</p> </div> </dd> <dt> <a name='item106'>[106]</a> <a href ="/abs/2504.05686" title="Abstract" id="2504.05686"> arXiv:2504.05686 </a> (cross-list from cs.SD) [<a href="/pdf/2504.05686" title="Download PDF" id="pdf-2504.05686" aria-labelledby="pdf-2504.05686">pdf</a>, <a href="https://arxiv.org/html/2504.05686v1" title="View HTML" id="html-2504.05686" aria-labelledby="html-2504.05686" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05686" title="Other formats" id="oth-2504.05686" aria-labelledby="oth-2504.05686">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> kNN-SVC: Robust Zero-Shot Singing Voice Conversion with Additive Synthesis and Concatenation Smoothness Optimization </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Shao,+K">Keren Shao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+K">Ke Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Baas,+M">Matthew Baas</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dubnov,+S">Shlomo Dubnov</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 5 pages, 6 figures, 1 table, Proceedings of the International Conference on Acoustics, Speech, and Signal Processing, ICASSP 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Sound (cs.SD)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG); Multimedia (cs.MM) </div> <p class='mathjax'> Robustness is critical in zero-shot singing voice conversion (SVC). This paper introduces two novel methods to strengthen the robustness of the kNN-VC framework for SVC. First, kNN-VC's core representation, WavLM, lacks harmonic emphasis, resulting in dull sounds and ringing artifacts. To address this, we leverage the bijection between WavLM, pitch contours, and spectrograms to perform additive synthesis, integrating the resulting waveform into the model to mitigate these issues. Second, kNN-VC overlooks concatenative smoothness, a key perceptual factor in SVC. To enhance smoothness, we propose a new distance metric that filters out unsuitable kNN candidates and optimize the summing weights of the candidates during inference. Although our techniques are built on the kNN-VC framework for implementation convenience, they are broadly applicable to general concatenative neural synthesis models. Experimental results validate the effectiveness of these modifications in achieving robust SVC. 
Demo: <a href="http://knnsvc.com" rel="external noopener nofollow" class="link-external link-http">this http URL</a> Code: <a href="https://github.com/SmoothKen/knn-svc" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </p> </div> </dd> <dt> <a name='item107'>[107]</a> <a href ="/abs/2504.05696" title="Abstract" id="2504.05696"> arXiv:2504.05696 </a> (cross-list from eess.IV) [<a href="/pdf/2504.05696" title="Download PDF" id="pdf-2504.05696" aria-labelledby="pdf-2504.05696">pdf</a>, <a href="/format/2504.05696" title="Other formats" id="oth-2504.05696" aria-labelledby="oth-2504.05696">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Diabetic Retinopathy Detection Based on Convolutional Neural Networks with SMOTE and CLAHE Techniques Applied to Fundus Images </div> <div class='list-authors'><a href="https://arxiv.org/search/eess?searchtype=author&query=Mardianta,+S">Sidhiq Mardianta</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Affandy">Affandy</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Supriyanto,+C">Catur Supriyanto</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Wijaya,+A">Adi Wijaya</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 6 pages, 6 figures, 2 tables </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Image and Video Processing (eess.IV)</span>; Computer Vision and Pattern Recognition (cs.CV); Machine Learning (cs.LG); Neurons and Cognition (q-bio.NC) </div> <p class='mathjax'> Diabetic retinopathy (DR) is one of the major complications in diabetic patients' eyes, potentially leading to permanent blindness if not detected in a timely manner. This study aims to evaluate the accuracy of artificial intelligence (AI) in diagnosing DR. The method employed is the Synthetic Minority Over-sampling Technique (SMOTE) algorithm, applied to identify DR and its severity stages from fundus images using the public dataset "APTOS 2019 Blindness Detection." Literature was reviewed via ScienceDirect, ResearchGate, Google Scholar, and IEEE Xplore. Classification results using Convolutional Neural Network (CNN) showed the best performance for the binary classes normal (0) and DR (1) with an accuracy of 99.55%, precision of 99.54%, recall of 99.54%, and F1-score of 99.54%. For the multiclass classification No_DR (0), Mild (1), Moderate (2), Severe (3), Proliferate_DR (4), the accuracy was 95.26%, precision 95.26%, recall 95.17%, and F1-score 95.23%. Evaluation using the confusion matrix yielded results of 99.68% for binary classification and 96.65% for multiclass.
This study highlights AI's significant potential to enhance the accuracy of DR diagnosis compared to traditional human analysis. </p> </div> </dd> <dt> <a name='item108'>[108]</a> <a href ="/abs/2504.05711" title="Abstract" id="2504.05711"> arXiv:2504.05711 </a> (cross-list from cs.AI) [<a href="/pdf/2504.05711" title="Download PDF" id="pdf-2504.05711" aria-labelledby="pdf-2504.05711">pdf</a>, <a href="/format/2504.05711" title="Other formats" id="oth-2504.05711" aria-labelledby="oth-2504.05711">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Automated Archival Descriptions with Federated Intelligence of LLMs </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Groppe,+J">Jinghua Groppe</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Marquet,+A">Andreas Marquet</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Walz,+A">Annabel Walz</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Groppe,+S">Sven Groppe</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 15 pages </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Artificial Intelligence (cs.AI)</span>; Digital Libraries (cs.DL); Information Retrieval (cs.IR); Machine Learning (cs.LG) </div> <p class='mathjax'> Enforcing archival standards requires specialized expertise, and manually creating metadata descriptions for archival materials is a tedious and error-prone task. This work aims at exploring the potential of agentic AI and large language models (LLMs) in addressing the challenges of implementing a standardized archival description process. To this end, we introduce an agentic AI-driven system for automated generation of high-quality metadata descriptions of archival materials. We develop a federated optimization approach that unites the intelligence of multiple LLMs to construct optimal archival metadata. We also suggest methods to overcome the challenges associated with using LLMs for consistent metadata generation. To evaluate the feasibility and effectiveness of our techniques, we conducted extensive experiments using a real-world dataset of archival materials, which covers a variety of document types and data formats. The evaluation results demonstrate the feasibility of our techniques and highlight the superior performance of the federated optimization approach compared to single-model solutions in metadata quality and reliability.
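To make the idea of uniting several models' outputs concrete, here is a deliberately simple Python stand-in (per-field majority voting; the paper's federated optimization is more sophisticated, and the field names below are invented):
<pre><code>
# Illustrative stand-in only: merge metadata proposals from several
# LLMs by per-field majority vote. Not the paper's actual method.
from collections import Counter

def consensus_metadata(candidates: list[dict]) -> dict:
    """candidates: one proposed metadata dict per model."""
    fields = {k for c in candidates for k in c}
    merged = {}
    for field in fields:
        votes = Counter(c[field] for c in candidates if field in c)
        merged[field] = votes.most_common(1)[0][0]   # most frequent value wins
    return merged

print(consensus_metadata([
    {"title": "Letter, 1912", "language": "de"},
    {"title": "Letter, 1912", "language": "German"},
    {"title": "Letter (1912)", "language": "de"},
]))  # -> {'title': 'Letter, 1912', 'language': 'de'}
</code></pre>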
</p> </div> </dd> <dt> <a name='item109'>[109]</a> <a href ="/abs/2504.05728" title="Abstract" id="2504.05728"> arXiv:2504.05728 </a> (cross-list from cs.AI) [<a href="/pdf/2504.05728" title="Download PDF" id="pdf-2504.05728" aria-labelledby="pdf-2504.05728">pdf</a>, <a href="/format/2504.05728" title="Other formats" id="oth-2504.05728" aria-labelledby="oth-2504.05728">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> AI-Driven Prognostics for State of Health Prediction in Li-ion Batteries: A Comprehensive Analysis with Validation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Ding,+T">Tianqi Ding</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xiang,+D">Dawei Xiang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sun,+T">Tianyao Sun</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Qi,+Y">YiJiashum Qi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhao,+Z">Zunduo Zhao</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 8 pages, 12 figures, Accepted by 2025 6th International Conference on Electrical Technology and Automatic Control (ICETAC 2025) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Artificial Intelligence (cs.AI)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> This paper presents a comprehensive review of AI-driven prognostics for State of Health (SoH) prediction in lithium-ion batteries. We compare the effectiveness of various AI algorithms, including FFNN, LSTM, and BiLSTM, across multiple datasets (CALCE, NASA, UDDS) and scenarios (e.g., varying temperatures and driving conditions). Additionally, we analyze the factors influencing SoH fluctuations, such as temperature and charge-discharge rates, and validate our findings through simulations. The results demonstrate that BiLSTM achieves the highest accuracy, with an average RMSE reduction of 15% compared to LSTM, highlighting its robustness in real-world applications.
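A hedged PyTorch sketch of a BiLSTM state-of-health regressor of the kind such reviews compare (layer sizes and input features are illustrative assumptions, not the paper's exact configuration):
<pre><code>
# Sketch of a bidirectional LSTM regressor for SoH prediction.
import torch
import torch.nn as nn

class BiLSTMSoH(nn.Module):
    def __init__(self, n_features=4, hidden=64):
        super().__init__()
        self.lstm = nn.LSTM(n_features, hidden, batch_first=True,
                            bidirectional=True)
        self.head = nn.Linear(2 * hidden, 1)   # 2x: forward + backward states

    def forward(self, x):                      # x: (batch, time, features)
        out, _ = self.lstm(x)
        return self.head(out[:, -1])           # SoH predicted from last step

model = BiLSTMSoH()
cycles = torch.randn(8, 50, 4)   # e.g. voltage, current, temperature, capacity
print(model(cycles).shape)       # torch.Size([8, 1])
</code></pre>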
</p> </div> </dd> <dt> <a name='item110'>[110]</a> <a href ="/abs/2504.05800" title="Abstract" id="2504.05800"> arXiv:2504.05800 </a> (cross-list from cs.CV) [<a href="/pdf/2504.05800" title="Download PDF" id="pdf-2504.05800" aria-labelledby="pdf-2504.05800">pdf</a>, <a href="https://arxiv.org/html/2504.05800v1" title="View HTML" id="html-2504.05800" aria-labelledby="html-2504.05800" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05800" title="Other formats" id="oth-2504.05800" aria-labelledby="oth-2504.05800">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Storybooth: Training-free Multi-Subject Consistency for Improved Visual Storytelling </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Singh,+J">Jaskirat Singh</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+J+K">Junshen Kevin Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kohler,+J">Jonas Kohler</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Cohen,+M">Michael Cohen</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Machine Learning (cs.LG); Multimedia (cs.MM) </div> <p class='mathjax'> Training-free consistent text-to-image generation depicting the same subjects across different images is a topic of widespread recent interest. Existing works in this direction predominantly rely on cross-frame self-attention, which improves subject-consistency by allowing tokens in each frame to pay attention to tokens in other frames during self-attention computation. While useful for single subjects, we find that it struggles when scaling to multiple characters. In this work, we first analyze the reason for these limitations. Our exploration reveals that the primary issue stems from self-attention leakage, which is exacerbated when trying to ensure consistency across multiple characters. This happens when tokens from one subject pay attention to other characters, causing them to appear like each other (e.g., a dog appearing like a duck). Motivated by these findings, we propose StoryBooth: a training-free approach for improving multi-character consistency. In particular, we first leverage multi-modal chain-of-thought reasoning and region-based generation to localize the different subjects a priori across the desired story outputs. The final outputs are then generated using a modified diffusion model which consists of two novel layers: 1) a bounded cross-frame self-attention layer for reducing inter-character attention leakage, and 2) a token-merging layer for improving consistency of fine-grained subject details. Through both qualitative and quantitative results we find that the proposed approach surpasses prior state-of-the-art, exhibiting improved consistency across both multiple characters and fine-grained subject details.
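The bounded cross-frame attention idea can be sketched as an attention mask that only permits same-subject token pairs across frames; the construction below is an illustrative assumption, with subject assignments presumed to come from the region-based localization step:
<pre><code>
# Sketch: a cross-frame attention mask that blocks inter-character
# leakage by allowing attention only between tokens of the same subject.
import torch

def bounded_cross_frame_mask(subject_ids: torch.Tensor) -> torch.Tensor:
    """subject_ids: (n_frames, n_tokens) integer subject label per token.
    Returns a boolean (N, N) mask over the flattened token sequence,
    where True marks allowed attention."""
    flat = subject_ids.reshape(-1)
    return flat[:, None] == flat[None, :]   # same-subject pairs only

ids = torch.tensor([[0, 0, 1, 1],           # frame 1: tokens of subjects 0 and 1
                    [0, 1, 1, 2]])          # frame 2 adds a third subject
mask = bounded_cross_frame_mask(ids)
print(mask.shape, mask[0, 4].item())  # token 0 (subj 0) may attend frame-2 subj-0 token
</code></pre>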
</p> </div> </dd> <dt> <a name='item111'>[111]</a> <a href ="/abs/2504.05846" title="Abstract" id="2504.05846"> arXiv:2504.05846 </a> (cross-list from cs.IR) [<a href="/pdf/2504.05846" title="Download PDF" id="pdf-2504.05846" aria-labelledby="pdf-2504.05846">pdf</a>, <a href="https://arxiv.org/html/2504.05846v1" title="View HTML" id="html-2504.05846" aria-labelledby="html-2504.05846" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05846" title="Other formats" id="oth-2504.05846" aria-labelledby="oth-2504.05846">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> PathGPT: Leveraging Large Language Models for Personalized Route Generation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Marcelyn,+S+C">Steeve Cuthbert Marcelyn</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gao,+Y">Yucen Gao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+Y">Yuzhe Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gao,+X">Xiaofeng Gao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+G">Guihai Chen</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Information Retrieval (cs.IR)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> The proliferation of GPS-enabled devices has led to the accumulation of a substantial corpus of historical trajectory data. By leveraging these data for training machine learning models, researchers have devised novel data-driven methodologies that address the personalized route recommendation (PRR) problem. In contrast to conventional algorithms such as Dijkstra's shortest-path algorithm, these novel algorithms possess the capacity to discern and learn patterns within the data, thereby facilitating the generation of more personalized paths. However, once these models have been trained, their application is constrained to the generation of routes that align with their training patterns. This limitation renders them less adaptable to novel scenarios, and the deployment of multiple machine learning models might be necessary to address new possible scenarios, which can be costly as each model must be trained separately. Inspired by recent advances in the field of Large Language Models (LLMs), we leveraged their natural language understanding capabilities to develop a unified model to solve the PRR problem while being seamlessly adaptable to new scenarios without additional training. To accomplish this, we combined the extensive knowledge LLMs acquired during training with further access to external hand-crafted context information, similar to RAG (Retrieval-Augmented Generation) systems, to enhance their ability to generate paths according to user-defined requirements. Extensive experiments on different datasets show a considerable uplift in LLM performance on the PRR problem.
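A hedged sketch of the retrieval-augmented prompting pattern described above; the function and field names are illustrative assumptions rather than PathGPT's actual interface:
<pre><code>
# Illustrative RAG-style prompt construction for route recommendation:
# retrieved historical trajectories serve as hand-crafted context.

def build_prr_prompt(origin, destination, requirement, retrieved_paths):
    context = "\n".join(
        f"- historical path: {' -> '.join(p)}" for p in retrieved_paths
    )
    return (
        f"Historical trajectories between nearby locations:\n{context}\n\n"
        f"Recommend a route from {origin} to {destination} "
        f"that satisfies: {requirement}. Answer as a node sequence."
    )

print(build_prr_prompt(
    "A", "F", "fastest route",
    retrieved_paths=[["A", "B", "D", "F"], ["A", "C", "E", "F"]],
))
</code></pre>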
</p> </div> </dd> <dt> <a name='item112'>[112]</a> <a href ="/abs/2504.05881" title="Abstract" id="2504.05881"> arXiv:2504.05881 </a> (cross-list from stat.ML) [<a href="/pdf/2504.05881" title="Download PDF" id="pdf-2504.05881" aria-labelledby="pdf-2504.05881">pdf</a>, <a href="https://arxiv.org/html/2504.05881v1" title="View HTML" id="html-2504.05881" aria-labelledby="html-2504.05881" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05881" title="Other formats" id="oth-2504.05881" aria-labelledby="oth-2504.05881">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Actuarial Learning for Pension Fund Mortality Forecasting </div> <div class='list-authors'><a href="https://arxiv.org/search/stat?searchtype=author&query=de+Melo,+E+F+L">Eduardo Fraga L. de Melo</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Graziadei,+H">Helton Graziadei</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Targino,+R">Rodrigo Targino</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 27 pages, 12 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (stat.ML)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> For the assessment of the financial soundness of a pension fund, it is necessary to take into account mortality forecasting so that longevity risk is consistently incorporated into future cash flows. In this article, we employ machine learning models applied to actuarial science ({\it actuarial learning}) to make mortality predictions for a relevant sample of pension funds' participants. Actuarial learning represents an emerging field that involves the application of machine learning (ML) and artificial intelligence (AI) techniques in actuarial science. This encompasses the use of algorithms and computational models to analyze large sets of actuarial data, such as regression trees, random forest, boosting, XGBoost, CatBoost, and neural networks (e.g., FNN, LSTM, and MHA). Our results indicate that some ML/AI algorithms present competitive out-of-sample performance when compared to the classical Lee-Carter model. This may indicate interesting alternatives for consistent liability evaluation and effective pension fund risk management. </p> </div> </dd> <dt> <a name='item113'>[113]</a> <a href ="/abs/2504.05891" title="Abstract" id="2504.05891"> arXiv:2504.05891 </a> (cross-list from cs.GT) [<a href="/pdf/2504.05891" title="Download PDF" id="pdf-2504.05891" aria-labelledby="pdf-2504.05891">pdf</a>, <a href="https://arxiv.org/html/2504.05891v1" title="View HTML" id="html-2504.05891" aria-labelledby="html-2504.05891" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05891" title="Other formats" id="oth-2504.05891" aria-labelledby="oth-2504.05891">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> To Give or Not to Give?
The Impacts of Strategically Withheld Recourse </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+Y">Yatong Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Estornell,+A">Andrew Estornell</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Vorobeychik,+Y">Yevgeniy Vorobeychik</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+Y">Yang Liu</a></div> <div class='list-journal-ref'><span class='descriptor'>Journal-ref:</span> Artificial Intelligence and Statistics (AISTATS 2025) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Science and Game Theory (cs.GT)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Individuals often aim to reverse undesired outcomes in interactions with automated systems, like loan denials, by either implementing system-recommended actions (recourse), or manipulating their features. While providing recourse benefits users and enhances system utility, it also provides information about the decision process that can be used for more effective strategic manipulation, especially when the individuals collectively share such information with each other. <br>We show that this tension leads rational utility-maximizing systems to frequently withhold recourse, resulting in decreased population utility, particularly impacting sensitive groups. <br>To mitigate these effects, we explore the role of recourse subsidies, finding them effective in increasing the provision of recourse actions by rational systems, as well as lowering the potential social cost and mitigating unfairness caused by recourse withholding. </p> </div> </dd> <dt> <a name='item114'>[114]</a> <a href ="/abs/2504.05904" title="Abstract" id="2504.05904"> arXiv:2504.05904 </a> (cross-list from cs.CV) [<a href="/pdf/2504.05904" title="Download PDF" id="pdf-2504.05904" aria-labelledby="pdf-2504.05904">pdf</a>, <a href="https://arxiv.org/html/2504.05904v1" title="View HTML" id="html-2504.05904" aria-labelledby="html-2504.05904" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05904" title="Other formats" id="oth-2504.05904" aria-labelledby="oth-2504.05904">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Intrinsic Saliency Guided Trunk-Collateral Network for Unsupervised Video Object Segmentation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zheng,+X">Xiangyu Zheng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+W">Wanyun Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=He,+S">Songcheng He</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+X">Xiaoqiang Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+W">We Zhang</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Recent unsupervised video object segmentation (UVOS) methods predominantly adopt the motion-appearance paradigm. Mainstream motion-appearance approaches use either the two-encoder structure to separately encode motion and appearance features, or the single-encoder structure for joint encoding. However, these methods fail to properly balance the motion-appearance relationship. 
Consequently, even with complex fusion modules for motion-appearance integration, the extracted suboptimal features degrade the models' overall performance. Moreover, the quality of optical flow varies across scenarios, making it insufficient to rely solely on optical flow to achieve high-quality segmentation results. To address these challenges, we propose the Intrinsic Saliency guided Trunk-Collateral Network (ISTC-Net), which better balances the motion-appearance relationship and incorporates the model's intrinsic saliency information to enhance segmentation performance. Specifically, considering that optical flow maps are derived from RGB images, they share both commonalities and differences. We propose a novel Trunk-Collateral structure. The shared trunk backbone captures the motion-appearance commonality, while the collateral branch learns the uniqueness of motion features. Furthermore, an Intrinsic Saliency guided Refinement Module (ISRM) is devised to efficiently leverage the model's intrinsic saliency information to refine high-level features, and provide pixel-level guidance for motion-appearance fusion, thereby enhancing performance without additional input. Experimental results show that ISTC-Net achieved state-of-the-art performance on three UVOS datasets (89.2% J&F on DAVIS-16, 76% J on YouTube-Objects, 86.4% J on FBMS) and four standard video salient object detection (VSOD) benchmarks with notable gains, demonstrating its effectiveness and superiority over previous methods. </p> </div> </dd> <dt> <a name='item115'>[115]</a> <a href ="/abs/2504.05908" title="Abstract" id="2504.05908"> arXiv:2504.05908 </a> (cross-list from cs.CV) [<a href="/pdf/2504.05908" title="Download PDF" id="pdf-2504.05908" aria-labelledby="pdf-2504.05908">pdf</a>, <a href="https://arxiv.org/html/2504.05908v1" title="View HTML" id="html-2504.05908" aria-labelledby="html-2504.05908" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05908" title="Other formats" id="oth-2504.05908" aria-labelledby="oth-2504.05908">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> PRIMEDrive-CoT: A Precognitive Chain-of-Thought Framework for Uncertainty-Aware Object Interaction in Driving Scene Scenario </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Mandalika,+S">Sriram Mandalika</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=V,+L">Lalitha V</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Nambiar,+A">Athira Nambiar</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted at The IEEE/CVF Conference on Computer Vision and Pattern Recognition 2025 - CVPRW </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Driving scene understanding is a critical real-world problem that involves interpreting and associating various elements of a driving environment, such as vehicles, pedestrians, and traffic signals. Despite advancements in autonomous driving, traditional pipelines rely on deterministic models that fail to capture the probabilistic nature and inherent uncertainty of real-world driving.
To address this, we propose PRIMEDrive-CoT, a novel uncertainty-aware model for object interaction and Chain-of-Thought (CoT) reasoning in driving scenarios. In particular, our approach combines LiDAR-based 3D object detection with multi-view RGB references to ensure interpretable and reliable scene understanding. Uncertainty and risk assessment, along with object interactions, are modelled using Bayesian Graph Neural Networks (BGNNs) for probabilistic reasoning under ambiguous conditions. Interpretable decisions are facilitated through CoT reasoning, leveraging object dynamics and contextual cues, while Grad-CAM visualizations highlight attention regions. Extensive evaluations on the DriveCoT dataset demonstrate that PRIMEDrive-CoT outperforms state-of-the-art CoT and risk-aware models. </p> </div> </dd> <dt> <a name='item116'>[116]</a> <a href ="/abs/2504.05918" title="Abstract" id="2504.05918"> arXiv:2504.05918 </a> (cross-list from cs.RO) [<a href="/pdf/2504.05918" title="Download PDF" id="pdf-2504.05918" aria-labelledby="pdf-2504.05918">pdf</a>, <a href="https://arxiv.org/html/2504.05918v1" title="View HTML" id="html-2504.05918" aria-labelledby="html-2504.05918" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05918" title="Other formats" id="oth-2504.05918" aria-labelledby="oth-2504.05918">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Deep RL-based Autonomous Navigation of Micro Aerial Vehicles (MAVs) in a complex GPS-denied Indoor Environment </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Singh,+A+K">Amit Kumar Singh</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Duba,+P+K">Prasanth Kumar Duba</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Rajalakshmi,+P">P. Rajalakshmi</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Robotics (cs.RO)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> The autonomy of Unmanned Aerial Vehicles (UAVs) in indoor environments poses significant challenges due to the lack of reliable GPS signals in enclosed spaces such as warehouses, factories, and indoor facilities. Micro Aerial Vehicles (MAVs) are preferred for navigating in these complex, GPS-denied scenarios because of their agility, low power consumption, and limited computational capabilities. In this paper, we propose a Reinforcement Learning based Deep-Proximal Policy Optimization (D-PPO) algorithm to enhance real-time navigation by improving computational efficiency. The end-to-end network is trained in 3D realistic meta-environments created using the Unreal Engine. With these trained meta-weights, the MAV system underwent extensive experimental trials in real-world indoor environments. The results indicate that the proposed method reduces computational latency by 91\% during the training period without significant degradation in performance. The algorithm was tested on a DJI Tello drone, yielding similar results.
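For reference, the clipped surrogate objective at the core of PPO, and hence of a D-PPO-style variant, can be written in a few lines of PyTorch; the paper's specific efficiency modifications are not reproduced here:
<pre><code>
# The standard PPO clipped surrogate loss (sketch for reference).
import torch

def ppo_clip_loss(logp_new, logp_old, advantages, eps=0.2):
    ratio = torch.exp(logp_new - logp_old)          # pi_new / pi_old
    unclipped = ratio * advantages
    clipped = torch.clamp(ratio, 1 - eps, 1 + eps) * advantages
    return -torch.min(unclipped, clipped).mean()    # maximize the surrogate

logp_old = torch.log(torch.tensor([0.30, 0.50]))
logp_new = torch.log(torch.tensor([0.45, 0.40]))
adv = torch.tensor([1.0, -0.5])
print(ppo_clip_loss(logp_new, logp_old, adv))
</code></pre>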
</p> </div> </dd> <dt> <a name='item117'>[117]</a> <a href ="/abs/2504.05954" title="Abstract" id="2504.05954"> arXiv:2504.05954 </a> (cross-list from cs.CL) [<a href="/pdf/2504.05954" title="Download PDF" id="pdf-2504.05954" aria-labelledby="pdf-2504.05954">pdf</a>, <a href="https://arxiv.org/html/2504.05954v1" title="View HTML" id="html-2504.05954" aria-labelledby="html-2504.05954" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05954" title="Other formats" id="oth-2504.05954" aria-labelledby="oth-2504.05954">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Unsupervised Location Mapping for Narrative Corpora </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Wagner,+E">Eitan Wagner</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Keydar,+R">Renana Keydar</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Abend,+O">Omri Abend</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> This work presents the task of unsupervised location mapping, which seeks to map the trajectory of an individual narrative on a spatial map of locations in which a large set of narratives take place. Despite the fundamental and general nature of the task, very little work has addressed the spatial mapping of narrative texts. The task consists of two parts: (1) inducing a ``map'' with the locations mentioned in a set of texts, and (2) extracting a trajectory from a single narrative and positioning it on the map. Following recent advances in increasing the context length of large language models, we propose a pipeline for this task in a completely unsupervised manner without predefining the set of labels. We test our method on two different domains: (1) Holocaust testimonies and (2) Lake District writing, namely multi-century literature on travels in the English Lake District. We perform both intrinsic and extrinsic evaluations for the task, with encouraging results, thereby setting a benchmark and evaluation practices for the task, as well as highlighting challenges.
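A minimal sketch of the two-part task under stated assumptions (the embed() stub stands in for a sentence-embedding model, and k-means is one plausible clustering choice, not necessarily the paper's):
<pre><code>
# Sketch: (1) induce a "map" by clustering location mentions pooled
# across narratives; (2) position one narrative's trajectory on it.
import numpy as np
from sklearn.cluster import KMeans

def embed(mentions):
    # Stand-in for a sentence-embedding model: deterministic per string
    # within a run, so corpus and narrative mentions embed consistently.
    return np.stack([
        np.random.default_rng(abs(hash(m)) % 2**32).standard_normal(16)
        for m in mentions
    ])

corpus_mentions = ["the ghetto", "the camp", "the forest", "the station"] * 5
km = KMeans(n_clusters=4, n_init=10, random_state=0).fit(embed(corpus_mentions))

narrative = ["the station", "the ghetto", "the camp"]   # one testimony's mentions
trajectory = km.predict(embed(narrative))               # sequence of map locations
print(trajectory)
</code></pre>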
</p> </div> </dd> <dt> <a name='item118'>[118]</a> <a href ="/abs/2504.05970" title="Abstract" id="2504.05970"> arXiv:2504.05970 </a> (cross-list from cs.CE) [<a href="/pdf/2504.05970" title="Download PDF" id="pdf-2504.05970" aria-labelledby="pdf-2504.05970">pdf</a>, <a href="https://arxiv.org/html/2504.05970v1" title="View HTML" id="html-2504.05970" aria-labelledby="html-2504.05970" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05970" title="Other formats" id="oth-2504.05970" aria-labelledby="oth-2504.05970">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> MLPROP -- an open interactive web interface for thermophysical property prediction with machine learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Hoffmann,+M">Marco Hoffmann</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Specht,+T">Thomas Specht</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hayer,+N">Nicolas Hayer</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hasse,+H">Hans Hasse</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jirasek,+F">Fabian Jirasek</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 6 pages, 2 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computational Engineering, Finance, and Science (cs.CE)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Machine learning (ML) enables the development of powerful methods for predicting thermophysical properties with unprecedented scope and accuracy. However, technical barriers like cumbersome implementation in established workflows hinder their application in practice. With MLPROP, we provide an interactive web interface for directly applying advanced ML methods to predict thermophysical properties without requiring ML expertise, thereby substantially increasing the accessibility of novel models. MLPROP currently includes models for predicting the vapor pressure of pure components (GRAPPA), activity coefficients and vapor-liquid equilibria in binary mixtures (UNIFAC 2.0, mod. UNIFAC 2.0, and HANNA), and a routine to fit NRTL parameters to the model predictions. MLPROP will be continuously updated and extended and is accessible free of charge via <a href="https://ml-prop.mv.rptu.de/" rel="external noopener nofollow" class="link-external link-https">this https URL</a>. MLPROP removes the barrier to learning and experimenting with new ML-based methods for predicting thermophysical properties. The source code of all models is available as open source, which allows integration into existing workflows. 
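As an illustration of the kind of fitting routine mentioned, the following Python sketch fits binary NRTL interaction parameters to activity coefficients that are pretended to come from a model such as HANNA; the target values and the fixed alpha are assumptions:
<pre><code>
# Sketch: least-squares fit of binary NRTL parameters (tau12, tau21,
# fixed alpha) to ML-predicted activity coefficients.
import numpy as np
from scipy.optimize import least_squares

def nrtl_gamma(x1, tau12, tau21, alpha=0.3):
    x2 = 1.0 - x1
    G12, G21 = np.exp(-alpha * tau12), np.exp(-alpha * tau21)
    ln_g1 = x2**2 * (tau21 * (G21 / (x1 + x2 * G21))**2
                     + tau12 * G12 / (x2 + x1 * G12)**2)
    ln_g2 = x1**2 * (tau12 * (G12 / (x2 + x1 * G12))**2
                     + tau21 * G21 / (x1 + x2 * G21)**2)
    return np.exp(ln_g1), np.exp(ln_g2)

x1 = np.linspace(0.05, 0.95, 10)
g1_ml, g2_ml = nrtl_gamma(x1, 1.2, 0.8)   # pretend these are ML predictions

def residuals(p):
    g1, g2 = nrtl_gamma(x1, *p)
    return np.concatenate([g1 - g1_ml, g2 - g2_ml])

fit = least_squares(residuals, x0=[0.5, 0.5])
print(fit.x)   # recovers tau12 ~ 1.2, tau21 ~ 0.8
</code></pre>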
</p> </div> </dd> <dt> <a name='item119'>[119]</a> <a href ="/abs/2504.06037" title="Abstract" id="2504.06037"> arXiv:2504.06037 </a> (cross-list from cs.CL) [<a href="/pdf/2504.06037" title="Download PDF" id="pdf-2504.06037" aria-labelledby="pdf-2504.06037">pdf</a>, <a href="/format/2504.06037" title="Other formats" id="oth-2504.06037" aria-labelledby="oth-2504.06037">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Confidence Regularized Masked Language Modeling using Text Length </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Ji,+S">Seunghyun Ji</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lee,+S">Soowon Lee</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 10 pages, 1 figure </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Masked language modeling, the task of predicting a randomly masked word in the input text, is an efficient language representation learning method. Masked language modeling ignores the many alternative words that could plausibly fill the masked position and computes the loss against a single reference word. Especially when the input text is short, the entropy of the word distribution that can fill in the masked position can be high. This may cause the model to be overconfident in the single answer. To address this issue, we propose a novel confidence regularizer that controls regularizing strength dynamically by the input text length. Experiments with GLUE and SQuAD datasets showed that our method achieves better accuracy and lower expected calibration error.
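A hedged sketch of a length-aware confidence regularizer (the paper's exact functional form may differ): the shorter the text, the more plausible fillers exist, so peaked predictions at the masked position are penalized more strongly:
<pre><code>
# Sketch: cross-entropy plus a confidence penalty scaled inversely
# with text length. The functional form is an assumption.
import torch
import torch.nn.functional as F

def regularized_mlm_loss(logits, target, text_len, base_strength=1.0):
    """logits: (vocab,) at one masked position; target: gold token id."""
    ce = F.cross_entropy(logits[None], torch.tensor([target]))
    probs = F.softmax(logits, dim=-1)
    confidence = probs.max()                # peakedness of the prediction
    strength = base_strength / text_len     # stronger for shorter inputs
    return ce + strength * confidence

logits = torch.randn(30522)                 # e.g. BERT vocabulary size
print(regularized_mlm_loss(logits, target=42, text_len=8))
</code></pre>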
</p> </div> </dd> <dt> <a name='item120'>[120]</a> <a href ="/abs/2504.06087" title="Abstract" id="2504.06087"> arXiv:2504.06087 </a> (cross-list from physics.comp-ph) [<a href="/pdf/2504.06087" title="Download PDF" id="pdf-2504.06087" aria-labelledby="pdf-2504.06087">pdf</a>, <a href="https://arxiv.org/html/2504.06087v1" title="View HTML" id="html-2504.06087" aria-labelledby="html-2504.06087" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06087" title="Other formats" id="oth-2504.06087" aria-labelledby="oth-2504.06087">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Accurate Ab-initio Neural-network Solutions to Large-Scale Electronic Structure Problems </div> <div class='list-authors'><a href="https://arxiv.org/search/physics?searchtype=author&query=Scherbela,+M">Michael Scherbela</a>, <a href="https://arxiv.org/search/physics?searchtype=author&query=Gao,+N">Nicholas Gao</a>, <a href="https://arxiv.org/search/physics?searchtype=author&query=Grohs,+P">Philipp Grohs</a>, <a href="https://arxiv.org/search/physics?searchtype=author&query=G%C3%BCnnemann,+S">Stephan Günnemann</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 13 pages, 5 figures + 9 pages supplementary information </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computational Physics (physics.comp-ph)</span>; Machine Learning (cs.LG); Chemical Physics (physics.chem-ph) </div> <p class='mathjax'> We present finite-range embeddings (FiRE), a novel wave function ansatz for accurate large-scale ab-initio electronic structure calculations. Compared to contemporary neural-network wave functions, FiRE reduces the asymptotic complexity of neural-network variational Monte Carlo (NN-VMC) by $\sim n_\text{el}$, the number of electrons. By restricting electron-electron interactions within the neural network, FiRE accelerates all key operations -- sampling, pseudopotentials, and Laplacian computations -- resulting in a real-world $10\times$ acceleration in now-feasible 180-electron calculations. We validate our method's accuracy on various challenging systems, including biochemical compounds, conjugated hydrocarbons, and organometallic compounds. On these systems, FiRE's energies are consistently within chemical accuracy of the most reliable data, including experiments, even in cases where high-accuracy methods such as CCSD(T), AFQMC, or contemporary NN-VMC fall short. With these improvements in both runtime and accuracy, FiRE represents a new `gold-standard' method for fast and accurate large-scale ab-initio calculations, potentially enabling new computational studies in fields like quantum chemistry, solid-state physics, and material design.
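The finite-range idea can be illustrated with a cutoff-based interaction mask, under which each electron interacts with O(1) neighbors rather than all $n_\text{el}$; the construction below is an assumption for intuition, not FiRE's actual architecture:
<pre><code>
# Sketch: restrict electron-electron interactions to pairs within a
# cutoff radius, so per-electron interaction counts stay O(1).
import numpy as np

def finite_range_pairs(positions: np.ndarray, cutoff: float) -> np.ndarray:
    """positions: (n_el, 3) electron coordinates; returns a boolean
    (n_el, n_el) mask of interacting pairs within the cutoff."""
    diff = positions[:, None, :] - positions[None, :, :]
    dist = np.linalg.norm(diff, axis=-1)
    return (dist < cutoff) & ~np.eye(len(positions), dtype=bool)

rng = np.random.default_rng(0)
r = rng.uniform(0, 10.0, size=(180, 3))   # 180 electrons, as in the abstract
mask = finite_range_pairs(r, cutoff=3.0)
print(mask.sum(axis=1).mean())            # avg. interacting neighbors per electron
</code></pre>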
</p> </div> </dd> <dt> <a name='item121'>[121]</a> <a href ="/abs/2504.06088" title="Abstract" id="2504.06088"> arXiv:2504.06088 </a> (cross-list from cs.CV) [<a href="/pdf/2504.06088" title="Download PDF" id="pdf-2504.06088" aria-labelledby="pdf-2504.06088">pdf</a>, <a href="https://arxiv.org/html/2504.06088v1" title="View HTML" id="html-2504.06088" aria-labelledby="html-2504.06088" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06088" title="Other formats" id="oth-2504.06088" aria-labelledby="oth-2504.06088">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> MCAT: Visual Query-Based Localization of Standard Anatomical Clips in Fetal Ultrasound Videos Using Multi-Tier Class-Aware Token Transformer </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Mishra,+D">Divyanshu Mishra</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Saha,+P">Pramit Saha</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhao,+H">He Zhao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hernandez-Cruz,+N">Netzahualcoyotl Hernandez-Cruz</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Patey,+O">Olga Patey</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Papageorghiou,+A">Aris Papageorghiou</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Noble,+J+A">J. Alison Noble</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted in AAAI 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Accurate standard plane acquisition in fetal ultrasound (US) videos is crucial for fetal growth assessment, anomaly detection, and adherence to clinical guidelines. However, manually selecting standard frames is time-consuming and prone to intra- and inter-sonographer variability. Existing methods primarily rely on image-based approaches that capture standard frames and then classify the input frames across different anatomies. This ignores the dynamic nature of video acquisition and its interpretation. To address these challenges, we introduce Multi-Tier Class-Aware Token Transformer (MCAT), a visual query-based video clip localization (VQ-VCL) method, to assist sonographers by enabling them to capture a quick US sweep. By then providing a visual query of the anatomy they wish to analyze, MCAT returns the video clip containing the standard frames for that anatomy, facilitating thorough screening for potential anomalies. We evaluate MCAT on two ultrasound video datasets and a natural image VQ-VCL dataset based on Ego4D. Our model outperforms state-of-the-art methods by 10% and 13% mIoU on the ultrasound datasets and by 5.35% mIoU on the Ego4D dataset, using 96% fewer tokens. MCAT's efficiency and accuracy have significant potential implications for public health, especially in low- and middle-income countries (LMICs), where it may enhance prenatal care by streamlining standard plane acquisition, simplifying US-based screening and diagnosis, and allowing sonographers to examine more patients.
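For reference, the mIoU figures above build on the temporal intersection-over-union between a predicted clip and the ground-truth clip, which is straightforward to compute:
<pre><code>
# Temporal IoU between a predicted clip and the ground-truth clip,
# given as (start, end) times in seconds.
def temporal_iou(pred: tuple[float, float], gt: tuple[float, float]) -> float:
    inter = max(0.0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
    union = (pred[1] - pred[0]) + (gt[1] - gt[0]) - inter
    return inter / union if union > 0 else 0.0

print(temporal_iou((12.0, 20.0), (15.0, 25.0)))  # 5 / 13 ~= 0.385
</code></pre>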
</p> </div> </dd> <dt> <a name='item122'>[122]</a> <a href ="/abs/2504.06095" title="Abstract" id="2504.06095"> arXiv:2504.06095 </a> (cross-list from cs.DC) [<a href="/pdf/2504.06095" title="Download PDF" id="pdf-2504.06095" aria-labelledby="pdf-2504.06095">pdf</a>, <a href="https://arxiv.org/html/2504.06095v1" title="View HTML" id="html-2504.06095" aria-labelledby="html-2504.06095" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06095" title="Other formats" id="oth-2504.06095" aria-labelledby="oth-2504.06095">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Nonuniform-Tensor-Parallelism: Mitigating GPU failure impact for Scaled-up LLM Training </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Arfeen,+D">Daiyaan Arfeen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mudigere,+D">Dheevatsa Mudigere</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=More,+A">Ankit More</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gopireddy,+B">Bhargava Gopireddy</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Inci,+A">Ahmet Inci</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ganger,+G+R">Gregory R. Ganger</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Distributed, Parallel, and Cluster Computing (cs.DC)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> LLM training is scaled up to 10Ks of GPUs by a mix of data-(DP) and model-parallel (MP) execution. Critical to achieving efficiency is tensor-parallel (TP; a form of MP) execution within tightly-coupled subsets of GPUs, referred to as a scale-up domain, and the larger the scale-up domain, the better the performance. New datacenter architectures are emerging with more GPUs able to be tightly-coupled in a scale-up domain, such as moving from 8 GPUs to 72 GPUs connected via NVLink. Unfortunately, larger scale-up domains increase the blast-radius of failures, with a failure of a single GPU potentially impacting TP execution on the full scale-up domain, which can degrade overall LLM training throughput dramatically. With as few as 0.1% of GPUs being in a failed state, a high TP-degree job can experience nearly 10% reduction in LLM training throughput. We propose nonuniform-tensor-parallelism (NTP) to mitigate this amplified impact of GPU failures. In NTP, a DP replica that experiences GPU failures operates at a reduced TP degree, contributing throughput equal to the percentage of still-functional GPUs. We also propose a rack-design with improved electrical and thermal capabilities in order to sustain power-boosting of scale-up domains that have experienced failures; combined with NTP, this can allow the DP replica with the reduced TP degree (i.e., with failed GPUs) to keep up with the others, thereby achieving near-zero throughput loss for large-scale LLM training.
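A back-of-the-envelope Python model of the failure-impact argument (assuming, for simplicity, that each failed GPU lands in a distinct data-parallel replica):
<pre><code>
# Toy throughput model: uniform TP idles a whole scale-up domain per
# failure; NTP keeps the affected replica running at reduced TP degree.
def relative_throughput(n_replicas, domain_size, n_failures, ntp):
    healthy = n_replicas - n_failures                 # unaffected replicas
    if not ntp:
        return healthy / n_replicas                   # failed domains contribute 0
    degraded = n_failures * (domain_size - 1) / domain_size
    return (healthy + degraded) / n_replicas

# 1000 replicas of 72-GPU scale-up domains, 72 single-GPU failures (~0.1%):
print(relative_throughput(1000, 72, 72, ntp=False))   # 0.928 -> ~7% loss
print(relative_throughput(1000, 72, 72, ntp=True))    # ~0.999 -> near-zero loss
</code></pre>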
</p> </div> </dd> <dt> <a name='item123'>[123]</a> <a href ="/abs/2504.06099" title="Abstract" id="2504.06099"> arXiv:2504.06099 </a> (cross-list from cs.CV) [<a href="/pdf/2504.06099" title="Download PDF" id="pdf-2504.06099" aria-labelledby="pdf-2504.06099">pdf</a>, <a href="https://arxiv.org/html/2504.06099v1" title="View HTML" id="html-2504.06099" aria-labelledby="html-2504.06099" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06099" title="Other formats" id="oth-2504.06099" aria-labelledby="oth-2504.06099">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Towards Varroa destructor mite detection using a narrow spectra illumination </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Bielik,+S">Samuel Bielik</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bilik,+S">Simon Bilik</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> This paper focuses on the development and modification of a beehive monitoring device and on Varroa destructor detection on bees using hyperspectral imagery, a U-Net semantic segmentation architecture, and conventional computer vision methods. The main objectives were to collect a dataset of bees and mites and to propose a computer vision model that can distinguish bees from mites. </p> </div> </dd> <dt> <a name='item124'>[124]</a> <a href ="/abs/2504.06105" title="Abstract" id="2504.06105"> arXiv:2504.06105 </a> (cross-list from cs.RO) [<a href="/pdf/2504.06105" title="Download PDF" id="pdf-2504.06105" aria-labelledby="pdf-2504.06105">pdf</a>, <a href="https://arxiv.org/html/2504.06105v1" title="View HTML" id="html-2504.06105" aria-labelledby="html-2504.06105" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06105" title="Other formats" id="oth-2504.06105" aria-labelledby="oth-2504.06105">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Uncertainty-Aware Hybrid Machine Learning in Virtual Sensors for Vehicle Sideslip Angle Estimation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Kalyanasundaram,+A">Abinav Kalyanasundaram</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sekaran,+K+C">Karthikeyan Chandra Sekaran</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Stauber,+P">Philipp Stauber</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lange,+M">Michael Lange</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Utschick,+W">Wolfgang Utschick</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Botsch,+M">Michael Botsch</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted at the 2025 IEEE Intelligent Vehicles Symposium (IV) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Robotics (cs.RO)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Precise vehicle state estimation is crucial for safe and reliable autonomous driving.
The number of measurable states and their precision offered by the onboard vehicle sensor system are often constrained by cost. For instance, measuring critical quantities such as the Vehicle Sideslip Angle (VSA) poses significant commercial challenges using current optical sensors. This paper addresses these limitations by focusing on the development of high-performance virtual sensors to enhance vehicle state estimation for active safety. The proposed Uncertainty-Aware Hybrid Learning (UAHL) architecture integrates a machine learning model with vehicle motion models to estimate VSA directly from onboard sensor data. A key aspect of the UAHL architecture is its focus on uncertainty quantification for individual model estimates and hybrid fusion. These mechanisms enable the dynamic weighting of uncertainty-aware predictions from machine learning and vehicle motion models to produce accurate and reliable hybrid VSA estimates. This work also presents a novel dataset named Real-world Vehicle State Estimation Dataset (ReV-StED), comprising synchronized measurements from advanced vehicle dynamic sensors. The experimental results demonstrate the superior performance of the proposed method for VSA estimation, highlighting UAHL as a promising architecture for advancing virtual sensors and enhancing active safety in autonomous vehicles. </p> </div> </dd> <dt> <a name='item125'>[125]</a> <a href ="/abs/2504.06160" title="Abstract" id="2504.06160"> arXiv:2504.06160 </a> (cross-list from cs.CL) [<a href="/pdf/2504.06160" title="Download PDF" id="pdf-2504.06160" aria-labelledby="pdf-2504.06160">pdf</a>, <a href="https://arxiv.org/html/2504.06160v1" title="View HTML" id="html-2504.06160" aria-labelledby="html-2504.06160" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06160" title="Other formats" id="oth-2504.06160" aria-labelledby="oth-2504.06160">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Navigating the Rabbit Hole: Emergent Biases in LLM-Generated Attack Narratives Targeting Mental Health Groups </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Magu,+R">Rijul Magu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dutta,+A">Arka Dutta</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kim,+S">Sean Kim</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=KhudaBukhsh,+A+R">Ashiqur R. KhudaBukhsh</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=De+Choudhury,+M">Munmun De Choudhury</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Computers and Society (cs.CY); Machine Learning (cs.LG); Social and Information Networks (cs.SI) </div> <p class='mathjax'> Large Language Models (LLMs) have been shown to demonstrate imbalanced biases against certain groups. However, the study of unprovoked targeted attacks by LLMs towards at-risk populations remains underexplored. Our paper presents three novel contributions: (1) the explicit evaluation of LLM-generated attacks on highly vulnerable mental health groups; (2) a network-based framework to study the propagation of relative biases; and (3) an assessment of the relative degree of stigmatization that emerges from these attacks. 
Our analysis of a recently released large-scale bias audit dataset reveals that mental health entities occupy central positions within attack narrative networks, as indicated by a significantly higher mean closeness centrality (p-value = 4.06e-10) and dense clustering (Gini coefficient = 0.7). Drawing from sociological foundations of stigmatization theory, our stigmatization analysis indicates increased labeling components for mental health disorder-related targets relative to initial targets in generation chains. Taken together, these insights shed light on the structural predilections of large language models to heighten harmful discourse and highlight the need for suitable approaches for mitigation. </p> </div> </dd> <dt> <a name='item126'>[126]</a> <a href ="/abs/2504.06173" title="Abstract" id="2504.06173"> arXiv:2504.06173 </a> (cross-list from cs.NI) [<a href="/pdf/2504.06173" title="Download PDF" id="pdf-2504.06173" aria-labelledby="pdf-2504.06173">pdf</a>, <a href="https://arxiv.org/html/2504.06173v1" title="View HTML" id="html-2504.06173" aria-labelledby="html-2504.06173" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06173" title="Other formats" id="oth-2504.06173" aria-labelledby="oth-2504.06173">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Multi-Modality Sensing in mmWave Beamforming for Connected Vehicles Using Deep Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Mollah,+M+B">Muhammad Baqer Mollah</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+H">Honggang Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Karim,+M+A">Mohammad Ataul Karim</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Fang,+H">Hua Fang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 15 Pages </div> <div class='list-journal-ref'><span class='descriptor'>Journal-ref:</span> IEEE Transactions on Cognitive Communications and Networking, 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Networking and Internet Architecture (cs.NI)</span>; Artificial Intelligence (cs.AI); Emerging Technologies (cs.ET); Machine Learning (cs.LG); Signal Processing (eess.SP) </div> <p class='mathjax'> Beamforming techniques are considered essential for compensating the severe path losses in millimeter-wave (mmWave) communications. In particular, these techniques adopt large antenna arrays and form narrow beams to obtain satisfactory received power. However, performing accurate beam alignment over narrow beams for efficient link configuration with traditional, standard-defined beam selection approaches, which mainly rely on channel state information and exhaustive beam sweeping, imposes computational and communication overheads. Such overheads limit the potential use of these approaches in vehicle-to-infrastructure (V2I) and vehicle-to-vehicle (V2V) communications involving highly dynamic scenarios. In comparison, utilizing out-of-band contextual information, such as sensing data obtained from sensor devices, provides a better alternative for reducing these overheads. This paper presents a deep learning-based solution that utilizes multi-modality sensing data to predict the optimal beams with sufficient mmWave received power, so that the best V2I and V2V line-of-sight links can be ensured proactively.
The proposed solution has been tested on real-world measured mmWave sensing and communication data, and the results show that it can achieve up to 98.19% accuracy when predicting the top-13 beams. Compared to the existing beam sweeping approach, the beam search space and time overheads are reduced by roughly 79.67% and 91.89%, respectively, which confirms the approach as a promising solution for beamforming in mmWave-enabled communications. </p> </div> </dd> <dt> <a name='item127'>[127]</a> <a href ="/abs/2504.06196" title="Abstract" id="2504.06196"> arXiv:2504.06196 </a> (cross-list from cs.AI) [<a href="/pdf/2504.06196" title="Download PDF" id="pdf-2504.06196" aria-labelledby="pdf-2504.06196">pdf</a>, <a href="/format/2504.06196" title="Other formats" id="oth-2504.06196" aria-labelledby="oth-2504.06196">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> TxGemma: Efficient and Agentic LLMs for Therapeutics </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+E">Eric Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Schmidgall,+S">Samuel Schmidgall</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jaeger,+P+F">Paul F. Jaeger</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+F">Fan Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Pilgrim,+R">Rory Pilgrim</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Matias,+Y">Yossi Matias</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Barral,+J">Joelle Barral</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Fleet,+D">David Fleet</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Azizi,+S">Shekoofeh Azizi</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Artificial Intelligence (cs.AI)</span>; Computation and Language (cs.CL); Machine Learning (cs.LG) </div> <p class='mathjax'> Therapeutic development is a costly and high-risk endeavor that is often plagued by high failure rates. To address this, we introduce TxGemma, a suite of efficient, generalist large language models (LLMs) capable of therapeutic property prediction as well as interactive reasoning and explainability. Unlike task-specific models, TxGemma synthesizes information from diverse sources, enabling broad application across the therapeutic development pipeline. The suite includes 2B, 9B, and 27B parameter models, fine-tuned from Gemma-2 on a comprehensive dataset of small molecules, proteins, nucleic acids, diseases, and cell lines. Across 66 therapeutic development tasks, TxGemma achieved superior or comparable performance to the state-of-the-art generalist model on 64 (superior on 45), and against state-of-the-art specialist models on 50 (superior on 26). Fine-tuning TxGemma models on therapeutic downstream tasks, such as clinical trial adverse event prediction, requires less training data than fine-tuning base LLMs, making TxGemma suitable for data-limited applications. Beyond these predictive capabilities, TxGemma features conversational models that bridge the gap between general LLMs and specialized property predictors. These allow scientists to interact in natural language, provide mechanistic reasoning for predictions based on molecular structure, and engage in scientific discussions.
Building on this, we further introduce Agentic-Tx, a generalist therapeutic agentic system powered by Gemini 2.5 that reasons, acts, manages diverse workflows, and acquires external domain knowledge. Agentic-Tx surpasses prior leading models on the Humanity's Last Exam benchmark (Chemistry & Biology) with a 52.3% relative improvement over o3-mini (high), improves over o3-mini (high) by 26.7% on GPQA (Chemistry), and posts further gains of 6.3% (ChemBench-Preference) and 2.4% (ChemBench-Mini) over o3-mini (high). </p> </div> </dd> <dt> <a name='item128'>[128]</a> <a href ="/abs/2504.06214" title="Abstract" id="2504.06214"> arXiv:2504.06214 </a> (cross-list from cs.CL) [<a href="/pdf/2504.06214" title="Download PDF" id="pdf-2504.06214" aria-labelledby="pdf-2504.06214">pdf</a>, <a href="https://arxiv.org/html/2504.06214v1" title="View HTML" id="html-2504.06214" aria-labelledby="html-2504.06214" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06214" title="Other formats" id="oth-2504.06214" aria-labelledby="oth-2504.06214">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> From 128K to 4M: Efficient Training of Ultra-Long Context Large Language Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Xu,+C">Chejian Xu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ping,+W">Wei Ping</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xu,+P">Peng Xu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+Z">Zihan Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+B">Boxin Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shoeybi,+M">Mohammad Shoeybi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+B">Bo Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Catanzaro,+B">Bryan Catanzaro</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Long-context capabilities are essential for a wide range of applications, including document and video understanding, in-context learning, and inference-time scaling, all of which require models to process and reason over long sequences of text and multimodal data. In this work, we introduce an efficient training recipe for building ultra-long context LLMs from aligned instruct models, pushing the boundaries of context lengths from 128K to 1M, 2M, and 4M tokens. Our approach leverages efficient continued pretraining strategies to extend the context window and employs effective instruction tuning to maintain the instruction-following and reasoning abilities. Our UltraLong-8B, built on Llama3.1-Instruct with our recipe, achieves state-of-the-art performance across a diverse set of long-context benchmarks. Importantly, models trained with our approach maintain competitive performance on standard benchmarks, demonstrating balanced improvements for both long and short context tasks. We further provide an in-depth analysis of key design choices, highlighting the impacts of scaling strategies and data composition. Our findings establish a robust framework for efficiently scaling context lengths while preserving general model capabilities.
We release all model weights at: <a href="https://ultralong.github.io/" rel="external noopener nofollow" class="link-external link-https">this https URL</a>. </p> </div> </dd> <dt> <a name='item129'>[129]</a> <a href ="/abs/2504.06219" title="Abstract" id="2504.06219"> arXiv:2504.06219 </a> (cross-list from cs.CL) [<a href="/pdf/2504.06219" title="Download PDF" id="pdf-2504.06219" aria-labelledby="pdf-2504.06219">pdf</a>, <a href="https://arxiv.org/html/2504.06219v1" title="View HTML" id="html-2504.06219" aria-labelledby="html-2504.06219" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06219" title="Other formats" id="oth-2504.06219" aria-labelledby="oth-2504.06219">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Can Performant LLMs Be Ethical? Quantifying the Impact of Web Crawling Opt-Outs </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Fan,+D">Dongyang Fan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sabol%C4%8Dec,+V">Vinko Sabolčec</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ansaripour,+M">Matin Ansaripour</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Tarun,+A+K">Ayush Kumar Tarun</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jaggi,+M">Martin Jaggi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bosselut,+A">Antoine Bosselut</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Schlag,+I">Imanol Schlag</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> The increasing adoption of web crawling opt-outs by copyright holders of online content raises critical questions about the impact of data compliance on large language model (LLM) performance. However, little is known about how these restrictions (and the resultant filtering of pretraining datasets) affect the capabilities of models trained using these corpora. In this work, we conceptualize this effect as the $\textit{data compliance gap}$ (DCG), which quantifies the performance difference between models trained on datasets that comply with web crawling opt-outs, and those that do not. We measure the data compliance gap in two settings: pretraining models from scratch and continual pretraining from existing compliant models (simulating a setting where copyrighted data could be integrated later in pretraining). Our experiments with 1.5B models show that, as of January 2025, compliance with web data opt-outs does not degrade general knowledge acquisition (close to 0\% DCG). However, in specialized domains such as biomedical research, excluding major publishers leads to performance declines. These findings suggest that while general-purpose LLMs can be trained to perform equally well using fully open data, performance in specialized domains may benefit from access to high-quality copyrighted sources later in training. Our study provides empirical insights into the long-debated trade-off between data compliance and downstream model performance, informing future discussions on AI training practices and policy decisions.
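</p> <p class='mathjax'> The data compliance gap reduces to a simple performance comparison; the short Python sketch below illustrates one plausible formulation, where the relative-difference normalization and all benchmark scores are illustrative assumptions rather than values from the paper. </p> <pre><code>
# Hedged sketch: one plausible reading of the data compliance gap (DCG).
# The normalization and the scores below are assumptions for illustration.

def data_compliance_gap(score_noncompliant, score_compliant):
    """Relative performance difference (in %) attributable to honoring
    web-crawling opt-outs; a value near 0 means compliance costs little."""
    return 100.0 * (score_noncompliant - score_compliant) / score_noncompliant

print(data_compliance_gap(62.4, 62.1))  # general knowledge: close to 0% DCG
print(data_compliance_gap(55.0, 48.7))  # specialized domain: a visible gap
</code></pre> <p class='mathjax'> Under this reading, a near-zero DCG means the compliant and non-compliant training corpora yield nearly identical benchmark scores, while specialized domains show a positive gap.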
</p> </div> </dd> <dt> <a name='item130'>[130]</a> <a href ="/abs/2504.06225" title="Abstract" id="2504.06225"> arXiv:2504.06225 </a> (cross-list from cs.CL) [<a href="/pdf/2504.06225" title="Download PDF" id="pdf-2504.06225" aria-labelledby="pdf-2504.06225">pdf</a>, <a href="https://arxiv.org/html/2504.06225v1" title="View HTML" id="html-2504.06225" aria-labelledby="html-2504.06225" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06225" title="Other formats" id="oth-2504.06225" aria-labelledby="oth-2504.06225">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Encoder-Decoder Gemma: Improving the Quality-Efficiency Trade-Off via Adaptation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+B">Biao Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Moiseev,+F">Fedor Moiseev</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ainslie,+J">Joshua Ainslie</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Suganthan,+P">Paul Suganthan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ma,+M">Min Ma</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bhupatiraju,+S">Surya Bhupatiraju</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lebron,+F">Fede Lebron</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Firat,+O">Orhan Firat</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Joulin,+A">Armand Joulin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dong,+Z">Zhe Dong</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> While decoder-only large language models (LLMs) have shown impressive results, encoder-decoder models are still widely adopted in real-world applications for their inference efficiency and richer encoder representation. In this paper, we study a novel problem: adapting pretrained decoder-only LLMs to encoder-decoder, with the goal of leveraging the strengths of both approaches to achieve a more favorable quality-efficiency trade-off. We argue that adaptation not only enables inheriting the capability of decoder-only LLMs but also reduces the demand for computation compared to pretraining from scratch. We rigorously explore different pretraining objectives and parameter initialization/optimization techniques. Through extensive experiments based on Gemma 2 (2B and 9B) and a suite of newly pretrained mT5-sized models (up to 1.6B), we demonstrate the effectiveness of adaptation and the advantage of encoder-decoder LLMs. Under a similar inference budget, encoder-decoder LLMs achieve comparable (often better) pretraining performance but substantially better finetuning performance than their decoder-only counterparts. For example, Gemma 2B-2B outperforms Gemma 2B by $\sim$7\% after instruction tuning. Encoder-decoder adaptation also allows for flexible combination of different-sized models, where Gemma 9B-2B significantly surpasses Gemma 2B-2B by $>$3\%. The adapted encoder representation also yields better results on SuperGLUE. We will release our checkpoints to facilitate future research.
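</p> <p class='mathjax'> A minimal sketch of the adaptation idea, assuming the simplest possible initialization: both stacks of the encoder-decoder start from the pretrained decoder-only weights, while the new cross-attention layers have no pretrained counterpart. The function and module names are hypothetical, and this is a sketch of the general technique rather than the authors' procedure. </p> <pre><code>
# Minimal sketch, assuming weight reuse for both stacks; not the authors' code.
import copy
import torch.nn as nn

def adapt_to_encoder_decoder(pretrained_decoder: nn.Module):
    encoder = copy.deepcopy(pretrained_decoder)  # run without causal masking
    decoder = copy.deepcopy(pretrained_decoder)  # retains causal masking
    # Cross-attention layers are new: they would be initialized from the
    # self-attention weights or from scratch, then trained with a
    # pretraining objective before finetuning.
    return encoder, decoder
</code></pre> <p class='mathjax'> The appeal of such an initialization is that most parameters inherit pretrained knowledge, which is consistent with the abstract's claim that adaptation demands less computation than pretraining an encoder-decoder from scratch.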
</p> </div> </dd> <dt> <a name='item131'>[131]</a> <a href ="/abs/2504.06250" title="Abstract" id="2504.06250"> arXiv:2504.06250 </a> (cross-list from math.PR) [<a href="/pdf/2504.06250" title="Download PDF" id="pdf-2504.06250" aria-labelledby="pdf-2504.06250">pdf</a>, <a href="https://arxiv.org/html/2504.06250v1" title="View HTML" id="html-2504.06250" aria-labelledby="html-2504.06250" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.06250" title="Other formats" id="oth-2504.06250" aria-labelledby="oth-2504.06250">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Fractal and Regular Geometry of Deep Neural Networks </div> <div class='list-authors'><a href="https://arxiv.org/search/math?searchtype=author&query=Di+Lillo,+S">Simmaco Di Lillo</a>, <a href="https://arxiv.org/search/math?searchtype=author&query=Marinucci,+D">Domenico Marinucci</a>, <a href="https://arxiv.org/search/math?searchtype=author&query=Salvi,+M">Michele Salvi</a>, <a href="https://arxiv.org/search/math?searchtype=author&query=Vigogna,+S">Stefano Vigogna</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Probability (math.PR)</span>; Machine Learning (cs.LG); Machine Learning (stat.ML) </div> <p class='mathjax'> We study the geometric properties of random neural networks by investigating the boundary volumes of their excursion sets for different activation functions, as the depth increases. More specifically, we show that, for activations which are not very regular (e.g., the Heaviside step function), the boundary volumes exhibit fractal behavior, with their Hausdorff dimension monotonically increasing with the depth. On the other hand, for activations which are more regular (e.g., ReLU, logistic and $\tanh$), as the depth increases, the expected boundary volumes can either converge to zero, remain constant or diverge exponentially, depending on a single spectral parameter which can be easily computed. Our theoretical results are confirmed in some numerical experiments based on Monte Carlo simulations. </p> </div> </dd> </dl> <dl id='articles'> <h3>Replacement submissions (showing 118 of 118 entries)</h3> <dt> <a name='item132'>[132]</a> <a href ="/abs/2210.15527" title="Abstract" id="2210.15527"> arXiv:2210.15527 </a> (replaced) [<a href="/pdf/2210.15527" title="Download PDF" id="pdf-2210.15527" aria-labelledby="pdf-2210.15527">pdf</a>, <a href="https://arxiv.org/html/2210.15527v2" title="View HTML" id="html-2210.15527" aria-labelledby="html-2210.15527" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2210.15527" title="Other formats" id="oth-2210.15527" aria-labelledby="oth-2210.15527">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Exploiting Features and Logits in Heterogeneous Federated Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Chan,+Y">Yun-Hin Chan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ngai,+E+C">Edith C.-H. 
Ngai</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted by Computer Networks </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Due to the rapid growth of IoT and artificial intelligence, deploying neural networks on IoT devices is becoming increasingly crucial for edge intelligence. Federated learning (FL) facilitates the management of edge devices to collaboratively train a shared model while keeping training data local and private. However, a general assumption in FL is that all edge devices are trained on the same machine learning model, which may be impractical considering diverse device capabilities. For instance, less capable devices may slow down the updating process because they struggle to handle large models appropriate for ordinary devices. In this paper, we propose a novel data-free FL method that supports heterogeneous client models by managing features and logits, called Felo; and its extension with a conditional VAE deployed on the server, called Velo. Felo averages the mid-level features and logits from the clients at the server based on their class labels to provide the average features and logits, which are utilized for further training the client models. Unlike Felo, the server has a conditional VAE in Velo, which is used for training mid-level features and generating synthetic features according to the labels. The clients optimize their models based on the synthetic features and the average logits. We conduct experiments on two datasets and show the satisfactory performance of our methods compared with state-of-the-art methods. </p> </div> </dd> <dt> <a name='item133'>[133]</a> <a href ="/abs/2212.06653" title="Abstract" id="2212.06653"> arXiv:2212.06653 </a> (replaced) [<a href="/pdf/2212.06653" title="Download PDF" id="pdf-2212.06653" aria-labelledby="pdf-2212.06653">pdf</a>, <a href="https://arxiv.org/html/2212.06653v4" title="View HTML" id="html-2212.06653" aria-labelledby="html-2212.06653" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2212.06653" title="Other formats" id="oth-2212.06653" aria-labelledby="oth-2212.06653">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Scalable Dynamic Mixture Model with Full Covariance for Probabilistic Traffic Forecasting </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Choi,+S">Seongjin Choi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Saunier,+N">Nicolas Saunier</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zheng,+V+Z">Vincent Zhihao Zheng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Trepanier,+M">Martin Trepanier</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sun,+L">Lijun Sun</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 11 pages, 4 figures, 2 tables </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Deep learning-based multivariate and multistep-ahead traffic forecasting models are typically trained with the mean squared error (MSE) or mean absolute error (MAE) as the loss function in a sequence-to-sequence setting, simply assuming that the errors follow independent and isotropic Gaussian or Laplacian
distributions. However, such assumptions are often unrealistic for real-world traffic forecasting tasks, where the probabilistic distribution of spatiotemporal forecasting is very complex with strong concurrent correlations across both sensors and forecasting horizons in a time-varying manner. In this paper, we model the time-varying distribution for the matrix-variate error process as a dynamic mixture of zero-mean Gaussian distributions. To achieve efficiency, flexibility, and scalability, we parameterize each mixture component using a matrix normal distribution and allow the mixture weight to change and be predictable over time. The proposed method can be seamlessly integrated into existing deep-learning frameworks with only a few additional parameters to be learned. We evaluate the performance of the proposed method on a traffic speed forecasting task and find that our method not only improves model performance but also provides interpretable spatiotemporal correlation structures. </p> </div> </dd> <dt> <a name='item134'>[134]</a> <a href ="/abs/2301.06650" title="Abstract" id="2301.06650"> arXiv:2301.06650 </a> (replaced) [<a href="/pdf/2301.06650" title="Download PDF" id="pdf-2301.06650" aria-labelledby="pdf-2301.06650">pdf</a>, <a href="https://arxiv.org/html/2301.06650v3" title="View HTML" id="html-2301.06650" aria-labelledby="html-2301.06650" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2301.06650" title="Other formats" id="oth-2301.06650" aria-labelledby="oth-2301.06650">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Probabilistic Traffic Forecasting with Dynamic Regression </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zheng,+V+Z">Vincent Zhihao Zheng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Choi,+S">Seongjin Choi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sun,+L">Lijun Sun</a></div> <div class='list-journal-ref'><span class='descriptor'>Journal-ref:</span> Probabilistic Traffic Forecasting with Dynamic Regression. Transportation Science (2025) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Machine Learning (stat.ML) </div> <p class='mathjax'> This paper proposes a dynamic regression (DR) framework that enhances existing deep spatiotemporal models by incorporating structured learning for the error process in traffic forecasting. The framework relaxes the assumption of time independence by modeling the error series of the base model (i.e., a well-established traffic forecasting model) using a matrix-variate autoregressive (AR) model. The AR model is integrated into training by redesigning the loss function. The newly designed loss function is based on the likelihood of a non-isotropic error term, enabling the model to generate probabilistic forecasts while preserving the original outputs of the base model. Importantly, the additional parameters introduced by the DR framework can be jointly optimized alongside the base model. Evaluation on state-of-the-art (SOTA) traffic forecasting models using speed and flow datasets demonstrates improved performance, with interpretable AR coefficients and spatiotemporal covariance matrices enhancing the understanding of the model. 
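</p> <p class='mathjax'> A simplified sketch of the redesigned loss: the base model's error series is given an AR(1) structure and the training objective becomes the likelihood of the innovations. The full method uses a matrix-variate AR model with a non-isotropic error term; the first-order, isotropic form and all names below are simplifying assumptions. </p> <pre><code>
# Simplified sketch of a dynamic-regression loss with an AR(1) error model.
import torch

def dr_loss(y, y_hat, A, log_sigma):
    """y, y_hat: (T, N) targets and base-model predictions;
    A: (N, N) learnable AR coefficients; log_sigma: learnable noise scale."""
    e = y - y_hat                # error series of the base forecasting model
    eps = e[1:] - e[:-1] @ A.T   # innovations: e_t minus A applied to e_{t-1}
    # Gaussian negative log-likelihood of the innovations (up to constants).
    return 0.5 * (eps * torch.exp(-log_sigma)).pow(2).mean() + log_sigma
</code></pre> <p class='mathjax'> Because this loss is differentiable in A and log_sigma, the extra parameters can be optimized jointly with the base model, matching the abstract's claim that the framework's additional parameters are trained alongside the base model.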
</p> </div> </dd> <dt> <a name='item135'>[135]</a> <a href ="/abs/2302.04406" title="Abstract" id="2302.04406"> arXiv:2302.04406 </a> (replaced) [<a href="/pdf/2302.04406" title="Download PDF" id="pdf-2302.04406" aria-labelledby="pdf-2302.04406">pdf</a>, <a href="https://arxiv.org/html/2302.04406v3" title="View HTML" id="html-2302.04406" aria-labelledby="html-2302.04406" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2302.04406" title="Other formats" id="oth-2302.04406" aria-labelledby="oth-2302.04406">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Neural Architecture Search: Two Constant Shared Weights Initialisations </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Gracheva,+E">Ekaterina Gracheva</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> In the last decade, zero-cost metrics have gained prominence in neural architecture search (NAS) due to their ability to evaluate architectures without training. These metrics are significantly faster and less computationally expensive than traditional NAS methods and provide insights into neural architectures' internal workings. This paper introduces epsinas, a novel zero-cost NAS metric that assesses architecture potential using two constant shared weight initialisations and the statistics of their outputs. We show that the dispersion of raw outputs, normalised by their average magnitude, strongly correlates with trained accuracy. This effect holds across image classification and language tasks on NAS-Bench-101, NAS-Bench-201, and NAS-Bench-NLP. Our method requires no data labels, operates on a single minibatch, and eliminates the need for gradient computation, making it independent of training hyperparameters, loss metrics, and human annotations. It evaluates a network in a fraction of a GPU second and integrates seamlessly into existing NAS frameworks. The code supporting this study can be found on GitHub at <a href="https://github.com/egracheva/epsinas" rel="external noopener nofollow" class="link-external link-https">this https URL</a>. </p> </div> </dd> <dt> <a name='item136'>[136]</a> <a href ="/abs/2304.02549" title="Abstract" id="2304.02549"> arXiv:2304.02549 </a> (replaced) [<a href="/pdf/2304.02549" title="Download PDF" id="pdf-2304.02549" aria-labelledby="pdf-2304.02549">pdf</a>, <a href="https://arxiv.org/html/2304.02549v2" title="View HTML" id="html-2304.02549" aria-labelledby="html-2304.02549" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2304.02549" title="Other formats" id="oth-2304.02549" aria-labelledby="oth-2304.02549">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Self-Supervised Siamese Autoencoders </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Baier,+F">Friederike Baier</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mair,+S">Sebastian Mair</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Fadel,+S+G">Samuel G. 
Fadel</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 13 pages, 7 figures, accepted at Intelligent Data Analysis (IDA 2024) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Computer Vision and Pattern Recognition (cs.CV); Machine Learning (stat.ML) </div> <p class='mathjax'> In contrast to fully-supervised models, self-supervised representation learning only needs a fraction of data to be labeled and often achieves the same or even higher downstream performance. The goal is to pre-train deep neural networks on a self-supervised task, making them able to extract meaningful features from raw input data afterwards. Previously, autoencoders and Siamese networks have been successfully employed as feature extractors for tasks such as image classification. However, both have their individual shortcomings and benefits. In this paper, we combine their complementary strengths by proposing a new method called SidAE (Siamese denoising autoencoder). Using an image classification downstream task, we show that our model outperforms two self-supervised baselines across multiple data sets and scenarios. Crucially, this includes conditions in which only a small amount of labeled data is available. Empirically, the Siamese component has more impact, but the denoising autoencoder is nevertheless necessary to improve performance. </p> </div> </dd> <dt> <a name='item137'>[137]</a> <a href ="/abs/2305.15203" title="Abstract" id="2305.15203"> arXiv:2305.15203 </a> (replaced) [<a href="/pdf/2305.15203" title="Download PDF" id="pdf-2305.15203" aria-labelledby="pdf-2305.15203">pdf</a>, <a href="https://arxiv.org/html/2305.15203v3" title="View HTML" id="html-2305.15203" aria-labelledby="html-2305.15203" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2305.15203" title="Other formats" id="oth-2305.15203" aria-labelledby="oth-2305.15203">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Frequency maps reveal the correlation between Adversarial Attacks and Implicit Bias </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Basile,+L">Lorenzo Basile</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Karantzas,+N">Nikos Karantzas</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=d'Onofrio,+A">Alberto d'Onofrio</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Manzoni,+L">Luca Manzoni</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bortolussi,+L">Luca Bortolussi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Rodriguez,+A">Alex Rodriguez</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Anselmi,+F">Fabio Anselmi</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted at IJCNN 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Cryptography and Security (cs.CR); Machine Learning (stat.ML) </div> <p class='mathjax'> Despite their impressive performance in classification tasks, neural networks are known to be vulnerable to adversarial attacks, subtle perturbations of the input data designed to deceive the model. 
In this work, we investigate the correlation between these perturbations and the implicit bias of neural networks trained with gradient-based algorithms. To this end, we analyse a representation of the network's implicit bias through the lens of the Fourier transform. Specifically, we identify unique fingerprints of implicit bias and adversarial attacks by calculating the minimal, essential frequencies needed for accurate classification of each image, as well as the frequencies that drive misclassification in its adversarially perturbed counterpart. This approach enables us to uncover and analyse the correlation between these essential frequencies, providing a precise map of how the network's biases align or contrast with the frequency components exploited by adversarial attacks. For this analysis, among other methods, we use a newly introduced technique capable of detecting nonlinear correlations between high-dimensional datasets. Our results provide empirical evidence that the network bias in Fourier space and the target frequencies of adversarial attacks are highly correlated and suggest new potential strategies for adversarial defence. </p> </div> </dd> <dt> <a name='item138'>[138]</a> <a href ="/abs/2311.01759" title="Abstract" id="2311.01759"> arXiv:2311.01759 </a> (replaced) [<a href="/pdf/2311.01759" title="Download PDF" id="pdf-2311.01759" aria-labelledby="pdf-2311.01759">pdf</a>, <a href="https://arxiv.org/html/2311.01759v2" title="View HTML" id="html-2311.01759" aria-labelledby="html-2311.01759" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2311.01759" title="Other formats" id="oth-2311.01759" aria-labelledby="oth-2311.01759">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> TinyFormer: Efficient Transformer Design and Deployment on Tiny Devices </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Yang,+J">Jianlei Yang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liao,+J">Jiacheng Liao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lei,+F">Fanding Lei</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+M">Meichen Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+J">Junyi Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Long,+L">Lingkun Long</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wan,+H">Han Wan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yu,+B">Bei Yu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhao,+W">Weisheng Zhao</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> This work has been submitted to the IEEE for possible publication </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Hardware Architecture (cs.AR) </div> <p class='mathjax'> Developing deep learning models on tiny devices (e.g. Microcontroller units, MCUs) has attracted much attention in various embedded IoT applications. However, it is challenging to efficiently design and deploy recent advanced models (e.g. transformers) on tiny devices due to their severe hardware resource constraints. In this work, we propose TinyFormer, a framework specifically designed to develop and deploy resource-efficient transformers on MCUs. TinyFormer mainly consists of SuperNAS, SparseNAS and SparseEngine.
Specifically, SuperNAS aims to search for an appropriate supernet from a vast search space. SparseNAS evaluates the best sparse single-path model, including the transformer architecture, from the identified supernet. Finally, SparseEngine efficiently deploys the searched sparse models onto MCUs. To the best of our knowledge, SparseEngine is the first deployment framework capable of performing inference of sparse transformer models on MCUs. Evaluation results on the CIFAR-10 dataset demonstrate that TinyFormer can develop efficient transformers with an accuracy of 96.1% while adhering to hardware constraints of 1MB storage and $320$KB memory. Additionally, TinyFormer achieves significant speedups in sparse inference, up to 12.2x, when compared to the CMSIS-NN library. TinyFormer is believed to bring powerful transformers into TinyML scenarios and greatly expand the scope of deep learning applications. </p> </div> </dd> <dt> <a name='item139'>[139]</a> <a href ="/abs/2312.10431" title="Abstract" id="2312.10431"> arXiv:2312.10431 </a> (replaced) [<a href="/pdf/2312.10431" title="Download PDF" id="pdf-2312.10431" aria-labelledby="pdf-2312.10431">pdf</a>, <a href="https://arxiv.org/html/2312.10431v5" title="View HTML" id="html-2312.10431" aria-labelledby="html-2312.10431" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2312.10431" title="Other formats" id="oth-2312.10431" aria-labelledby="oth-2312.10431">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Continuous Diffusion for Mixed-Type Tabular Data </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Mueller,+M">Markus Mueller</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gruber,+K">Kathrin Gruber</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Fok,+D">Dennis Fok</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> published at ICLR 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Machine Learning (stat.ML) </div> <p class='mathjax'> Score-based generative models, commonly referred to as diffusion models, have proven to be successful at generating text and image data. However, their adaptation to mixed-type tabular data remains underexplored. In this work, we propose CDTD, a Continuous Diffusion model for mixed-type Tabular Data. CDTD is based on a novel combination of score matching and score interpolation to enforce a unified continuous noise distribution for both continuous and categorical features. We explicitly acknowledge the necessity of homogenizing distinct data types by relying on model-specific loss calibration and initialization schemes. To further address the high heterogeneity in mixed-type tabular data, we introduce adaptive feature- or type-specific noise schedules. These ensure balanced generative performance across features and optimize the allocation of model capacity across features and diffusion time. Our experimental results show that CDTD consistently outperforms state-of-the-art benchmark models, captures feature correlations exceptionally well, and that heterogeneity in the noise schedule design boosts sample quality.
Replication code is available at <a href="https://github.com/muellermarkus/cdtd" rel="external noopener nofollow" class="link-external link-https">this https URL</a>. </p> </div> </dd> <dt> <a name='item140'>[140]</a> <a href ="/abs/2312.16379" title="Abstract" id="2312.16379"> arXiv:2312.16379 </a> (replaced) [<a href="/pdf/2312.16379" title="Download PDF" id="pdf-2312.16379" aria-labelledby="pdf-2312.16379">pdf</a>, <a href="https://arxiv.org/html/2312.16379v2" title="View HTML" id="html-2312.16379" aria-labelledby="html-2312.16379" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2312.16379" title="Other formats" id="oth-2312.16379" aria-labelledby="oth-2312.16379">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Photovoltaic power forecasting using quantum machine learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Sagingalieva,+A">Asel Sagingalieva</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Komornyik,+S">Stefan Komornyik</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Joshi,+A">Ayush Joshi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mansell,+C">Christopher Mansell</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Pinto,+K">Karan Pinto</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Pflitsch,+M">Markus Pflitsch</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Melnikov,+A">Alexey Melnikov</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 12 pages, 4 figures, 1 table </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Emerging Technologies (cs.ET); Quantum Physics (quant-ph) </div> <p class='mathjax'> Predicting solar panel power output is crucial for advancing the transition to renewable energy but is complicated by the variable and non-linear nature of solar energy. This is influenced by numerous meteorological factors, geographical positioning, and photovoltaic cell properties, posing significant challenges to forecasting accuracy and grid stability. Our study introduces a suite of solutions centered around hybrid quantum neural networks designed to tackle these complexities. The first proposed model, the Hybrid Quantum Long Short-Term Memory, surpasses all tested models by achieving mean absolute errors and mean squared errors that are more than 40% lower. The second proposed model, the Hybrid Quantum Sequence-to-Sequence neural network, once trained, predicts photovoltaic power with 16% lower mean absolute error for arbitrary time intervals without the need for prior meteorological data, highlighting its versatility. Moreover, our hybrid models perform better even when trained on limited datasets, underlining their potential utility in data-scarce scenarios. These findings represent progress towards resolving time series prediction challenges in energy forecasting through hybrid quantum models, showcasing the transformative potential of quantum machine learning in catalyzing the renewable energy transition. 
</p> </div> </dd> <dt> <a name='item141'>[141]</a> <a href ="/abs/2402.04051" title="Abstract" id="2402.04051"> arXiv:2402.04051 </a> (replaced) [<a href="/pdf/2402.04051" title="Download PDF" id="pdf-2402.04051" aria-labelledby="pdf-2402.04051">pdf</a>, <a href="https://arxiv.org/html/2402.04051v5" title="View HTML" id="html-2402.04051" aria-labelledby="html-2402.04051" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2402.04051" title="Other formats" id="oth-2402.04051" aria-labelledby="oth-2402.04051">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Analysis of Linear Mode Connectivity via Permutation-Based Weight Matching: With Insights into Other Permutation Search Methods </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Ito,+A">Akira Ito</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yamada,+M">Masanori Yamada</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kumagai,+A">Atsutoshi Kumagai</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> In Proceedings of the Thirteenth International Conference on Learning Representations (ICLR 2025) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Recently, Ainsworth et al. showed that using weight matching (WM) to minimize the $L^2$ distance in a permutation search of model parameters effectively identifies permutations that satisfy linear mode connectivity (LMC), where the loss along a linear path between two independently trained models with different seeds remains nearly constant. This paper analyzes LMC using WM, which is useful for understanding stochastic gradient descent's effectiveness and its application in areas like model merging. We first empirically show that permutations found by WM do not significantly reduce the $L^2$ distance between two models, and the occurrence of LMC is not merely due to distance reduction by WM itself. We then demonstrate that permutations can change the directions of the singular vectors, but not the singular values, of the weight matrices in each layer. This finding shows that permutations found by WM primarily align the directions of singular vectors associated with large singular values across models. This alignment brings the singular vectors with large singular values, which determine the model's functionality, closer between the original and merged models, allowing the merged model to retain functionality similar to the original models, thereby satisfying LMC. This paper also analyzes activation matching (AM) in terms of singular vectors and finds that the principle of AM is likely the same as that of WM. Finally, we analyze the difference between WM and the straight-through estimator (STE), a dataset-dependent permutation search method, and show that WM can be more advantageous than STE in achieving LMC among three or more models. 
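</p> <p class='mathjax'> LMC as defined above has a direct operational test: interpolate the weights of the two trained models and check that the loss stays nearly constant along the path. The sketch below assumes model_b has already been permutation-aligned to model_a (e.g., by weight matching); the function name and data interface are illustrative assumptions. </p> <pre><code>
# Sketch: evaluate the loss along the linear path between two models.
import copy
import torch

def loss_along_path(model_a, model_b, loss_fn, x, y, steps=11):
    sa, sb = model_a.state_dict(), model_b.state_dict()
    probe = copy.deepcopy(model_a)
    losses = []
    for lam in torch.linspace(0.0, 1.0, steps):
        probe.load_state_dict({k: (1 - lam) * sa[k] + lam * sb[k] for k in sa})
        with torch.no_grad():
            losses.append(loss_fn(probe(x), y).item())
    return losses  # a roughly flat curve indicates LMC
</code></pre> <p class='mathjax'> A pronounced barrier in the middle of the returned curve indicates that the two solutions are not linearly connected under the chosen permutation.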
</p> </div> </dd> <dt> <a name='item142'>[142]</a> <a href ="/abs/2402.10206" title="Abstract" id="2402.10206"> arXiv:2402.10206 </a> (replaced) [<a href="/pdf/2402.10206" title="Download PDF" id="pdf-2402.10206" aria-labelledby="pdf-2402.10206">pdf</a>, <a href="https://arxiv.org/html/2402.10206v3" title="View HTML" id="html-2402.10206" aria-labelledby="html-2402.10206" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2402.10206" title="Other formats" id="oth-2402.10206" aria-labelledby="oth-2402.10206">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Ising on the Graph: Task-specific Graph Subsampling via the Ising Model </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=B%C3%A5nkestad,+M">Maria Bånkestad</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Andersson,+J+R">Jennifer R. Andersson</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mair,+S">Sebastian Mair</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sj%C3%B6lund,+J">Jens Sjölund</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 29 pages, 22 figures, accepted at the Learning on Graphs conference (LoG 2024) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Reducing a graph while preserving its overall properties is an important problem with many applications. Typically, reduction approaches either remove edges (sparsification) or merge nodes (coarsening) in an unsupervised way with no specific downstream task in mind. In this paper, we present an approach for subsampling graph structures using an Ising model defined on either the nodes or edges and learning the external magnetic field of the Ising model using a graph neural network. Our approach is task-specific as it can learn how to reduce a graph for a specific downstream task in an end-to-end fashion without requiring a differentiable loss function for the task. We showcase the versatility of our approach on four distinct applications: image segmentation, explainability for graph classification, 3D shape sparsification, and sparse approximate matrix inverse determination.
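</p> <p class='mathjax'> The Ising view above reduces to a standard energy function over node spins, with the external magnetic field supplied by the learned network. In the sketch below the field h is taken as given, and the coupling strength, inverse temperature, and choice of Gibbs sampler are illustrative assumptions. </p> <pre><code>
# Sketch of Ising-based graph subsampling: spins in {-1, +1} mark kept nodes.
import numpy as np

def ising_energy(s, adj, h, J=1.0):
    """s: (N,) spins; adj: (N, N) symmetric 0/1 adjacency; h: (N,) field."""
    return -0.5 * J * s @ adj @ s - h @ s

def gibbs_sweep(s, adj, h, J=1.0, beta=1.0):
    for i in np.random.permutation(len(s)):
        local = J * adj[i] @ s + h[i]              # local field at node i
        p_up = 1.0 / (1.0 + np.exp(-2.0 * beta * local))
        s[i] = 1 if p_up > np.random.rand() else -1
    return s  # nodes with spin +1 are retained in the subsampled graph
</code></pre> <p class='mathjax'> In the paper's setting the field h is the output of a graph neural network trained end-to-end for the downstream task, so a sampler like the one above would sit inside a learning loop rather than run with a fixed h.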
</p> </div> </dd> <dt> <a name='item143'>[143]</a> <a href ="/abs/2403.02437" title="Abstract" id="2403.02437"> arXiv:2403.02437 </a> (replaced) [<a href="/pdf/2403.02437" title="Download PDF" id="pdf-2403.02437" aria-labelledby="pdf-2403.02437">pdf</a>, <a href="/format/2403.02437" title="Other formats" id="oth-2403.02437" aria-labelledby="oth-2403.02437">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Survey on Federated Unlearning: Challenges and Opportunities </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Jeong,+H">Hyejun Jeong</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ma,+S">Shiqing Ma</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Houmansadr,+A">Amir Houmansadr</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Distributed, Parallel, and Cluster Computing (cs.DC) </div> <p class='mathjax'> Federated learning (FL), introduced in 2017, facilitates collaborative learning between non-trusting parties with no need for the parties to explicitly share their data among themselves. This allows training models on user data while respecting privacy regulations such as GDPR and CPRA. However, emerging privacy requirements may mandate model owners to be able to \emph{forget} some learned data, e.g., when requested by data owners or law enforcement. This has given birth to an active field of research called \emph{machine unlearning}. In the context of FL, many techniques developed for unlearning in centralized settings are not trivially applicable! This is due to the unique differences between centralized and distributed learning, in particular, interactivity, stochasticity, heterogeneity, and limited accessibility in FL. In response, a recent line of work has focused on developing unlearning mechanisms tailored to FL. <br>This SoK paper aims to take a deep look at the \emph{federated unlearning} literature, with the goal of identifying research trends and challenges in this emerging field. By carefully categorizing papers published on FL unlearning (since 2020), we aim to pinpoint the unique complexities of federated unlearning, highlighting limitations on directly applying centralized unlearning methods. We compare existing federated unlearning methods regarding influence removal and performance recovery, compare their threat models and assumptions, and discuss their implications and limitations. For instance, we analyze the experimental setup of FL unlearning studies from various perspectives, including data heterogeneity and its simulation, the datasets used for demonstration, and evaluation metrics. Our work aims to offer insights and suggestions for future research on federated unlearning. 
</p> </div> </dd> <dt> <a name='item144'>[144]</a> <a href ="/abs/2403.07300" title="Abstract" id="2403.07300"> arXiv:2403.07300 </a> (replaced) [<a href="/pdf/2403.07300" title="Download PDF" id="pdf-2403.07300" aria-labelledby="pdf-2403.07300">pdf</a>, <a href="https://arxiv.org/html/2403.07300v3" title="View HTML" id="html-2403.07300" aria-labelledby="html-2403.07300" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2403.07300" title="Other formats" id="oth-2403.07300" aria-labelledby="oth-2403.07300">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> CALF: Aligning LLMs for Time Series Forecasting via Cross-modal Fine-Tuning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+P">Peiyuan Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Guo,+H">Hang Guo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dai,+T">Tao Dai</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+N">Naiqi Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bao,+J">Jigang Bao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ren,+X">Xudong Ren</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jiang,+Y">Yong Jiang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xia,+S">Shu-Tao Xia</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Computation and Language (cs.CL) </div> <p class='mathjax'> Deep learning (e.g., Transformer) has been widely and successfully used in multivariate time series forecasting (MTSF). Unlike existing methods that focus on training models from a single modal of time series input, large language models (LLMs) based MTSF methods with cross-modal text and time series input have recently shown great superiority, especially with limited temporal data. However, current LLM-based MTSF methods usually focus on adapting and fine-tuning LLMs, while neglecting the distribution discrepancy between textual and temporal input tokens, thus leading to sub-optimal performance. To address this issue, we propose a novel Cross-Modal LLM Fine-Tuning (CALF) framework for MTSF by reducing the distribution discrepancy between textual and temporal data, which mainly consists of the temporal target branch with temporal input and the textual source branch with aligned textual input. To reduce the distribution discrepancy, we develop the cross-modal match module to first align cross-modal input distributions. Additionally, to minimize the modality distribution gap in both feature and output spaces, feature regularization loss is developed to align the intermediate features between the two branches for better weight updates, while output consistency loss is introduced to allow the output representations of both branches to correspond effectively. Thanks to the modality alignment, CALF establishes state-of-the-art performance for both long-term and short-term forecasting tasks with low computational complexity, while exhibiting favorable few-shot and zero-shot abilities similar to those of LLMs. Code is available at <a href="https://github.com/Hank0626/LLaTA" rel="external noopener nofollow" class="link-external link-https">this https URL</a>.
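</p> <p class='mathjax'> The two alignment losses described above combine naturally with the supervised objective into a single training loss. The sketch below is one plausible combination; the choice of MSE for each term, the stop-gradient on the textual branch, and the weighting coefficients are assumptions, not the paper's exact design. </p> <pre><code>
# Hedged sketch of a CALF-style combined objective.
import torch.nn.functional as F

def calf_style_loss(pred, target, feat_time, feat_text, out_time, out_text,
                    lam_feat=0.1, lam_out=0.1):
    supervised = F.mse_loss(pred, target)                 # forecasting loss
    feat_reg = F.mse_loss(feat_time, feat_text.detach())  # feature regularization
    out_cons = F.mse_loss(out_time, out_text.detach())    # output consistency
    return supervised + lam_feat * feat_reg + lam_out * out_cons
</code></pre> <p class='mathjax'> Detaching the textual branch makes it act as a teacher: gradients from the alignment terms then update only the temporal branch, which is one common way to realize the kind of cross-modal alignment the abstract describes.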
</p> </div> </dd> <dt> <a name='item145'>[145]</a> <a href ="/abs/2404.08624" title="Abstract" id="2404.08624"> arXiv:2404.08624 </a> (replaced) [<a href="/pdf/2404.08624" title="Download PDF" id="pdf-2404.08624" aria-labelledby="pdf-2404.08624">pdf</a>, <a href="https://arxiv.org/html/2404.08624v2" title="View HTML" id="html-2404.08624" aria-labelledby="html-2404.08624" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2404.08624" title="Other formats" id="oth-2404.08624" aria-labelledby="oth-2404.08624">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Regularized Gradient Clipping Provably Trains Wide and Deep Neural Networks </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Tucat,+M">Matteo Tucat</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mukherjee,+A">Anirbit Mukherjee</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sen,+P">Procheta Sen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sun,+M">Mingfei Sun</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Rivasplata,+O">Omar Rivasplata</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 20 pages </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Optimization and Control (math.OC) </div> <p class='mathjax'> We present and analyze a novel regularized form of the gradient clipping algorithm, proving that it converges to global minima of the loss surface of deep neural networks under the squared loss, provided that the layers are of sufficient width. The algorithm presented here, dubbed $\delta-$GClip, introduces a modification to gradient clipping that leads to a first-of-its-kind example of a step size scheduling for gradient descent that provably minimizes training losses of deep neural nets. We also present empirical evidence that our theoretically founded $\delta-$GClip algorithm is competitive with the state-of-the-art deep learning heuristics on various neural architectures including modern transformer based architectures. The modification we make to standard gradient clipping is designed to leverage the PL* condition, a variant of the Polyak-Lojasiewicz inequality which was recently proven to be true for sufficiently wide neural networks at any depth within a neighbourhood of the initialization.
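</p> <p class='mathjax'> Read from the abstract, the modification amounts to flooring the usual clipping factor so the effective step size can never vanish. The sketch below implements that reading; the exact functional form of $\delta-$GClip in the paper may differ, and eta, gamma, and delta are illustrative hyperparameters. </p> <pre><code>
# Sketch of a delta-regularized clipping step: the standard factor
# min(1, gamma/||g||) is floored at delta > 0, so the step never vanishes.
import torch

def delta_gclip_step(params, eta=0.1, gamma=1.0, delta=0.01):
    grads = [p.grad for p in params if p.grad is not None]
    norm = torch.sqrt(sum((g ** 2).sum() for g in grads))
    scale = min(1.0, max(gamma / (norm.item() + 1e-12), delta))
    with torch.no_grad():
        for p in params:
            if p.grad is not None:
                p.add_(p.grad, alpha=-eta * scale)
</code></pre> <p class='mathjax'> With delta set to zero this reduces to standard gradient clipping; the positive floor is what lets the analysis leverage the PL* condition mentioned above.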
</p> </div> </dd> <dt> <a name='item146'>[146]</a> <a href ="/abs/2404.16792" title="Abstract" id="2404.16792"> arXiv:2404.16792 </a> (replaced) [<a href="/pdf/2404.16792" title="Download PDF" id="pdf-2404.16792" aria-labelledby="pdf-2404.16792">pdf</a>, <a href="https://arxiv.org/html/2404.16792v3" title="View HTML" id="html-2404.16792" aria-labelledby="html-2404.16792" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2404.16792" title="Other formats" id="oth-2404.16792" aria-labelledby="oth-2404.16792">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Model Extrapolation Expedites Alignment </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zheng,+C">Chujie Zheng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+Z">Ziqi Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ji,+H">Heng Ji</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Huang,+M">Minlie Huang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Peng,+N">Nanyun Peng</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Computation and Language (cs.CL) </div> <p class='mathjax'> Given the high computational cost of preference alignment training of large language models (LLMs), exploring efficient methods to reduce the training overhead remains an important and compelling research problem. Motivated by the observation that alignment training typically involves only small parameter changes without injecting new knowledge into models, we propose a straightforward method called ExPO (model extrapolation) to expedite LLMs' alignment with human preferences. Given a partially-trained model and its initial SFT checkpoint, ExPO improves the implicit optimization objective of alignment training by simply amplifying the parameter change based on a first-order approximation, without any additional training overhead. Through controlled experiments, we demonstrate that ExPO boosts a DPO model trained with only 20% of the steps to outperform the fully-trained one. Moreover, we show that ExPO notably improves existing open-source LLMs (ranging from 1.8B to 70B parameters) on the leading AlpacaEval 2.0 and MT-Bench benchmarks, which highlights ExPO's broader utility in efficiently enhancing LLM alignment. </p> </div> </dd> <dt> <a name='item147'>[147]</a> <a href ="/abs/2405.00746" title="Abstract" id="2405.00746"> arXiv:2405.00746 </a> (replaced) [<a href="/pdf/2405.00746" title="Download PDF" id="pdf-2405.00746" aria-labelledby="pdf-2405.00746">pdf</a>, <a href="https://arxiv.org/html/2405.00746v2" title="View HTML" id="html-2405.00746" aria-labelledby="html-2405.00746" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2405.00746" title="Other formats" id="oth-2405.00746" aria-labelledby="oth-2405.00746">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Leveraging Sub-Optimal Data for Human-in-the-Loop Reinforcement Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Muslimani,+C">Calarina Muslimani</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Taylor,+M+E">Matthew E. 
Taylor</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Robotics (cs.RO) </div> <p class='mathjax'> To create useful reinforcement learning (RL) agents, step zero is to design a suitable reward function that captures the nuances of the task. However, reward engineering can be a difficult and time-consuming process. Instead, human-in-the-loop RL methods hold the promise of learning reward functions from human feedback. Despite recent successes, many of the human-in-the-loop RL methods still require numerous human interactions to learn successful reward functions. To improve the feedback efficiency of human-in-the-loop RL methods (i.e., require less human interaction), this paper introduces Sub-optimal Data Pre-training, SDP, an approach that leverages reward-free, sub-optimal data to improve scalar- and preference-based RL algorithms. In SDP, we start by pseudo-labeling all low-quality data with the minimum environment reward. Through this process, we obtain reward labels to pre-train our reward model without requiring human labeling or preferences. This pre-training phase gives the reward model a head start in learning, enabling it to recognize that low-quality transitions should be assigned low rewards. Through extensive experiments with both simulated and human teachers, we find that SDP can at least match, and often significantly improve upon, state-of-the-art human-in-the-loop RL performance across a variety of simulated robotic tasks. </p> </div> </dd> <dt> <a name='item148'>[148]</a> <a href ="/abs/2405.16391" title="Abstract" id="2405.16391"> arXiv:2405.16391 </a> (replaced) [<a href="/pdf/2405.16391" title="Download PDF" id="pdf-2405.16391" aria-labelledby="pdf-2405.16391">pdf</a>, <a href="https://arxiv.org/html/2405.16391v3" title="View HTML" id="html-2405.16391" aria-labelledby="html-2405.16391" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2405.16391" title="Other formats" id="oth-2405.16391" aria-labelledby="oth-2405.16391">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> When does compositional structure yield compositional generalization? A kernel theory </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Lippl,+S">Samuel Lippl</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Stachenfeld,+K">Kim Stachenfeld</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Published at ICLR 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Neurons and Cognition (q-bio.NC) </div> <p class='mathjax'> Compositional generalization (the ability to respond correctly to novel combinations of familiar components) is thought to be a cornerstone of intelligent behavior. Compositionally structured (e.g. disentangled) representations support this ability; however, the conditions under which they are sufficient for the emergence of compositional generalization remain unclear. To address this gap, we present a theory of compositional generalization in kernel models with fixed, compositionally structured representations. This provides a tractable framework for characterizing the impact of training data statistics on generalization. 
We find that these models are limited to functions that assign values to each combination of components seen during training, and then sum up these values ("conjunction-wise additivity"). This imposes fundamental restrictions on the set of tasks compositionally structured kernel models can learn, in particular preventing them from transitively generalizing equivalence relations. Even for compositional tasks that they can learn in principle, we identify novel failure modes in compositional generalization (memorization leak and shortcut bias) that arise from biases in the training data. Finally, we empirically validate our theory, showing that it captures the behavior of deep neural networks (convolutional networks, residual networks, and Vision Transformers) trained on a set of compositional tasks with similarly structured data. Ultimately, this work examines how statistical structure in the training data can affect compositional generalization, with implications for how to identify and remedy failure modes in deep learning models. </p> </div> </dd> <dt> <a name='item149'>[149]</a> <a href ="/abs/2405.20445" title="Abstract" id="2405.20445"> arXiv:2405.20445 </a> (replaced) [<a href="/pdf/2405.20445" title="Download PDF" id="pdf-2405.20445" aria-labelledby="pdf-2405.20445">pdf</a>, <a href="https://arxiv.org/html/2405.20445v5" title="View HTML" id="html-2405.20445" aria-labelledby="html-2405.20445" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2405.20445" title="Other formats" id="oth-2405.20445" aria-labelledby="oth-2405.20445">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Fully-inductive Node Classification on Arbitrary Graphs </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zhao,+J">Jianan Zhao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhu,+Z">Zhaocheng Zhu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Galkin,+M">Mikhail Galkin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mostafa,+H">Hesham Mostafa</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bronstein,+M">Michael Bronstein</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Tang,+J">Jian Tang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> ICLR2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Social and Information Networks (cs.SI) </div> <p class='mathjax'> One fundamental challenge in graph machine learning is generalizing to new graphs. Many existing methods following the inductive setup can generalize to test graphs with new structures, but assuming the feature and label spaces remain the same as the training ones. This paper introduces a fully-inductive setup, where models should perform inference on arbitrary test graphs with new structures, feature and label spaces. We propose GraphAny as the first attempt at this challenging setup. GraphAny models inference on a new graph as an analytical solution to a LinearGNN, which can be naturally applied to graphs with any feature and label spaces. To further build a stronger model with learning capacity, we fuse multiple LinearGNN predictions with learned inductive attention scores. 
Specifically, the attention module is carefully parameterized as a function of the entropy-normalized distance features between pairs of LinearGNN predictions to ensure generalization to new graphs. Empirically, GraphAny trained on a single Wisconsin dataset with only 120 labeled nodes can generalize to 30 new graphs with an average accuracy of 67.26%, surpassing not only all inductive baselines, but also strong transductive methods trained separately on each of the 30 test graphs. </p> </div> </dd> <dt> <a name='item150'>[150]</a> <a href ="/abs/2406.06984" title="Abstract" id="2406.06984"> arXiv:2406.06984 </a> (replaced) [<a href="/pdf/2406.06984" title="Download PDF" id="pdf-2406.06984" aria-labelledby="pdf-2406.06984">pdf</a>, <a href="https://arxiv.org/html/2406.06984v3" title="View HTML" id="html-2406.06984" aria-labelledby="html-2406.06984" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2406.06984" title="Other formats" id="oth-2406.06984" aria-labelledby="oth-2406.06984">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> On the Hölder Stability of Multiset and Graph Neural Networks </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Davidson,+Y">Yair Davidson</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dym,+N">Nadav Dym</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Extensive research efforts have been put into characterizing and constructing maximally separating multiset and graph neural networks. However, recent empirical evidence suggests that the notion of separation itself doesn't capture several interesting phenomena. On the one hand, the quality of this separation may be very weak, to the extent that the embeddings of "separable" objects might even be considered identical when using fixed finite precision. On the other hand, architectures which aren't capable of separation in theory somehow achieve separation when the network is taken to be wide enough. <br>In this work, we address both of these issues by proposing a novel pair-wise separation quality analysis framework based on an adaptation of Lipschitz and Hölder stability to parametric functions. The proposed framework, which we name Hölder in expectation, allows for separation quality analysis without restricting the analysis to embeddings that can separate the entire input space simultaneously. We prove that common sum-based models are lower-Hölder in expectation, with an exponent that decays rapidly with the network's depth. Our analysis leads to adversarial examples of graphs which can be separated by three 1-WL iterations, but cannot be separated in practice by standard maximally powerful Message Passing Neural Networks (MPNNs). To remedy this, we propose two novel MPNNs with improved separation quality, one of which is lower-Lipschitz in expectation. We show that these MPNNs can easily classify our adversarial examples, and compare favorably with standard MPNNs on standard graph learning tasks. 
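</p> <p class='mathjax'> For orientation: a lower-Hölder bound, in its standard form (our paraphrase, not the paper's exact statement), asserts that an embedding $f$ cannot contract distances faster than a power law, $$\|f(X)-f(Y)\| \;\geq\; c\, d(X,Y)^{\alpha},$$ and the "in expectation" variant plausibly replaces the left-hand side with its expectation over the network's random parameters, $\mathbb{E}_{\theta}\,\|f_{\theta}(X)-f_{\theta}(Y)\| \geq c\, d(X,Y)^{\alpha}$. A large exponent $\alpha$, or one that decays with depth as proven above, means separation quality degrades sharply. </p> <p class='mathjax'>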
</p> </div> </dd> <dt> <a name='item151'>[151]</a> <a href ="/abs/2406.11917" title="Abstract" id="2406.11917"> arXiv:2406.11917 </a> (replaced) [<a href="/pdf/2406.11917" title="Download PDF" id="pdf-2406.11917" aria-labelledby="pdf-2406.11917">pdf</a>, <a href="https://arxiv.org/html/2406.11917v2" title="View HTML" id="html-2406.11917" aria-labelledby="html-2406.11917" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2406.11917" title="Other formats" id="oth-2406.11917" aria-labelledby="oth-2406.11917">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Modulated Differentiable STFT and Balanced Spectrum Metric for Freight Train Wheelset Bearing Cross-machine Transfer Fault Diagnosis under Speed Fluctuations </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=He,+C">Chao He</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shi,+H">Hongmei Shi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+R">Ruixin Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+J">Jianbo Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yu,+Z">ZuJun Yu</a></div> <div class='list-journal-ref'><span class='descriptor'>Journal-ref:</span> Advanced Engineering Informatics 62 (2024) 102568 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Signal Processing (eess.SP) </div> <p class='mathjax'> As key components, the service conditions of wheelset bearings have a direct impact on the safe operation of railway heavy haul freight trains. However, speed fluctuation of the trains and scarce fault samples are the two main problems that restrict the accuracy of bearing fault diagnosis. Therefore, a cross-machine transfer diagnosis (pyDSN) network, coupled with an interpretable modulated differentiable short-time Fourier transform (STFT) and a physics-informed balanced spectrum quality metric, is proposed to learn domain-invariant and discriminative features under time-varying speeds. Firstly, since fixed windows are insufficient for extracting the frequency components of time-varying speed signals, a modulated differentiable STFT (MDSTFT), which is interpretable with STFT-informed theoretical support, is proposed to extract a robust time-frequency spectrum (TFS); during the training process, multiple windows with different lengths change dynamically. Also, in addition to the classification metric and the domain discrepancy metric, we introduce a third kind of metric, referred to as the physics-informed metric, to enhance the transferable TFS. A physics-informed balanced spectrum quality (BSQ) regularization loss is devised to guide the optimization direction for the MDSTFT and the model. With it, the model not only acquires a high-quality TFS but also becomes a physics-restricted domain adaptation network that learns real-world physics knowledge, ultimately diminishing the domain discrepancy across different datasets. Experiments are conducted in the scenario of migrating from laboratory datasets to a freight train dataset, indicating that the hybrid-driven pyDSN outperforms existing methods and has practical value. 
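</p> <p class='mathjax'> A minimal sketch of the multi-window idea behind a differentiable STFT: several fixed window lengths combined with learnable softmax weights so that gradients can select the effective resolution. The class name and the soft-weighting scheme are our assumptions for illustration, not the paper's exact MDSTFT construction: </p> <pre>
import torch

class MultiWindowSTFT(torch.nn.Module):
    """Soft combination of STFTs at several window lengths (sketch)."""
    def __init__(self, win_lengths=(128, 256, 512), n_fft=512, hop=128):
        super().__init__()
        self.win_lengths, self.n_fft, self.hop = win_lengths, n_fft, hop
        # One learnable logit per candidate window length.
        self.weights = torch.nn.Parameter(torch.zeros(len(win_lengths)))

    def forward(self, x):                        # x: (batch, samples)
        w = torch.softmax(self.weights, dim=0)   # convex combination
        specs = [torch.stft(x, self.n_fft, self.hop, win_length=l,
                            window=torch.hann_window(l, device=x.device),
                            return_complex=True).abs()
                 for l in self.win_lengths]
        return sum(wi * s for wi, s in zip(w, specs))
</pre> <p class='mathjax'>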
</p> </div> </dd> <dt> <a name='item152'>[152]</a> <a href ="/abs/2406.12065" title="Abstract" id="2406.12065"> arXiv:2406.12065 </a> (replaced) [<a href="/pdf/2406.12065" title="Download PDF" id="pdf-2406.12065" aria-labelledby="pdf-2406.12065">pdf</a>, <a href="https://arxiv.org/html/2406.12065v2" title="View HTML" id="html-2406.12065" aria-labelledby="html-2406.12065" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2406.12065" title="Other formats" id="oth-2406.12065" aria-labelledby="oth-2406.12065">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> STNAGNN: Data-driven Spatio-temporal Brain Connectivity beyond FC </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+J">Jiyao Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dvornek,+N+C">Nicha C. Dvornek</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Duan,+P">Peiyu Duan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Staib,+L+H">Lawrence H. Staib</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ventola,+P">Pamela Ventola</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Duncan,+J+S">James S. Duncan</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Neurons and Cognition (q-bio.NC) </div> <p class='mathjax'> In recent years, graph neural networks (GNNs) have been widely applied in the analysis of brain fMRI, yet defining the connectivity between ROIs remains a challenge in noisy fMRI data. Among all approaches, Functional Connectome (FC) is the most popular method. Computed by the correlation coefficients between ROI time series, FC is a powerful and computationally efficient way to estimate ROI connectivity. However, it is well known for neglecting structural connections and causality in ROI interactions. Also, FC becomes much more noisy in the short spatio-temporal sliding-window subsequences of fMRI. Effective Connectome (EC) is proposed as a directional alternative, but is difficult to accurately estimate. Furthermore, for optimal GNN performance, usually only a small percentage of the strongest connections are selected as sparse edges, resulting in oversimplification of complex brain connections. To tackle these challenges, we propose the Spatio-Temporal Node Attention Graph Neural Network (STNAGNN) as a data-driven alternative that combines sparse predefined FC with dense data-driven spatio-temporal connections, allowing for flexible and spatio-temporal learning of ROI interaction patterns. 
</p> </div> </dd> <dt> <a name='item153'>[153]</a> <a href ="/abs/2406.15341" title="Abstract" id="2406.15341"> arXiv:2406.15341 </a> (replaced) [<a href="/pdf/2406.15341" title="Download PDF" id="pdf-2406.15341" aria-labelledby="pdf-2406.15341">pdf</a>, <a href="https://arxiv.org/html/2406.15341v3" title="View HTML" id="html-2406.15341" aria-labelledby="html-2406.15341" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2406.15341" title="Other formats" id="oth-2406.15341" aria-labelledby="oth-2406.15341">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> GenoTEX: An LLM Agent Benchmark for Automated Gene Expression Data Analysis </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+H">Haoyang Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+S">Shuyu Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+Y">Ye Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+H">Haohan Wang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 31 pages, 4 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Genomics (q-bio.GN) </div> <p class='mathjax'> Recent advancements in machine learning have significantly improved the identification of disease-associated genes from gene expression datasets. However, these processes often require extensive expertise and manual effort, limiting their scalability. Large Language Model (LLM)-based agents have shown promise in automating these tasks due to their increasing problem-solving abilities. To support the evaluation and development of such methods, we introduce GenoTEX, a benchmark dataset for the automated analysis of gene expression data. GenoTEX provides analysis code and results for solving a wide range of gene-trait association problems, encompassing dataset selection, preprocessing, and statistical analysis, in a pipeline that follows computational genomics standards. The benchmark includes expert-curated annotations from bioinformaticians to ensure accuracy and reliability. To provide baselines for these tasks, we present GenoAgent, a team of LLM-based agents that adopt a multi-step programming workflow with flexible self-correction, to collaboratively analyze gene expression datasets. Our experiments demonstrate the potential of LLM-based methods in analyzing genomic data, while error analysis highlights the challenges and areas for future improvement. We propose GenoTEX as a promising resource for benchmarking and enhancing automated methods for gene expression data analysis. The benchmark is available at <a href="https://github.com/Liu-Hy/GenoTEX" rel="external noopener nofollow" class="link-external link-https">this https URL</a>. 
</p> </div> </dd> <dt> <a name='item154'>[154]</a> <a href ="/abs/2406.17374" title="Abstract" id="2406.17374"> arXiv:2406.17374 </a> (replaced) [<a href="/pdf/2406.17374" title="Download PDF" id="pdf-2406.17374" aria-labelledby="pdf-2406.17374">pdf</a>, <a href="/format/2406.17374" title="Other formats" id="oth-2406.17374" aria-labelledby="oth-2406.17374">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Generalizability of experimental studies </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Matteucci,+F">Federico Matteucci</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Arzamasov,+V">Vadim Arzamasov</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Cribeiro-Ramallo,+J">Jose Cribeiro-Ramallo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Heyden,+M">Marco Heyden</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ntounas,+K">Konstantin Ntounas</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=B%C3%B6hm,+K">Klemens Böhm</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Under review at TMLR </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Statistics Theory (math.ST) </div> <p class='mathjax'> Experimental studies are a cornerstone of machine learning (ML) research. A common, but often implicit, assumption is that the results of a study will generalize beyond the study itself, e.g. to new data. That is, there is a high probability that repeating the study under different conditions will yield similar results. Despite the importance of the concept, the problem of measuring generalizability remains open. This is probably due to the lack of a mathematical formalization of experimental studies. In this paper, we propose such a formalization and develop a quantifiable notion of generalizability. This notion allows one to explore the generalizability of existing studies and to estimate the number of experiments needed to achieve the generalizability of new studies. To demonstrate its usefulness, we apply it to two recently published benchmarks to discern generalizable and non-generalizable results. We also publish a Python module that allows our analysis to be repeated for other experimental studies. </p> </div> </dd> <dt> <a name='item155'>[155]</a> <a href ="/abs/2406.17811" title="Abstract" id="2406.17811"> arXiv:2406.17811 </a> (replaced) [<a href="/pdf/2406.17811" title="Download PDF" id="pdf-2406.17811" aria-labelledby="pdf-2406.17811">pdf</a>, <a href="https://arxiv.org/html/2406.17811v2" title="View HTML" id="html-2406.17811" aria-labelledby="html-2406.17811" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2406.17811" title="Other formats" id="oth-2406.17811" aria-labelledby="oth-2406.17811">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> CATBench: A Compiler Autotuning Benchmarking Suite for Black-box Optimization </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=T%C3%B8rring,+J+O">Jacob O. 
Tørring</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hvarfner,+C">Carl Hvarfner</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Nardi,+L">Luigi Nardi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sj%C3%A4lander,+M">Magnus Själander</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Neural and Evolutionary Computing (cs.NE) </div> <p class='mathjax'> Bayesian optimization is a powerful method for automating tuning of compilers. The complex landscape of autotuning provides a myriad of rarely considered structural challenges for black-box optimizers, and the lack of standardized benchmarks has limited the study of Bayesian optimization within the domain. To address this, we present CATBench, a comprehensive benchmarking suite that captures the complexities of compiler autotuning, ranging from discrete, conditional, and permutation parameter types to known and unknown binary constraints, as well as both multi-fidelity and multi-objective evaluations. The benchmarks in CATBench span a range of machine learning-oriented computations, from tensor algebra to image processing and clustering, and use state-of-the-art compilers, such as TACO and RISE/ELEVATE. CATBench offers a unified interface for evaluating Bayesian optimization algorithms, promoting reproducibility and innovation through an easy-to-use, fully containerized setup of both surrogate and real-world compiler optimization tasks. We validate CATBench on several state-of-the-art algorithms, revealing their strengths and weaknesses and demonstrating the suite's potential for advancing both Bayesian optimization and compiler autotuning research. </p> </div> </dd> <dt> <a name='item156'>[156]</a> <a href ="/abs/2406.18332" title="Abstract" id="2406.18332"> arXiv:2406.18332 </a> (replaced) [<a href="/pdf/2406.18332" title="Download PDF" id="pdf-2406.18332" aria-labelledby="pdf-2406.18332">pdf</a>, <a href="https://arxiv.org/html/2406.18332v5" title="View HTML" id="html-2406.18332" aria-labelledby="html-2406.18332" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2406.18332" title="Other formats" id="oth-2406.18332" aria-labelledby="oth-2406.18332">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Early Classification of Time Series: Taxonomy and Benchmark </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Renault,+A">Aurélien Renault</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bondu,+A">Alexis Bondu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Cornu%C3%A9jols,+A">Antoine Cornuéjols</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lemaire,+V">Vincent Lemaire</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> In many situations, the measurements of a studied phenomenon are provided sequentially, and the prediction of its class needs to be made as early as possible so as not to incur too high a time penalty, but not too early and risk paying the cost of misclassification. This problem has been particularly studied in the case of time series, and is known as Early Classification of Time Series (ECTS). 
Although it has been the subject of a growing body of literature, there is still a lack of a systematic, shared evaluation protocol to compare the relative merits of the various existing methods. This document begins by situating these methods within a principle-based taxonomy. It defines dimensions for organizing their evaluation, and then reports the results of a very extensive set of experiments along these dimensions involving nine state-of-the-art ECTS algorithms. In addition, these and other experiments can be carried out using an open-source library in which most of the existing ECTS algorithms have been implemented (see <a href="https://github.com/ML-EDM/ml_edm" rel="external noopener nofollow" class="link-external link-https">this https URL</a>). 
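</p> <p class='mathjax'> A toy illustration of the earliness/accuracy trade-off that ECTS methods optimize: a confidence-based trigger that stops as soon as the classifier is sure enough, with the time penalty made explicit. This is a generic baseline sketch, not one of the nine benchmarked algorithms: </p> <pre>
import numpy as np

def early_classify(proba_over_time, conf_threshold=0.9, time_cost=0.01):
    """proba_over_time: (T, n_classes) class probabilities after each prefix.
    Returns (decision_time, predicted_class, time_penalty_paid)."""
    for t, proba in enumerate(proba_over_time):
        if proba.max() >= conf_threshold:    # confident enough: trigger now
            return t, int(np.argmax(proba)), t * time_cost
    t_last = len(proba_over_time) - 1        # forced decision at the end
    return t_last, int(np.argmax(proba_over_time[-1])), t_last * time_cost
</pre> <p class='mathjax'>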
We also introduce and define an evaluation protocol that specifies a range of domain-related metrics, computed based on primary evaluation indicators (such as success rate and path length), enabling a fair multi-fold comparison. The results of this comparison, which involves a variety of state-of-the-art MARL, search-based, and hybrid methods, are presented. </p> </div> </dd> <dt> <a name='item158'>[158]</a> <a href ="/abs/2408.02349" title="Abstract" id="2408.02349"> arXiv:2408.02349 </a> (replaced) [<a href="/pdf/2408.02349" title="Download PDF" id="pdf-2408.02349" aria-labelledby="pdf-2408.02349">pdf</a>, <a href="https://arxiv.org/html/2408.02349v4" title="View HTML" id="html-2408.02349" aria-labelledby="html-2408.02349" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2408.02349" title="Other formats" id="oth-2408.02349" aria-labelledby="oth-2408.02349">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Toward Cost-efficient Adaptive Clinical Trials in Knee Osteoarthritis with Reinforcement Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Nguyen,+K">Khanh Nguyen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Nguyen,+H+H">Huy Hoang Nguyen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Panfilov,+E">Egor Panfilov</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Tiulpin,+A">Aleksei Tiulpin</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Osteoarthritis (OA) is the most common musculoskeletal disease, with knee OA (KOA) being one of the leading causes of disability and a significant economic burden. Predicting KOA progression is crucial for improving patient outcomes, optimizing healthcare resources, studying the disease, and developing new treatments. The latter application particularly requires one to understand the disease progression in order to collect the most informative data at the right time. Existing methods, however, are limited by their static nature and their focus on individual joints, leading to suboptimal predictive performance and downstream utility. Our study proposes a new method that dynamically monitors patients with KOA, rather than individual joints, using a novel Active Sensing (AS) approach powered by Reinforcement Learning (RL). Our key idea is to directly optimize for the downstream task by training an agent that maximizes informative data collection while minimizing overall costs. Our RL-based method leverages a specially designed reward function to monitor disease progression across multiple body parts, employs multimodal deep learning, and requires no human input during testing. Extensive numerical experiments demonstrate that our approach outperforms current state-of-the-art models, paving the way for the next generation of KOA trials. 
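</p> <p class='mathjax'> A schematic of the kind of cost-aware reward such an active-sensing agent might optimize, paying for information gained by an acquisition and charging for its cost. The function names and the entropy-based form are hypothetical illustrations, not the paper's reward function: </p> <pre>
def acquisition_reward(pred_entropy_before, pred_entropy_after,
                       acquisition_cost, cost_weight=1.0):
    # Reward = information gained about disease progression
    #          minus the weighted cost of the examination (assumed form).
    info_gain = pred_entropy_before - pred_entropy_after
    return info_gain - cost_weight * acquisition_cost
</pre> <p class='mathjax'>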
</p> </div> </dd> <dt> <a name='item159'>[159]</a> <a href ="/abs/2409.10489" title="Abstract" id="2409.10489"> arXiv:2409.10489 </a> (replaced) [<a href="/pdf/2409.10489" title="Download PDF" id="pdf-2409.10489" aria-labelledby="pdf-2409.10489">pdf</a>, <a href="https://arxiv.org/html/2409.10489v4" title="View HTML" id="html-2409.10489" aria-labelledby="html-2409.10489" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2409.10489" title="Other formats" id="oth-2409.10489" aria-labelledby="oth-2409.10489">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Flash STU: Fast Spectral Transform Units </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+Y+I">Y. Isabel Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Nguyen,+W">Windsor Nguyen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Devre,+Y">Yagiz Devre</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dogariu,+E">Evan Dogariu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Majumdar,+A">Anirudha Majumdar</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hazan,+E">Elad Hazan</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Recent advances in state-space model architectures have shown great promise for efficient sequence modeling, but challenges remain in balancing computational efficiency with model expressiveness. We propose the Flash STU architecture, a hybrid model that interleaves spectral state space model layers with sliding window attention, enabling scalability to billions of parameters for language modeling while maintaining a near-linear time complexity. We evaluate the Flash STU and its variants on diverse sequence prediction tasks, including linear dynamical systems, robotics control, and language modeling. We find that, given a fixed parameter budget, the Flash STU architecture consistently outperforms the Transformer and other leading state-space models such as S4 and Mamba-2. 
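</p> <p class='mathjax'> A structural sketch of the interleaving described above: alternating spectral state-space layers with sliding-window attention layers. The layer constructors are placeholders for whatever modules implement the two layer types; the actual STU layer is defined in the paper, not here: </p> <pre>
import torch.nn as nn

def build_hybrid_stack(d_model, n_layers, stu_layer_cls, swa_layer_cls):
    """Alternate spectral SSM layers and sliding-window attention layers.
    stu_layer_cls / swa_layer_cls are assumed constructors taking d_model."""
    layers = []
    for i in range(n_layers):
        cls = stu_layer_cls if i % 2 == 0 else swa_layer_cls
        layers.append(cls(d_model))
    return nn.Sequential(*layers)
</pre> <p class='mathjax'>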
</p> </div> </dd> <dt> <a name='item160'>[160]</a> <a href ="/abs/2410.12779" title="Abstract" id="2410.12779"> arXiv:2410.12779 </a> (replaced) [<a href="/pdf/2410.12779" title="Download PDF" id="pdf-2410.12779" aria-labelledby="pdf-2410.12779">pdf</a>, <a href="/format/2410.12779" title="Other formats" id="oth-2410.12779" aria-labelledby="oth-2410.12779">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Geometry-Aware Generative Autoencoders for Warped Riemannian Metric Learning and Generative Modeling on Data Manifolds </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Sun,+X">Xingzhi Sun</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liao,+D">Danqi Liao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=MacDonald,+K">Kincaid MacDonald</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+Y">Yanlei Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+C">Chen Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Huguet,+G">Guillaume Huguet</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wolf,+G">Guy Wolf</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Adelstein,+I">Ian Adelstein</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Rudner,+T+G+J">Tim G. J. Rudner</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Krishnaswamy,+S">Smita Krishnaswamy</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Published in Proceedings of the 28th International Conference on Artificial Intelligence and Statistics (AISTATS 2025) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Differential Geometry (math.DG); Machine Learning (stat.ML) </div> <p class='mathjax'> Rapid growth of high-dimensional datasets in fields such as single-cell RNA sequencing and spatial genomics has led to unprecedented opportunities for scientific discovery, but it also presents unique computational and statistical challenges. Traditional methods struggle with geometry-aware data generation, interpolation along meaningful trajectories, and transporting populations via feasible paths. To address these issues, we introduce Geometry-Aware Generative Autoencoder (GAGA), a novel framework that combines extensible manifold learning with generative modeling. GAGA constructs a neural network embedding space that respects the intrinsic geometries discovered by manifold learning and learns a novel warped Riemannian metric on the data space. This warped metric is derived from both the points on the data manifold and negative samples off the manifold, allowing it to characterize a meaningful geometry across the entire latent space. Using this metric, GAGA can uniformly sample points on the manifold, generate points along geodesics, and interpolate between populations across the learned manifold using geodesic-guided flows. GAGA shows competitive performance in simulated and real-world datasets, including a 30% improvement over the state-of-the-art methods in single-cell population-level trajectory inference. 
</p> </div> </dd> <dt> <a name='item161'>[161]</a> <a href ="/abs/2410.13012" title="Abstract" id="2410.13012"> arXiv:2410.13012 </a> (replaced) [<a href="/pdf/2410.13012" title="Download PDF" id="pdf-2410.13012" aria-labelledby="pdf-2410.13012">pdf</a>, <a href="https://arxiv.org/html/2410.13012v3" title="View HTML" id="html-2410.13012" aria-labelledby="html-2410.13012" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2410.13012" title="Other formats" id="oth-2410.13012" aria-labelledby="oth-2410.13012">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Sample Compression Scheme Reductions </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Attias,+I">Idan Attias</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hanneke,+S">Steve Hanneke</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ramaswami,+A">Arvind Ramaswami</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Machine Learning (stat.ML) </div> <p class='mathjax'> We present novel reductions from sample compression schemes in multiclass classification, regression, and adversarially robust learning settings to binary sample compression schemes. Assuming we have a compression scheme for binary classes of size $f(d_\mathrm{VC})$, where $d_\mathrm{VC}$ is the VC dimension, then we have the following results: (1) If the binary compression scheme is a majority-vote or a stable compression scheme, then there exists a multiclass compression scheme of size $O(f(d_\mathrm{G}))$, where $d_\mathrm{G}$ is the graph dimension. Moreover, for general binary compression schemes, we obtain a compression of size $O(f(d_\mathrm{G})\log|Y|)$, where $Y$ is the label space. (2) If the binary compression scheme is a majority-vote or a stable compression scheme, then there exists an $\epsilon$-approximate compression scheme for regression over $[0,1]$-valued functions of size $O(f(d_\mathrm{P}))$, where $d_\mathrm{P}$ is the pseudo-dimension. For general binary compression schemes, we obtain a compression of size $O(f(d_\mathrm{P})\log(1/\epsilon))$. These results would have significant implications if the sample compression conjecture, which posits that any binary concept class with a finite VC dimension admits a binary compression scheme of size $O(d_\mathrm{VC})$, is resolved (Littlestone and Warmuth, 1986; Floyd and Warmuth, 1995; Warmuth, 2003). Our results would then extend the proof of the conjecture immediately to other settings. We establish similar results for adversarially robust learning and also provide an example of a concept class that is robustly learnable but has no bounded-size compression scheme, demonstrating that learnability is not equivalent to having a compression scheme independent of the sample size, unlike in binary classification, where compression of size $2^{O(d_\mathrm{VC})}$ is attainable (Moran and Yehudayoff, 2016). 
</p> </div> </dd> <dt> <a name='item162'>[162]</a> <a href ="/abs/2410.16208" title="Abstract" id="2410.16208"> arXiv:2410.16208 </a> (replaced) [<a href="/pdf/2410.16208" title="Download PDF" id="pdf-2410.16208" aria-labelledby="pdf-2410.16208">pdf</a>, <a href="https://arxiv.org/html/2410.16208v4" title="View HTML" id="html-2410.16208" aria-labelledby="html-2410.16208" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2410.16208" title="Other formats" id="oth-2410.16208" aria-labelledby="oth-2410.16208">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Compute-Constrained Data Selection </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Yin,+J+O">Junjie Oscar Yin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Rush,+A+M">Alexander M. Rush</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Published as a conference paper at ICLR 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Computation and Language (cs.CL) </div> <p class='mathjax'> Data selection can reduce the amount of training data needed to finetune LLMs; however, the efficacy of data selection scales directly with its compute. Motivated by the practical challenge of compute-constrained finetuning, we consider the setting in which both the cost of selecting data and training are budgeted for. We first formalize the problem of data selection with a cost-aware utility function, and model the data selection problem as trading off initial-selection cost for training gain. We run a comprehensive sweep of experiments across multiple tasks, varying compute budget by scaling finetuning tokens, model sizes, and data selection compute. Interestingly, we find that many powerful data selection methods are almost never compute-optimal, and that cheaper data selection alternatives dominate both from a theoretical and empirical perspective. For compute-optimal training, we find that perplexity and gradient data selection require training-to-selection model size ratios of 5x and 10x, respectively. 
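</p> <p class='mathjax'> The budgeting idea can be made concrete with a simple accounting identity: data selection only pays off if the training compute it saves exceeds the compute it consumes. A hedged sketch of such a cost model (the FLOP accounting below is an illustrative placeholder, not the paper's utility function): </p> <pre>
def is_selection_worth_it(select_flops_per_token, pool_tokens,
                          train_flops_per_token, tokens_saved):
    """Selection scores the whole pool once; training is then shorter.
    Worth it only if compute saved in training exceeds selection cost."""
    selection_cost = select_flops_per_token * pool_tokens
    training_savings = train_flops_per_token * tokens_saved
    return training_savings > selection_cost
</pre> <p class='mathjax'>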
</p> </div> </dd> <dt> <a name='item163'>[163]</a> <a href ="/abs/2410.19426" title="Abstract" id="2410.19426"> arXiv:2410.19426 </a> (replaced) [<a href="/pdf/2410.19426" title="Download PDF" id="pdf-2410.19426" aria-labelledby="pdf-2410.19426">pdf</a>, <a href="https://arxiv.org/html/2410.19426v2" title="View HTML" id="html-2410.19426" aria-labelledby="html-2410.19426" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2410.19426" title="Other formats" id="oth-2410.19426" aria-labelledby="oth-2410.19426">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Analyzing Generative Models by Manifold Entropic Metrics </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Galperin,+D">Daniel Galperin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=K%C3%B6the,+U">Ullrich Köthe</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Camera-ready version: accepted at AISTATS 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Machine Learning (stat.ML) </div> <p class='mathjax'> Good generative models should not only synthesize high quality data, but also utilize interpretable representations that aid human understanding of their behavior. However, it is difficult to measure objectively if and to what degree desirable properties of disentangled representations have been achieved. Inspired by the principle of independent mechanisms, we address this difficulty by introducing a novel set of tractable information-theoretic evaluation metrics. We demonstrate the usefulness of our metrics on illustrative toy examples and conduct an in-depth comparison of various normalizing flow architectures and $\beta$-VAEs on the EMNIST dataset. Our method allows us to sort latent features by importance and to assess the amount of residual correlation among the resulting concepts. The most interesting finding of our experiments is a ranking of model architectures and training procedures in terms of their inductive bias to converge to aligned and disentangled representations during training. 
</p> </div> </dd> <dt> <a name='item164'>[164]</a> <a href ="/abs/2411.02770" title="Abstract" id="2411.02770"> arXiv:2411.02770 </a> (replaced) [<a href="/pdf/2411.02770" title="Download PDF" id="pdf-2411.02770" aria-labelledby="pdf-2411.02770">pdf</a>, <a href="https://arxiv.org/html/2411.02770v3" title="View HTML" id="html-2411.02770" aria-labelledby="html-2411.02770" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2411.02770" title="Other formats" id="oth-2411.02770" aria-labelledby="oth-2411.02770">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A spectral mixture representation of isotropic kernels to generalize random Fourier features </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Langren%C3%A9,+N">Nicolas Langrené</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Warin,+X">Xavier Warin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gruet,+P">Pierre Gruet</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 19 pages, 16 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Probability (math.PR); Computation (stat.CO); Machine Learning (stat.ML) </div> <p class='mathjax'> Rahimi and Recht (2007) introduced the idea of decomposing positive definite shift-invariant kernels by randomly sampling from their spectral distribution. This famous technique, known as Random Fourier Features (RFF), is in principle applicable to any such kernel whose spectral distribution can be identified and simulated. In practice, however, it is usually applied to the Gaussian kernel because of its simplicity, since its spectral distribution is also Gaussian. Clearly, simple spectral sampling formulas would be desirable for broader classes of kernels. In this paper, we show that the spectral distribution of positive definite isotropic kernels in $\mathbb{R}^{d}$ for all $d\geq1$ can be decomposed as a scale mixture of $\alpha$-stable random vectors, and we identify the mixing distribution as a function of the kernel. This constructive decomposition provides a simple and ready-to-use spectral sampling formula for many multivariate positive definite shift-invariant kernels, including exponential power kernels, generalized Matérn kernels, generalized Cauchy kernels, as well as newly introduced kernels such as the Beta, Kummer, and Tricomi kernels. In particular, we retrieve the fact that the spectral distributions of these kernels are scale mixtures of the multivariate Gaussian distribution, along with an explicit mixing distribution formula. This result has broad applications for support vector machines, kernel ridge regression, Gaussian processes, and other kernel-based machine learning techniques for which the random Fourier features technique is applicable. 
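</p> <p class='mathjax'> For context, the random Fourier features recipe this paper generalizes: sample frequencies from the kernel's spectral distribution and build cosine features. Below is the classic Gaussian-kernel case, where the spectral distribution is itself Gaussian; under the paper's result, the Gaussian sampling line would instead draw a random scale and an $\alpha$-stable vector. The helper name is ours: </p> <pre>
import numpy as np

def rff_features(X, n_features=256, lengthscale=1.0, seed=0):
    """Random Fourier features approximating the Gaussian kernel
    k(x, y) = exp(-||x - y||^2 / (2 * lengthscale^2))."""
    rng = np.random.default_rng(seed)
    d = X.shape[1]
    # Spectral distribution of the Gaussian kernel is Gaussian.
    W = rng.normal(scale=1.0 / lengthscale, size=(d, n_features))
    b = rng.uniform(0.0, 2.0 * np.pi, size=n_features)
    return np.sqrt(2.0 / n_features) * np.cos(X @ W + b)
</pre> <p class='mathjax'> The kernel is then approximated by the inner product of these features, $k(x,y) \approx \varphi(x)^{\top}\varphi(y)$. </p> <p class='mathjax'>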
</p> </div> </dd> <dt> <a name='item165'>[165]</a> <a href ="/abs/2411.09821" title="Abstract" id="2411.09821"> arXiv:2411.09821 </a> (replaced) [<a href="/pdf/2411.09821" title="Download PDF" id="pdf-2411.09821" aria-labelledby="pdf-2411.09821">pdf</a>, <a href="https://arxiv.org/html/2411.09821v3" title="View HTML" id="html-2411.09821" aria-labelledby="html-2411.09821" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2411.09821" title="Other formats" id="oth-2411.09821" aria-labelledby="oth-2411.09821">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Towards Scalable Newborn Screening: Automated General Movement Assessment in Uncontrolled Settings </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Chopard,+D">Daphné Chopard</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Laguna,+S">Sonia Laguna</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chin-Cheong,+K">Kieran Chin-Cheong</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dietz,+A">Annika Dietz</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Badura,+A">Anna Badura</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wellmann,+S">Sven Wellmann</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Vogt,+J+E">Julia E. Vogt</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Paper accepted (oral) at ICLR 2025 Workshop on AI for Children. Preliminary version previously accepted at Findings track presented at Machine Learning for Health (ML4H) symposium 2024 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Computer Vision and Pattern Recognition (cs.CV) </div> <p class='mathjax'> General movements (GMs) are spontaneous, coordinated body movements in infants that offer valuable insights into the developing nervous system. Assessed through the Prechtl GM Assessment (GMA), GMs are reliable predictors for neurodevelopmental disorders. However, GMA requires specifically trained clinicians, who are limited in number. To scale up newborn screening, there is a need for an algorithm that can automatically classify GMs from infant video recordings. This data poses challenges, including variability in recording length, device type, and setting, with each video coarsely annotated for overall movement quality. In this work, we introduce a tool for extracting features from these recordings and explore various machine learning techniques for automated GM classification. 
</p> </div> </dd> <dt> <a name='item166'>[166]</a> <a href ="/abs/2411.10087" title="Abstract" id="2411.10087"> arXiv:2411.10087 </a> (replaced) [<a href="/pdf/2411.10087" title="Download PDF" id="pdf-2411.10087" aria-labelledby="pdf-2411.10087">pdf</a>, <a href="https://arxiv.org/html/2411.10087v3" title="View HTML" id="html-2411.10087" aria-labelledby="html-2411.10087" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2411.10087" title="Other formats" id="oth-2411.10087" aria-labelledby="oth-2411.10087">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> PFML: Self-Supervised Learning of Time-Series Data Without Representation Collapse </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Vaaras,+E">Einari Vaaras</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Airaksinen,+M">Manu Airaksinen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=R%C3%A4s%C3%A4nen,+O">Okko Räsänen</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted for publication in IEEE Access </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Self-supervised learning (SSL) is a data-driven learning approach that utilizes the innate structure of the data to guide the learning process. In contrast to supervised learning, which depends on external labels, SSL utilizes the inherent characteristics of the data to produce its own supervisory signal. However, one frequent issue with SSL methods is representation collapse, where the model outputs a constant input-invariant feature representation. This issue hinders the potential application of SSL methods to new data modalities, as trying to avoid representation collapse wastes researchers' time and effort. This paper introduces a novel SSL algorithm for time-series data called Prediction of Functionals from Masked Latents (PFML). Instead of predicting masked input signals or their latent representations directly, PFML operates by predicting statistical functionals of the input signal corresponding to masked embeddings, given a sequence of unmasked embeddings. The algorithm is designed to avoid representation collapse, rendering it straightforwardly applicable to different time-series data domains, such as novel sensor modalities in clinical data. We demonstrate the effectiveness of PFML through complex, real-life classification tasks across three different data modalities: infant posture and movement classification from multi-sensor inertial measurement unit data, emotion recognition from speech data, and sleep stage classification from EEG data. The results show that PFML is superior to a conceptually similar SSL method and a contrastive learning-based SSL method. Additionally, PFML is on par with the current state-of-the-art SSL method, while also being conceptually simpler and without suffering from representation collapse. 
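</p> <p class='mathjax'> A sketch of the target construction the description above implies: instead of regressing onto the masked frames themselves, regress onto simple statistical functionals of them. The particular choice of functionals below (per-channel mean, variance, min, max) is our illustrative assumption, not necessarily the set used in the paper: </p> <pre>
import torch

def functional_targets(signal, mask):
    """signal: (T, channels); mask: boolean (T,) marking masked frames.
    Returns a small vector of statistical functionals of the masked span
    (illustrative choice: mean, variance, min, max per channel)."""
    masked = signal[mask]                      # (n_masked, channels)
    return torch.cat([masked.mean(0), masked.var(0),
                      masked.min(0).values, masked.max(0).values])
</pre> <p class='mathjax'> A predictor operating on the unmasked embeddings would then be trained to regress this vector, e.g. with an MSE loss. </p> <p class='mathjax'>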
</p> </div> </dd> <dt> <a name='item167'>[167]</a> <a href ="/abs/2411.13951" title="Abstract" id="2411.13951"> arXiv:2411.13951 </a> (replaced) [<a href="/pdf/2411.13951" title="Download PDF" id="pdf-2411.13951" aria-labelledby="pdf-2411.13951">pdf</a>, <a href="https://arxiv.org/html/2411.13951v4" title="View HTML" id="html-2411.13951" aria-labelledby="html-2411.13951" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2411.13951" title="Other formats" id="oth-2411.13951" aria-labelledby="oth-2411.13951">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> PATH: A Discrete-sequence Dataset for Evaluating Online Unsupervised Anomaly Detection Approaches for Multivariate Time Series </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Correia,+L">Lucas Correia</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Goos,+J">Jan-Christoph Goos</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=B%C3%A4ck,+T">Thomas Bäck</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kononova,+A+V">Anna V. Kononova</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Submitted to the Big Data Research journal </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Computational Engineering, Finance, and Science (cs.CE); Systems and Control (eess.SY) </div> <p class='mathjax'> Benchmarking anomaly detection approaches for multivariate time series is a challenging task due to a lack of high-quality datasets. Current publicly available datasets are too small, lack diversity, and feature trivial anomalies, which hinders measurable progress in this research area. We propose a solution: a diverse, extensive, and non-trivial dataset generated via state-of-the-art simulation tools that reflects realistic behaviour of an automotive powertrain, including its multivariate, dynamic and variable-state properties. Additionally, our dataset represents a discrete-sequence problem, which remains unaddressed by previously proposed solutions in the literature. To cater for both unsupervised and semi-supervised anomaly detection settings, as well as time series generation and forecasting, we make different versions of the dataset available, where training and test subsets are offered in contaminated and clean versions, depending on the task. We also provide baseline results from a selection of approaches based on deterministic and variational autoencoders, as well as a non-parametric approach. As expected, the baseline experimentation shows that the approaches trained on the semi-supervised version of the dataset outperform their unsupervised counterparts, highlighting a need for approaches more robust to contaminated training data. Furthermore, results show that the threshold used can have a large influence on detection performance, hence more work needs to be invested in methods to find a suitable threshold without the need for labelled data.
</p> </div> </dd> <dt> <a name='item168'>[168]</a> <a href ="/abs/2411.16260" title="Abstract" id="2411.16260"> arXiv:2411.16260 </a> (replaced) [<a href="/pdf/2411.16260" title="Download PDF" id="pdf-2411.16260" aria-labelledby="pdf-2411.16260">pdf</a>, <a href="/format/2411.16260" title="Other formats" id="oth-2411.16260" aria-labelledby="oth-2411.16260">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Unraveling Arithmetic in Large Language Models: The Role of Algebraic Structures </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Chang,+F">Fu-Chieh Chang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lin,+Y">You-Chen Lin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wu,+P">Pei-Yuan Wu</a></div> <div class='list-journal-ref'><span class='descriptor'>Journal-ref:</span> ICLR 2025 Workshop on Reasoning and Planning for Large Language Models </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Computation and Language (cs.CL) </div> <p class='mathjax'> Large language models (LLMs) have demonstrated remarkable mathematical capabilities, largely driven by chain-of-thought (CoT) prompting, which decomposes complex reasoning into step-by-step solutions. However, the mechanisms underlying LLMs' ability to perform arithmetic in a single step of CoT remain poorly understood. In this work, we propose that LLMs learn arithmetic by capturing algebraic structures, such as commutativity and identity properties. Since these structures are observable through input-output relationships, they can generalize to unseen data. We empirically demonstrate that LLMs can learn algebraic structures using a custom dataset of arithmetic problems, and provide theoretical evidence showing that, under specific configurations of weights and biases, transformer-based LLMs can generate embeddings that remain invariant to both permutations of input tokens and the presence of identity elements. Our findings indicate that leveraging algebraic structures can enhance the LLMs' arithmetic capabilities, offering insights into improving their arithmetic performance.
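</p>
<p class='mathjax'> A hedged sketch of the kind of probe such a study suggests: prompt pairs that must agree if a model has internalized commutativity and the identity element. The probe construction below is hypothetical, not the paper's custom dataset. </p>
<pre>
import random

def algebraic_probes(n: int = 5, lo: int = 0, hi: int = 999):
    # Pairs of prompts that should yield identical answers if the model
    # has internalized commutativity (a+b = b+a) and the identity (a+0 = 0+a).
    probes = []
    for _ in range(n):
        a, b = random.randint(lo, hi), random.randint(lo, hi)
        probes.append((f"{a}+{b}=", f"{b}+{a}="))  # commutativity probe
        probes.append((f"{a}+0=", f"0+{a}="))      # identity-element probe
    return probes

for p, q in algebraic_probes():
    print(p, "vs", q)  # feed both to the model and compare its answers
</pre>
<p class='mathjax'>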
</p> </div> </dd> <dt> <a name='item169'>[169]</a> <a href ="/abs/2412.03131" title="Abstract" id="2412.03131"> arXiv:2412.03131 </a> (replaced) [<a href="/pdf/2412.03131" title="Download PDF" id="pdf-2412.03131" aria-labelledby="pdf-2412.03131">pdf</a>, <a href="https://arxiv.org/html/2412.03131v2" title="View HTML" id="html-2412.03131" aria-labelledby="html-2412.03131" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2412.03131" title="Other formats" id="oth-2412.03131" aria-labelledby="oth-2412.03131">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Unifying KV Cache Compression for Large Language Models with LeanKV </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+Y">Yanqi Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hu,+Y">Yuwei Hu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhao,+R">Runyuan Zhao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lui,+J+C">John C.S. Lui</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+H">Haibo Chen</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Distributed, Parallel, and Cluster Computing (cs.DC) </div> <p class='mathjax'> Large language models (LLMs) exhibit exceptional performance but incur significant serving costs due to their substantial memory requirements, with the key-value (KV) cache being a primary bottleneck. Existing KV cache compression techniques, such as quantization and pruning, apply uniform treatment to both keys and values, and discard unimportant tokens entirely, overlooking the fine-grained differences in significance of various components within the KV cache. To address these limitations, we introduce LeanKV, a framework that advances KV cache compression by exploiting three levels of differentiation in the KV cache: (1) the differing impact of keys and values on attention computation, (2) the varying importance of tokens, and (3) the diverse dynamic sparsity patterns across attention heads. At the core of LeanKV is an on-GPU memory manager that compacts fragmented free memory into contiguous regions in parallel, effectively translating sparsity in the KV cache into performance gains. We evaluate LeanKV on several mainstream models, including the recent "thinking model". LeanKV is able to compress the KV cache by $2.7\times$ to $5.7\times$ with near-lossless accuracy on complex workloads requiring sophisticated reasoning and long-generation capabilities, and enhances throughput by $1.9\times$ to $5.4\times$.
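</p>
<p class='mathjax'> For intuition about the token-importance level of differentiation, here is a minimal, assumed sketch of importance-based KV pruning. LeanKV's actual policy additionally distinguishes keys from values, adapts per attention head, and manages GPU memory; none of that is shown here. </p>
<pre>
import numpy as np

def keep_important_tokens(attn: np.ndarray, keys: np.ndarray,
                          values: np.ndarray, keep_ratio: float = 0.5):
    # attn: (n_heads, n_tokens) attention mass each cached token received.
    # Rank tokens by total attention and keep the top fraction; discarded
    # tokens free cache memory. (A simplified stand-in, not LeanKV itself.)
    score = attn.sum(axis=0)
    k = max(1, int(keep_ratio * score.size))
    kept = np.sort(np.argsort(score)[-k:])   # preserve sequence order
    return keys[kept], values[kept], kept

rng = np.random.default_rng(1)
K, V = rng.standard_normal((128, 64)), rng.standard_normal((128, 64))
A = rng.random((8, 128))
K2, V2, idx = keep_important_tokens(A, K, V)  # roughly 2x smaller KV cache
</pre>
<p class='mathjax'>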
</p> </div> </dd> <dt> <a name='item170'>[170]</a> <a href ="/abs/2412.04404" title="Abstract" id="2412.04404"> arXiv:2412.04404 </a> (replaced) [<a href="/pdf/2412.04404" title="Download PDF" id="pdf-2412.04404" aria-labelledby="pdf-2412.04404">pdf</a>, <a href="https://arxiv.org/html/2412.04404v2" title="View HTML" id="html-2412.04404" aria-labelledby="html-2412.04404" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2412.04404" title="Other formats" id="oth-2412.04404" aria-labelledby="oth-2412.04404">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Federated Automated Feature Engineering </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Overman,+T">Tom Overman</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Klabjan,+D">Diego Klabjan</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Preliminary Work </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Distributed, Parallel, and Cluster Computing (cs.DC) </div> <p class='mathjax'> Automated feature engineering (AutoFE) is used to automatically create new features from original features to improve predictive performance without needing significant human intervention and domain expertise. Many algorithms exist for AutoFE, but very few approaches exist for the federated learning (FL) setting where data is gathered across many clients and is not shared between clients or a central server. We introduce AutoFE algorithms for the horizontal, vertical, and hybrid FL settings, which differ in how the data is gathered across clients. To the best of our knowledge, we are the first to develop AutoFE algorithms for the horizontal and hybrid FL cases, and we show that the downstream test scores of our federated AutoFE algorithms are close in performance to the case where data is held centrally and AutoFE is performed centrally. </p> </div> </dd> <dt> <a name='item171'>[171]</a> <a href ="/abs/2412.11215" title="Abstract" id="2412.11215"> arXiv:2412.11215 </a> (replaced) [<a href="/pdf/2412.11215" title="Download PDF" id="pdf-2412.11215" aria-labelledby="pdf-2412.11215">pdf</a>, <a href="/format/2412.11215" title="Other formats" id="oth-2412.11215" aria-labelledby="oth-2412.11215">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Neural Port-Hamiltonian Differential Algebraic Equations for Compositional Learning of Electrical Networks </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Neary,+C">Cyrus Neary</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Tsao,+N">Nathan Tsao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Topcu,+U">Ufuk Topcu</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Systems and Control (eess.SY) </div> <p class='mathjax'> We develop compositional learning algorithms for coupled dynamical systems. While deep learning has proven effective at modeling complex relationships from data, compositional couplings between system components typically introduce algebraic constraints on state variables, posing challenges to many existing data-driven approaches to modeling dynamical systems.
Towards developing deep learning models for constrained dynamical systems, we introduce neural port-Hamiltonian differential algebraic equations (N-PHDAEs), which use neural networks to parametrize unknown terms in both the differential and algebraic components of a port-Hamiltonian DAE. To train these models, we propose an algorithm that uses automatic differentiation to perform index reduction, automatically transforming the neural DAE into an equivalent system of neural ordinary differential equations (N-ODEs), for which established model inference and backpropagation methods exist. The proposed compositional modeling framework and learning algorithms may be applied broadly to learn control-oriented models of dynamical systems in a variety of application areas; in this work, however, we focus on their application to the modeling of electrical networks. Experiments simulating the dynamics of nonlinear circuits exemplify the benefits of our approach: the proposed N-PHDAE model achieves an order of magnitude improvement in prediction accuracy and constraint satisfaction when compared to a baseline N-ODE over long prediction time horizons. We also validate the compositional capabilities of our approach through experiments on a simulated D.C. microgrid: we train individual N-PHDAE models for separate grid components, before coupling them to accurately predict the behavior of larger-scale networks. </p> </div> </dd> <dt> <a name='item172'>[172]</a> <a href ="/abs/2501.03017" title="Abstract" id="2501.03017"> arXiv:2501.03017 </a> (replaced) [<a href="/pdf/2501.03017" title="Download PDF" id="pdf-2501.03017" aria-labelledby="pdf-2501.03017">pdf</a>, <a href="https://arxiv.org/html/2501.03017v2" title="View HTML" id="html-2501.03017" aria-labelledby="html-2501.03017" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2501.03017" title="Other formats" id="oth-2501.03017" aria-labelledby="oth-2501.03017">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Convexity in ReLU Neural Networks: beyond ICNNs? </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Gagneux,+A">Anne Gagneux</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Massias,+M">Mathurin Massias</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Soubies,+E">Emmanuel Soubies</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gribonval,+R">Rémi Gribonval</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Convex functions and their gradients play a critical role in mathematical imaging, from proximal optimization to Optimal Transport. The success of deep learning has led many to use learning-based methods, where fixed functions or operators are replaced by learned neural networks. Despite their empirical superiority, establishing rigorous guarantees for these methods often requires imposing structural constraints on neural architectures, in particular convexity. The most popular way to do so is to use so-called Input Convex Neural Networks (ICNNs). In order to explore the expressivity of ICNNs, we provide necessary and sufficient conditions for a ReLU neural network to be convex. Such characterizations are based on products of weights and activations, and take a simple form for any architecture in the path-lifting framework.
As particular applications, we study our characterizations in depth for 1 and 2-hidden-layer neural networks: we show that every convex function implemented by a 1-hidden-layer ReLU network can also be expressed by an ICNN with the same architecture; however, this property no longer holds with more layers. Finally, we provide a numerical procedure that allows an exact check of convexity for ReLU neural networks with a large number of affine regions. </p> </div> </dd> <dt> <a name='item173'>[173]</a> <a href ="/abs/2501.16918" title="Abstract" id="2501.16918"> arXiv:2501.16918 </a> (replaced) [<a href="/pdf/2501.16918" title="Download PDF" id="pdf-2501.16918" aria-labelledby="pdf-2501.16918">pdf</a>, <a href="https://arxiv.org/html/2501.16918v2" title="View HTML" id="html-2501.16918" aria-labelledby="html-2501.16918" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2501.16918" title="Other formats" id="oth-2501.16918" aria-labelledby="oth-2501.16918">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> On Rollouts in Model-Based Reinforcement Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Frauenknecht,+B">Bernd Frauenknecht</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Subhasish,+D">Devdutt Subhasish</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Solowjow,+F">Friedrich Solowjow</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Trimpe,+S">Sebastian Trimpe</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Model-based reinforcement learning (MBRL) seeks to enhance data efficiency by learning a model of the environment and generating synthetic rollouts from it. However, accumulated model errors during these rollouts can distort the data distribution, negatively impacting policy learning and hindering long-term planning. Thus, the accumulation of model errors is a key bottleneck in current MBRL methods. We propose Infoprop, a model-based rollout mechanism that separates aleatoric from epistemic model uncertainty and reduces the influence of the latter on the data distribution. Further, Infoprop keeps track of accumulated model errors along a model rollout and provides termination criteria to limit data corruption. We demonstrate the capabilities of Infoprop in the Infoprop-Dyna algorithm, reporting state-of-the-art performance in Dyna-style MBRL on common MuJoCo benchmark tasks while substantially increasing rollout length and data quality.
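</p>
<p class='mathjax'> A minimal sketch of the rollout-termination idea, assuming an ensemble's disagreement as the epistemic-uncertainty proxy; Infoprop's actual aleatoric/epistemic decomposition and termination criteria are more refined than this caricature. </p>
<pre>
import numpy as np

def rollout_with_termination(models, s, policy, horizon=50, eps=0.5):
    # Propagate a model rollout, tracking ensemble disagreement as a proxy
    # for epistemic uncertainty, and stop before the data gets corrupted.
    traj = []
    for _ in range(horizon):
        a = policy(s)
        preds = np.stack([m(s, a) for m in models])  # (n_models, state_dim)
        epistemic = preds.std(axis=0).max()          # disagreement across models
        if epistemic > eps:
            break                                    # terminate the rollout early
        s = preds.mean(axis=0)
        traj.append((s, a))
    return traj

rng = np.random.default_rng(2)
# Toy "ensemble": each member is a random nonlinear dynamics model.
models = [lambda s, a, w=rng.standard_normal((3, 4)): np.tanh(w @ np.concatenate([s, a]))
          for _ in range(5)]
policy = lambda s: s[:1]
traj = rollout_with_termination(models, rng.standard_normal(3), policy)
</pre>
<p class='mathjax'>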
</p> </div> </dd> <dt> <a name='item174'>[174]</a> <a href ="/abs/2501.17848" title="Abstract" id="2501.17848"> arXiv:2501.17848 </a> (replaced) [<a href="/pdf/2501.17848" title="Download PDF" id="pdf-2501.17848" aria-labelledby="pdf-2501.17848">pdf</a>, <a href="https://arxiv.org/html/2501.17848v2" title="View HTML" id="html-2501.17848" aria-labelledby="html-2501.17848" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2501.17848" title="Other formats" id="oth-2501.17848" aria-labelledby="oth-2501.17848">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Improving Genetic Programming for Symbolic Regression with Equality Graphs </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=de+Franca,+F+O">Fabricio Olivetti de Franca</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kronberger,+G">Gabriel Kronberger</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 10 pages, 5 figures, 4 tables. In Genetic and Evolutionary Computation Conference (GECCO 25) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> The search for symbolic regression models with genetic programming (GP) has a tendency to revisit expressions in their original or equivalent forms. Repeatedly evaluating equivalent expressions is inefficient, as it does not immediately lead to better solutions. However, evolutionary algorithms require diversity and should allow the accumulation of inactive building blocks that can play an important role at a later point. The equality graph is a data structure capable of compactly storing expressions and their equivalent forms, allowing an efficient verification of whether an expression has been visited in any of its stored equivalent forms. We exploit the e-graph to adapt the subtree operators to reduce the chances of revisiting expressions. Our adaptation, called eggp, stores every visited expression in the e-graph, allowing us to filter out from the available selection of subtrees all the combinations that would create already visited expressions. Results show that, for small expressions, this approach improves the performance of a simple GP algorithm to compete with PySR and Operon without increasing computational cost. As a highlight, eggp was capable of reliably delivering short yet accurate models for a selected set of benchmarks from SRBench and a set of real-world datasets.
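</p>
<p class='mathjax'> As a toy stand-in for the e-graph's duplicate filtering, the sketch below canonicalizes commutative expressions before checking a visited set. A real e-graph compactly stores whole equivalence classes under many more rewrite rules; this simplification is an assumption made purely for illustration. </p>
<pre>
def canonical(expr):
    # Expressions: variables/constants are strings; ops are (op, left, right).
    # Sorting the children of commutative ops makes x+y and y+x collide.
    if isinstance(expr, str):
        return expr
    op, left, right = expr
    left, right = canonical(left), canonical(right)
    if op in ("+", "*") and repr(right) < repr(left):
        left, right = right, left
    return (op, left, right)

visited = set()
for e in [("+", "x", "y"), ("+", "y", "x"), ("*", ("+", "y", "x"), "2")]:
    key = canonical(e)
    if key in visited:
        print("skip duplicate:", e)   # eggp would never re-evaluate this
    else:
        visited.add(key)
</pre>
<p class='mathjax'>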
</p> </div> </dd> <dt> <a name='item175'>[175]</a> <a href ="/abs/2501.17859" title="Abstract" id="2501.17859"> arXiv:2501.17859 </a> (replaced) [<a href="/pdf/2501.17859" title="Download PDF" id="pdf-2501.17859" aria-labelledby="pdf-2501.17859">pdf</a>, <a href="https://arxiv.org/html/2501.17859v2" title="View HTML" id="html-2501.17859" aria-labelledby="html-2501.17859" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2501.17859" title="Other formats" id="oth-2501.17859" aria-labelledby="oth-2501.17859">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> rEGGression: an Interactive and Agnostic Tool for the Exploration of Symbolic Regression Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=de+Franca,+F+O">Fabricio Olivetti de Franca</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kronberger,+G">Gabriel Kronberger</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 9 pages, 4 figures, 2 tables. Genetic and Evolutionary Computation Conference (GECCO 25) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Regression analysis is used for prediction and to understand the effect of independent variables on dependent variables. Symbolic regression (SR) automates the search for non-linear regression models, delivering a set of hypotheses that balances accuracy with the possibility of understanding the phenomena. Many SR implementations return a Pareto front allowing the choice of the best trade-off. However, this hides alternatives that are close to non-domination, limiting these choices. Equality graphs (e-graphs) make it possible to represent large sets of expressions compactly by efficiently handling duplicated parts occurring in multiple expressions. E-graphs allow storing and querying all SR solution candidates visited in one or multiple GP runs efficiently and open the possibility to analyse much larger sets of SR solution candidates. We introduce rEGGression, a tool that uses e-graphs to enable the exploration of a large set of symbolic expressions, providing querying, filtering, and pattern-matching features that create an interactive experience to gain insights about SR models. The main highlight is its focus on the exploration of the building blocks found during the search, which can help experts find insights about the studied phenomena. This is possible by exploiting the pattern matching capability of the e-graph data structure.
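</p>
<p class='mathjax'> A hedged sketch of the pattern-matching style of query the tool describes, using a tuple expression encoding with <code>_</code> as a hole; the representation and search strategy here are illustrative assumptions, not rEGGression's implementation. </p>
<pre>
def matches(pattern, expr):
    # "_" is a hole matching any subtree; tuples are (op, child, ...).
    if pattern == "_":
        return True
    if isinstance(pattern, str) or isinstance(expr, str):
        return pattern == expr
    return (len(pattern) == len(expr) and pattern[0] == expr[0]
            and all(matches(p, e) for p, e in zip(pattern[1:], expr[1:])))

def contains(pattern, expr):
    # Search the whole tree, not just the root.
    if matches(pattern, expr):
        return True
    return isinstance(expr, tuple) and any(contains(pattern, c) for c in expr[1:])

models = [("exp", ("*", "b", "x")), ("+", "a", ("exp", "x")), ("*", "a", "x")]
hits = [m for m in models if contains(("exp", "_"), m)]   # first two match
</pre>
<p class='mathjax'>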
</p> </div> </dd> <dt> <a name='item176'>[176]</a> <a href ="/abs/2501.18812" title="Abstract" id="2501.18812"> arXiv:2501.18812 </a> (replaced) [<a href="/pdf/2501.18812" title="Download PDF" id="pdf-2501.18812" aria-labelledby="pdf-2501.18812">pdf</a>, <a href="https://arxiv.org/html/2501.18812v2" title="View HTML" id="html-2501.18812" aria-labelledby="html-2501.18812" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2501.18812" title="Other formats" id="oth-2501.18812" aria-labelledby="oth-2501.18812">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Estimating the Probability of Sampling a Trained Neural Network at Random </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Scherlis,+A">Adam Scherlis</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Belrose,+N">Nora Belrose</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> New experiments and clearer exposition </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> We present and analyze an algorithm for estimating the size, under a Gaussian or uniform measure, of a localized neighborhood in neural network parameter space with behavior similar to an ``anchor'' point. We refer to this as the "local volume" of the anchor. We adapt an existing basin-volume estimator, which is very fast but in many cases only provides a lower bound. We show that this lower bound can be improved with an importance-sampling method using gradient information that is already provided by popular optimizers. The negative logarithm of local volume can also be interpreted as a measure of the anchor network's information content. As expected for a measure of complexity, this quantity increases during language model training. We find that overfit, badly-generalizing neighborhoods are smaller, indicating a more complex learned behavior. This smaller volume can also be interpreted in an MDL sense as suboptimal compression. Our results are consistent with a picture of generalization we call the "volume hypothesis": that neural net training produces good generalization primarily because the architecture gives simple functions more volume in parameter space, and the optimizer samples from the low-loss manifold in a volume-sensitive way. We believe that fast local-volume estimators are a promising practical metric of network complexity and architectural inductive bias for interpretability purposes. 
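</p>
<p class='mathjax'> For intuition, a naive Monte Carlo version of the quantity being estimated: the fraction of Gaussian perturbations around the anchor that preserve behavior (here, loss). The paper's estimator improves on this baseline with gradient-based importance sampling; everything below is a toy assumption. </p>
<pre>
import numpy as np

def log_local_volume(loss, theta0, sigma=0.01, tol=0.05, n=2000, seed=0):
    # Fraction of Gaussian perturbations around the anchor theta0 whose
    # loss stays within tol of the anchor's loss; its log is a crude
    # local-volume proxy (-log p ~ information content of the anchor).
    rng = np.random.default_rng(seed)
    base = loss(theta0)
    hits = sum(
        abs(loss(theta0 + sigma * rng.standard_normal(theta0.shape)) - base) < tol
        for _ in range(n)
    )
    p = max(hits, 1) / n   # lower-bounded to keep the log finite
    return np.log(p)

theta0 = np.zeros(10)
quad = lambda t: float(t @ t)   # toy loss with a basin at the origin
print(log_local_volume(quad, theta0))
</pre>
<p class='mathjax'>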
</p> </div> </dd> <dt> <a name='item177'>[177]</a> <a href ="/abs/2502.03251" title="Abstract" id="2502.03251"> arXiv:2502.03251 </a> (replaced) [<a href="/pdf/2502.03251" title="Download PDF" id="pdf-2502.03251" aria-labelledby="pdf-2502.03251">pdf</a>, <a href="https://arxiv.org/html/2502.03251v2" title="View HTML" id="html-2502.03251" aria-labelledby="html-2502.03251" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2502.03251" title="Other formats" id="oth-2502.03251" aria-labelledby="oth-2502.03251">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> RiemannGFM: Learning a Graph Foundation Model from Riemannian Geometry </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Sun,+L">Li Sun</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Huang,+Z">Zhenhao Huang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhou,+S">Suyang Zhou</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wan,+Q">Qiqi Wan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Peng,+H">Hao Peng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yu,+P">Philip Yu</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted by WWW 2025 (Oral) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> The foundation model has heralded a new era in artificial intelligence, pretraining a single model to offer cross-domain transferability on different datasets. Graph neural networks excel at learning graph data, the omnipresent non-Euclidean structure, but often lack the generalization capacity. Hence, graph foundation models are drawing increasing attention, and recent efforts have been made to leverage Large Language Models. On the one hand, existing studies primarily focus on text-attributed graphs, while a wider range of real graphs do not contain fruitful textual attributes. On the other hand, the sequential graph description tailored for the Large Language Model neglects the structural complexity, which is a predominant characteristic of the graph. Such limitations motivate an important question: Can we go beyond Large Language Models, and pretrain a universal model to learn the structural knowledge for any graph? The answer in the language or vision domain is a shared vocabulary. We observe that shared substructures also underlie the graph domain, thereby opening a new opportunity for a graph foundation model with a structural vocabulary. The key innovation is the discovery of a simple yet effective structural vocabulary of trees and cycles, and we explore its inherent connection to Riemannian geometry. Herein, we present a universal pretraining model, RiemannGFM. Concretely, we first construct a novel product bundle to incorporate the diverse geometries of the vocabulary. Then, on this constructed space, we stack Riemannian layers where the structural vocabulary, regardless of the specific graph, is learned in a Riemannian manifold, offering cross-domain transferability. Extensive experiments show the effectiveness of RiemannGFM on a diversity of real graphs.
</p> </div> </dd> <dt> <a name='item178'>[178]</a> <a href ="/abs/2502.04760" title="Abstract" id="2502.04760"> arXiv:2502.04760 </a> (replaced) [<a href="/pdf/2502.04760" title="Download PDF" id="pdf-2502.04760" aria-labelledby="pdf-2502.04760">pdf</a>, <a href="https://arxiv.org/html/2502.04760v2" title="View HTML" id="html-2502.04760" aria-labelledby="html-2502.04760" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2502.04760" title="Other formats" id="oth-2502.04760" aria-labelledby="oth-2502.04760">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Graph Federated Learning Based Proactive Content Caching in Edge Computing </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+R">Rui Wang</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> With the rapid growth of mobile data traffic and the increasing prevalence of video streaming, proactive content caching in edge computing has become crucial for reducing latency and alleviating network congestion. However, traditional caching strategies such as FIFO, LRU, and LFU fail to effectively predict future content popularity, while existing proactive caching approaches often require users to upload data to a central server, raising concerns regarding privacy and scalability. To address these challenges, this paper proposes a Graph Federated Learning-based Proactive Content Caching (GFPCC) scheme that enhances caching efficiency while preserving user privacy. The proposed approach integrates federated learning and graph neural networks, enabling users to locally train Light Graph Convolutional Networks (LightGCN) to capture user-item relationships and predict content popularity. Instead of sharing raw data, only the trained model parameters are transmitted to the central server, where a federated averaging algorithm aggregates updates, refines the global model, and selects the most popular files for proactive caching. Experimental evaluations on real-world datasets, such as MovieLens, demonstrate that GFPCC outperforms baseline caching algorithms by achieving higher cache efficiency through more accurate content popularity predictions. Moreover, the federated learning framework strengthens privacy protection while maintaining efficient model training; however, scalability remains a challenge in large-scale networks with dynamic user preferences. 
</p> </div> </dd> <dt> <a name='item179'>[179]</a> <a href ="/abs/2502.11007" title="Abstract" id="2502.11007"> arXiv:2502.11007 </a> (replaced) [<a href="/pdf/2502.11007" title="Download PDF" id="pdf-2502.11007" aria-labelledby="pdf-2502.11007">pdf</a>, <a href="/format/2502.11007" title="Other formats" id="oth-2502.11007" aria-labelledby="oth-2502.11007">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Local-Cloud Inference Offloading for LLMs in Multi-Modal, Multi-Task, Multi-Dialogue Settings </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Yuan,+L">Liangqi Yuan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Han,+D">Dong-Jun Han</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+S">Shiqiang Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Brinton,+C+G">Christopher G. Brinton</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Distributed, Parallel, and Cluster Computing (cs.DC) </div> <p class='mathjax'> Compared to traditional machine learning models, recent large language models (LLMs) can exhibit multi-task-solving capabilities through multiple dialogues and multi-modal data sources. These unique characteristics of LLMs, together with their large model size, make their deployment more challenging. Specifically, (i) deploying LLMs on local devices faces computational, memory, and energy resource issues, while (ii) deploying them in the cloud cannot guarantee real-time service and incurs communication/usage costs. In this paper, we design TMO, a local-cloud LLM inference system with Three-M Offloading: Multi-modal, Multi-task, and Multi-dialogue. TMO incorporates (i) a lightweight local LLM that can process simple tasks at high speed and (ii) a large-scale cloud LLM that can handle multi-modal data sources. We develop a resource-constrained reinforcement learning (RCRL) strategy for TMO that optimizes the inference location (i.e., local vs. cloud) and multi-modal data sources to use for each task/dialogue, aiming to maximize the long-term reward (response quality, latency, and usage cost) while adhering to resource constraints. We also contribute M4A1, a new dataset we curated that contains reward and cost metrics across multiple modality, task, dialogue, and LLM configurations, enabling evaluation of offloading decisions. We demonstrate the effectiveness of TMO compared to several exploration-decision and LLM-as-Agent baselines, showing significant improvements in latency, cost, and response quality. 
</p> </div> </dd> <dt> <a name='item180'>[180]</a> <a href ="/abs/2502.14270" title="Abstract" id="2502.14270"> arXiv:2502.14270 </a> (replaced) [<a href="/pdf/2502.14270" title="Download PDF" id="pdf-2502.14270" aria-labelledby="pdf-2502.14270">pdf</a>, <a href="https://arxiv.org/html/2502.14270v2" title="View HTML" id="html-2502.14270" aria-labelledby="html-2502.14270" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2502.14270" title="Other formats" id="oth-2502.14270" aria-labelledby="oth-2502.14270">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Predicting Fetal Birthweight from High Dimensional Data using Advanced Machine Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Kapure,+N">Nachiket Kapure</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Joshi,+H">Harsh Joshi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mistri,+R">Rajeshwari Mistri</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kumari,+P">Parul Kumari</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mali,+M">Manasi Mali</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Purohit,+S">Seema Purohit</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sharma,+N">Neha Sharma</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Panday,+M">Mrityunjoy Panday</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yajnik,+C+S">Chittaranjan S. Yajnik</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Birth weight serves as a fundamental indicator of neonatal health, closely linked to both early medical interventions and long-term developmental risks. Traditional predictive models, often constrained by limited feature selection and incomplete datasets, struggle to achieve reliable accuracy, overlooking complex maternal and fetal interactions in diverse clinical settings. This research explores machine learning to address these limitations, utilizing a structured methodology that integrates advanced imputation strategies, supervised feature selection techniques, and predictive modeling. Given the constraints of the dataset, the research underscores the role of data preprocessing in improving model performance. Among the various methodologies explored, tree-based feature selection methods demonstrated superior capability in identifying the most relevant predictors, while ensemble-based regression models proved highly effective in capturing non-linear relationships and complex maternal-fetal interactions within the data. Beyond model performance, the study highlights the clinical significance of key physiological determinants, offering insights into maternal and fetal health factors that influence birth weight and that extend beyond statistical modeling. By bridging computational intelligence with perinatal research, this work underscores the transformative role of machine learning in enhancing predictive accuracy, refining risk assessment and informing data-driven decision-making in maternal and neonatal care. Keywords: Birth weight prediction, maternal-fetal health, MICE, BART, Gradient Boosting, neonatal outcomes, Clinipredictive.
</p> </div> </dd> <dt> <a name='item181'>[181]</a> <a href ="/abs/2502.15568" title="Abstract" id="2502.15568"> arXiv:2502.15568 </a> (replaced) [<a href="/pdf/2502.15568" title="Download PDF" id="pdf-2502.15568" aria-labelledby="pdf-2502.15568">pdf</a>, <a href="https://arxiv.org/html/2502.15568v2" title="View HTML" id="html-2502.15568" aria-labelledby="html-2502.15568" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2502.15568" title="Other formats" id="oth-2502.15568" aria-labelledby="oth-2502.15568">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Cautionary Tale About "Neutrally" Informative AI Tools Ahead of the 2025 Federal Elections in Germany </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Dormuth,+I">Ina Dormuth</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Franke,+S">Sven Franke</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hafer,+M">Marlies Hafer</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Katzke,+T">Tim Katzke</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Marx,+A">Alexander Marx</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=M%C3%BCller,+E">Emmanuel Müller</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Neider,+D">Daniel Neider</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Pauly,+M">Markus Pauly</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Rutinowski,+J">Jérôme Rutinowski</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> In this study, we examine the reliability of AI-based Voting Advice Applications (VAAs) and large language models (LLMs) in providing objective political information. Our analysis is based upon a comparison with party responses to 38 statements of the Wahl-O-Mat, a well-established German online tool that helps inform voters by comparing their views with political party positions. For the LLMs, we identify significant biases. They exhibit a strong alignment (over 75% on average) with left-wing parties and a substantially lower alignment with center-right (below 50%) and right-wing parties (around 30%). Furthermore, for the VAAs, intended to objectively inform voters, we found substantial deviations from the parties' stated positions in Wahl-O-Mat: While one VAA deviated in 25% of cases, another VAA showed deviations in more than 50% of cases. For the latter, we even observed that simple prompt injections led to severe hallucinations, including false claims such as non-existent connections between political parties and right-wing extremist ties.
</p> </div> </dd> <dt> <a name='item182'>[182]</a> <a href ="/abs/2502.17936" title="Abstract" id="2502.17936"> arXiv:2502.17936 </a> (replaced) [<a href="/pdf/2502.17936" title="Download PDF" id="pdf-2502.17936" aria-labelledby="pdf-2502.17936">pdf</a>, <a href="https://arxiv.org/html/2502.17936v3" title="View HTML" id="html-2502.17936" aria-labelledby="html-2502.17936" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2502.17936" title="Other formats" id="oth-2502.17936" aria-labelledby="oth-2502.17936">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> The Art of Beating the Odds with Predictor-Guided Random Design Space Exploration </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Arnold,+F">Felix Arnold</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bouvier,+M">Maxence Bouvier</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Amaudruz,+R">Ryan Amaudruz</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Andri,+R">Renzo Andri</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Cavigelli,+L">Lukas Cavigelli</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 2 pages, 3 figures, conference, this research manuscript has been accepted as a work-in-progress poster at the 62nd Design Automation Conference </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Hardware Architecture (cs.AR) </div> <p class='mathjax'> This work introduces an innovative method for improving combinational digital circuits through random exploration in MIG-based synthesis. High-quality circuits are crucial for performance, power, and cost, making this a critical area of active research. Our approach incorporates next-state prediction and iterative selection, significantly accelerating the synthesis process. This novel method achieves up to 14x synthesis speedup and up to 20.94% better MIG minimization on the EPFL Combinational Benchmark Suite compared to state-of-the-art techniques. We further explore various predictor models and show that increased prediction accuracy does not guarantee an equivalent increase in synthesis quality of results or speedup, observing that randomness remains a desirable factor. </p> </div> </dd> <dt> <a name='item183'>[183]</a> <a href ="/abs/2503.12645" title="Abstract" id="2503.12645"> arXiv:2503.12645 </a> (replaced) [<a href="/pdf/2503.12645" title="Download PDF" id="pdf-2503.12645" aria-labelledby="pdf-2503.12645">pdf</a>, <a href="/format/2503.12645" title="Other formats" id="oth-2503.12645" aria-labelledby="oth-2503.12645">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Understanding Gradient Orthogonalization for Deep Learning via Non-Euclidean Trust-Region Optimization </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Kovalev,+D">Dmitry Kovalev</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Optimization and Control (math.OC); Machine Learning (stat.ML) </div> <p class='mathjax'> Optimization with matrix gradient orthogonalization has recently demonstrated impressive results in the training of deep neural networks (Jordan et al., 2024; Liu et al., 2025).
In this paper, we provide a theoretical analysis of this approach. In particular, we show that the orthogonalized gradient method can be seen as a first-order trust-region optimization method, where the trust-region is defined in terms of the matrix spectral norm. Motivated by this observation, we develop the stochastic non-Euclidean trust-region gradient method with momentum, which recovers the Muon optimizer (Jordan et al., 2024) as a special case, along with normalized SGD and signSGD with momentum (Cutkosky and Mehta, 2020; Sun et al., 2023). In addition, we prove state-of-the-art convergence results for the proposed algorithm in a range of scenarios, which involve arbitrary non-Euclidean norms, constrained and composite problems, and non-convex, star-convex, first- and second-order smooth functions. Finally, our theoretical findings provide an explanation for several practical observations, including the practical superiority of Muon compared to the Orthogonal-SGDM algorithm of Tuddenham et al. (2022) and the importance of weight decay in the training of large-scale language models. </p> </div> </dd> <dt> <a name='item184'>[184]</a> <a href ="/abs/2503.22480" title="Abstract" id="2503.22480"> arXiv:2503.22480 </a> (replaced) [<a href="/pdf/2503.22480" title="Download PDF" id="pdf-2503.22480" aria-labelledby="pdf-2503.22480">pdf</a>, <a href="https://arxiv.org/html/2503.22480v3" title="View HTML" id="html-2503.22480" aria-labelledby="html-2503.22480" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2503.22480" title="Other formats" id="oth-2503.22480" aria-labelledby="oth-2503.22480">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Probabilistic Uncertain Reward Model: A Natural Generalization of Bradley-Terry Reward Model </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Sun,+W">Wangtao Sun</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Cheng,+X">Xiang Cheng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yu,+X">Xing Yu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xu,+H">Haotian Xu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yang,+Z">Zhao Yang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=He,+S">Shizhu He</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhao,+J">Jun Zhao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+K">Kang Liu</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Reinforcement Learning from Human Feedback (RLHF) has emerged as a critical technique for training large language models. However, reward hacking-a phenomenon where models exploit flaws in the reward model-remains a significant barrier to achieving robust and scalable intelligence through long-term training. Existing studies have proposed uncertain reward models to address reward hacking; however, they often lack systematic or theoretical foundations, fail to model the uncertainty intrinsically emerging from preference data, and thus cannot sufficiently mitigate reward hacking to sustain prolonged RLHF training and exploration.
In this paper, we propose the Probabilistic Uncertain Reward Model (PURM), a natural generalization of the classical Bradley-Terry reward model, which directly models the reward distribution emerging from the preference data. We theoretically derive PURM's loss function and the reward-distribution uncertainty calculation based on the Bhattacharyya coefficient. To mitigate reward hacking with PURM, we further introduce an uncertainty-aware penalty into Proximal Policy Optimization (PPO), which leverages the learned uncertainty to dynamically balance reward optimization and exploration. We propose a lightweight and easy-to-use implementation of PURM. Experiments demonstrate that PURM significantly delays the onset of reward hacking while improving final reward performance, outperforming baseline methods in both stability and effectiveness. </p> </div> </dd> <dt> <a name='item185'>[185]</a> <a href ="/abs/2503.22733" title="Abstract" id="2503.22733"> arXiv:2503.22733 </a> (replaced) [<a href="/pdf/2503.22733" title="Download PDF" id="pdf-2503.22733" aria-labelledby="pdf-2503.22733">pdf</a>, <a href="https://arxiv.org/html/2503.22733v2" title="View HTML" id="html-2503.22733" aria-labelledby="html-2503.22733" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2503.22733" title="Other formats" id="oth-2503.22733" aria-labelledby="oth-2503.22733">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> RBFleX-NAS: Training-Free Neural Architecture Search Using Radial Basis Function Kernel and Hyperparameter Detection </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Yamasaki,+T">Tomomasa Yamasaki</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+Z">Zhehui Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Luo,+T">Tao Luo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+N">Niangjun Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+B">Bo Wang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 15 pages, 17 figures, Accepted to IEEE Transactions on Neural Networks and Learning Systems </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Neural Architecture Search (NAS) is an automated technique to design optimal neural network architectures for a specific workload. Conventionally, evaluating candidate networks in NAS involves extensive training, which requires significant time and computational resources. To address this, training-free NAS has been proposed to expedite network evaluation with minimal search time. However, state-of-the-art training-free NAS algorithms struggle to precisely distinguish well-performing networks from poorly-performing networks, resulting in inaccurate performance predictions and consequently sub-optimal top-1 network accuracy. Moreover, they are less effective in activation function exploration. To tackle the challenges, this paper proposes RBFleX-NAS, a novel training-free NAS framework that accounts for both activation outputs and input features of the last layer with a Radial Basis Function (RBF) kernel. We also present a detection algorithm to identify optimal hyperparameters using the obtained activation outputs and input feature maps. We verify the efficacy of RBFleX-NAS over a variety of NAS benchmarks.
RBFleX-NAS significantly outperforms state-of-the-art training-free NAS methods in terms of top-1 accuracy, achieving this with short search time in NAS-Bench-201 and NAS-Bench-SSS. In addition, it demonstrates higher Kendall correlation compared to layer-based training-free NAS algorithms. Furthermore, we propose NAFBee, a new activation design space that extends the activation type to encompass various commonly used functions. In this extended design space, RBFleX-NAS demonstrates its superiority by accurately identifying the best-performing network during activation function search, providing a significant advantage over other NAS algorithms. </p> </div> </dd> <dt> <a name='item186'>[186]</a> <a href ="/abs/2504.02329" title="Abstract" id="2504.02329"> arXiv:2504.02329 </a> (replaced) [<a href="/pdf/2504.02329" title="Download PDF" id="pdf-2504.02329" aria-labelledby="pdf-2504.02329">pdf</a>, <a href="https://arxiv.org/html/2504.02329v2" title="View HTML" id="html-2504.02329" aria-labelledby="html-2504.02329" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.02329" title="Other formats" id="oth-2504.02329" aria-labelledby="oth-2504.02329">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Towards Assessing Deep Learning Test Input Generators </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Mzoughi,+S">Seif Mzoughi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=yahmed,+A+H">Ahmed Haj yahmed</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Elshafei,+M">Mohamed Elshafei</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Khomh,+F">Foutse Khomh</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Costa,+D+E">Diego Elias Costa</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted to EASE 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Computer Vision and Pattern Recognition (cs.CV); Software Engineering (cs.SE) </div> <p class='mathjax'> Deep Learning (DL) systems are increasingly deployed in safety-critical applications, yet they remain vulnerable to robustness issues that can lead to significant failures. While numerous Test Input Generators (TIGs) have been developed to evaluate DL robustness, a comprehensive assessment of their effectiveness across different dimensions is still lacking. This paper presents a comprehensive assessment of four state-of-the-art TIGs--DeepHunter, DeepFault, AdvGAN, and SinVAD--across multiple critical aspects: fault-revealing capability, naturalness, diversity, and efficiency. Our empirical study leverages three pre-trained models (LeNet-5, VGG16, and EfficientNetB3) on datasets of varying complexity (MNIST, CIFAR-10, and ImageNet-1K) to evaluate TIG performance. Our findings reveal important trade-offs in robustness revealing capability, variation in test case generation, and computational efficiency across TIGs. The results also show that TIG performance varies significantly with dataset complexity, as tools that perform well on simpler datasets may struggle with more complex ones. In contrast, others maintain steadier performance or better scalability. This paper offers practical guidance for selecting appropriate TIGs aligned with specific objectives and dataset characteristics. 
Nonetheless, more work is needed to address TIG limitations and advance TIGs for real-world, safety-critical systems. </p> </div> </dd> <dt> <a name='item187'>[187]</a> <a href ="/abs/2504.02618" title="Abstract" id="2504.02618"> arXiv:2504.02618 </a> (replaced) [<a href="/pdf/2504.02618" title="Download PDF" id="pdf-2504.02618" aria-labelledby="pdf-2504.02618">pdf</a>, <a href="/format/2504.02618" title="Other formats" id="oth-2504.02618" aria-labelledby="oth-2504.02618">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Variational Online Mirror Descent for Robust Learning in Schrödinger Bridge </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Han,+D">Dong-Sig Han</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kim,+J">Jaein Kim</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yoo,+H+B">Hee Bin Yoo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+B">Byoung-Tak Zhang</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Machine Learning (stat.ML) </div> <p class='mathjax'> The Schrödinger bridge (SB) has evolved into a universal class of probabilistic generative models. In practice, however, estimated learning signals are often uncertain, and the reliability promised by existing methods is often based on speculative optimal-case scenarios. Recent studies regarding the Sinkhorn algorithm through mirror descent (MD) have gained attention, revealing geometric insights into solution acquisition of the SB problems. In this paper, we propose a variational online MD (OMD) framework for the SB problems, which provides further stability to SB solvers. We formally prove convergence and a regret bound for the novel OMD formulation of SB acquisition. As a result, we propose a simulation-free SB algorithm called Variational Mirrored Schrödinger Bridge (VMSB) by utilizing the Wasserstein-Fisher-Rao geometry of the Gaussian mixture parameterization for Schrödinger potentials. Based on the Wasserstein gradient flow theory, the algorithm offers tractable learning dynamics that precisely approximate each OMD step. In experiments, we validate the performance of the proposed VMSB algorithm across an extensive suite of benchmarks. VMSB consistently outperforms contemporary SB solvers on a range of SB problems, demonstrating the robustness predicted by our theory.
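</p>
<p class='mathjax'> For background on the mirror-descent view the paper builds on, a classic Sinkhorn iteration for discrete entropic optimal transport is sketched below; each scaling update is a Bregman-projection/MD step. VMSB itself operates on Gaussian-mixture Schrödinger potentials, which this sketch does not attempt. </p>
<pre>
import numpy as np

def sinkhorn(C, mu, nu, eps=0.1, iters=200):
    # Classic Sinkhorn scaling for entropic OT between discrete marginals;
    # each update alternately projects onto one marginal constraint.
    K = np.exp(-C / eps)
    u, v = np.ones_like(mu), np.ones_like(nu)
    for _ in range(iters):
        u = mu / (K @ v)
        v = nu / (K.T @ u)
    return u[:, None] * K * v[None, :]   # transport plan with given marginals

x, y = np.linspace(0, 1, 5), np.linspace(0, 1, 5)
C = (x[:, None] - y[None, :]) ** 2
mu = nu = np.full(5, 0.2)
P = sinkhorn(C, mu, nu)
print(P.sum(axis=1))                     # ~mu: marginal constraint satisfied
</pre>
<p class='mathjax'>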
</p> </div> </dd> <dt> <a name='item188'>[188]</a> <a href ="/abs/2504.03152" title="Abstract" id="2504.03152"> arXiv:2504.03152 </a> (replaced) [<a href="/pdf/2504.03152" title="Download PDF" id="pdf-2504.03152" aria-labelledby="pdf-2504.03152">pdf</a>, <a href="https://arxiv.org/html/2504.03152v2" title="View HTML" id="html-2504.03152" aria-labelledby="html-2504.03152" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.03152" title="Other formats" id="oth-2504.03152" aria-labelledby="oth-2504.03152">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Safe Screening Rules for Group OWL Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Bao,+R">Runxue Bao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lu,+Q">Quanchao Lu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+Y">Yanfu Zhang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 8 pages </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Machine Learning (stat.ML) </div> <p class='mathjax'> Group Ordered Weighted $L_{1}$-Norm (Group OWL) regularized models have emerged as a useful procedure for high-dimensional sparse multi-task learning with correlated features. Proximal gradient methods are used as standard approaches to solving Group OWL models. However, Group OWL models usually incur huge computational costs and memory usage when the feature dimension is large. To address this challenge, in this paper, we are the first to propose the safe screening rule for Group OWL models by effectively tackling the structured non-separable penalty, which can quickly identify the inactive features that have zero coefficients across all the tasks. Thus, by removing the inactive features during the training process, we may achieve substantial computational gain and memory savings. More importantly, the proposed screening rule can be directly integrated with the existing solvers both in the batch and stochastic settings. Theoretically, we prove that our screening rule is safe and can be safely applied to the existing iterative optimization algorithms. Our experimental results demonstrate that our screening rule effectively identifies the inactive features and leads to a significant computational speedup without any loss of accuracy. </p> </div> </dd> <dt> <a name='item189'>[189]</a> <a href ="/abs/2504.03814" title="Abstract" id="2504.03814"> arXiv:2504.03814 </a> (replaced) [<a href="/pdf/2504.03814" title="Download PDF" id="pdf-2504.03814" aria-labelledby="pdf-2504.03814">pdf</a>, <a href="https://arxiv.org/html/2504.03814v2" title="View HTML" id="html-2504.03814" aria-labelledby="html-2504.03814" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.03814" title="Other formats" id="oth-2504.03814" aria-labelledby="oth-2504.03814">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Recursive Training Loops in LLMs: How training data properties modulate distribution shift in generated data?
</div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Kova%C4%8D,+G">Grgur Kovač</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Perez,+J">Jérémy Perez</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Portelas,+R">Rémy Portelas</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dominey,+P+F">Peter Ford Dominey</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Oudeyer,+P">Pierre-Yves Oudeyer</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Computation and Language (cs.CL) </div> <p class='mathjax'> Large language models (LLMs) are increasingly contributing to the creation of content on the Internet. This creates a feedback loop as subsequent generations of models will be trained on this generated, synthetic data. This phenomenon is receiving increasing interest, in particular because previous studies have shown that it may lead to distribution shift - models misrepresent and forget the true underlying distributions of human data they are expected to approximate (e.g. resulting in a drastic loss of quality). In this study, we examine the impact of human data properties on distribution shift dynamics in iterated training loops. We first confirm that the distribution shift dynamics greatly vary depending on the human data by comparing four datasets (two based on Twitter and two on Reddit). We then test whether data quality may influence the rate of this shift. We find that it does on the Twitter datasets, but not on the Reddit ones. We then focus on a Reddit dataset and conduct a more exhaustive evaluation of a large set of dataset properties. This experiment associated lexical diversity with larger, and semantic diversity with smaller, detrimental shifts, suggesting that incorporating text with high lexical (but limited semantic) diversity could exacerbate the degradation of generated text. We then focus on the evolution of political bias, and find that the type of shift observed (bias reduction, amplification or inversion) depends on the political lean of the human (true) distribution. Overall, our work extends the existing literature on the consequences of recursive fine-tuning by showing that this phenomenon is highly dependent on features of the human data on which training occurs. This suggests that different parts of the internet (e.g. GitHub, Reddit) may undergo different types of shift depending on their properties.
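The iterated loop under study has a simple shape, sketched below; `train`, `generate`, and `shift_metric` are injected stand-ins (e.g. an LLM fine-tuning call, a sampling call, and a lexical/semantic-diversity or perplexity-based distance), not functions from the paper.

```python
def recursive_training(human_corpus, generations, train, generate, shift_metric):
    """Skeleton of a recursive training loop: each generation is trained on
    the previous generation's synthetic output, and we track drift from the
    original human data."""
    corpus, trajectory = human_corpus, []
    for _ in range(generations):
        model = train(corpus)                    # fine-tune on current corpus
        corpus = generate(model, n=len(corpus))  # fully synthetic next corpus
        trajectory.append(shift_metric(human_corpus, corpus))
    return trajectory                            # drift of each generation
```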
</p> </div> </dd> <dt> <a name='item190'>[190]</a> <a href ="/abs/2504.03994" title="Abstract" id="2504.03994"> arXiv:2504.03994 </a> (replaced) [<a href="/pdf/2504.03994" title="Download PDF" id="pdf-2504.03994" aria-labelledby="pdf-2504.03994">pdf</a>, <a href="https://arxiv.org/html/2504.03994v2" title="View HTML" id="html-2504.03994" aria-labelledby="html-2504.03994" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.03994" title="Other formats" id="oth-2504.03994" aria-labelledby="oth-2504.03994">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Improving Mixed-Criticality Scheduling with Reinforcement Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=El-Mahdy,+M">Muhammad El-Mahdy</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sakr,+N">Nourhan Sakr</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Carrasco,+R">Rodrigo Carrasco</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> This work was submitted to the 32nd International Conference on Real-Time Networks and Systems (RTNS) on June 8, 2024 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Artificial Intelligence (cs.AI); Multiagent Systems (cs.MA); Systems and Control (eess.SY) </div> <p class='mathjax'> This paper introduces a novel reinforcement learning (RL) approach to scheduling mixed-criticality (MC) systems on processors with varying speeds. Building upon the foundation laid by [1], we extend their work to address the non-preemptive scheduling problem, which is known to be NP-hard. By modeling this scheduling challenge as a Markov Decision Process (MDP), we develop an RL agent capable of generating near-optimal schedules for real-time MC systems. Our RL-based scheduler prioritizes high-critical tasks while maintaining overall system performance. <br>Through extensive experiments, we demonstrate the scalability and effectiveness of our approach. The RL scheduler significantly improves task completion rates, achieving around 80% overall and 85% for high-criticality tasks across 100,000 instances of synthetic data and real data under varying system conditions. Moreover, under stable conditions without degradation, the scheduler achieves 94% overall task completion and 93% for high-criticality tasks. These results highlight the potential of RL-based schedulers in real-time and safety-critical applications, offering substantial improvements in handling complex and dynamic scheduling scenarios. 
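To make the MDP framing concrete, here is a toy non-preemptive mixed-criticality environment in the gym style; the class and reward shape (`Task`, `MCSchedulingEnv`, criticality-weighted deadline hits) are hypothetical illustrations, not the paper's formulation.

```python
from dataclasses import dataclass

@dataclass
class Task:
    wcet: float              # worst-case execution time at nominal speed
    deadline: float          # absolute deadline
    high_criticality: bool   # HI- vs LO-criticality task

@dataclass
class MCSchedulingEnv:
    tasks: list              # ready queue
    speed: float = 1.0       # processor speed (can degrade below 1.0)
    time: float = 0.0        # simulated clock

    def step(self, action):
        """Run the chosen ready task to completion (non-preemptive)."""
        task = self.tasks.pop(action)
        self.time += task.wcet / self.speed
        met = self.time <= task.deadline
        # weight deadline hits by criticality, echoing the priority on HI tasks
        reward = (2.0 if task.high_criticality else 1.0) if met else 0.0
        return (self.time, len(self.tasks)), reward, not self.tasks

# toy episode: always run the earliest-deadline ready task
env = MCSchedulingEnv([Task(2.0, 5.0, True), Task(1.0, 2.0, False)], speed=0.9)
while env.tasks:
    idx = min(range(len(env.tasks)), key=lambda i: env.tasks[i].deadline)
    print(env.step(idx))
```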
</p> </div> </dd> <dt> <a name='item191'>[191]</a> <a href ="/abs/2504.04798" title="Abstract" id="2504.04798"> arXiv:2504.04798 </a> (replaced) [<a href="/pdf/2504.04798" title="Download PDF" id="pdf-2504.04798" aria-labelledby="pdf-2504.04798">pdf</a>, <a href="https://arxiv.org/html/2504.04798v2" title="View HTML" id="html-2504.04798" aria-labelledby="html-2504.04798" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.04798" title="Other formats" id="oth-2504.04798" aria-labelledby="oth-2504.04798">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> TabRep: a Simple and Effective Continuous Representation for Training Tabular Diffusion Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Si,+J">Jacob Si</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ou,+Z">Zijing Ou</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Qu,+M">Mike Qu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xiang,+Z">Zhengrui Xiang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+Y">Yingzhen Li</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span> </div> <p class='mathjax'> Diffusion models have been the predominant generative model for tabular data generation. However, they face the conundrum of modeling under a separate versus a unified data representation. The former encounters the challenge of jointly modeling all multi-modal distributions of tabular data in one model. While the latter alleviates this by learning a single representation for all features, it currently leverages sparse suboptimal encoding heuristics and necessitates additional computation costs. In this work, we address the latter by presenting TabRep, a tabular diffusion architecture trained with a unified continuous representation. To motivate the design of our representation, we provide geometric insights into how the data manifold affects diffusion models. The key attributes of our representation are composed of its density, flexibility to provide ample separability for nominal features, and ability to preserve intrinsic relationships. Ultimately, TabRep provides a simple yet effective approach for training tabular diffusion models under a continuous data manifold. Our results showcase that TabRep achieves superior performance across a broad suite of evaluations. It is the first to synthesize tabular data that exceeds the downstream quality of the original datasets while preserving privacy and remaining computationally efficient. 
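The idea of a unified continuous representation can be sketched as follows: every mixed-type row becomes one continuous vector that a diffusion model can operate on. The function name and the random embedding tables are stand-ins; TabRep's actual representation is purpose-built for density and separability of nominal features.

```python
import numpy as np

def unified_continuous_rep(num_x, cat_x, cat_cards, emb_dim=4, seed=0):
    """Map mixed tabular rows to a single continuous vector per row:
    z-scored numeric columns plus a continuous embedding per categorical."""
    rng = np.random.default_rng(seed)
    tables = [rng.standard_normal((card, emb_dim)) for card in cat_cards]
    num = (num_x - num_x.mean(0)) / (num_x.std(0) + 1e-8)   # standardize numerics
    cat = np.concatenate(
        [tables[j][cat_x[:, j]] for j in range(cat_x.shape[1])], axis=1
    )
    return np.concatenate([num, cat], axis=1)  # diffusion runs on this space

rng = np.random.default_rng(1)
rows = unified_continuous_rep(rng.standard_normal((8, 3)),
                              rng.integers(0, 5, size=(8, 2)), cat_cards=[5, 5])
print(rows.shape)  # (8, 3 + 2 * 4)
```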
</p> </div> </dd> <dt> <a name='item192'>[192]</a> <a href ="/abs/2504.05045" title="Abstract" id="2504.05045"> arXiv:2504.05045 </a> (replaced) [<a href="/pdf/2504.05045" title="Download PDF" id="pdf-2504.05045" aria-labelledby="pdf-2504.05045">pdf</a>, <a href="https://arxiv.org/html/2504.05045v2" title="View HTML" id="html-2504.05045" aria-labelledby="html-2504.05045" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05045" title="Other formats" id="oth-2504.05045" aria-labelledby="oth-2504.05045">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Attention-Augmented Inverse Reinforcement Learning with Graph Convolutions for Multi-Agent Task Allocation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Yin,+H">Huilin Yin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yang,+Z">Zhikun Yang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Watzenig,+D">Daniel Watzenig</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Added a clarification on the source of expert trajectories in Section V </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Multiagent Systems (cs.MA) </div> <p class='mathjax'> Multi-agent task allocation (MATA) plays a vital role in cooperative multi-agent systems, with significant implications for applications such as logistics, search and rescue, and robotic coordination. Although traditional deep reinforcement learning (DRL) methods have been shown to be promising, their effectiveness is hindered by a reliance on manually designed reward functions and inefficiencies in dynamic environments. In this paper, an inverse reinforcement learning (IRL)-based framework is proposed, in which multi-head self-attention (MHSA) and graph attention mechanisms are incorporated to enhance reward function learning and task execution efficiency. Expert demonstrations are utilized to infer optimal reward densities, allowing dependence on handcrafted designs to be reduced and adaptability to be improved. Extensive experiments validate the superiority of the proposed method over widely used multi-agent reinforcement learning (MARL) algorithms in terms of both cumulative rewards and task execution efficiency. 
</p> </div> </dd> <dt> <a name='item193'>[193]</a> <a href ="/abs/2504.05138" title="Abstract" id="2504.05138"> arXiv:2504.05138 </a> (replaced) [<a href="/pdf/2504.05138" title="Download PDF" id="pdf-2504.05138" aria-labelledby="pdf-2504.05138">pdf</a>, <a href="https://arxiv.org/html/2504.05138v2" title="View HTML" id="html-2504.05138" aria-labelledby="html-2504.05138" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05138" title="Other formats" id="oth-2504.05138" aria-labelledby="oth-2504.05138">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Towards Optimal Heterogeneous Client Sampling in Multi-Model Federated Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+H">Haoran Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gong,+Z">Zejun Gong</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+Z">Zekai Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Siew,+M">Marie Siew</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Joe-Wong,+C">Carlee Joe-Wong</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=El-Azouzi,+R">Rachid El-Azouzi</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 10 pages, solved a hyperlink problem </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Distributed, Parallel, and Cluster Computing (cs.DC) </div> <p class='mathjax'> Federated learning (FL) allows edge devices to collaboratively train models without sharing local data. As FL gains popularity, clients may need to train multiple unrelated FL models, but communication constraints limit their ability to train all models simultaneously. While clients could train FL models sequentially, opportunistically having FL clients concurrently train different models -- termed multi-model federated learning (MMFL) -- can reduce the overall training time. Prior work uses simple client-to-model assignments that do not optimize the contribution of each client to each model over the course of its training. Prior work on single-model FL shows that intelligent client selection can greatly accelerate convergence, but naïve extensions to MMFL can violate heterogeneous resource constraints at both the server and the clients. In this work, we develop a novel convergence analysis of MMFL with arbitrary client sampling methods, theoretically demonstrating the strengths and limitations of previous well-established gradient-based methods. Motivated by this analysis, we propose MMFL-LVR, a loss-based sampling method that minimizes training variance while explicitly respecting communication limits at the server and reducing computational costs at the clients. We extend this to MMFL-StaleVR, which incorporates stale updates for improved efficiency and stability, and MMFL-StaleVRE, a lightweight variant suitable for low-overhead deployment. Experiments show our methods improve average accuracy by up to 19.1% over random sampling, with only a 5.4% gap from the theoretical optimum (full client participation).
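A minimal sketch of budget-constrained, loss-proportional sampling of (client, model) pairs follows. It conveys the flavour of loss-based selection only; the actual MMFL-LVR probabilities come from the paper's variance-minimizing convergence analysis.

```python
import numpy as np

def loss_based_sampling(losses, budget, rng=None):
    """Pick `budget` (client, model) pairs with probability proportional to
    their current loss, under a server-side communication budget."""
    rng = rng or np.random.default_rng(0)
    flat = losses.ravel()
    p = flat / flat.sum()                       # loss-proportional probabilities
    idx = rng.choice(flat.size, size=budget, replace=False, p=p)
    return np.stack(np.unravel_index(idx, losses.shape), axis=1)  # (client, model)

# toy: 5 clients x 2 models, server can serve 3 uploads per round
print(loss_based_sampling(np.random.rand(5, 2) + 0.1, budget=3))
```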
</p> </div> </dd> <dt> <a name='item194'>[194]</a> <a href ="/abs/2504.05250" title="Abstract" id="2504.05250"> arXiv:2504.05250 </a> (replaced) [<a href="/pdf/2504.05250" title="Download PDF" id="pdf-2504.05250" aria-labelledby="pdf-2504.05250">pdf</a>, <a href="https://arxiv.org/html/2504.05250v2" title="View HTML" id="html-2504.05250" aria-labelledby="html-2504.05250" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.05250" title="Other formats" id="oth-2504.05250" aria-labelledby="oth-2504.05250">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> PEAKS: Selecting Key Training Examples Incrementally via Prediction Error Anchored by Kernel Similarity </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Gurbuz,+M+B">Mustafa Burak Gurbuz</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zheng,+X">Xingyu Zheng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dovrolis,+C">Constantine Dovrolis</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Machine Learning (stat.ML) </div> <p class='mathjax'> As deep learning continues to be driven by ever-larger datasets, understanding which examples are most important for generalization has become a critical question. While progress in data selection continues, emerging applications require studying this problem in dynamic contexts. To bridge this gap, we pose the Incremental Data Selection (IDS) problem, where examples arrive as a continuous stream, and need to be selected without access to the full data source. In this setting, the learner must incrementally build a training dataset of predefined size while simultaneously learning the underlying task. We find that in IDS, the impact of a new sample on the model state depends fundamentally on both its geometric relationship in the feature space and its prediction error. Leveraging this insight, we propose PEAKS (Prediction Error Anchored by Kernel Similarity), an efficient data selection method tailored for IDS. Our comprehensive evaluations demonstrate that PEAKS consistently outperforms existing selection strategies. Furthermore, PEAKS yields increasingly better performance returns than random selection as training data size grows on real-world datasets. </p> </div> </dd> <dt> <a name='item195'>[195]</a> <a href ="/abs/2010.12059" title="Abstract" id="2010.12059"> arXiv:2010.12059 </a> (replaced) [<a href="/pdf/2010.12059" title="Download PDF" id="pdf-2010.12059" aria-labelledby="pdf-2010.12059">pdf</a>, <a href="https://arxiv.org/html/2010.12059v2" title="View HTML" id="html-2010.12059" aria-labelledby="html-2010.12059" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2010.12059" title="Other formats" id="oth-2010.12059" aria-labelledby="oth-2010.12059">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Principled Interpolation in Normalizing Flows </div> <div class='list-authors'><a href="https://arxiv.org/search/stat?searchtype=author&query=Fadel,+S+G">Samuel G. Fadel</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Mair,+S">Sebastian Mair</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=da+S.+Torres,+R">Ricardo da S. 
Torres</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Brefeld,+U">Ulf Brefeld</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 20 pages, 11 figures, accepted at the European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECML PKDD 2021) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (stat.ML)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Generative models based on normalizing flows are very successful in modeling complex data distributions using simpler ones. However, straightforward linear interpolations show unexpected side effects, as interpolation paths lie outside the area where samples are observed. This is caused by the standard choice of Gaussian base distributions and can be seen in the norms of the interpolated samples as they are outside the data manifold. This observation suggests that changing the way of interpolating should generally result in better interpolations, but it is not clear how to do that in an unambiguous way. In this paper, we solve this issue by enforcing a specific manifold and, hence, change the base distribution, to allow for a principled way of interpolation. Specifically, we use the Dirichlet and von Mises-Fisher base distributions on the probability simplex and the hypersphere, respectively. Our experimental results show superior performance in terms of bits per dimension, Fréchet Inception Distance (FID), and Kernel Inception Distance (KID) scores for interpolation, while maintaining the generative performance. </p> </div> </dd> <dt> <a name='item196'>[196]</a> <a href ="/abs/2301.00922" title="Abstract" id="2301.00922"> arXiv:2301.00922 </a> (replaced) [<a href="/pdf/2301.00922" title="Download PDF" id="pdf-2301.00922" aria-labelledby="pdf-2301.00922">pdf</a>, <a href="https://arxiv.org/html/2301.00922v2" title="View HTML" id="html-2301.00922" aria-labelledby="html-2301.00922" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2301.00922" title="Other formats" id="oth-2301.00922" aria-labelledby="oth-2301.00922">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Faster Reinforcement Learning by Freezing Slow States </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+Y">Yijia Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jiang,+D+R">Daniel R. Jiang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 66 pages, 10 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Artificial Intelligence (cs.AI)</span>; Machine Learning (cs.LG); Systems and Control (eess.SY); Optimization and Control (math.OC) </div> <p class='mathjax'> We study infinite horizon Markov decision processes (MDPs) with "fast-slow" structure, where some state variables evolve rapidly ("fast states") while others change more gradually ("slow states"). Such structure is common in real-world problems where sequential decisions need to be made at high frequencies over long horizons, where slowly evolving information also influences optimal decisions. Examples include inventory control under slowly changing demand, or dynamic pricing with gradually shifting consumer behavior.
Modeling the problem at the natural decision frequency leads to MDPs with discount factors close to one, making them computationally challenging. We propose a novel approximation strategy that "freezes" slow states during a phase of lower-level planning, solving finite-horizon MDPs conditioned on a fixed slow state, and then applying value iteration to an auxiliary upper-level MDP that evolves on a slower timescale. Freezing states for short periods of time leads to easier-to-solve lower-level problems, while a slower upper-level timescale allows for a more favorable discount factor. On the theoretical side, we analyze the regret incurred by our frozen-state approach, which leads to simple insights on how to trade off computational budget versus regret. Empirically, we demonstrate that frozen-state methods produce high-quality policies with significantly less computation, and we show that simply omitting slow states is often a poor heuristic. </p> </div> </dd> <dt> <a name='item197'>[197]</a> <a href ="/abs/2303.01353" title="Abstract" id="2303.01353"> arXiv:2303.01353 </a> (replaced) [<a href="/pdf/2303.01353" title="Download PDF" id="pdf-2303.01353" aria-labelledby="pdf-2303.01353">pdf</a>, <a href="https://arxiv.org/html/2303.01353v4" title="View HTML" id="html-2303.01353" aria-labelledby="html-2303.01353" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2303.01353" title="Other formats" id="oth-2303.01353" aria-labelledby="oth-2303.01353">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Penalising the biases in norm regularisation enforces sparsity </div> <div class='list-authors'><a href="https://arxiv.org/search/stat?searchtype=author&query=Boursier,+E">Etienne Boursier</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Flammarion,+N">Nicolas Flammarion</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Corrected a mistake in the previous version of Theorem 4 (appendix) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (stat.ML)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Controlling the parameters' norm often yields good generalisation when training neural networks. Beyond simple intuitions, the relation between regularising parameters' norm and obtained estimators remains theoretically misunderstood. For one hidden ReLU layer networks with unidimensional data, this work shows the parameters' norm required to represent a function is given by the total variation of its second derivative, weighted by a $\sqrt{1+x^2}$ factor. Notably, this weighting factor disappears when the norm of bias terms is not regularised. The presence of this additional weighting factor is of utmost significance as it is shown to enforce the uniqueness and sparsity (in the number of kinks) of the minimal norm interpolator. Conversely, omitting the bias' norm allows for non-sparse solutions. Penalising the bias terms in the regularisation, either explicitly or implicitly, thus leads to sparse estimators. 
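The abstract's central characterization can be written compactly; the sketch below paraphrases it in notation (see the paper for the precise function class and normalization):

```latex
% One-hidden-layer ReLU network, unidimensional data, biases included in the
% penalty: the minimal parameter norm needed to represent f is the weighted
% total variation of its second derivative,
\min_{\theta \,:\, f_\theta = f} \; \|\theta\|_2^2
  \;=\; \int_{\mathbb{R}} \sqrt{1 + x^2}\, \mathrm{d}\lvert f'' \rvert(x).
% Dropping the bias terms from the norm removes the \sqrt{1+x^2} weight,
% and with it the uniqueness and kink-sparsity of the minimal-norm interpolator.
```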
</p> </div> </dd> <dt> <a name='item198'>[198]</a> <a href ="/abs/2306.11908" title="Abstract" id="2306.11908"> arXiv:2306.11908 </a> (replaced) [<a href="/pdf/2306.11908" title="Download PDF" id="pdf-2306.11908" aria-labelledby="pdf-2306.11908">pdf</a>, <a href="https://arxiv.org/html/2306.11908v3" title="View HTML" id="html-2306.11908" aria-labelledby="html-2306.11908" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2306.11908" title="Other formats" id="oth-2306.11908" aria-labelledby="oth-2306.11908">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Generalized Random Forests using Fixed-Point Trees </div> <div class='list-authors'><a href="https://arxiv.org/search/stat?searchtype=author&query=Fleischer,+D">David Fleischer</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Stephens,+D+A">David A. Stephens</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Yang,+A">Archer Yang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 34 pages, 26 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (stat.ML)</span>; Machine Learning (cs.LG); Methodology (stat.ME) </div> <p class='mathjax'> We propose a computationally efficient alternative to generalized random forests <a href="https://arxiv.org/abs/1610.01271" data-arxiv-id="1610.01271" class="link-https">arXiv:1610.01271</a> (GRFs) for estimating heterogeneous effects in large dimensions. While GRFs rely on a gradient-based splitting criterion, which in large dimensions is computationally expensive and unstable, our method introduces a fixed-point approximation that eliminates the need for Jacobian estimation. This gradient-free approach preserves GRFs' theoretical guarantees of consistency and asymptotic normality while significantly improving computational efficiency. We demonstrate that our method runs several times faster than standard GRFs without compromising statistical accuracy. Experiments on both simulated and real-world data validate our approach. Our findings suggest that the proposed method is a scalable alternative for localized effect estimation in machine learning and causal inference applications.
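The appeal of a fixed-point scheme is that it needs only evaluations of an update map, never a Jacobian. The generic iteration below illustrates that contrast with the gradient/Newton-style step it replaces; it is not the package's estimator.

```python
import math

def fixed_point_solve(F, theta0, tol=1e-8, max_iter=500):
    """Generic fixed-point iteration theta <- F(theta): no Jacobian needed,
    unlike a Newton-style update."""
    theta = theta0
    for _ in range(max_iter):
        theta_new = F(theta)
        if abs(theta_new - theta) < tol:
            return theta_new
        theta = theta_new
    return theta

# toy: solve theta = cos(theta)
print(fixed_point_solve(math.cos, 1.0))  # ~0.739085 (the Dottie number)
```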
</p> </div> </dd> <dt> <a name='item199'>[199]</a> <a href ="/abs/2306.11950" title="Abstract" id="2306.11950"> arXiv:2306.11950 </a> (replaced) [<a href="/pdf/2306.11950" title="Download PDF" id="pdf-2306.11950" aria-labelledby="pdf-2306.11950">pdf</a>, <a href="https://arxiv.org/html/2306.11950v2" title="View HTML" id="html-2306.11950" aria-labelledby="html-2306.11950" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2306.11950" title="Other formats" id="oth-2306.11950" aria-labelledby="oth-2306.11950">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Mitigating Communication Costs in Neural Networks: The Role of Dendritic Nonlinearity </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Wu,+X">Xundong Wu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhao,+P">Pengfei Zhao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yu,+Z">Zilin Yu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ma,+L">Lei Ma</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yip,+K">Ka-Wa Yip</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Tang,+H">Huajin Tang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Pan,+G">Gang Pan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Panayiota,+P">Poirazi Panayiota</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Huang,+T">Tiejun Huang</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Neural and Evolutionary Computing (cs.NE)</span>; Machine Learning (cs.LG); Neurons and Cognition (q-bio.NC) </div> <p class='mathjax'> Our understanding of biological neuronal networks has profoundly influenced the development of artificial neural networks (ANNs). However, neurons utilized in ANNs differ considerably from their biological counterparts, primarily due to the absence of complex dendritic trees with local nonlinearities. Early studies have suggested that dendritic nonlinearities could substantially improve the learning capabilities of neural network models. In this study, we systematically examined the role of nonlinear dendrites within neural networks. Utilizing machine-learning methodologies, we assessed how dendritic nonlinearities influence neural network performance. Our findings demonstrate that dendritic nonlinearities do not substantially affect learning capacity; rather, their primary benefit lies in enabling network capacity expansion while minimizing communication costs through effective localized feature aggregation. This research provides critical insights with significant implications for designing future neural network accelerators aimed at reducing communication overhead during neural network training and inference. 
</p> </div> </dd> <dt> <a name='item200'>[200]</a> <a href ="/abs/2308.10098" title="Abstract" id="2308.10098"> arXiv:2308.10098 </a> (replaced) [<a href="/pdf/2308.10098" title="Download PDF" id="pdf-2308.10098" aria-labelledby="pdf-2308.10098">pdf</a>, <a href="https://arxiv.org/html/2308.10098v4" title="View HTML" id="html-2308.10098" aria-labelledby="html-2308.10098" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2308.10098" title="Other formats" id="oth-2308.10098" aria-labelledby="oth-2308.10098">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> An adaptively inexact first-order method for bilevel optimization with application to hyperparameter learning </div> <div class='list-authors'><a href="https://arxiv.org/search/math?searchtype=author&query=Salehi,+M+S">Mohammad Sadegh Salehi</a>, <a href="https://arxiv.org/search/math?searchtype=author&query=Mukherjee,+S">Subhadip Mukherjee</a>, <a href="https://arxiv.org/search/math?searchtype=author&query=Roberts,+L">Lindon Roberts</a>, <a href="https://arxiv.org/search/math?searchtype=author&query=Ehrhardt,+M+J">Matthias J. Ehrhardt</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Optimization and Control (math.OC)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Various tasks in data science are modeled utilizing the variational regularization approach, where manually selecting regularization parameters presents a challenge. The difficulty gets exacerbated when employing regularizers involving a large number of hyperparameters. To overcome this challenge, bilevel learning can be employed to learn such parameters from data. However, neither exact function values nor exact gradients with respect to the hyperparameters are attainable, necessitating methods that only rely on inexact evaluation of such quantities. State-of-the-art inexact gradient-based methods a priori select a sequence of the required accuracies and cannot identify an appropriate step size since the Lipschitz constant of the hypergradient is unknown. In this work, we propose an algorithm with backtracking line search that only relies on inexact function evaluations and hypergradients and show convergence to a stationary point. Furthermore, the proposed algorithm determines the required accuracy dynamically rather than manually selected before running it. Our numerical experiments demonstrate the efficiency and feasibility of our approach for hyperparameter estimation on a range of relevant problems in imaging and data science such as total variation and field of experts denoising and multinomial logistic regression. Particularly, the results show that the algorithm is robust to its own hyperparameters such as the initial accuracies and step size. 
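The backtracking idea can be sketched in a few lines. This is a simplified stand-in: the paper's algorithm additionally adapts the accuracy of the inexact function and hypergradient evaluations on the fly, which the toy below omits.

```python
import numpy as np

def inexact_backtracking_descent(f, grad, x0, t0=1.0, shrink=0.5, c=1e-4, iters=100):
    """Descent with Armijo backtracking when f and grad are only *inexact*
    (e.g. inexact hypergradients in bilevel learning)."""
    x = np.asarray(x0, dtype=float)
    for _ in range(iters):
        g = grad(x)
        fx, t = f(x), t0
        for _ in range(30):                              # cap the backtracking loop
            if f(x - t * g) <= fx - c * t * float(g @ g):
                break                                    # sufficient decrease holds
            t *= shrink
        x = x - t * g
    return x

# toy quadratic with noisy evaluations standing in for inexact hypergradients
rng = np.random.default_rng(0)
f = lambda x: float(x @ x) + 1e-4 * rng.standard_normal()
grad = lambda x: 2 * x + 1e-3 * rng.standard_normal(x.size)
print(inexact_backtracking_descent(f, grad, np.ones(3)))
```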
</p> </div> </dd> <dt> <a name='item201'>[201]</a> <a href ="/abs/2311.11882" title="Abstract" id="2311.11882"> arXiv:2311.11882 </a> (replaced) [<a href="/pdf/2311.11882" title="Download PDF" id="pdf-2311.11882" aria-labelledby="pdf-2311.11882">pdf</a>, <a href="https://arxiv.org/html/2311.11882v2" title="View HTML" id="html-2311.11882" aria-labelledby="html-2311.11882" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2311.11882" title="Other formats" id="oth-2311.11882" aria-labelledby="oth-2311.11882">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Multi-Task Faces (MTF) Data Set: A Legally and Ethically Compliant Collection of Face Images for Various Classification Tasks </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Haffar,+R">Rami Haffar</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=S%C3%A1nchez,+D">David Sánchez</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Domingo-Ferrer,+J">Josep Domingo-Ferrer</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 21 pages, 2 figures, 9 tables </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Human facial data offers valuable potential for tackling classification problems, including face recognition, age estimation, gender identification, emotion analysis, and race classification. However, recent privacy regulations, particularly the EU General Data Protection Regulation, have restricted the collection and usage of human images in research. As a result, several previously published face data sets have been removed from the internet due to inadequate data collection methods and privacy concerns. While synthetic data sets have been suggested as an alternative, they fall short of accurately representing the real data distribution. Additionally, most existing data sets are labeled for just a single task, which limits their versatility. To address these limitations, we introduce the Multi-Task Face (MTF) data set, designed for various tasks, including face recognition and classification by race, gender, and age, as well as for aiding in training generative networks. The MTF data set comes in two versions: a non-curated set containing 132,816 images of 640 individuals and a manually curated set with 5,246 images of 240 individuals, meticulously selected to maximize their classification quality. Both data sets were ethically sourced, using publicly available celebrity images in full compliance with copyright regulations. Along with providing detailed descriptions of data collection and processing, we evaluated the effectiveness of the MTF data set in training five deep learning models across the aforementioned classification tasks, achieving up to 98.88\% accuracy for gender classification, 95.77\% for race classification, 97.60\% for age classification, and 79.87\% for face recognition with the ConvNeXT model. Both MTF data sets can be accessed through the following link.
<a href="https://github.com/RamiHaf/MTF_data_set" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </p> </div> </dd> <dt> <a name='item202'>[202]</a> <a href ="/abs/2404.03543" title="Abstract" id="2404.03543"> arXiv:2404.03543 </a> (replaced) [<a href="/pdf/2404.03543" title="Download PDF" id="pdf-2404.03543" aria-labelledby="pdf-2404.03543">pdf</a>, <a href="https://arxiv.org/html/2404.03543v3" title="View HTML" id="html-2404.03543" aria-labelledby="html-2404.03543" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2404.03543" title="Other formats" id="oth-2404.03543" aria-labelledby="oth-2404.03543">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> CodeEditorBench: Evaluating Code Editing Capability of Large Language Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Guo,+J">Jiawei Guo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+Z">Ziming Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+X">Xueling Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ma,+K">Kaijing Ma</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zheng,+T">Tianyu Zheng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yu,+Z">Zhouliang Yu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Pan,+D">Ding Pan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=LI,+Y">Yizhi LI</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+R">Ruibo Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+Y">Yue Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Guo,+S">Shuyue Guo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Qu,+X">Xingwei Qu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yue,+X">Xiang Yue</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+G">Ge Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+W">Wenhu Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Fu,+J">Jie Fu</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Software Engineering (cs.SE)</span>; Artificial Intelligence (cs.AI); Computation and Language (cs.CL); Machine Learning (cs.LG) </div> <p class='mathjax'> Large Language Models (LLMs) for code are rapidly evolving, with code editing emerging as a critical capability. We introduce CodeEditorBench, an evaluation framework designed to rigorously assess the performance of LLMs in code editing tasks, including debugging, translating, polishing, and requirement switching. Unlike existing benchmarks focusing solely on code generation, CodeEditorBench emphasizes real-world scenarios and practical aspects of software development. We curate diverse coding challenges and scenarios from five sources, covering various programming languages, complexity levels, and editing tasks. Evaluation of 19 LLMs reveals that closed-source models (particularly Gemini-Ultra and GPT-4), outperform open-source models in CodeEditorBench, highlighting differences in model performance based on problem types and prompt sensitivities. CodeEditorBench aims to catalyze advancements in LLMs by providing a robust platform for assessing code editing capabilities. 
We will release all prompts and datasets to enable the community to expand the dataset and benchmark emerging LLMs. By introducing CodeEditorBench, we contribute to the advancement of LLMs in code editing and provide a valuable resource for researchers and practitioners. </p> </div> </dd> <dt> <a name='item203'>[203]</a> <a href ="/abs/2405.05187" title="Abstract" id="2405.05187"> arXiv:2405.05187 </a> (replaced) [<a href="/pdf/2405.05187" title="Download PDF" id="pdf-2405.05187" aria-labelledby="pdf-2405.05187">pdf</a>, <a href="https://arxiv.org/html/2405.05187v2" title="View HTML" id="html-2405.05187" aria-labelledby="html-2405.05187" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2405.05187" title="Other formats" id="oth-2405.05187" aria-labelledby="oth-2405.05187">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A score-based particle method for homogeneous Landau equation </div> <div class='list-authors'><a href="https://arxiv.org/search/math?searchtype=author&query=Huang,+Y">Yan Huang</a>, <a href="https://arxiv.org/search/math?searchtype=author&query=Wang,+L">Li Wang</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Numerical Analysis (math.NA)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> We propose a novel score-based particle method for solving the Landau equation in plasmas, that seamlessly integrates learning with structure-preserving particle methods [<a href="https://arxiv.org/abs/1910.03080" data-arxiv-id="1910.03080" class="link-https">arXiv:1910.03080</a>]. Building upon the Lagrangian viewpoint of the Landau equation, a central challenge stems from the nonlinear dependence of the velocity field on the density. Our primary innovation lies in recognizing that this nonlinearity is in the form of the score function, which can be approximated dynamically via techniques from score-matching. The resulting method inherits the conservation properties of the deterministic particle method while sidestepping the necessity for kernel density estimation in [<a href="https://arxiv.org/abs/1910.03080" data-arxiv-id="1910.03080" class="link-https">arXiv:1910.03080</a>]. This streamlines computation and enhances scalability with dimensionality. Furthermore, we provide a theoretical estimate by demonstrating that the KL divergence between our approximation and the true solution can be effectively controlled by the score-matching loss. Additionally, by adopting the flow map viewpoint, we derive an update formula for exact density computation. Extensive examples have been provided to show the efficiency of the method, including a physically relevant case of Coulomb interaction. 
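The score-matching ingredient the Landau-equation solver relies on is standard and easy to sketch. Below is denoising score matching in a few lines of PyTorch: the network learns to approximate the score (gradient of the log density) of the particle distribution. Only this piece is shown; the particle transport step that consumes the learned score is the paper's contribution and is not reproduced here.

```python
import torch
import torch.nn as nn

# Denoising score matching: train s_theta so that s_theta(x + sigma*eps)
# matches -eps/sigma, which estimates grad_x log density of the noised data.
torch.manual_seed(0)
score = nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 2))
opt = torch.optim.Adam(score.parameters(), lr=1e-3)
sigma = 0.1
x = torch.randn(4096, 2)  # stand-in for particle velocities

for _ in range(200):
    eps = torch.randn_like(x)
    loss = ((score(x + sigma * eps) + eps / sigma) ** 2).sum(dim=1).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()
# score(v) now approximates the score of the (noised) particle density,
# i.e. the nonlinear term the deterministic particle update needs.
```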
</p> </div> </dd> <dt> <a name='item204'>[204]</a> <a href ="/abs/2405.05733" title="Abstract" id="2405.05733"> arXiv:2405.05733 </a> (replaced) [<a href="/pdf/2405.05733" title="Download PDF" id="pdf-2405.05733" aria-labelledby="pdf-2405.05733">pdf</a>, <a href="https://arxiv.org/html/2405.05733v3" title="View HTML" id="html-2405.05733" aria-labelledby="html-2405.05733" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2405.05733" title="Other formats" id="oth-2405.05733" aria-labelledby="oth-2405.05733">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Batched Stochastic Bandit for Nondegenerate Functions </div> <div class='list-authors'><a href="https://arxiv.org/search/stat?searchtype=author&query=Liu,+Y">Yu Liu</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Shu,+Y">Yunlu Shu</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Wang,+T">Tianyu Wang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 34 pages, 14 colored figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (stat.ML)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> This paper studies batched bandit learning problems for nondegenerate functions. We introduce an algorithm that solves the batched bandit problem for nondegenerate functions near-optimally. More specifically, we introduce an algorithm, called Geometric Narrowing (GN), whose regret bound is of order $\widetilde{\mathcal{O}} ( A_{+}^d \sqrt{T} )$. In addition, GN only needs $\mathcal{O} (\log \log T)$ batches to achieve this regret. We also provide lower bound analysis for this problem. More specifically, we prove that over some (compact) doubling metric space of doubling dimension $d$: 1. For any policy $\pi$, there exists a problem instance on which $\pi$ admits a regret of order ${\Omega} ( A_-^d \sqrt{T})$; 2. No policy can achieve a regret of order $ A_-^d \sqrt{T} $ over all problem instances, using less than $ \Omega ( \log \log T ) $ rounds of communications. Our lower bound analysis shows that the GN algorithm achieves near optimal regret with minimal number of batches. 
</p> </div> </dd> <dt> <a name='item205'>[205]</a> <a href ="/abs/2405.13944" title="Abstract" id="2405.13944"> arXiv:2405.13944 </a> (replaced) [<a href="/pdf/2405.13944" title="Download PDF" id="pdf-2405.13944" aria-labelledby="pdf-2405.13944">pdf</a>, <a href="https://arxiv.org/html/2405.13944v2" title="View HTML" id="html-2405.13944" aria-labelledby="html-2405.13944" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2405.13944" title="Other formats" id="oth-2405.13944" aria-labelledby="oth-2405.13944">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Survey on Design-space Dimensionality Reduction Methods for Shape Optimization </div> <div class='list-authors'><a href="https://arxiv.org/search/math?searchtype=author&query=Serani,+A">Andrea Serani</a>, <a href="https://arxiv.org/search/math?searchtype=author&query=Diez,+M">Matteo Diez</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Optimization and Control (math.OC)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> The rapidly evolving field of engineering design of functional surfaces necessitates sophisticated tools to manage the inherent complexity of high-dimensional design spaces. This survey paper offers a scoping review, i.e., a literature mapping synthesis borrowed from clinical medicine, delving into the field of design-space dimensionality reduction techniques tailored for shape optimization, bridging traditional methods and cutting-edge technologies. Dissecting the spectrum of these techniques, from classical linear approaches like principal component analysis to more nuanced nonlinear methods such as autoencoders, the discussion extends to innovative physics-informed methods that integrate physical data into the dimensionality reduction process, enhancing the physical relevance and effectiveness of reduced design spaces. By integrating these methods into optimization frameworks, it is shown how they significantly mitigate the curse of dimensionality, streamline computational processes, and refine the design exploration and optimization of complex functional surfaces. The survey provides a classification of methods and highlights the transformative impact of these techniques in simplifying design challenges, thereby fostering more efficient and effective engineering solutions. 
</p> </div> </dd> <dt> <a name='item206'>[206]</a> <a href ="/abs/2405.18220" title="Abstract" id="2405.18220"> arXiv:2405.18220 </a> (replaced) [<a href="/pdf/2405.18220" title="Download PDF" id="pdf-2405.18220" aria-labelledby="pdf-2405.18220">pdf</a>, <a href="https://arxiv.org/html/2405.18220v2" title="View HTML" id="html-2405.18220" aria-labelledby="html-2405.18220" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2405.18220" title="Other formats" id="oth-2405.18220" aria-labelledby="oth-2405.18220">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Non-negative Tensor Mixture Learning for Discrete Density Estimation </div> <div class='list-authors'><a href="https://arxiv.org/search/stat?searchtype=author&query=Ghalamkari,+K">Kazu Ghalamkari</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Hinrich,+J+L">Jesper Løve Hinrich</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=M%C3%B8rup,+M">Morten Mørup</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 31 pages, 7 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (stat.ML)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> We present an expectation-maximization (EM) based unified framework for non-negative tensor decomposition that optimizes the Kullback-Leibler divergence. To avoid iterations in each M-step and learning rate tuning, we establish a general relationship between low-rank decompositions and many-body approximations. Using this connection, we exploit that the closed-form solution of the many-body approximation updates all parameters simultaneously in the M-step. Our framework offers not only a unified methodology for a variety of low-rank structures, including CP, Tucker, and Tensor Train decompositions, but also their mixtures. Notably, the weights of each low-rank tensor in the mixture can be learned from the data, which enables us to leverage the advantage of different low-rank structures without careful selection of the structure in advance. We empirically demonstrate that our framework overall provides superior generalization in terms of discrete density estimation and classification when compared to conventional tensor-based approaches.
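To see why a closed-form M-step is attractive, consider the simplest instance: learning the mixture weights of fixed non-negative components under KL divergence. The sketch below freezes each component (in the paper the components are themselves low-rank tensors updated in closed form); it is generic EM machinery, not the paper's algorithm.

```python
import numpy as np

def em_mixture_weights(components, target, iters=200):
    """EM for the weights of a mixture of fixed non-negative components,
    fitting a discrete distribution under KL divergence."""
    P = np.stack([c / c.sum() for c in components])  # K x S component dists
    q = np.asarray(target, dtype=float)
    q = q / q.sum()                                  # empirical distribution
    w = np.full(len(components), 1.0 / len(components))
    for _ in range(iters):
        mix = w @ P                                  # current mixture over S states
        resp = (w[:, None] * P) / np.clip(mix, 1e-12, None)  # E-step responsibilities
        w = resp @ q                                 # M-step: closed form, no tuning
    return w

# toy: recover the weights of a 2-component mixture over 10 states
rng = np.random.default_rng(0)
comps = [rng.random(10), rng.random(10)]
true = 0.7 * comps[0] / comps[0].sum() + 0.3 * comps[1] / comps[1].sum()
print(em_mixture_weights(comps, true))  # approximately [0.7, 0.3]
```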
</p> </div> </dd> <dt> <a name='item207'>[207]</a> <a href ="/abs/2405.20769" title="Abstract" id="2405.20769"> arXiv:2405.20769 </a> (replaced) [<a href="/pdf/2405.20769" title="Download PDF" id="pdf-2405.20769" aria-labelledby="pdf-2405.20769">pdf</a>, <a href="https://arxiv.org/html/2405.20769v2" title="View HTML" id="html-2405.20769" aria-labelledby="html-2405.20769" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2405.20769" title="Other formats" id="oth-2405.20769" aria-labelledby="oth-2405.20769">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Avoiding Pitfalls for Privacy Accounting of Subsampled Mechanisms under Composition </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Lebeda,+C+J">Christian Janos Lebeda</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Regehr,+M">Matthew Regehr</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kamath,+G">Gautam Kamath</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Steinke,+T">Thomas Steinke</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span>; Data Structures and Algorithms (cs.DS); Machine Learning (cs.LG); Machine Learning (stat.ML) </div> <p class='mathjax'> We consider the problem of computing tight privacy guarantees for the composition of subsampled differentially private mechanisms. Recent algorithms can numerically compute the privacy parameters to arbitrary precision but must be carefully applied. <br>Our main contribution is to address two common points of confusion. First, some privacy accountants assume that the privacy guarantees for the composition of a subsampled mechanism are determined by self-composing the worst-case datasets for the uncomposed mechanism. We show that this is not true in general. Second, Poisson subsampling is sometimes assumed to have similar privacy guarantees compared to sampling without replacement. We show that the privacy guarantees may in fact differ significantly between the two sampling schemes. In particular, we give an example of hyperparameters that result in $\varepsilon \approx 1$ for Poisson subsampling and $\varepsilon > 10$ for sampling without replacement. This occurs for some parameters that could realistically be chosen for DP-SGD. 
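The two sampling schemes being contrasted are easy to state in code, which makes the point of the paper concrete: they produce batches of the same expected size, yet their privacy guarantees under composition can differ dramatically, so an accountant must not treat them interchangeably.

```python
import numpy as np
rng = np.random.default_rng(0)

def poisson_subsample(n, q):
    """Poisson subsampling: each record joins the batch independently w.p. q."""
    return np.flatnonzero(rng.random(n) < q)

def swor_subsample(n, m):
    """Fixed-size batch of m records, sampled without replacement."""
    return rng.choice(n, size=m, replace=False)

# same expected batch size (100), very different accounting behaviour
print(len(poisson_subsample(10_000, 0.01)), len(swor_subsample(10_000, 100)))
```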
</p> </div> </dd> <dt> <a name='item208'>[208]</a> <a href ="/abs/2407.09550" title="Abstract" id="2407.09550"> arXiv:2407.09550 </a> (replaced) [<a href="/pdf/2407.09550" title="Download PDF" id="pdf-2407.09550" aria-labelledby="pdf-2407.09550">pdf</a>, <a href="https://arxiv.org/html/2407.09550v2" title="View HTML" id="html-2407.09550" aria-labelledby="html-2407.09550" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2407.09550" title="Other formats" id="oth-2407.09550" aria-labelledby="oth-2407.09550">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> CAPM: Fast and Robust Verification on Maxpool-based CNN via Dual Network </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Bai,+J">Jia-Hau Bai</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+C">Chi-Ting Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+Y">Yu Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chang,+F">Fu-Chieh Chang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wu,+P">Pei-Yuan Wu</a></div> <div class='list-journal-ref'><span class='descriptor'>Journal-ref:</span> ICLR 2025 Workshop: VerifAI: AI Verification in the Wild </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> This study uses CAPM (Convex Adversarial Polytope for Maxpool-based CNN) to improve the verified bound for general-purpose maxpool-based convolutional neural networks (CNNs) under bounded-norm adversarial perturbations. The maxpool function is decomposed as a series of ReLU functions to extend the convex relaxation technique to maxpool functions, by which the verified bound can be efficiently computed through a dual network. The experimental results demonstrate that this technique achieves state-of-the-art verification precision for maxpool-based CNNs and involves a much lower computational cost than current verification methods, such as DeepZ, DeepPoly and PRIMA. This method is also applicable to large-scale CNNs, which previous studies have often found computationally prohibitive. Under certain circumstances, CAPM is 40 times, 20 times, or twice as fast and gives a significantly higher verification bound (CAPM 98% vs. PRIMA 76%/DeepPoly 73%/DeepZ 8%) compared to PRIMA/DeepPoly/DeepZ. Furthermore, we present the time complexity of our algorithm as $O(W^2NK)$, where $W$ is the maximum width of the neural network, $N$ is the number of neurons, and $K$ is the size of the maxpool layer's kernel.
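The decomposition of maxpool into ReLUs that the abstract mentions rests on the identity max(a, b) = b + relu(a - b), applied pairwise across the pooling window; the few lines below verify it numerically (an illustration of the identity, not of CAPM's dual-network relaxation).

```python
import numpy as np

relu = lambda z: np.maximum(z, 0.0)

def maxpool_via_relu(v):
    """max(v_1, ..., v_k) rewritten as a chain of ReLUs."""
    m = v[0]
    for a in v[1:]:
        m = m + relu(a - m)   # running maximum: max(a, m) = m + relu(a - m)
    return m

v = np.array([0.3, -1.2, 2.5, 0.7])
assert np.isclose(maxpool_via_relu(v), v.max())
```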
</p> </div> </dd> <dt> <a name='item209'>[209]</a> <a href ="/abs/2407.09722" title="Abstract" id="2407.09722"> arXiv:2407.09722 </a> (replaced) [<a href="/pdf/2407.09722" title="Download PDF" id="pdf-2407.09722" aria-labelledby="pdf-2407.09722">pdf</a>, <a href="https://arxiv.org/html/2407.09722v3" title="View HTML" id="html-2407.09722" aria-labelledby="html-2407.09722" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2407.09722" title="Other formats" id="oth-2407.09722" aria-labelledby="oth-2407.09722">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Optimized Multi-Token Joint Decoding with Auxiliary Model for LLM Inference </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Qin,+Z">Zongyue Qin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hu,+Z">Ziniu Hu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=He,+Z">Zifan He</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Prakriya,+N">Neha Prakriya</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Cong,+J">Jason Cong</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sun,+Y">Yizhou Sun</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Large language models (LLMs) have achieved remarkable success across diverse tasks, yet their inference processes are hindered by substantial time and energy demands due to single-token generation at each decoding step. While previous methods such as speculative decoding mitigate these inefficiencies by producing multiple tokens per step, each token is still generated by its single-token distribution, thereby enhancing speed without improving effectiveness. In contrast, our work simultaneously enhances inference speed and improves the output effectiveness. We consider multi-token joint decoding (MTJD), which generates multiple tokens from their joint distribution at each iteration, theoretically reducing perplexity and enhancing task performance. However, MTJD suffers from the high cost of sampling from the joint distribution of multiple tokens. Inspired by speculative decoding, we introduce multi-token assisted decoding (MTAD), a novel framework designed to accelerate MTJD. MTAD leverages a smaller auxiliary model to approximate the joint distribution of a larger model, incorporating a verification mechanism that not only ensures the accuracy of this approximation, but also improves the decoding efficiency over conventional speculative decoding. Theoretically, we demonstrate that MTAD closely approximates exact MTJD with bounded error. Empirical evaluations using Llama-2 and OPT models ranging from 13B to 70B parameters across various tasks reveal that MTAD reduces perplexity by 21.2% and improves downstream performance compared to standard single-token sampling. Furthermore, MTAD achieves a 1.42x speed-up and consumes 1.54x less energy than conventional speculative decoding methods. These results highlight MTAD's ability to make multi-token joint decoding both effective and efficient, promoting more sustainable and high-performance deployment of LLMs. 
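</p> <p class='mathjax'> For background, a schematic of the draft-and-verify pattern that MTAD builds on, using the standard speculative-sampling acceptance rule; MTAD's verification mechanism targets the joint distribution and differs in detail, so treat this strictly as an illustrative sketch. </p> <pre><code class="language-python">
# Draft-and-verify sketch: a small model proposes a block of tokens, the
# large model scores them, and each token is kept with probability
# min(1, p_large / p_small), falling back on rejection.
import torch

def draft_and_verify(large_probs, small_probs, proposed):
    # large_probs, small_probs: [k, vocab] next-token distributions;
    # proposed: [k] token ids sampled from the small model.
    accepted = []
    for i, tok in enumerate(proposed):
        p, q = large_probs[i, tok], small_probs[i, tok]
        if torch.clamp(p / q, max=1.0) > torch.rand(()):
            accepted.append(int(tok))
        else:
            # Resample from the corrected residual distribution and stop;
            # tokens after a rejection are discarded.
            residual = torch.clamp(large_probs[i] - small_probs[i], min=0.0)
            accepted.append(int(torch.multinomial(residual / residual.sum(), 1)))
            break
    return accepted
</code></pre> <p class='mathjax'>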
</p> </div> </dd> <dt> <a name='item210'>[210]</a> <a href ="/abs/2407.14158" title="Abstract" id="2407.14158"> arXiv:2407.14158 </a> (replaced) [<a href="/pdf/2407.14158" title="Download PDF" id="pdf-2407.14158" aria-labelledby="pdf-2407.14158">pdf</a>, <a href="https://arxiv.org/html/2407.14158v2" title="View HTML" id="html-2407.14158" aria-labelledby="html-2407.14158" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2407.14158" title="Other formats" id="oth-2407.14158" aria-labelledby="oth-2407.14158">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Machine learning emulation of precipitation from km-scale regional climate simulations using a diffusion model </div> <div class='list-authors'><a href="https://arxiv.org/search/physics?searchtype=author&query=Addison,+H">Henry Addison</a>, <a href="https://arxiv.org/search/physics?searchtype=author&query=Kendon,+E">Elizabeth Kendon</a>, <a href="https://arxiv.org/search/physics?searchtype=author&query=Ravuri,+S">Suman Ravuri</a>, <a href="https://arxiv.org/search/physics?searchtype=author&query=Aitchison,+L">Laurence Aitchison</a>, <a href="https://arxiv.org/search/physics?searchtype=author&query=Watson,+P+A">Peter AG Watson</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 47 pages, 11 figures, 5 tables; re-ordered sections; further evaluation of future change in heavy precipitation </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Atmospheric and Oceanic Physics (physics.ao-ph)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> High-resolution climate simulations are valuable for understanding climate change impacts. This has motivated use of regional convection-permitting climate models (CPMs), but these are very computationally expensive. We present a convection-permitting model generative emulator (CPMGEM), to skilfully emulate precipitation simulations by a 2.2km-resolution regional CPM at much lower cost. This utilises a generative machine learning approach, a diffusion model. It takes inputs at the 60km resolution of the driving global climate model and downscales these to 8.8km, with daily-mean time resolution, capturing the effect of convective processes represented in the CPM at these scales. The emulator is trained on simulations over England and Wales from the United Kingdom Climate Projections Local product, covering years between 1980 and 2080 following a high emissions scenario. The output precipitation has a similarly realistic spatial structure and intensity distribution to the CPM simulations. The emulator is stochastic, which improves the realism of samples. We show evidence that the emulator has skill for extreme events with ~100 year return times. It captures the main features of the simulated 21st century climate change, but exhibits some error in the magnitude. We demonstrate successful transfer from a "perfect model" training setting to application using GCM variable inputs. We also show that the method can be useful in situations with limited amounts of high-resolution data. Potential applications include producing high-resolution precipitation predictions for large-ensemble climate simulations and producing output based on different GCMs and climate change scenarios to better sample uncertainty. 
</p> </div> </dd> <dt> <a name='item211'>[211]</a> <a href ="/abs/2407.21077" title="Abstract" id="2407.21077"> arXiv:2407.21077 </a> (replaced) [<a href="/pdf/2407.21077" title="Download PDF" id="pdf-2407.21077" aria-labelledby="pdf-2407.21077">pdf</a>, <a href="https://arxiv.org/html/2407.21077v2" title="View HTML" id="html-2407.21077" aria-labelledby="html-2407.21077" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2407.21077" title="Other formats" id="oth-2407.21077" aria-labelledby="oth-2407.21077">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Genetic Instruct: Scaling up Synthetic Generation of Coding Instructions for Large Language Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Majumdar,+S">Somshubra Majumdar</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Noroozi,+V">Vahid Noroozi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Samadi,+M">Mehrzad Samadi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Narenthiran,+S">Sean Narenthiran</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ficek,+A">Aleksander Ficek</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ahmad,+W+U">Wasi Uddin Ahmad</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Huang,+J">Jocelyn Huang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Balam,+J">Jagadeesh Balam</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ginsburg,+B">Boris Ginsburg</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Machine Learning (cs.LG); Neural and Evolutionary Computing (cs.NE) </div> <p class='mathjax'> Large Language Models (LLMs) require high quality instruction data for effective alignment, particularly in code generation tasks where expert curated datasets are expensive to produce. We present Genetic-Instruct, a scalable algorithm for synthesizing large-scale, high quality coding instructions using evolutionary principles. Starting from a small set of seed instructions, Genetic-Instruct generates diverse and challenging instruction-code pairs by leveraging an Instructor-LLM for generation, a Coder-LLM for code synthesis, and a Judge-LLM for automatic quality evaluation. Our proposed approach is highly parallelizable and effective even with small seed data and weaker generator models. We generated more than 7.5 million coding instructions with the proposed approach. We then evaluated it by fine-tuning LLMs with the synthetic samples and demonstrated a significant improvement in their code generation capability compared to other synthetic generation approaches and publicly available datasets. Our results highlight the efficiency, scalability, and generalizability of the Genetic-Instruct framework. 
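</p> <p class='mathjax'> A minimal sketch of the evolutionary loop described above; <code>instructor</code>, <code>coder</code>, and <code>judge</code> are hypothetical callables standing in for the three LLM roles, not the paper's API. </p> <pre><code class="language-python">
# Evolutionary generation loop: mutate/cross seed instructions, synthesize
# code, and keep only pairs that pass the automatic quality filter.
import random

def genetic_instruct(seed_instructions, instructor, coder, judge,
                     generations=3, offspring_per_gen=8):
    population = list(seed_instructions)
    accepted = []
    for _ in range(generations):
        for _ in range(offspring_per_gen):
            parents = random.sample(population, k=min(2, len(population)))
            instruction = instructor(parents)   # mutate/crossover parents
            code = coder(instruction)           # synthesize a solution
            if judge(instruction, code):        # automatic quality filter
                accepted.append((instruction, code))
                population.append(instruction)  # survivors re-enter the pool
    return accepted
</code></pre> <p class='mathjax'>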
</p> </div> </dd> <dt> <a name='item212'>[212]</a> <a href ="/abs/2408.04290" title="Abstract" id="2408.04290"> arXiv:2408.04290 </a> (replaced) [<a href="/pdf/2408.04290" title="Download PDF" id="pdf-2408.04290" aria-labelledby="pdf-2408.04290">pdf</a>, <a href="https://arxiv.org/html/2408.04290v4" title="View HTML" id="html-2408.04290" aria-labelledby="html-2408.04290" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2408.04290" title="Other formats" id="oth-2408.04290" aria-labelledby="oth-2408.04290">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Efficient and Accurate Pneumonia Detection Using a Novel Multi-Scale Transformer Approach </div> <div class='list-authors'><a href="https://arxiv.org/search/eess?searchtype=author&query=Saber,+A">Alireza Saber</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Parhami,+P">Pouria Parhami</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Siahkarzadeh,+A">Alimohammad Siahkarzadeh</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Fateh,+M">Mansoor Fateh</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Fateh,+A">Amirreza Fateh</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Image and Video Processing (eess.IV)</span>; Computer Vision and Pattern Recognition (cs.CV); Machine Learning (cs.LG) </div> <p class='mathjax'> Pneumonia, a prevalent respiratory infection, remains a leading cause of morbidity and mortality worldwide, particularly among vulnerable populations. Chest X-rays serve as a primary tool for pneumonia detection; however, variations in imaging conditions and subtle visual indicators complicate consistent interpretation. Automated tools can enhance traditional methods by improving diagnostic reliability and supporting clinical decision-making. In this study, we propose a novel multi-scale transformer approach for pneumonia detection that integrates lung segmentation and classification into a unified framework. Our method introduces a lightweight transformer-enhanced TransUNet for precise lung segmentation, achieving a Dice score of 95.68% on the "Chest X-ray Masks and Labels" dataset with fewer parameters than traditional transformers. For classification, we employ pre-trained ResNet models (ResNet-50 and ResNet-101) to extract multi-scale feature maps, which are then processed through a modified transformer module to enhance pneumonia detection. This integration of multi-scale feature extraction and lightweight transformer modules ensures robust performance, making our method suitable for resource-constrained clinical environments. Our approach achieves 93.75% accuracy on the "Kermany" dataset and 96.04% accuracy on the "Cohen" dataset, outperforming existing methods while maintaining computational efficiency. 
This work demonstrates the potential of multi-scale transformer architectures to improve pneumonia diagnosis, offering a scalable and accurate solution to global healthcare challenges. Code is available at <a href="https://github.com/amirrezafateh/Multi-Scale-Transformer-Pneumonia" rel="external noopener nofollow" class="link-external link-https">this https URL</a>. </p> </div> </dd> <dt> <a name='item213'>[213]</a> <a href ="/abs/2408.13378" title="Abstract" id="2408.13378"> arXiv:2408.13378 </a> (replaced) [<a href="/pdf/2408.13378" title="Download PDF" id="pdf-2408.13378" aria-labelledby="pdf-2408.13378">pdf</a>, <a href="https://arxiv.org/html/2408.13378v4" title="View HTML" id="html-2408.13378" aria-labelledby="html-2408.13378" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2408.13378" title="Other formats" id="oth-2408.13378" aria-labelledby="oth-2408.13378">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> DrugAgent: Multi-Agent Large Language Model-Based Reasoning for Drug-Target Interaction Prediction </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Inoue,+Y">Yoshitaka Inoue</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Song,+T">Tianci Song</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+X">Xinling Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Luna,+A">Augustin Luna</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Fu,+T">Tianfan Fu</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 15 pages, 1 figure </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Artificial Intelligence (cs.AI)</span>; Computation and Language (cs.CL); Information Retrieval (cs.IR); Machine Learning (cs.LG); Quantitative Methods (q-bio.QM) </div> <p class='mathjax'> Advancements in large language models (LLMs) allow them to address diverse questions using human-like interfaces. Still, limitations in their training prevent them from answering accurately in scenarios that could benefit from multiple perspectives. Multi-agent systems allow questions to be resolved from multiple perspectives, enhancing result consistency and reliability. While drug-target interaction (DTI) prediction is important for drug discovery, existing approaches face challenges due to complex biological systems and the lack of interpretability needed for clinical applications. DrugAgent is a multi-agent LLM system for DTI prediction that combines multiple specialized perspectives with transparent reasoning. Our system adapts and extends existing multi-agent frameworks by (1) applying a coordinator-based architecture to the DTI domain, (2) integrating domain-specific data sources, including ML predictions, knowledge graphs, and literature evidence, and (3) incorporating Chain-of-Thought (CoT) and ReAct (Reason+Act) frameworks for transparent DTI reasoning. We conducted comprehensive experiments using a kinase inhibitor dataset, where our multi-agent LLM method outperformed the non-reasoning multi-agent model (GPT-4o mini) by 45% in F1 score (0.514 vs 0.355). Through ablation studies, we demonstrated the contributions of each agent, with the AI agent being the most impactful, followed by the KG agent and search agent. 
Most importantly, our approach provides detailed, human-interpretable reasoning for each prediction by combining evidence from multiple sources - a critical feature for biomedical applications where understanding the rationale behind predictions is essential for clinical decision-making and regulatory compliance. Code is available at <a href="https://anonymous.4open.science/r/DrugAgent-B2EA" rel="external noopener nofollow" class="link-external link-https">this https URL</a>. </p> </div> </dd> <dt> <a name='item214'>[214]</a> <a href ="/abs/2408.15313" title="Abstract" id="2408.15313"> arXiv:2408.15313 </a> (replaced) [<a href="/pdf/2408.15313" title="Download PDF" id="pdf-2408.15313" aria-labelledby="pdf-2408.15313">pdf</a>, <a href="https://arxiv.org/html/2408.15313v2" title="View HTML" id="html-2408.15313" aria-labelledby="html-2408.15313" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2408.15313" title="Other formats" id="oth-2408.15313" aria-labelledby="oth-2408.15313">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Bi-Factorial Preference Optimization: Balancing Safety-Helpfulness in Language Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+W">Wenxuan Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Torr,+P+H">Philip H.S. Torr</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Elhoseiny,+M">Mohamed Elhoseiny</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bibi,+A">Adel Bibi</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> The paper has been accepted at ICLR 2025 as a spotlight presentation </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Artificial Intelligence (cs.AI)</span>; Computation and Language (cs.CL); Machine Learning (cs.LG) </div> <p class='mathjax'> Fine-tuning large language models (LLMs) on human preferences, typically through reinforcement learning from human feedback (RLHF), has proven successful in enhancing their capabilities. However, ensuring the safety of LLMs during fine-tuning remains a critical concern, and mitigating potential conflicts between safety and helpfulness is costly in RLHF. To address this issue, we propose a supervised learning framework called Bi-Factorial Preference Optimization (BFPO), which re-parameterizes a joint RLHF objective of both safety and helpfulness into a single supervised learning objective. In the supervised optimization, a labeling function is used to capture the global preference ranking to balance both safety and helpfulness. To evaluate BFPO, we develop a benchmark that includes comprehensive discriminative and generative tasks for helpfulness and harmlessness. The results indicate that our method significantly outperforms existing approaches in both safety and helpfulness. Moreover, BFPO achieves the same level of safety as methods that heavily rely on human labor with less than 10% of the computational resources and human prompting and annotation effort. The training recipes can be found here: <a href="https://github.com/wx-zhang/bfpo" rel="external noopener nofollow" class="link-external link-https">this https URL</a>. 
</p> </div> </dd> <dt> <a name='item215'>[215]</a> <a href ="/abs/2409.00134" title="Abstract" id="2409.00134"> arXiv:2409.00134 </a> (replaced) [<a href="/pdf/2409.00134" title="Download PDF" id="pdf-2409.00134" aria-labelledby="pdf-2409.00134">pdf</a>, <a href="https://arxiv.org/html/2409.00134v5" title="View HTML" id="html-2409.00134" aria-labelledby="html-2409.00134" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2409.00134" title="Other formats" id="oth-2409.00134" aria-labelledby="oth-2409.00134">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> MAPF-GPT: Imitation Learning for Multi-Agent Pathfinding at Scale </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Andreychuk,+A">Anton Andreychuk</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yakovlev,+K">Konstantin Yakovlev</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Panov,+A">Aleksandr Panov</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Skrynnik,+A">Alexey Skrynnik</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Multiagent Systems (cs.MA)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Multi-agent pathfinding (MAPF) is a problem that generally requires finding collision-free paths for multiple agents in a shared environment. Solving MAPF optimally, even under restrictive assumptions, is NP-hard, yet efficient solutions for this problem are critical for numerous applications, such as automated warehouses and transportation systems. Recently, learning-based approaches to MAPF have gained attention, particularly those leveraging deep reinforcement learning. Typically, such learning-based MAPF solvers are augmented with additional components like single-agent planning or communication. Orthogonally, in this work we rely solely on imitation learning that leverages a large dataset of expert MAPF solutions and a transformer-based neural network to create a foundation model for MAPF called MAPF-GPT. The resulting model is capable of generating actions without additional heuristics or communication. MAPF-GPT demonstrates zero-shot learning abilities when solving MAPF problems that are not present in the training dataset. We show that MAPF-GPT notably outperforms the current best-performing learnable MAPF solvers on a diverse range of problem instances and is computationally efficient during inference. 
</p> </div> </dd> <dt> <a name='item216'>[216]</a> <a href ="/abs/2409.04067" title="Abstract" id="2409.04067"> arXiv:2409.04067 </a> (replaced) [<a href="/pdf/2409.04067" title="Download PDF" id="pdf-2409.04067" aria-labelledby="pdf-2409.04067">pdf</a>, <a href="https://arxiv.org/html/2409.04067v2" title="View HTML" id="html-2409.04067" aria-labelledby="html-2409.04067" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2409.04067" title="Other formats" id="oth-2409.04067" aria-labelledby="oth-2409.04067">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Preconditioned FEM-based Neural Networks for Solving Incompressible Fluid Flows and Related Inverse Problems </div> <div class='list-authors'><a href="https://arxiv.org/search/math?searchtype=author&query=Griese,+F">Franziska Griese</a>, <a href="https://arxiv.org/search/math?searchtype=author&query=Hoppe,+F">Fabian Hoppe</a>, <a href="https://arxiv.org/search/math?searchtype=author&query=R%C3%BCttgers,+A">Alexander Rüttgers</a>, <a href="https://arxiv.org/search/math?searchtype=author&query=Knechtges,+P">Philipp Knechtges</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Numerical Analysis (math.NA)</span>; Machine Learning (cs.LG); Fluid Dynamics (physics.flu-dyn) </div> <p class='mathjax'> The numerical simulation and optimization of technical systems described by partial differential equations is expensive, especially in multi-query scenarios in which the underlying equations have to be solved for different parameters. A comparatively new approach in this context is to combine the good approximation properties of neural networks (for parameter dependence) with the classical finite element method (for discretization). However, instead of considering the solution mapping of the PDE from the parameter space into the FEM-discretized solution space as a purely data-driven regression problem, so-called physically informed regression problems have proven to be useful. In these, the equation residual is minimized during the training of the neural network, i.e., the neural network "learns" the physics underlying the problem. In this paper, we extend this approach to saddle-point and non-linear fluid dynamics problems, namely the stationary Stokes and stationary Navier-Stokes equations, respectively. In particular, we propose a modification of the existing approach: Instead of minimizing the plain vanilla equation residual during training, we minimize the equation residual modified by a preconditioner. By analogy with the linear case, this also improves the conditioning in the present non-linear case. Our numerical examples demonstrate that this approach significantly reduces the training effort and greatly increases accuracy and generalizability. Finally, we show the application of the resulting parameterized model to a related inverse problem. 
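</p> <p class='mathjax'> The proposed modification can be sketched in a few lines: minimize the preconditioned residual $P^{-1} r(u_\theta)$ rather than the plain residual $r(u_\theta)$. The diagonal (Jacobi) preconditioner below is an assumed stand-in for illustration; the paper's concrete choice of preconditioner may differ. </p> <pre><code class="language-python">
# Sketch of a preconditioned residual loss: apply P^{-1} to the assembled
# FEM residual before taking the squared norm, so gradients flow through
# the preconditioner during training.
import torch

def preconditioned_loss(residual, apply_P_inverse):
    # residual: assembled FEM residual r(u_theta);
    # apply_P_inverse: function applying P^{-1} to a vector (stand-in).
    z = apply_P_inverse(residual)
    return 0.5 * torch.dot(z, z)

# Toy example with a diagonal (Jacobi) preconditioner:
diag = torch.tensor([4.0, 2.0, 8.0])
r = torch.tensor([0.4, -0.2, 0.8], requires_grad=True)
loss = preconditioned_loss(r, lambda v: v / diag)
loss.backward()
print(r.grad)
</code></pre> <p class='mathjax'>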
</p> </div> </dd> <dt> <a name='item217'>[217]</a> <a href ="/abs/2410.02810" title="Abstract" id="2410.02810"> arXiv:2410.02810 </a> (replaced) [<a href="/pdf/2410.02810" title="Download PDF" id="pdf-2410.02810" aria-labelledby="pdf-2410.02810">pdf</a>, <a href="https://arxiv.org/html/2410.02810v3" title="View HTML" id="html-2410.02810" aria-labelledby="html-2410.02810" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2410.02810" title="Other formats" id="oth-2410.02810" aria-labelledby="oth-2410.02810">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> StateAct: Enhancing LLM Base Agents via Self-prompting and State-tracking </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Rozanov,+N">Nikolai Rozanov</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Rei,+M">Marek Rei</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 9 pages, 5 pages appendix, 7 figures, 5 tables </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Artificial Intelligence (cs.AI)</span>; Computation and Language (cs.CL); Machine Learning (cs.LG) </div> <p class='mathjax'> Large language models (LLMs) are increasingly used as autonomous agents, tackling tasks from robotics to web navigation. Their performance depends on the underlying base agent. Existing methods, however, struggle with long-context reasoning and goal adherence. We introduce StateAct, a novel and efficient base agent that enhances decision-making through (1) self-prompting, which reinforces task goals at every step, and (2) chain-of-states, an extension of chain-of-thought that tracks state information over time. StateAct outperforms ReAct, the previous best base agent, by over 10% on Alfworld, 30% on Textcraft, and 7% on Webshop across multiple frontier LLMs. We also demonstrate that StateAct can be used as a drop-in replacement for ReAct with advanced LLM agent methods such as test-time scaling, yielding an additional 12% gain on Textcraft. By improving efficiency and long-range reasoning without requiring additional training or retrieval, StateAct provides a scalable foundation for LLM agents. We open source our code to support further research at <a href="https://github.com/ai-nikolai/stateact" rel="external noopener nofollow" class="link-external link-https">this https URL</a> . 
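</p> <p class='mathjax'> One plausible rendering of the two ingredients, restating the goal at every step (self-prompting) and carrying an explicit state entry (chain-of-states), is sketched below; the exact prompt format is defined in the authors' repository, and all field names here are illustrative. </p> <pre><code class="language-python">
# Illustrative only: a step template that repeats the goal (self-prompting)
# and tracks state over time (chain-of-states, extending chain-of-thought).
def render_step(goal, state, thought, action):
    return (
        f"Goal: {goal}\n"      # goal restated at every step
        f"State: {state}\n"    # explicitly tracked state
        f"Thought: {thought}\n"
        f"Action: {action}\n"
    )

print(render_step(
    goal="put a clean mug on the desk",
    state="holding: nothing | location: kitchen | mug: dirty, in sink",
    thought="the mug must be washed before it can be placed",
    action="clean mug with sink",
))
</code></pre> <p class='mathjax'>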
</p> </div> </dd> <dt> <a name='item218'>[218]</a> <a href ="/abs/2410.05454" title="Abstract" id="2410.05454"> arXiv:2410.05454 </a> (replaced) [<a href="/pdf/2410.05454" title="Download PDF" id="pdf-2410.05454" aria-labelledby="pdf-2410.05454">pdf</a>, <a href="https://arxiv.org/html/2410.05454v2" title="View HTML" id="html-2410.05454" aria-labelledby="html-2410.05454" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2410.05454" title="Other formats" id="oth-2410.05454" aria-labelledby="oth-2410.05454">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Meta-Dynamical State Space Models for Integrative Neural Data Analysis </div> <div class='list-authors'><a href="https://arxiv.org/search/stat?searchtype=author&query=Vermani,+A">Ayesha Vermani</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Nassar,+J">Josue Nassar</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Jeon,+H">Hyungju Jeon</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Dowling,+M">Matthew Dowling</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Park,+I+M">Il Memming Park</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (stat.ML)</span>; Machine Learning (cs.LG); Neurons and Cognition (q-bio.NC) </div> <p class='mathjax'> Learning shared structure across environments facilitates rapid learning and adaptive behavior in neural systems. This has been widely demonstrated and applied in machine learning to train models that are capable of generalizing to novel settings. However, there has been limited work exploiting the shared structure in neural activity during similar tasks for learning latent dynamics from neural recordings. Existing approaches are designed to infer dynamics from a single dataset and cannot be readily adapted to account for statistical heterogeneities across recordings. In this work, we hypothesize that similar tasks admit a corresponding family of related solutions and propose a novel approach for meta-learning this solution space from task-related neural activity of trained animals. Specifically, we capture the variabilities across recordings on a low-dimensional manifold which concisely parametrizes this family of dynamics, thereby facilitating rapid learning of latent dynamics given new recordings. We demonstrate the efficacy of our approach on few-shot reconstruction and forecasting of synthetic dynamical systems, and neural recordings from the motor cortex during different arm reaching tasks. 
</p> </div> </dd> <dt> <a name='item219'>[219]</a> <a href ="/abs/2410.08527" title="Abstract" id="2410.08527"> arXiv:2410.08527 </a> (replaced) [<a href="/pdf/2410.08527" title="Download PDF" id="pdf-2410.08527" aria-labelledby="pdf-2410.08527">pdf</a>, <a href="https://arxiv.org/html/2410.08527v2" title="View HTML" id="html-2410.08527" aria-labelledby="html-2410.08527" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2410.08527" title="Other formats" id="oth-2410.08527" aria-labelledby="oth-2410.08527">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Scaling Laws for Predicting Downstream Performance in LLMs </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+Y">Yangyi Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Huang,+B">Binxuan Huang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gao,+Y">Yifan Gao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+Z">Zhengyang Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yang,+J">Jingfeng Yang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ji,+H">Heng Ji</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted to TMLR </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Precise estimation of downstream performance in large language models (LLMs) prior to training is essential for guiding their development process. Scaling laws analysis utilizes the statistics of a series of significantly smaller sampling language models (LMs) to predict the performance of the target LLM. For downstream performance prediction, the critical challenge lies in the emergent abilities in LLMs that occur beyond task-specific computational thresholds. In this work, we focus on the pre-training loss as a more computation-efficient metric for performance estimation. Our two-stage approach FLP consists of first estimating a function that maps computational resources (e.g., FLOPs) to the pre-training Loss using a series of fully-converged sampling models, followed by mapping the pre-training loss to downstream task Performance using the intermediate models with emergent performance. In our experiments, this FLP solution accurately predicts the performance of LLMs with 7B and 13B parameters using a series of sampling LMs up to 3B, achieving error margins of 5% and 10%, respectively, and significantly outperforming the FLOPs-to-Performance approach. Further, we present FLP-M, a fundamental approach for performance prediction that addresses the practical need to integrate datasets from multiple sources during pre-training. FLP-M extends the power law analytical function to predict domain-specific pre-training loss based on FLOPs across data sources, and employs a two-layer neural network to model the non-linear relationship between multiple domain-specific loss and downstream performance. By utilizing a 3B LLM trained on a specific ratio and a series of smaller sampling LMs, FLP-M can effectively forecast the performance of 3B and 7B LLMs across various data mixtures for most benchmarks within 10% error margins. 
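</p> <p class='mathjax'> Stage 1 of FLP amounts to fitting a saturating power law from compute to loss on small models, with a second fitted map from loss to performance; a toy sketch under assumed functional forms and synthetic numbers (not the paper's data): </p> <pre><code class="language-python">
# Two-stage FLP sketch: (1) fit compute -&gt; loss on small sampling models,
# (2) map predicted loss -&gt; downstream performance. Functional forms and
# all numbers below are assumptions for illustration.
import numpy as np
from scipy.optimize import curve_fit

def flops_to_loss(C, a, b, c):
    # Stage 1: saturating power law from compute to pre-training loss.
    return a * np.power(C, -b) + c

# Synthetic observations from small sampling models; compute measured
# in units of 1e18 FLOPs.
C_small = np.array([1.0, 3.0, 10.0, 30.0, 100.0])
L_small = np.array([3.2, 2.9, 2.6, 2.45, 2.3])
popt, _ = curve_fit(flops_to_loss, C_small, L_small, p0=(1.2, 0.3, 2.0))

# Stage 2 (assumed form): logistic map from loss to task performance,
# fitted on intermediate checkpoints past the emergence point (not shown).
def loss_to_perf(L, k, L0, low, high):
    return low + (high - low) / (1.0 + np.exp(k * (L - L0)))

print("predicted loss at 1e22 FLOPs:", flops_to_loss(1e4, *popt))
</code></pre> <p class='mathjax'>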
</p> </div> </dd> <dt> <a name='item220'>[220]</a> <a href ="/abs/2410.09697" title="Abstract" id="2410.09697"> arXiv:2410.09697 </a> (replaced) [<a href="/pdf/2410.09697" title="Download PDF" id="pdf-2410.09697" aria-labelledby="pdf-2410.09697">pdf</a>, <a href="https://arxiv.org/html/2410.09697v2" title="View HTML" id="html-2410.09697" aria-labelledby="html-2410.09697" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2410.09697" title="Other formats" id="oth-2410.09697" aria-labelledby="oth-2410.09697">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Provable Convergence and Limitations of Geometric Tempering for Langevin Dynamics </div> <div class='list-authors'><a href="https://arxiv.org/search/stat?searchtype=author&query=Chehab,+O">Omar Chehab</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Korba,+A">Anna Korba</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Stromme,+A">Austin Stromme</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Vacher,+A">Adrien Vacher</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (stat.ML)</span>; Machine Learning (cs.LG); Computation (stat.CO) </div> <p class='mathjax'> Geometric tempering is a popular approach to sampling from challenging multi-modal probability distributions by instead sampling from a sequence of distributions which interpolate, using the geometric mean, between an easier proposal distribution and the target distribution. In this paper, we theoretically investigate the soundness of this approach when the sampling algorithm is Langevin dynamics, proving both upper and lower bounds. Our upper bounds are the first analysis in the literature under functional inequalities. They assert the convergence of tempered Langevin in continuous and discrete-time, and their minimization leads to closed-form optimal tempering schedules for some pairs of proposal and target distributions. Our lower bounds demonstrate a simple case where the geometric tempering takes exponential time, and further reveal that the geometric tempering can suffer from poor functional inequalities and slow convergence, even when the target distribution is well-conditioned. Overall, our results indicate that geometric tempering may not help, and can even be harmful for convergence. 
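</p> <p class='mathjax'> Concretely, tempered Langevin uses the drift $\nabla \log \pi_t = (1-t)\nabla \log \pi_0 + t \nabla \log \pi_1$, the convex combination of proposal and target scores, since $\pi_t \propto \pi_0^{1-t}\pi_1^{t}$. A minimal sketch on a toy Gaussian pair, with an assumed linear schedule: </p> <pre><code class="language-python">
# Tempered (annealed) Langevin sketch: run unadjusted Langevin steps while
# the temper parameter t moves from the proposal (t=0) to the target (t=1).
import numpy as np

def tempered_langevin(score0, score1, x, schedule, step=1e-2, rng=None):
    rng = rng or np.random.default_rng(0)
    for t in schedule:
        drift = (1.0 - t) * score0(x) + t * score1(x)
        x = x + step * drift + np.sqrt(2.0 * step) * rng.standard_normal(x.shape)
    return x

# Example: proposal N(0, 4), target N(5, 1); scores are d/dx log density.
score0 = lambda x: -x / 4.0
score1 = lambda x: -(x - 5.0)
samples = tempered_langevin(score0, score1, np.zeros(1000),
                            schedule=np.repeat(np.linspace(0, 1, 50), 20))
print(samples.mean(), samples.std())  # roughly 5 and 1
</code></pre> <p class='mathjax'>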
</p> </div> </dd> <dt> <a name='item221'>[221]</a> <a href ="/abs/2410.18804" title="Abstract" id="2410.18804"> arXiv:2410.18804 </a> (replaced) [<a href="/pdf/2410.18804" title="Download PDF" id="pdf-2410.18804" aria-labelledby="pdf-2410.18804">pdf</a>, <a href="https://arxiv.org/html/2410.18804v2" title="View HTML" id="html-2410.18804" aria-labelledby="html-2410.18804" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2410.18804" title="Other formats" id="oth-2410.18804" aria-labelledby="oth-2410.18804">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Fast constrained sampling in pre-trained diffusion models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Graikos,+A">Alexandros Graikos</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jojic,+N">Nebojsa Jojic</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Samaras,+D">Dimitris Samaras</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Large denoising diffusion models, such as Stable Diffusion, have been trained on billions of image-caption pairs to perform text-conditioned image generation. As a byproduct of this training, these models have acquired general knowledge about image statistics, which can be useful for other inference tasks. However, when confronted with sampling an image under new constraints, e.g. generating the missing parts of an image, using large pre-trained text-to-image diffusion models is inefficient and often unreliable. Previous approaches either utilize backpropagation, making them significantly slower and more memory-demanding than text-to-image inference, or only enforce the constraint locally, failing to capture critical long-range correlations. In this work, we propose an algorithm that enables fast and high-quality generation under arbitrary constraints. We observe that, during inference, we can interchange between gradient updates computed on the noisy image and updates computed on the final, clean image. This allows us to employ a numerical approximation to expensive gradient computations, incurring significant speed-ups in inference. Our approach produces results that rival or surpass the state-of-the-art training-free inference approaches while requiring a fraction of the time. We demonstrate the effectiveness of our algorithm under both linear and non-linear constraints. An implementation is provided at <a href="https://github.com/cvlab-stonybrook/fast-constrained-sampling" rel="external noopener nofollow" class="link-external link-https">this https URL</a>. 
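</p> <p class='mathjax'> The interchange described above can be sketched in DDPM notation: form the one-step clean-image estimate from the noise prediction, evaluate the constraint gradient there, and apply it without backpropagating through the denoiser. This is a sketch of the general pattern, not the authors' exact update rule. </p> <pre><code class="language-python">
# Schematic constrained-guidance step; alpha_bar_t is the cumulative noise
# schedule value at step t in DDPM notation.
import torch

def constrained_update(x_t, eps_pred, alpha_bar_t, constraint_grad, scale):
    # One-step estimate of the clean image from the noise prediction.
    x0_hat = (x_t - (1.0 - alpha_bar_t) ** 0.5 * eps_pred) / alpha_bar_t ** 0.5
    # Cheap approximation: take the constraint gradient at x0_hat and apply
    # it to x_t directly, skipping backpropagation through the denoiser.
    return x_t - scale * constraint_grad(x0_hat)

# Example constraint for inpainting: match observed pixels y on a mask.
def inpainting_grad(y, mask):
    return lambda x0: mask * (x0 - y)
</code></pre> <p class='mathjax'>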
</p> </div> </dd> <dt> <a name='item222'>[222]</a> <a href ="/abs/2410.20812" title="Abstract" id="2410.20812"> arXiv:2410.20812 </a> (replaced) [<a href="/pdf/2410.20812" title="Download PDF" id="pdf-2410.20812" aria-labelledby="pdf-2410.20812">pdf</a>, <a href="https://arxiv.org/html/2410.20812v3" title="View HTML" id="html-2410.20812" aria-labelledby="html-2410.20812" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2410.20812" title="Other formats" id="oth-2410.20812" aria-labelledby="oth-2410.20812">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Fidelity-Imposed Displacement Editing for the Learn2Reg 2024 SHG-BF Challenge </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+J">Jiacheng Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+X">Xiang Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hu,+R">Renjiu Hu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+R">Rongguang Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+J">Jiazheng Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+M">Min Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+Y">Yaonan Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+H">Hang Zhang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted at IEEE ISBI 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Machine Learning (cs.LG); Image and Video Processing (eess.IV) </div> <p class='mathjax'> Co-examination of second-harmonic generation (SHG) and bright-field (BF) microscopy enables the differentiation of tissue components and collagen fibers, aiding the analysis of human breast and pancreatic cancer tissues. However, large discrepancies between SHG and BF images pose challenges for current learning-based registration models in aligning SHG to BF. In this paper, we propose a novel multi-modal registration framework that employs fidelity-imposed displacement editing to address these challenges. The framework integrates batch-wise contrastive learning, feature-based pre-alignment, and instance-level optimization. Experimental results from the Learn2Reg COMULISglobe SHG-BF Challenge validate the effectiveness of our method, securing the 1st place on the online leaderboard. 
</p> </div> </dd> <dt> <a name='item223'>[223]</a> <a href ="/abs/2410.23602" title="Abstract" id="2410.23602"> arXiv:2410.23602 </a> (replaced) [<a href="/pdf/2410.23602" title="Download PDF" id="pdf-2410.23602" aria-labelledby="pdf-2410.23602">pdf</a>, <a href="https://arxiv.org/html/2410.23602v2" title="View HTML" id="html-2410.23602" aria-labelledby="html-2410.23602" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2410.23602" title="Other formats" id="oth-2410.23602" aria-labelledby="oth-2410.23602">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Linearized Wasserstein Barycenters: Synthesis, Analysis, Representational Capacity, and Applications </div> <div class='list-authors'><a href="https://arxiv.org/search/stat?searchtype=author&query=Werenski,+M">Matthew Werenski</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Mallery,+B">Brendan Mallery</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Aeron,+S">Shuchin Aeron</a>, <a href="https://arxiv.org/search/stat?searchtype=author&query=Murphy,+J+M">James M. Murphy</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 40 pages, 6 figures. Minor revisions and proof fixes; accepted to AISTATS 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (stat.ML)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> We propose the linear barycentric coding model (LBCM) which utilizes the linear optimal transport (LOT) metric for analysis and synthesis of probability measures. We provide a closed-form solution to the variational problem characterizing the probability measures in the LBCM and establish equivalence of the LBCM to the set of 2-Wasserstein barycenters in the special case of compatible measures. Computational methods for synthesizing and analyzing measures in the LBCM are developed with finite sample guarantees. One of our main theoretical contributions is to identify an LBCM, expressed in terms of a simple family, which is sufficient to express all probability measures on the closed unit interval. We show that a natural analogous construction of an LBCM in 2 dimensions fails, and we leave it as an open problem to identify the proper extension in more than 1 dimension. We conclude by demonstrating the utility of LBCM for covariance estimation and data imputation. 
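</p> <p class='mathjax'> In one dimension the linear optimal transport embedding reduces to the quantile function, so LBCM synthesis is just a convex combination of quantile functions. A small sketch of that special case (the higher-dimensional construction is the delicate part the paper studies): </p> <pre><code class="language-python">
# 1-D LBCM synthesis sketch: a barycentric code (lambda_1, ..., lambda_m)
# corresponds to the quantile function sum_i lambda_i Q_i.
import numpy as np
from scipy.special import erfinv

def synthesize_quantiles(quantile_fns, lambdas, grid):
    # Convex combination of quantile functions on a grid in (0, 1) gives
    # the quantile function of the synthesized measure (1-D case only).
    return sum(lam * qf(grid) for lam, qf in zip(lambdas, quantile_fns))

grid = np.linspace(0.01, 0.99, 99)
Q_uniform = lambda u: u  # Uniform(0, 1)
Q_gauss = lambda u: 2.0 + 0.5 * np.sqrt(2.0) * erfinv(2.0 * u - 1.0)  # N(2, 0.5^2)
Q_mix = synthesize_quantiles([Q_uniform, Q_gauss], [0.3, 0.7], grid)
print(Q_mix[:5])
</code></pre> <p class='mathjax'>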
</p> </div> </dd> <dt> <a name='item224'>[224]</a> <a href ="/abs/2411.04794" title="Abstract" id="2411.04794"> arXiv:2411.04794 </a> (replaced) [<a href="/pdf/2411.04794" title="Download PDF" id="pdf-2411.04794" aria-labelledby="pdf-2411.04794">pdf</a>, <a href="https://arxiv.org/html/2411.04794v2" title="View HTML" id="html-2411.04794" aria-labelledby="html-2411.04794" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2411.04794" title="Other formats" id="oth-2411.04794" aria-labelledby="oth-2411.04794">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> KnowCoder-X: Boosting Multilingual Information Extraction via Code </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zuo,+Y">Yuxin Zuo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jiang,+W">Wenxuan Jiang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+W">Wenxuan Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+Z">Zixuan Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bai,+L">Long Bai</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+H">Hanbin Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zeng,+Y">Yutao Zeng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jin,+X">Xiaolong Jin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Guo,+J">Jiafeng Guo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Cheng,+X">Xueqi Cheng</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 26 pages, 3 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Empirical evidence indicates that LLMs exhibit spontaneous cross-lingual alignment. However, although LLMs show promising cross-lingual alignment in information extraction (IE), a significant imbalance across languages persists, highlighting an underlying deficiency. To address this, we propose KnowCoder-X, a powerful code LLM with advanced cross-lingual and multilingual capabilities for universal information extraction. Firstly, it standardizes the representation of multilingual schemas using Python classes, ensuring a consistent ontology across different languages, and formulates IE across languages as a unified code generation task. Secondly, we enhance the model's cross-lingual transferability through IE cross-lingual alignment instruction tuning on a translated instance prediction task that we propose. During this phase, we also construct a high-quality and diverse bilingual IE parallel dataset with 257k samples, called ParallelNER, synthesized by our proposed robust three-stage pipeline, with manual annotation to ensure quality. Although it was not trained on the 29 unseen languages evaluated, KnowCoder-X surpasses ChatGPT by $30.17\%$ and the SoTA by $20.03\%$, thereby demonstrating superior cross-lingual IE capabilities. Comprehensive evaluations on 64 IE benchmarks in Chinese and English under various settings demonstrate that KnowCoder-X significantly enhances cross-lingual IE transfer by boosting IE alignment. 
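</p> <p class='mathjax'> To illustrate the schema-as-code idea, an IE ontology can be written once as Python classes so that extraction in any language becomes generation of instantiation code; the class names below are invented for illustration, not taken from the released ontology. </p> <pre><code class="language-python">
# Schema-as-code illustration: one Python ontology shared by all languages.
from dataclasses import dataclass

@dataclass
class Person:
    name: str

@dataclass
class Organization:
    name: str

@dataclass
class WorksFor:
    employee: Person
    employer: Organization

# Target output for "Marie Curie worked at the University of Paris."
# (and, identically, for its translation into any other language):
extraction = WorksFor(
    employee=Person(name="Marie Curie"),
    employer=Organization(name="University of Paris"),
)
</code></pre> <p class='mathjax'>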
Our code and dataset are available at: <a href="https://github.com/ICT-GoKnow/KnowCoder" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </p> </div> </dd> <dt> <a name='item225'>[225]</a> <a href ="/abs/2411.05281" title="Abstract" id="2411.05281"> arXiv:2411.05281 </a> (replaced) [<a href="/pdf/2411.05281" title="Download PDF" id="pdf-2411.05281" aria-labelledby="pdf-2411.05281">pdf</a>, <a href="https://arxiv.org/html/2411.05281v3" title="View HTML" id="html-2411.05281" aria-labelledby="html-2411.05281" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2411.05281" title="Other formats" id="oth-2411.05281" aria-labelledby="oth-2411.05281">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Fox-1: Open Small Language Model for Cloud and Edge </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Hu,+Z">Zijian Hu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+J">Jipeng Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Pan,+R">Rui Pan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xu,+Z">Zhaozhuo Xu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Han,+S">Shanshan Han</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jin,+H">Han Jin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shah,+A+D">Alay Dilipbhai Shah</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Stripelis,+D">Dimitris Stripelis</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yao,+Y">Yuhang Yao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Avestimehr,+S">Salman Avestimehr</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+T">Tong Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=He,+C">Chaoyang He</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Base model is available at <a href="https://huggingface.co/tensoropera/Fox-1-1.6B" rel="external noopener nofollow" class="link-external link-https">this https URL</a> and the instruction-tuned version is available at <a href="https://huggingface.co/tensoropera/Fox-1-1.6B-Instruct-v0.1" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> We present Fox-1, a series of small language models (SLMs) consisting of Fox-1-1.6B and Fox-1-1.6B-Instruct-v0.1. These models are pre-trained on 3 trillion tokens of web-scraped document data and fine-tuned with 5 billion tokens of instruction-following and multi-turn conversation data. Aiming to improve pre-training efficiency, the Fox-1-1.6B model introduces a novel 3-stage data curriculum across all the training data with 2K-8K sequence length. In architecture design, Fox-1 features a deeper layer structure, an expanded vocabulary, and utilizes Grouped Query Attention (GQA), offering a performant and efficient architecture compared to other SLMs. Fox-1 achieves better or on-par performance in various benchmarks compared to StableLM-2-1.6B, Gemma-2B, Qwen1.5-1.8B, and OpenELM-1.1B, with competitive inference speed and throughput. 
The model weights have been released under the Apache 2.0 license, with the aim of promoting the democratization of LLMs and making them fully accessible to the whole open-source community. </p> </div> </dd> <dt> <a name='item226'>[226]</a> <a href ="/abs/2411.18627" title="Abstract" id="2411.18627"> arXiv:2411.18627 </a> (replaced) [<a href="/pdf/2411.18627" title="Download PDF" id="pdf-2411.18627" aria-labelledby="pdf-2411.18627">pdf</a>, <a href="https://arxiv.org/html/2411.18627v2" title="View HTML" id="html-2411.18627" aria-labelledby="html-2411.18627" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2411.18627" title="Other formats" id="oth-2411.18627" aria-labelledby="oth-2411.18627">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Topological Approach for Data Assimilation </div> <div class='list-authors'><a href="https://arxiv.org/search/nlin?searchtype=author&query=Chumley,+M+M">Max M. Chumley</a>, <a href="https://arxiv.org/search/nlin?searchtype=author&query=Khasawneh,+F+A">Firas A. Khasawneh</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 18 pages, 15 figures. Results have been improved by showing noise robustness testing at two relevant learning rates. We also added an example applying the algorithm to the 6-dimensional Lorenz 96 system </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Chaotic Dynamics (nlin.CD)</span>; Machine Learning (cs.LG); Algebraic Topology (math.AT) </div> <p class='mathjax'> Many dynamical systems are difficult or impossible to model using high-fidelity physics-based models. Consequently, researchers are relying more on data-driven models to make predictions and forecasts. Based on limited training data, machine learning models often deviate from the true system states over time and need to be continually updated as new measurements are taken using data assimilation. Classical data assimilation algorithms typically require knowledge of the measurement noise statistics, which may be unknown. In this paper, we introduce a new data assimilation algorithm with a foundation in topological data analysis. By leveraging the differentiability of functions of persistence, gradient descent optimization is used to minimize topological differences between measurements and forecast predictions by tuning data-driven model coefficients without using noise information from the measurements. We describe the method and focus on its capabilities and performance using the chaotic Lorenz 63 system as an example, and we also show that the method works on a higher-dimensional example, the Lorenz 96 system. 
</p> </div> </dd> <dt> <a name='item227'>[227]</a> <a href ="/abs/2412.06333" title="Abstract" id="2412.06333"> arXiv:2412.06333 </a> (replaced) [<a href="/pdf/2412.06333" title="Download PDF" id="pdf-2412.06333" aria-labelledby="pdf-2412.06333">pdf</a>, <a href="https://arxiv.org/html/2412.06333v2" title="View HTML" id="html-2412.06333" aria-labelledby="html-2412.06333" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2412.06333" title="Other formats" id="oth-2412.06333" aria-labelledby="oth-2412.06333">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Augmenting the action space with conventions to improve multi-agent cooperation in Hanabi </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Bredell,+F">F. Bredell</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Engelbrecht,+H">H.A. Engelbrecht</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Schoeman,+J">J.C. Schoeman</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> This paper is under review at the journal of autonomous agents and multi-agent systems (JAAMAS). The updated manuscript is the revised version after the first round of peer revision </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Multiagent Systems (cs.MA)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> The card game Hanabi is considered a strong medium for the testing and development of multi-agent reinforcement learning (MARL) algorithms, due to its cooperative nature, hidden information, limited communication and remarkable complexity. Previous research efforts have explored the capabilities of MARL algorithms within Hanabi, focusing largely on advanced architecture design and algorithmic manipulations to achieve state-of-the-art performance for various numbers of cooperators. However, this often leads to complex solution strategies with high computational cost that require large amounts of training data. Humans solve the Hanabi game effectively by using conventions, which provide a means to implicitly convey ideas or knowledge based on a predefined, mutually agreed upon set of ``rules''. Multi-agent problems containing partial observability, especially when limited communication is present, can benefit greatly from the use of implicit knowledge sharing. In this paper, we propose a novel approach to augmenting the action space using conventions, which act as special cooperative actions that span multiple time steps and multiple agents, requiring agents to actively opt in for a convention to reach fruition. These conventions are based on existing human conventions, and result in a significant improvement in the performance of existing techniques for self-play and cross-play across various numbers of cooperators within Hanabi. 
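</p> <p class='mathjax'> Schematically, the augmentation appends convention actions to the primitive action set, gated on every participating agent opting in; the names below are illustrative, not from the paper's implementation. </p> <pre><code class="language-python">
# Illustrative sketch: conventions are appended to the primitive action set
# and only take effect once every participating agent has opted in.
PRIMITIVE_ACTIONS = ["play_card", "discard_card", "hint_colour", "hint_rank"]
CONVENTION_ACTIONS = ["finesse", "chop_save"]  # span multiple steps/agents

def augmented_action_space():
    return PRIMITIVE_ACTIONS + CONVENTION_ACTIONS

def convention_reaches_fruition(opted_in, required):
    # A convention only comes into effect if all required agents opt in.
    return set(required).issubset(opted_in)

print(convention_reaches_fruition({"agent_0", "agent_1"},
                                  ["agent_0", "agent_1"]))  # True
</code></pre> <p class='mathjax'>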
</p> </div> </dd> <dt> <a name='item228'>[228]</a> <a href ="/abs/2412.06947" title="Abstract" id="2412.06947"> arXiv:2412.06947 </a> (replaced) [<a href="/pdf/2412.06947" title="Download PDF" id="pdf-2412.06947" aria-labelledby="pdf-2412.06947">pdf</a>, <a href="https://arxiv.org/html/2412.06947v3" title="View HTML" id="html-2412.06947" aria-labelledby="html-2412.06947" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2412.06947" title="Other formats" id="oth-2412.06947" aria-labelledby="oth-2412.06947">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> PyraNet: A Multi-Layered Hierarchical Dataset for Verilog </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Nadimi,+B">Bardia Nadimi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Boutaib,+G+O">Ghali Omar Boutaib</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zheng,+H">Hao Zheng</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Hardware Architecture (cs.AR)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG); Programming Languages (cs.PL) </div> <p class='mathjax'> Recently, there has been a growing interest in leveraging Large Language Models for Verilog code generation. However, the current quality of the generated Verilog code remains suboptimal. This is largely due to the absence of well-defined, well-organized datasets with high-quality samples, as well as a lack of innovative fine-tuning methods and models specifically trained on Verilog. In this paper, we introduce a novel open-source dataset and a corresponding fine-tuning technique, which utilizes a multi-layered structure that we refer to as PyraNet. Our experiments demonstrate that employing the proposed dataset and fine-tuning approach leads to a more accurate fine-tuned model, producing syntactically and functionally correct Verilog code. The evaluation results show improvements of up to $32.6\%$ in comparison to the CodeLlama-7B baseline model and up to $16.7\%$ in comparison to state-of-the-art models on the VerilogEval evaluation platform. 
</p> </div> </dd> <dt> <a name='item229'>[229]</a> <a href ="/abs/2412.08755" title="Abstract" id="2412.08755"> arXiv:2412.08755 </a> (replaced) [<a href="/pdf/2412.08755" title="Download PDF" id="pdf-2412.08755" aria-labelledby="pdf-2412.08755">pdf</a>, <a href="https://arxiv.org/html/2412.08755v4" title="View HTML" id="html-2412.08755" aria-labelledby="html-2412.08755" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2412.08755" title="Other formats" id="oth-2412.08755" aria-labelledby="oth-2412.08755">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Proactive Adversarial Defense: Harnessing Prompt Tuning in Vision-Language Models to Detect Unseen Backdoored Images </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Stein,+K">Kyle Stein</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mahyari,+A+A">Andrew Arash Mahyari</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Francia,+G">Guillermo Francia</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=El-Sheikh,+E">Eman El-Sheikh</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Artificial Intelligence (cs.AI); Cryptography and Security (cs.CR); Machine Learning (cs.LG) </div> <p class='mathjax'> Backdoor attacks pose a critical threat by embedding hidden triggers into inputs, causing models to misclassify them into target labels. While extensive research has focused on mitigating these attacks in object recognition models through weight fine-tuning, much less attention has been given to detecting backdoored samples directly. Given the vast datasets used in training, manual inspection for backdoor triggers is impractical, and even state-of-the-art defense mechanisms fail to fully neutralize their impact. To address this gap, we introduce a method to detect unseen backdoored images during both training and inference. Leveraging the success of prompt tuning in Vision Language Models (VLMs), our approach trains learnable text prompts to differentiate clean images from those with hidden backdoor triggers. Experiments demonstrate the efficacy of this method, achieving an average accuracy of 86% across two widely used datasets for detecting unseen backdoor triggers, establishing a new standard in backdoor defense.
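As a rough illustration of the prompt-tuning idea (a sketch under the assumption of a frozen CLIP-style encoder; dimensions and names are ours, not the paper's), one can train only two soft "class prompts", clean versus backdoored, against frozen image embeddings:
<pre>
import torch
import torch.nn as nn
import torch.nn.functional as F

class PromptDetector(nn.Module):
    """Two learnable soft prompts scored against frozen image features."""
    def __init__(self, embed_dim=512, prompt_len=8):
        super().__init__()
        # one learnable pseudo-text prompt per class (clean / backdoored)
        self.prompts = nn.Parameter(torch.randn(2, prompt_len, embed_dim) * 0.02)

    def forward(self, image_feats):               # (B, D) from a frozen VLM
        class_emb = F.normalize(self.prompts.mean(dim=1), dim=-1)  # (2, D)
        image_feats = F.normalize(image_feats, dim=-1)
        return image_feats @ class_emb.t()        # logits: clean vs backdoored
</pre>
Only the prompt parameters receive gradients; the vision encoder stays frozen, which is what makes such a detector cheap to train.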
</p> </div> </dd> <dt> <a name='item230'>[230]</a> <a href ="/abs/2412.14488" title="Abstract" id="2412.14488"> arXiv:2412.14488 </a> (replaced) [<a href="/pdf/2412.14488" title="Download PDF" id="pdf-2412.14488" aria-labelledby="pdf-2412.14488">pdf</a>, <a href="https://arxiv.org/html/2412.14488v4" title="View HTML" id="html-2412.14488" aria-labelledby="html-2412.14488" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2412.14488" title="Other formats" id="oth-2412.14488" aria-labelledby="oth-2412.14488">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A stochastic first-order method with multi-extrapolated momentum for highly smooth unconstrained optimization </div> <div class='list-authors'><a href="https://arxiv.org/search/math?searchtype=author&query=He,+C">Chuan He</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> An example is provided to illustrate the gap between the smoothness of the objective function itself and the mean-squared smoothness of the stochastic gradient estimator </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Optimization and Control (math.OC)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> In this paper, we consider an unconstrained stochastic optimization problem where the objective function exhibits high-order smoothness. Specifically, we propose a new stochastic first-order method (SFOM) with multi-extrapolated momentum, in which multiple extrapolations are performed in each iteration, followed by a momentum update based on these extrapolations. We demonstrate that the proposed SFOM can accelerate optimization by exploiting the high-order smoothness of the objective function $f$. Assuming that the $p$th-order derivative of $f$ is Lipschitz continuous for some $p\ge2$, and under additional mild assumptions, we establish that our method achieves a sample complexity of $\widetilde{\mathcal{O}}(\epsilon^{-(3p+1)/p})$ for finding a point $x$ such that $\mathbb{E}[\|\nabla f(x)\|]\le\epsilon$. To the best of our knowledge, this is the first SFOM to leverage arbitrary-order smoothness of the objective function for acceleration, resulting in a sample complexity that improves upon the best-known results without assuming the mean-squared smoothness condition. Preliminary numerical experiments validate the practical performance of our method and support our theoretical findings. 
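The abstract describes the update only at a high level, so the following is a schematic sketch: the coefficients, stepsizes, and the way extrapolated gradients are combined are illustrative assumptions, not the paper's exact rule.
<pre>
import numpy as np

def sfom_step(x, x_prev, m, grad_fn, lr=1e-2, beta=0.9, gammas=(0.5, 1.0)):
    """One step: evaluate stochastic gradients at several extrapolated
    points, then fold their average into the momentum buffer."""
    grads = [grad_fn(x + g * (x - x_prev)) for g in gammas]  # extrapolations
    g_bar = np.mean(grads, axis=0)
    m = beta * m + (1 - beta) * g_bar       # momentum from the extrapolations
    return x - lr * m, x, m                 # new x, new x_prev, new momentum
</pre>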
</p> </div> </dd> <dt> <a name='item231'>[231]</a> <a href ="/abs/2412.15322" title="Abstract" id="2412.15322"> arXiv:2412.15322 </a> (replaced) [<a href="/pdf/2412.15322" title="Download PDF" id="pdf-2412.15322" aria-labelledby="pdf-2412.15322">pdf</a>, <a href="https://arxiv.org/html/2412.15322v2" title="View HTML" id="html-2412.15322" aria-labelledby="html-2412.15322" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2412.15322" title="Other formats" id="oth-2412.15322" aria-labelledby="oth-2412.15322">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> MMAudio: Taming Multimodal Joint Training for High-Quality Video-to-Audio Synthesis </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Cheng,+H+K">Ho Kei Cheng</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ishii,+M">Masato Ishii</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hayakawa,+A">Akio Hayakawa</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shibuya,+T">Takashi Shibuya</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Schwing,+A">Alexander Schwing</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mitsufuji,+Y">Yuki Mitsufuji</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted to CVPR 2025. Project page: <a href="https://hkchengrex.github.io/MMAudio" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Machine Learning (cs.LG); Sound (cs.SD); Audio and Speech Processing (eess.AS) </div> <p class='mathjax'> We propose to synthesize high-quality and synchronized audio, given video and optional text conditions, using a novel multimodal joint training framework MMAudio. In contrast to single-modality training conditioned on (limited) video data only, MMAudio is jointly trained with larger-scale, readily available text-audio data to learn to generate semantically aligned high-quality audio samples. Additionally, we improve audio-visual synchrony with a conditional synchronization module that aligns video conditions with audio latents at the frame level. Trained with a flow matching objective, MMAudio achieves new video-to-audio state-of-the-art among public models in terms of audio quality, semantic alignment, and audio-visual synchronization, while having a low inference time (1.23s to generate an 8s clip) and just 157M parameters. MMAudio also achieves surprisingly competitive performance in text-to-audio generation, showing that joint training does not hinder single-modality performance. 
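For reference, a flow matching objective in its simplest (rectified-flow style) form looks like the generic sketch below; the straight noise-to-data path and the model signature are assumptions for illustration, not MMAudio's actual training code.
<pre>
import torch
import torch.nn.functional as F

def flow_matching_loss(model, x1, cond):
    """Regress the model's velocity field onto the straight path
    from noise x0 to data x1 (generic conditional flow matching)."""
    x0 = torch.randn_like(x1)                                  # noise sample
    t = torch.rand(x1.shape[0], *([1] * (x1.dim() - 1)), device=x1.device)
    xt = (1 - t) * x0 + t * x1                                 # point on path
    target_v = x1 - x0                                         # path velocity
    pred_v = model(xt, t.flatten(), cond)                      # hypothetical API
    return F.mse_loss(pred_v, target_v)
</pre>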
Code and demo are available at: <a href="https://hkchengrex.github.io/MMAudio" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </p> </div> </dd> <dt> <a name='item232'>[232]</a> <a href ="/abs/2501.17762" title="Abstract" id="2501.17762"> arXiv:2501.17762 </a> (replaced) [<a href="/pdf/2501.17762" title="Download PDF" id="pdf-2501.17762" aria-labelledby="pdf-2501.17762">pdf</a>, <a href="https://arxiv.org/html/2501.17762v3" title="View HTML" id="html-2501.17762" aria-labelledby="html-2501.17762" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2501.17762" title="Other formats" id="oth-2501.17762" aria-labelledby="oth-2501.17762">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Improving Privacy Benefits of Redaction </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Gusain,+V">Vaibhav Gusain</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Leith,+D">Douglas Leith</a></div> <div class='list-journal-ref'><span class='descriptor'>Journal-ref:</span> ESANN 2025 </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span>; Computation and Language (cs.CL); Machine Learning (cs.LG) </div> <p class='mathjax'> We propose a novel redaction methodology that can be used to sanitize natural text data. Our new technique provides better privacy benefits than other state-of-the-art techniques while maintaining lower redaction levels. </p> </div> </dd> <dt> <a name='item233'>[233]</a> <a href ="/abs/2502.02514" title="Abstract" id="2502.02514"> arXiv:2502.02514 </a> (replaced) [<a href="/pdf/2502.02514" title="Download PDF" id="pdf-2502.02514" aria-labelledby="pdf-2502.02514">pdf</a>, <a href="https://arxiv.org/html/2502.02514v2" title="View HTML" id="html-2502.02514" aria-labelledby="html-2502.02514" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2502.02514" title="Other formats" id="oth-2502.02514" aria-labelledby="oth-2502.02514">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Privacy Attacks on Image AutoRegressive Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Kowalczuk,+A">Antoni Kowalczuk</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dubi%C5%84ski,+J">Jan Dubiński</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Boenisch,+F">Franziska Boenisch</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dziedzic,+A">Adam Dziedzic</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Code: <a href="https://github.com/sprintml/privacy_attacks_against_iars" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Image autoregressive generation has emerged as a powerful new paradigm, with image autoregressive models (IARs) matching state-of-the-art diffusion models (DMs) in image quality (FID: 1.48 vs. 1.58) while allowing for higher generation speed. However, the privacy risks associated with IARs remain unexplored, raising concerns about their responsible deployment.
To address this gap, we conduct a comprehensive privacy analysis of IARs, comparing their privacy risks to those of DMs as a reference point. Specifically, we develop a novel membership inference attack (MIA) that achieves a remarkably high success rate in detecting training images, with a True Positive Rate at False Positive Rate = 1% (TPR@FPR=1%) of 86.38%, compared to just 6.38% for DMs using comparable attacks. We leverage our novel MIA to perform dataset inference (DI) for IARs and show that it requires as few as 6 samples to detect dataset membership, compared to 200 samples for DI in DMs. This confirms a higher level of information leakage in IARs. Finally, we are able to extract hundreds of training data points from an IAR (e.g., 698 from VAR-d30). Our results suggest a fundamental privacy-utility trade-off: while IARs excel in image generation quality and speed, they are, empirically, significantly more vulnerable to privacy attacks than DMs of similar performance. This trend suggests that incorporating techniques from DMs into IARs, such as modeling the per-token probability distribution using a diffusion procedure, could help mitigate IARs' vulnerability to privacy attacks. We make our code available at: <a href="https://github.com/sprintml/privacy_attacks_against_iars" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </p> </div> </dd> <dt> <a name='item234'>[234]</a> <a href ="/abs/2502.02885" title="Abstract" id="2502.02885"> arXiv:2502.02885 </a> (replaced) [<a href="/pdf/2502.02885" title="Download PDF" id="pdf-2502.02885" aria-labelledby="pdf-2502.02885">pdf</a>, <a href="https://arxiv.org/html/2502.02885v3" title="View HTML" id="html-2502.02885" aria-labelledby="html-2502.02885" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2502.02885" title="Other formats" id="oth-2502.02885" aria-labelledby="oth-2502.02885">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Expertized Caption Auto-Enhancement for Video-Text Retrieval </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Yang,+B">Baoyao Yang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+J">Junxiang Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+W">Wanyun Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yao,+W">Wenbin Yao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhou,+Y">Yang Zhou</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Video-text retrieval has long been hampered by the information mismatch caused by personalized and inadequate textual descriptions of videos. The substantial information gap between the two modalities hinders effective cross-modal representation alignment, resulting in ambiguous retrieval results.
Although text rewriting methods have been proposed to broaden text expressions, the modality gap remains significant, as the text representation space is hardly expanded with insufficient semantic enrichment. Instead, this paper turns to enhancing visual presentation, bringing video expression closer to textual representation via caption generation and thereby facilitating video-text matching. While multimodal large language models (mLLMs) have shown a powerful capability to convert video content into text, carefully crafted prompts are essential to ensure the reasonableness and completeness of the generated captions. Therefore, this paper proposes an automatic caption enhancement method that improves expression quality and mitigates empiricism in augmented captions through self-learning. Additionally, an expertized caption selection mechanism is designed and introduced to customize augmented captions for each video, further exploring the utilization potential of caption augmentation. Our method is entirely data-driven, which not only dispenses with heavy data collection and computation workload but also improves self-adaptability by circumventing lexicon dependence and introducing personalized matching. The superiority of our method is validated by state-of-the-art results on various benchmarks, specifically achieving Top-1 recall accuracy of 68.5% on MSR-VTT, 68.1% on MSVD, and 62.0% on DiDeMo. Our code is publicly available at <a href="https://github.com/CaryXiang/ECA4VTR" rel="external noopener nofollow" class="link-external link-https">this https URL</a>.
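The expertized-selection step can be pictured as a simple embedding-similarity argmax; the sketch below assumes precomputed video and caption embeddings and is only a schematic reading of the abstract, not the paper's mechanism:
<pre>
import numpy as np

def select_expert_caption(video_emb, caption_embs):
    """Among several mLLM-augmented captions, keep the one whose
    embedding best matches the video (cosine similarity)."""
    video_emb = video_emb / np.linalg.norm(video_emb)
    caption_embs = caption_embs / np.linalg.norm(caption_embs, axis=1, keepdims=True)
    scores = caption_embs @ video_emb      # one similarity score per caption
    return int(np.argmax(scores))          # index of the selected caption
</pre>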
</p> </div> </dd> <dt> <a name='item235'>[235]</a> <a href ="/abs/2502.03370" title="Abstract" id="2502.03370"> arXiv:2502.03370 </a> (replaced) [<a href="/pdf/2502.03370" title="Download PDF" id="pdf-2502.03370" aria-labelledby="pdf-2502.03370">pdf</a>, <a href="/format/2502.03370" title="Other formats" id="oth-2502.03370" aria-labelledby="oth-2502.03370">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Deep Learning-Based Approach for Identification of Potato Leaf Diseases Using Wrapper Feature Selection and Feature Concatenation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Naeem,+M+A">Muhammad Ahtsam Naeem</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Saleem,+M+A">Muhammad Asim Saleem</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sharif,+M+I">Muhammad Imran Sharif</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Akber,+S">Shahzad Akber</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Saleem,+S">Sajjad Saleem</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Akhtar,+Z">Zahid Akhtar</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Siddique,+K">Kamran Siddique</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> The potato is a widely grown crop in many regions of the world, and potato farming has gained considerable traction in recent decades. Potatoes are susceptible to several diseases that stunt their development, with leaf diseases being particularly significant. Early Blight and Late Blight are two prevalent leaf diseases that affect potato plants. The early detection of these diseases would be beneficial for enhancing the yield of this crop. The ideal solution is to use image processing to identify and analyze these disorders. Here, we present an autonomous method based on image processing and machine learning to detect late blight disease affecting potato leaves. The proposed method comprises four different phases: (1) Histogram Equalization is used to improve the quality of the input image; (2) feature extraction is performed using a Deep CNN model, then these extracted features are concatenated; (3) feature selection is performed using wrapper-based feature selection; (4) classification is performed using an SVM classifier and its variants. The proposed method achieves its highest accuracy of 99% using an SVM with 550 selected features.
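Phases (3) and (4) map directly onto standard scikit-learn components; the sketch below uses synthetic stand-ins for the concatenated CNN features, and the kernel choice is an illustrative assumption (only the 550 selected features come from the abstract):
<pre>
import numpy as np
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.svm import SVC

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 1000))   # stand-in for concatenated deep CNN features
y = rng.integers(0, 2, size=200)   # stand-in labels (healthy / late blight)

clf = SVC(kernel="rbf")
selector = SequentialFeatureSelector(       # wrapper-based feature selection
    clf, n_features_to_select=550, direction="forward", n_jobs=-1)
selector.fit(X, y)
clf.fit(selector.transform(X), y)           # final SVM on the selected features
</pre>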
</p> </div> </dd> <dt> <a name='item236'>[236]</a> <a href ="/abs/2502.07847" title="Abstract" id="2502.07847"> arXiv:2502.07847 </a> (replaced) [<a href="/pdf/2502.07847" title="Download PDF" id="pdf-2502.07847" aria-labelledby="pdf-2502.07847">pdf</a>, <a href="https://arxiv.org/html/2502.07847v2" title="View HTML" id="html-2502.07847" aria-labelledby="html-2502.07847" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2502.07847" title="Other formats" id="oth-2502.07847" aria-labelledby="oth-2502.07847">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Confidence-calibrated covariate shift correction for few-shot classification in Vision-Language Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Khan,+B">Behraj Khan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Qureshi,+R">Rizwan Qureshi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Durrani,+N+M">Nouman Muhammad Durrani</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Syed,+T">Tahir Syed</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Since the establishment of vision-language foundation models as the new mainstay in low-shot vision classification tasks, the question of domain generalization arising from insufficient target data has become increasingly important. This scarcity challenge induces sampling bias and amplifies model sensitivity to variations and shifts in data distributions. While fine-tuning on multiple domains could mitigate such domain generalization issues, it is resource-intensive and demands diverse data sources. <br>In this work, we systematically analyze two critical challenges: (1) covariate shift between the pre-training distribution and the underspecified target distribution, and (2) confidence misalignment, where predictions on novel data are overconfident. <br>To address both challenges simultaneously, we introduce \textbf{Confidence-Calibrated Covariate Shift Correction (CalShift)} -- a unified approach that combines a Fisher information penalty to mitigate covariate shift and a Confidence Misalignment Penalty (CMP) to reduce overconfidence in misclassified examples. <br>Experimental evaluations across various vision and covariate shift benchmarks demonstrate that CalShift significantly improves model calibration, achieving up to a 5.82\% reduction in Expected Calibration Error (ECE). Furthermore, CalShift enhances robustness, improving accuracy by 3.5\% on challenging datasets impacted by covariate shifts. <br>Our results highlight CalShift as a promising strategy for building robust and reliable low-shot vision-language systems for real-world applications.
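A composite objective in the spirit of CalShift might look like the sketch below; the concrete forms of the Fisher penalty and the confidence misalignment penalty here are our assumptions for illustration, not the paper's exact definitions:
<pre>
import torch.nn.functional as F

def calshift_style_loss(logits, labels, lam_fisher=0.1, lam_cmp=0.1):
    ce = F.cross_entropy(logits, labels)
    probs = logits.softmax(dim=-1)
    # Fisher-information-style penalty on the predictive distribution
    fisher = (probs * (1 - probs)).sum(dim=-1).mean()
    # CMP-style term: penalize high confidence on misclassified samples
    conf, pred = probs.max(dim=-1)
    cmp_term = (conf * (pred != labels).float()).mean()
    return ce + lam_fisher * fisher + lam_cmp * cmp_term
</pre>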
</p> </div> </dd> <dt> <a name='item237'>[237]</a> <a href ="/abs/2502.09017" title="Abstract" id="2502.09017"> arXiv:2502.09017 </a> (replaced) [<a href="/pdf/2502.09017" title="Download PDF" id="pdf-2502.09017" aria-labelledby="pdf-2502.09017">pdf</a>, <a href="https://arxiv.org/html/2502.09017v2" title="View HTML" id="html-2502.09017" aria-labelledby="html-2502.09017" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2502.09017" title="Other formats" id="oth-2502.09017" aria-labelledby="oth-2502.09017">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Diversity Enhances an LLM's Performance in RAG and Long-context Task </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+Z">Zhichao Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Bi,+B">Bin Bi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Luo,+Y">Yanqi Luo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Asur,+S">Sitaram Asur</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Cheng,+C+N">Claire Na Cheng</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> The rapid advancements in large language models (LLMs) have highlighted the challenge of context window limitations, primarily due to the quadratic time complexity of the self-attention mechanism (\(O(N^2)\), where \(N\) denotes the context window length). This constraint impacts tasks such as retrieval-augmented generation (RAG) in question answering (Q\&A) and long context summarization. A common approach involves selecting content with the highest similarity to the query; however, this often leads to redundancy and the exclusion of diverse yet relevant information. Building on principles from Maximal Marginal Relevance (MMR) and Farthest Point Sampling (FPS), we integrate diversity into the content selection process. Our findings reveal that incorporating diversity substantially increases the recall of selecting relevant sentences or chunks before LLM-based Q\&A and summarization. These results highlight the importance of maintaining diversity in future LLM applications to further improve summarization and Q\&A outcomes. 
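The MMR criterion the paper builds on is a standard formulation, so it can be stated precisely; the sketch below selects k chunks by trading query relevance against redundancy with already-selected chunks (embeddings are assumed L2-normalized, and the lambda value is illustrative):
<pre>
import numpy as np

def mmr_select(query_emb, chunk_embs, k, lam=0.7):
    rel = chunk_embs @ query_emb                 # relevance to the query
    selected, candidates = [], list(range(len(chunk_embs)))
    while candidates and len(selected) < k:
        if not selected:
            best = candidates[int(np.argmax(rel[candidates]))]
        else:
            sim = chunk_embs[candidates] @ chunk_embs[selected].T
            score = lam * rel[candidates] - (1 - lam) * sim.max(axis=1)
            best = candidates[int(np.argmax(score))]
        selected.append(best)
        candidates.remove(best)
    return selected                              # indices of diverse, relevant chunks
</pre>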
</p> </div> </dd> <dt> <a name='item238'>[238]</a> <a href ="/abs/2503.05050" title="Abstract" id="2503.05050"> arXiv:2503.05050 </a> (replaced) [<a href="/pdf/2503.05050" title="Download PDF" id="pdf-2503.05050" aria-labelledby="pdf-2503.05050">pdf</a>, <a href="https://arxiv.org/html/2503.05050v2" title="View HTML" id="html-2503.05050" aria-labelledby="html-2503.05050" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2503.05050" title="Other formats" id="oth-2503.05050" aria-labelledby="oth-2503.05050">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Unified Framework with Novel Metrics for Evaluating the Effectiveness of XAI Techniques in LLMs </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Mersha,+M+A">Melkamu Abay Mersha</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yigezu,+M+G">Mesay Gemeda Yigezu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shakil,+H">Hassan Shakil</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=AlShami,+A+K">Ali K. AlShami</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Byun,+S">Sanghyun Byun</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kalita,+J">Jugal Kalita</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> arXiv admin note: substantial text overlap with <a href="https://arxiv.org/abs/2501.15374" data-arxiv-id="2501.15374" class="link-https">arXiv:2501.15374</a> </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> The increasing complexity of LLMs presents significant challenges to their transparency and interpretability, necessitating the use of eXplainable AI (XAI) techniques to enhance trustworthiness and usability. This study introduces a comprehensive evaluation framework with four novel metrics for assessing the effectiveness of five XAI techniques across five LLMs and two downstream tasks. We apply this framework to evaluate five XAI techniques: LIME, SHAP, Integrated Gradients, Layer-wise Relevance Propagation (LRP), and Attention Mechanism Visualization (AMV), using the IMDB Movie Reviews and Tweet Sentiment Extraction datasets. The evaluation focuses on four key metrics: Human-reasoning Agreement (HA), Robustness, Consistency, and Contrastivity. Our results show that LIME consistently achieves high scores across multiple LLMs and evaluation metrics, while AMV demonstrates superior Robustness and near-perfect Consistency. LRP excels in Contrastivity, particularly with more complex models. Our findings provide valuable insights into the strengths and limitations of different XAI methods, offering guidance for developing and selecting appropriate XAI techniques for LLMs.
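For readers who want to reproduce the kind of token-level attributions being evaluated, the SHAP library works directly on a Hugging Face text-classification pipeline; the checkpoint below is an illustrative choice, not necessarily one used in the paper:
<pre>
import shap
from transformers import pipeline

clf = pipeline("text-classification",
               model="distilbert-base-uncased-finetuned-sst-2-english",
               top_k=None)                      # return scores for all classes
explainer = shap.Explainer(clf)
shap_values = explainer(["This movie was surprisingly good."])
print(shap_values[0])                           # per-token class contributions
</pre>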
</p> </div> </dd> <dt> <a name='item239'>[239]</a> <a href ="/abs/2503.07378" title="Abstract" id="2503.07378"> arXiv:2503.07378 </a> (replaced) [<a href="/pdf/2503.07378" title="Download PDF" id="pdf-2503.07378" aria-labelledby="pdf-2503.07378">pdf</a>, <a href="https://arxiv.org/html/2503.07378v5" title="View HTML" id="html-2503.07378" aria-labelledby="html-2503.07378" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2503.07378" title="Other formats" id="oth-2503.07378" aria-labelledby="oth-2503.07378">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Materials Map Integrating Experimental and Computational Data via Graph-Based Machine Learning for Enhanced Materials Discovery </div> <div class='list-authors'><a href="https://arxiv.org/search/cond-mat?searchtype=author&query=Hashimoto,+Y">Yusuke Hashimoto</a>, <a href="https://arxiv.org/search/cond-mat?searchtype=author&query=Jia,+X">Xue Jia</a>, <a href="https://arxiv.org/search/cond-mat?searchtype=author&query=Li,+H">Hao Li</a>, <a href="https://arxiv.org/search/cond-mat?searchtype=author&query=Tomai,+T">Takaaki Tomai</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Materials Science (cond-mat.mtrl-sci)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Materials informatics (MI), emerging from the integration of materials science and data science, is expected to significantly accelerate material development and discovery. The data used in MI are derived from both computational and experimental studies; however, their integration remains challenging. In our previous study, we reported the integration of these datasets by applying a machine learning model that is trained on the experimental dataset to the compositional data stored in the computational database. In this study, we use the obtained datasets to construct materials maps, which visualize the relationships between material properties and structural features, aiming to support experimental researchers. The materials map is constructed using the MatDeepLearn (MDL) framework, which implements materials property prediction using graph-based representations of material structure and deep learning modeling. Through statistical analysis, we find that the MDL framework using the message passing neural network (MPNN) architecture efficiently extracts features reflecting the structural complexity of materials. Moreover, we find that this advantage does not necessarily translate into improved accuracy in the prediction of material properties. We attribute this unexpected outcome to the high learning performance inherent in MPNN, which can contribute to the structuring of data points within the materials map. 
</p> </div> </dd> <dt> <a name='item240'>[240]</a> <a href ="/abs/2503.10666" title="Abstract" id="2503.10666"> arXiv:2503.10666 </a> (replaced) [<a href="/pdf/2503.10666" title="Download PDF" id="pdf-2503.10666" aria-labelledby="pdf-2503.10666">pdf</a>, <a href="https://arxiv.org/html/2503.10666v2" title="View HTML" id="html-2503.10666" aria-labelledby="html-2503.10666" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2503.10666" title="Other formats" id="oth-2503.10666" aria-labelledby="oth-2503.10666">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Green Prompting </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Adamska,+M">Marta Adamska</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Smirnova,+D">Daria Smirnova</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Nasiri,+H">Hamid Nasiri</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yu,+Z">Zhengxin Yu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Garraghan,+P">Peter Garraghan</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 9 pages, 5 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Large Language Models (LLMs) have become widely used across various domains spanning search engines, code generation, and text creation. However, a major concern associated with their adoption is the high cost of inference, impacting both their sustainability and financial feasibility. In this study, we empirically examine how different prompt and response characteristics directly impact LLM inference energy cost. We conduct experiments leveraging three open-source transformer-based LLMs across three task types$-$question answering, sentiment analysis, and text generation. For each inference, we analyzed prompt and response characteristics (length, semantic meaning, time taken, energy consumption). Our results demonstrate that even when presented with identical tasks, models generate responses with varying characteristics and subsequently exhibit distinct energy consumption patterns. We found that prompt length is less significant than the semantic meaning of the task itself. In addition, we identified specific keywords associated with higher or lower energy usage that vary between associated tasks. These findings highlight the importance of prompt design in optimizing inference efficiency. We conclude that the semantic meaning of prompts and certain task-related keywords significantly impact inference costs, paving the way for deeper exploration into energy-adaptive LLMs.
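One common way to instrument per-inference energy in studies of this kind (the paper does not prescribe this exact setup) is to sample NVML power draw while the generation call runs:
<pre>
import time, threading, pynvml

def measure_energy(fn):
    """Run fn() while polling GPU power; return (result, joules)."""
    pynvml.nvmlInit()
    handle = pynvml.nvmlDeviceGetHandleByIndex(0)
    samples, done = [], threading.Event()
    def poll():
        while not done.is_set():
            samples.append(pynvml.nvmlDeviceGetPowerUsage(handle))  # milliwatts
            time.sleep(0.05)
    t = threading.Thread(target=poll); t.start()
    start = time.time(); result = fn(); elapsed = time.time() - start
    done.set(); t.join()
    avg_watts = (sum(samples) / max(len(samples), 1)) / 1000.0
    return result, avg_watts * elapsed          # approximate energy in joules
</pre>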
</p> </div> </dd> <dt> <a name='item241'>[241]</a> <a href ="/abs/2503.12763" title="Abstract" id="2503.12763"> arXiv:2503.12763 </a> (replaced) [<a href="/pdf/2503.12763" title="Download PDF" id="pdf-2503.12763" aria-labelledby="pdf-2503.12763">pdf</a>, <a href="https://arxiv.org/html/2503.12763v2" title="View HTML" id="html-2503.12763" aria-labelledby="html-2503.12763" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2503.12763" title="Other formats" id="oth-2503.12763" aria-labelledby="oth-2503.12763">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Survey on Human Interaction Motion Generation </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Sui,+K">Kewei Sui</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ghosh,+A">Anindita Ghosh</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hwang,+I">Inwoo Hwang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhou,+B">Bing Zhou</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+J">Jian Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Guo,+C">Chuan Guo</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> The repository listing relevant papers is accessible at: <a href="https://github.com/soraproducer/Awesome-Human-Interaction-Motion-Generation" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> Humans inhabit a world defined by interactions -- with other humans, objects, and environments. These interactive movements not only convey our relationships with our surroundings but also demonstrate how we perceive and communicate with the real world. Therefore, replicating these interaction behaviors in digital systems has emerged as an important topic for applications in robotics, virtual reality, and animation. While recent advances in deep generative models and new datasets have accelerated progress in this field, significant challenges remain in modeling the intricate human dynamics and their interactions with entities in the external world. In this survey, we present, for the first time, a comprehensive overview of the literature in human interaction motion generation. We begin by establishing foundational concepts essential for understanding the research background. We then systematically review existing solutions and datasets across three primary interaction tasks -- human-human, human-object, and human-scene interactions -- followed by evaluation metrics. Finally, we discuss open research directions and future opportunities. 
</p> </div> </dd> <dt> <a name='item242'>[242]</a> <a href ="/abs/2503.15485" title="Abstract" id="2503.15485"> arXiv:2503.15485 </a> (replaced) [<a href="/pdf/2503.15485" title="Download PDF" id="pdf-2503.15485" aria-labelledby="pdf-2503.15485">pdf</a>, <a href="https://arxiv.org/html/2503.15485v2" title="View HTML" id="html-2503.15485" aria-labelledby="html-2503.15485" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2503.15485" title="Other formats" id="oth-2503.15485" aria-labelledby="oth-2503.15485">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> TULIP: Towards Unified Language-Image Pretraining </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Tang,+Z">Zineng Tang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lian,+L">Long Lian</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Eisape,+S">Seun Eisape</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+X">XuDong Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Herzig,+R">Roei Herzig</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yala,+A">Adam Yala</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Suhr,+A">Alane Suhr</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Darrell,+T">Trevor Darrell</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chan,+D+M">David M. Chan</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> (v2) Clarified fine-tuning process, updated appendix </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Artificial Intelligence (cs.AI); Computation and Language (cs.CL); Machine Learning (cs.LG) </div> <p class='mathjax'> Despite the recent success of image-text contrastive models like CLIP and SigLIP, these models often struggle with vision-centric tasks that demand high-fidelity image understanding, such as counting, depth estimation, and fine-grained object recognition. By optimizing for language alignment, these models tend to prioritize high-level semantics over visual detail, which weakens their image understanding. On the other hand, vision-focused models are strong at processing visual information but struggle to understand language, limiting their flexibility for language-driven tasks. In this work, we introduce TULIP, an open-source, drop-in replacement for existing CLIP-like models. Our method leverages generative data augmentation, enhanced image-image and text-text contrastive learning, and image/text reconstruction regularization to learn fine-grained visual features while preserving global semantic alignment. Our approach, scaling to over 1B parameters, outperforms existing state-of-the-art (SOTA) models across multiple benchmarks, establishing a new SOTA zero-shot performance on ImageNet-1K, delivering up to a $2\times$ enhancement over SigLIP on RxRx1 in linear probing for few-shot classification, and improving vision-language models, achieving over $3\times$ higher scores than SigLIP on MMVP.
Our code/checkpoints are available at <a href="https://tulip-berkeley.github.io" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </p> </div> </dd> <dt> <a name='item243'>[243]</a> <a href ="/abs/2503.21878" title="Abstract" id="2503.21878"> arXiv:2503.21878 </a> (replaced) [<a href="/pdf/2503.21878" title="Download PDF" id="pdf-2503.21878" aria-labelledby="pdf-2503.21878">pdf</a>, <a href="/format/2503.21878" title="Other formats" id="oth-2503.21878" aria-labelledby="oth-2503.21878">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Is Best-of-N the Best of Them? Coverage, Scaling, and Optimality in Inference-Time Alignment </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Huang,+A">Audrey Huang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Block,+A">Adam Block</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+Q">Qinghua Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jiang,+N">Nan Jiang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Krishnamurthy,+A">Akshay Krishnamurthy</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Foster,+D+J">Dylan J. Foster</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Artificial Intelligence (cs.AI)</span>; Machine Learning (cs.LG); Machine Learning (stat.ML) </div> <p class='mathjax'> Inference-time computation offers a powerful axis for scaling the performance of language models. However, naively increasing computation in techniques like Best-of-N sampling can lead to performance degradation due to reward hacking. Toward a theoretical understanding of how to best leverage additional computation, we focus on inference-time alignment, which we formalize as the problem of improving the quality of responses drawn from a pre-trained policy, given a prompt of interest and access to an imperfect reward model. We analyze the performance of inference-time alignment algorithms in terms of (i) response quality, and (ii) compute, and provide new results that highlight the importance of the pre-trained policy's coverage over high-quality responses for performance and compute scaling: <br>1. We show that Best-of-$N$ alignment with an ideal choice for $N$ can achieve optimal performance under stringent notions of coverage, but provably suffers from reward hacking when $N$ is large, and fails to achieve tight guarantees under more realistic coverage conditions. <br>2. We introduce $\texttt{InferenceTimePessimism}$, a new algorithm which mitigates reward hacking through deliberate use of inference-time compute, implementing the principle of pessimism in the face of uncertainty via rejection sampling; we prove that its performance is optimal and does not degrade with $N$, meaning it is scaling-monotonic. <br>We complement our theoretical results with an experimental evaluation that demonstrates the benefits of $\texttt{InferenceTimePessimism}$ across a variety of tasks and models.
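The contrast between plain Best-of-N and a pessimistic variant can be sketched in a few lines; the threshold-capped version below is only a toy illustration of the pessimism principle, not the paper's $\texttt{InferenceTimePessimism}$ algorithm:
<pre>
import numpy as np

def best_of_n(sample, reward, n):
    """Draw n responses, return the one with the highest proxy reward.
    With an imperfect reward model, large n invites reward hacking."""
    return max((sample() for _ in range(n)), key=reward)

def pessimistic_best_of_n(sample, reward, n, tau):
    """Cap rewards at tau so extra compute stops chasing outlier
    (possibly hacked) reward values; in this toy model, performance
    then cannot degrade as n grows."""
    responses = [sample() for _ in range(n)]
    capped = [min(reward(r), tau) for r in responses]
    return responses[int(np.argmax(capped))]
</pre>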
</p> </div> </dd> <dt> <a name='item244'>[244]</a> <a href ="/abs/2503.22617" title="Abstract" id="2503.22617"> arXiv:2503.22617 </a> (replaced) [<a href="/pdf/2503.22617" title="Download PDF" id="pdf-2503.22617" aria-labelledby="pdf-2503.22617">pdf</a>, <a href="https://arxiv.org/html/2503.22617v2" title="View HTML" id="html-2503.22617" aria-labelledby="html-2503.22617" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2503.22617" title="Other formats" id="oth-2503.22617" aria-labelledby="oth-2503.22617">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Using Machine Learning for Lunar Mineralogy-I: Hyperspectral Imaging of Volcanic Samples </div> <div class='list-authors'><a href="https://arxiv.org/search/astro-ph?searchtype=author&query=Hesar,+F+F">Fatemeh Fazel Hesar</a>, <a href="https://arxiv.org/search/astro-ph?searchtype=author&query=Raouf,+M">Mojtaba Raouf</a>, <a href="https://arxiv.org/search/astro-ph?searchtype=author&query=Soltani,+P">Peyman Soltani</a>, <a href="https://arxiv.org/search/astro-ph?searchtype=author&query=Foing,+B">Bernard Foing</a>, <a href="https://arxiv.org/search/astro-ph?searchtype=author&query=de+Dood,+M+J">Michiel J.A. de Dood</a>, <a href="https://arxiv.org/search/astro-ph?searchtype=author&query=Verbeek,+F+J">Fons J. Verbeek</a>, <a href="https://arxiv.org/search/astro-ph?searchtype=author&query=Cheng,+E">Esther Cheng</a>, <a href="https://arxiv.org/search/astro-ph?searchtype=author&query=Zhou,+C">Chenming Zhou</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 18 pages, 7 figures, Accepted to the Special Issue: Planetary Radar Astronomy - Universe: Planetary Sciences Journal </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Earth and Planetary Astrophysics (astro-ph.EP)</span>; Machine Learning (cs.LG) </div> <p class='mathjax'> This study examines the mineral composition of volcanic samples similar to lunar materials, focusing on olivine and pyroxene. Using hyperspectral imaging from 400 to 1000 nm, we created data cubes to analyze the reflectance characteristics of samples from Vulcano, a volcanically active island in the Aeolian Archipelago, north of Sicily, Italy, categorizing them into nine regions of interest and analyzing spectral data for each. We applied various unsupervised clustering algorithms, including K-Means, Hierarchical Clustering, GMM, and Spectral Clustering, to classify the spectral profiles. Principal Component Analysis revealed distinct spectral signatures associated with specific minerals, facilitating precise identification. Clustering performance varied by region, with K-Means achieving the highest silhouette score of 0.47, whereas GMM performed poorly with a score of only 0.25. Non-negative Matrix Factorization aided in identifying similarities among clusters across different methods and reference spectra for olivine and pyroxene. Hierarchical clustering emerged as the most reliable technique, achieving a 94\% similarity with the olivine spectrum in one sample, whereas GMM exhibited notable variability. Overall, the analysis indicated that both Hierarchical and K-Means methods yielded lower errors in total measurements, with K-Means demonstrating superior performance in estimated dispersion and clustering. Additionally, GMM showed a higher root mean square error compared to the other models.
The RMSE analysis confirmed K-Means as the most consistent algorithm across all samples, suggesting a predominance of olivine in the Vulcano region relative to pyroxene. This predominance is likely linked to historical formation conditions similar to volcanic processes on the Moon, where olivine-rich compositions are common in ancient lava flows and impact melt rocks. </p> </div> </dd> <dt> <a name='item245'>[245]</a> <a href ="/abs/2504.00509" title="Abstract" id="2504.00509"> arXiv:2504.00509 </a> (replaced) [<a href="/pdf/2504.00509" title="Download PDF" id="pdf-2504.00509" aria-labelledby="pdf-2504.00509">pdf</a>, <a href="https://arxiv.org/html/2504.00509v2" title="View HTML" id="html-2504.00509" aria-labelledby="html-2504.00509" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.00509" title="Other formats" id="oth-2504.00509" aria-labelledby="oth-2504.00509">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Recitation over Reasoning: How Cutting-Edge Language Models Can Fail on Elementary School-Level Reasoning Problems? </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Yan,+K">Kai Yan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xu,+Y">Yufei Xu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Du,+Z">Zhengyin Du</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yao,+X">Xuesong Yao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+Z">Zheyu Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Guo,+X">Xiaowen Guo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+J">Jiecao Chen</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 23 pages, 3 figures, 10 tables. V2 refines related work and acknowledgement, and adds links to chat logs for qualitative studies </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Artificial Intelligence (cs.AI)</span>; Computation and Language (cs.CL); Machine Learning (cs.LG) </div> <p class='mathjax'> The rapid escalation in the difficulty of LLM benchmarks in recent years, from elementary school-level to frontier problems, has created the impression that we are only inches away from surpassing human intelligence. However, does LLMs' remarkable reasoning ability indeed come from true intelligence by human standards, or are they simply reciting solutions seen during Internet-scale training? To study this problem, we propose RoR-Bench, a novel multi-modal benchmark for detecting LLMs' recitation behavior on simple reasoning problems whose conditions are subtly shifted, and conduct an empirical analysis on our benchmark. Surprisingly, we find that existing cutting-edge LLMs unanimously exhibit severe recitation behavior; by changing one phrase in the condition, top models such as OpenAI-o1 and DeepSeek-R1 can suffer a $60\%$ performance loss on elementary school-level arithmetic and reasoning problems. Such findings are a wake-up call for the LLM community, compelling us to re-evaluate the true intelligence level of cutting-edge LLMs.
</p> </div> </dd> <dt> <a name='item246'>[246]</a> <a href ="/abs/2504.03601" title="Abstract" id="2504.03601"> arXiv:2504.03601 </a> (replaced) [<a href="/pdf/2504.03601" title="Download PDF" id="pdf-2504.03601" aria-labelledby="pdf-2504.03601">pdf</a>, <a href="https://arxiv.org/html/2504.03601v2" title="View HTML" id="html-2504.03601" aria-labelledby="html-2504.03601" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.03601" title="Other formats" id="oth-2504.03601" aria-labelledby="oth-2504.03601">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> APIGen-MT: Agentic Pipeline for Multi-Turn Data Generation via Simulated Agent-Human Interplay </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Prabhakar,+A">Akshara Prabhakar</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+Z">Zuxin Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhu,+M">Ming Zhu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+J">Jianguo Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Awalgaonkar,+T">Tulika Awalgaonkar</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+S">Shiyu Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+Z">Zhiwei Liu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chen,+H">Haolin Chen</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hoang,+T">Thai Hoang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Niebles,+J+C">Juan Carlos Niebles</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Heinecke,+S">Shelby Heinecke</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yao,+W">Weiran Yao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+H">Huan Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Savarese,+S">Silvio Savarese</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xiong,+C">Caiming Xiong</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 12 pages plus references and appendices </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Training effective AI agents for multi-turn interactions requires high-quality data that captures realistic human-agent dynamics, yet such data is scarce and expensive to collect manually. We introduce APIGen-MT, a two-phase framework that generates verifiable and diverse multi-turn agent data. In the first phase, our agentic pipeline produces detailed task blueprints with ground-truth actions, leveraging a committee of LLM reviewers and iterative feedback loops. These blueprints are then transformed into complete interaction trajectories through simulated human-agent interplay. We train a family of models -- the xLAM-2-fc-r series with sizes ranging from 1B to 70B parameters. Our models outperform frontier models such as GPT-4o and Claude 3.5 on $\tau$-bench and BFCL benchmarks, with the smaller models surpassing their larger counterparts, particularly in multi-turn settings, while maintaining superior consistency across multiple trials. 
Comprehensive experiments demonstrate that our verified blueprint-to-details approach yields high-quality training data, enabling the development of more reliable, efficient, and capable agents. We open-source both the synthetic data collected and the trained xLAM-2-fc-r models to advance research in AI agents. Models are available on HuggingFace at <a href="https://huggingface.co/collections/Salesforce/xlam-2-67ef5be12949d8dcdae354c4" rel="external noopener nofollow" class="link-external link-https">this https URL</a> and project website is <a href="https://apigen-mt.github.io" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </p> </div> </dd> <dt> <a name='item247'>[247]</a> <a href ="/abs/2504.04582" title="Abstract" id="2504.04582"> arXiv:2504.04582 </a> (replaced) [<a href="/pdf/2504.04582" title="Download PDF" id="pdf-2504.04582" aria-labelledby="pdf-2504.04582">pdf</a>, <a href="https://arxiv.org/html/2504.04582v2" title="View HTML" id="html-2504.04582" aria-labelledby="html-2504.04582" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.04582" title="Other formats" id="oth-2504.04582" aria-labelledby="oth-2504.04582">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Your Image Generator Is Your New Private Dataset </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Resmini,+N">Nicolo Resmini</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lomurno,+E">Eugenio Lomurno</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sbrolli,+C">Cristian Sbrolli</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Matteucci,+M">Matteo Matteucci</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Artificial Intelligence (cs.AI); Machine Learning (cs.LG) </div> <p class='mathjax'> Generative diffusion models have emerged as powerful tools to synthetically produce training data, offering potential solutions to data scarcity and reducing labelling costs for downstream supervised deep learning applications. However, effectively leveraging text-conditioned image generation for building classifier training sets requires addressing key issues: constructing informative textual prompts, adapting generative models to specific domains, and ensuring robust performance. This paper proposes the Text-Conditioned Knowledge Recycling (TCKR) pipeline to tackle these challenges. TCKR combines dynamic image captioning, parameter-efficient diffusion model fine-tuning, and Generative Knowledge Distillation techniques to create synthetic datasets tailored for image classification. The pipeline is rigorously evaluated on ten diverse image classification benchmarks. The results demonstrate that models trained solely on TCKR-generated data achieve classification accuracies on par with (and in several cases exceeding) models trained on real images. Furthermore, the evaluation reveals that these synthetic-data-trained models exhibit substantially enhanced privacy characteristics: their vulnerability to Membership Inference Attacks is significantly reduced, with the membership inference AUC lowered by 5.49 points on average compared to using real training data, demonstrating a substantial improvement in the performance-privacy trade-off. 
These findings indicate that high-fidelity synthetic data can effectively replace real data for training classifiers, yielding strong performance whilst simultaneously providing improved privacy protection as a valuable emergent property. The code and trained models are available in the accompanying open-source repository. </p> </div> </dd> <dt> <a name='item248'>[248]</a> <a href ="/abs/2504.04749" title="Abstract" id="2504.04749"> arXiv:2504.04749 </a> (replaced) [<a href="/pdf/2504.04749" title="Download PDF" id="pdf-2504.04749" aria-labelledby="pdf-2504.04749">pdf</a>, <a href="/format/2504.04749" title="Other formats" id="oth-2504.04749" aria-labelledby="oth-2504.04749">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Vision Transformers with Autoencoders and Explainable AI for Cancer Patient Risk Stratification Using Whole Slide Imaging </div> <div class='list-authors'><a href="https://arxiv.org/search/eess?searchtype=author&query=Hussein,+A">Ahmad Hussein</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Prasad,+M">Mukesh Prasad</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Anaissi,+A">Ali Anaissi</a>, <a href="https://arxiv.org/search/eess?searchtype=author&query=Braytee,+A">Ali Braytee</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 11 pages </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Image and Video Processing (eess.IV)</span>; Computer Vision and Pattern Recognition (cs.CV); Machine Learning (cs.LG) </div> <p class='mathjax'> Cancer remains one of the leading causes of mortality worldwide, necessitating accurate diagnosis and prognosis. Whole Slide Imaging (WSI) has become an integral part of clinical workflows with advancements in digital pathology. While various studies have utilized WSIs, their extracted features may not fully capture the most relevant pathological information, and their lack of interpretability limits clinical adoption. <br>In this paper, we propose PATH-X, a framework that integrates Vision Transformers (ViT) and Autoencoders with SHAP (Shapley Additive Explanations) to enhance model explainability for patient stratification and risk prediction using WSIs from The Cancer Genome Atlas (TCGA). A representative image slice is selected from each WSI, and numerical feature embeddings are extracted using Google's pre-trained ViT. These features are then compressed via an autoencoder and used for unsupervised clustering and classification tasks. Kaplan-Meier survival analysis is applied to evaluate stratification into two and three risk groups. SHAP is used to identify key contributing features, which are mapped onto histopathological slices to provide spatial context. <br>PATH-X demonstrates strong performance in breast and glioma cancers, where a sufficient number of WSIs enabled robust stratification. However, performance in lung cancer was limited due to data availability, emphasizing the need for larger datasets to enhance model reliability and clinical applicability. 
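The stratification stage can be pictured with standard components; in the schematic below, PCA stands in for the paper's autoencoder, the embeddings are synthetic stand-ins for frozen ViT features, and the resulting groups would then be compared with Kaplan-Meier survival curves:
<pre>
import numpy as np
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
vit_feats = rng.normal(size=(120, 768))       # stand-in frozen ViT embeddings
z = PCA(n_components=32).fit_transform(vit_feats)            # compression step
risk_group = KMeans(n_clusters=2, n_init=10).fit_predict(z)  # two risk groups
</pre>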
[249] arXiv:2504.05004 (replaced) [pdf, html, other]
Title: Stacking Variational Bayesian Monte Carlo
Authors: Francesco Silvestrin, Chengkun Li, Luigi Acerbi
Comments: Accepted at the workshop track of the 7th Symposium on Advances in Approximate Bayesian Inference (AABI 2025). 24 pages, 9 figures
Subjects: Machine Learning (stat.ML); Machine Learning (cs.LG)

Variational Bayesian Monte Carlo (VBMC) is a sample-efficient method for approximate Bayesian inference with computationally expensive likelihoods. While VBMC's local surrogate approach provides stable approximations, its conservative exploration strategy and limited evaluation budget can cause it to miss regions of complex posteriors. In this work, we introduce Stacking Variational Bayesian Monte Carlo (S-VBMC), a method that constructs global posterior approximations by merging independent VBMC runs through a principled and inexpensive post-processing step. Our approach leverages VBMC's mixture posterior representation and per-component evidence estimates, requiring no additional likelihood evaluations while being naturally parallelizable. We demonstrate S-VBMC's effectiveness on two synthetic problems designed to challenge VBMC's exploration capabilities and two real-world applications from computational neuroscience, showing substantial improvements in posterior approximation quality across all cases.
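As a rough illustration of the stacking idea, the NumPy sketch below merges the mixture weights of several independent runs into one global mixture, reweighting each run's components by that run's exponentiated, normalized evidence estimate. This is a plausible simplification under stated assumptions, not the paper's method: S-VBMC's actual post-processing works with per-component evidence estimates, and its exact weighting rule may differ.

import numpy as np

def stack_runs(run_weights, run_log_evidences):
    """Merge per-run mixture weights into one global weight vector.

    run_weights: list of 1-D arrays, each summing to 1 (one per VBMC run).
    run_log_evidences: per-run log-evidence (ELBO) estimates.
    Returns a single concatenated weight vector summing to 1.
    """
    log_z = np.asarray(run_log_evidences, dtype=float)
    # Softmax over run-level log evidences (subtract the max for stability).
    run_share = np.exp(log_z - log_z.max())
    run_share /= run_share.sum()
    stacked = np.concatenate([s * w for s, w in zip(run_share, run_weights)])
    return stacked / stacked.sum()

# Example: two runs with 2 and 3 mixture components respectively.
w = stack_runs([np.array([0.6, 0.4]), np.array([0.5, 0.3, 0.2])], [-42.0, -43.5])
# The components' means and covariances would be concatenated in the same order,
# so no extra likelihood evaluations are needed, matching the abstract's claim.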
Total of 249 entries