CINXE.COM
Cryptography and Security
<!DOCTYPE html> <html lang="en"> <head> <title>Cryptography and Security </title> <meta name="viewport" content="width=device-width, initial-scale=1"> <link rel="apple-touch-icon" sizes="180x180" href="/static/browse/0.3.4/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="/static/browse/0.3.4/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="/static/browse/0.3.4/images/icons/favicon-16x16.png"> <link rel="manifest" href="/static/browse/0.3.4/images/icons/site.webmanifest"> <link rel="mask-icon" href="/static/browse/0.3.4/images/icons/safari-pinned-tab.svg" color="#5bbad5"> <meta name="msapplication-TileColor" content="#da532c"> <meta name="theme-color" content="#ffffff"> <link rel="stylesheet" type="text/css" media="screen" href="/static/browse/0.3.4/css/arXiv.css?v=20241206" /> <link rel="stylesheet" type="text/css" media="print" href="/static/browse/0.3.4/css/arXiv-print.css?v=20200611" /> <link rel="stylesheet" type="text/css" media="screen" href="/static/browse/0.3.4/css/browse_search.css" /> <script language="javascript" src="/static/browse/0.3.4/js/accordion.js" /></script> <script src="/static/browse/0.3.4/js/mathjaxToggle.min.js" type="text/javascript"></script> <script type="text/javascript" language="javascript">mathjaxToggle();</script> </head> <body class="with-cu-identity"> <div class="flex-wrap-footer"> <header> <a href="#content" class="is-sr-only">Skip to main content</a> <!-- start desktop header --> <div class="columns is-vcentered is-hidden-mobile" id="cu-identity"> <div class="column" id="cu-logo"> <a href="https://www.cornell.edu/"><img src="/static/browse/0.3.4/images/icons/cu/cornell-reduced-white-SMALL.svg" alt="Cornell University" /></a> </div><div class="column" id="support-ack"> <span id="support-ack-url">We gratefully acknowledge support from the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors.</span> <a href="https://info.arxiv.org/about/donate.html" class="btn-header-donate">Donate</a> </div> </div> <div id="header" class="is-hidden-mobile"> <a aria-hidden="true" tabindex="-1" href="/IgnoreMe"></a> <div class="header-breadcrumbs"> <a href="/"><img src="/static/browse/0.3.4/images/arxiv-logo-one-color-white.svg" alt="arxiv logo" style="height:40px;"/></a> <span>></span> <a href="/list/cs.CR/recent">cs.CR</a> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div><!-- /end desktop header --> <div class="mobile-header"> <div class="columns is-mobile"> <div class="column logo-arxiv"><a href="https://arxiv.org/"><img src="/static/browse/0.3.4/images/arxiv-logomark-small-white.svg" alt="arXiv logo" style="height:60px;" /></a></div> <div class="column logo-cornell"><a href="https://www.cornell.edu/"> <picture> <source media="(min-width: 501px)" srcset="/static/browse/0.3.4/images/icons/cu/cornell-reduced-white-SMALL.svg 400w" sizes="400w" /> <source srcset="/static/browse/0.3.4/images/icons/cu/cornell_seal_simple_black.svg 2x" /> <img src="/static/browse/0.3.4/images/icons/cu/cornell-reduced-white-SMALL.svg" alt="Cornell University Logo" /> </picture> </a></div> <div class="column nav" id="toggle-container" role="menubar"> <button class="toggle-control"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-white"><title>open search</title><path d="M505 442.7L405.3 343c-4.5-4.5-10.6-7-17-7H372c27.6-35.3 44-79.7 44-128C416 93.1 322.9 0 208 0S0 93.1 0 208s93.1 208 208 208c48.3 0 92.7-16.4 128-44v16.3c0 6.4 2.5 12.5 7 17l99.7 99.7c9.4 9.4 24.6 9.4 33.9 0l28.3-28.3c9.4-9.4 9.4-24.6.1-34zM208 336c-70.7 0-128-57.2-128-128 0-70.7 57.2-128 128-128 70.7 0 128 57.2 128 128 0 70.7-57.2 128-128 128z"/></svg></button> <div class="mobile-toggle-block toggle-target"> <form class="mobile-search-form" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <input class="input" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <input type="hidden" name="source" value="header"> <input type="hidden" name="searchtype" value="all"> <button class="button">GO</button> </div> </form> </div> <button class="toggle-control"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-white" role="menu"><title>open navigation menu</title><path d="M16 132h416c8.837 0 16-7.163 16-16V76c0-8.837-7.163-16-16-16H16C7.163 60 0 67.163 0 76v40c0 8.837 7.163 16 16 16zm0 160h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16zm0 160h416c8.837 0 16-7.163 16-16v-40c0-8.837-7.163-16-16-16H16c-8.837 0-16 7.163-16 16v40c0 8.837 7.163 16 16 16z"/ ></svg></button> <div class="mobile-toggle-block toggle-target"> <nav class="mobile-menu" aria-labelledby="mobilemenulabel"> <h2 id="mobilemenulabel">quick links</h2> <ul> <li><a href="https://arxiv.org/login">Login</a></li> <li><a href="https://info.arxiv.org/help">Help Pages</a></li> <li><a href="https://info.arxiv.org/about">About</a></li> </ul> </nav> </div> </div> </div> </div><!-- /end mobile-header --> </header> <main> <div id="content"> <div id='content-inner'> <div id='dlpage'> <h1>Cryptography and Security</h1> <ul> <li><a href="#item0">New submissions</a></li> <li><a href="#item10">Cross-lists</a></li> <li><a href="#item11">Replacements</a></li> </ul> <p>See <a id="recent-cs.CR" aria-labelledby="recent-cs.CR" href="/list/cs.CR/recent">recent</a> articles</p> <h3>Showing new listings for Monday, 7 April 2025</h3> <div class='paging'>Total of 20 entries </div> <div class='morefewer'>Showing up to 2000 entries per page: <a href=/list/cs.CR/new?skip=0&show=1000 rel="nofollow"> fewer</a> | <span style="color: #454545">more</span> | <span style="color: #454545">all</span> </div> <dl id='articles'> <h3>New submissions (showing 9 of 9 entries)</h3> <dt> <a name='item1'>[1]</a> <a href ="/abs/2504.02963" title="Abstract" id="2504.02963"> arXiv:2504.02963 </a> [<a href="/pdf/2504.02963" title="Download PDF" id="pdf-2504.02963" aria-labelledby="pdf-2504.02963">pdf</a>, <a href="https://arxiv.org/html/2504.02963v1" title="View HTML" id="html-2504.02963" aria-labelledby="html-2504.02963" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.02963" title="Other formats" id="oth-2504.02963" aria-labelledby="oth-2504.02963">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Digital Forensics in the Age of Large Language Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Yin,+Z">Zhipeng Yin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+Z">Zichong Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xu,+W">Weifeng Xu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhuang,+J">Jun Zhuang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Mozumder,+P">Pallab Mozumder</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Smith,+A">Antoinette Smith</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+W">Wenbin Zhang</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Digital forensics plays a pivotal role in modern investigative processes, utilizing specialized methods to systematically collect, analyze, and interpret digital evidence for judicial proceedings. However, traditional digital forensic techniques are primarily based on manual labor-intensive processes, which become increasingly insufficient with the rapid growth and complexity of digital data. To this end, Large Language Models (LLMs) have emerged as powerful tools capable of automating and enhancing various digital forensic tasks, significantly transforming the field. Despite the strides made, general practitioners and forensic experts often lack a comprehensive understanding of the capabilities, principles, and limitations of LLM, which limits the full potential of LLM in forensic applications. To fill this gap, this paper aims to provide an accessible and systematic overview of how LLM has revolutionized the digital forensics approach. Specifically, it takes a look at the basic concepts of digital forensics, as well as the evolution of LLM, and emphasizes the superior capabilities of LLM. To connect theory and practice, relevant examples and real-world scenarios are discussed. We also critically analyze the current limitations of applying LLMs to digital forensics, including issues related to illusion, interpretability, bias, and ethical considerations. In addition, this paper outlines the prospects for future research, highlighting the need for effective use of LLMs for transparency, accountability, and robust standardization in the forensic process. </p> </div> </dd> <dt> <a name='item2'>[2]</a> <a href ="/abs/2504.02979" title="Abstract" id="2504.02979"> arXiv:2504.02979 </a> [<a href="/pdf/2504.02979" title="Download PDF" id="pdf-2504.02979" aria-labelledby="pdf-2504.02979">pdf</a>, <a href="https://arxiv.org/html/2504.02979v1" title="View HTML" id="html-2504.02979" aria-labelledby="html-2504.02979" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.02979" title="Other formats" id="oth-2504.02979" aria-labelledby="oth-2504.02979">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Multi-Screaming-Channel Attacks: Frequency Diversity for Enhanced Attacks </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Guillaume,+J">Jeremy Guillaume</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Pelcat,+M">Maxime Pelcat</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Nafkha,+A">Amor Nafkha</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Salvador,+R">Rub茅n Salvador</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Submitted to IEEE Transactions on Information Forensics and Security (TIFS), 11 pages, 8 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span> </div> <p class='mathjax'> Side-channel attacks consist of retrieving internal data from a victim system by analyzing its leakage, which usually requires proximity to the victim in the range of a few millimetres. Screaming channels are EM side channels transmitted at a distance of a few meters. They appear on mixed-signal devices integrating an RF module on the same silicon die as the digital part. Consequently, the side channels are modulated by legitimate RF signal carriers and appear at the harmonics of the digital clock frequency. While initial works have only considered collecting leakage at these harmonics, late work has demonstrated that the leakage is also present at frequencies other than these harmonics. This result significantly increases the number of available frequencies to perform a screaming-channel attack, which can be convenient in an environment where multiple harmonics are polluted. This work studies how this diversity of frequencies carrying leakage can be used to improve attack performance. We first study how to combine multiple frequencies. Second, we demonstrate that frequency combination can improve attack performance and evaluate this improvement according to the performance of the combined frequencies. Finally, we demonstrate the interest of frequency combination in attacks at 15 and, for the first time to the best of our knowledge, at 30 meters. One last important observation is that this frequency combination divides by 2 the number of traces needed to reach a given attack performance. </p> </div> </dd> <dt> <a name='item3'>[3]</a> <a href ="/abs/2504.03002" title="Abstract" id="2504.03002"> arXiv:2504.03002 </a> [<a href="/pdf/2504.03002" title="Download PDF" id="pdf-2504.03002" aria-labelledby="pdf-2504.03002">pdf</a>, <a href="https://arxiv.org/html/2504.03002v1" title="View HTML" id="html-2504.03002" aria-labelledby="html-2504.03002" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.03002" title="Other formats" id="oth-2504.03002" aria-labelledby="oth-2504.03002">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Improving Efficiency in Federated Learning with Optimized Homomorphic Encryption </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Yang,+F">Feiran Yang</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 18 pages, 1 figure </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span> </div> <p class='mathjax'> Federated learning is a method used in machine learning to allow multiple devices to work together on a model without sharing their private data. Each participant keeps their private data on their system and trains a local model and only sends updates to a central server, which combines these updates to improve the overall model. A key enabler of privacy in FL is homomorphic encryption (HE). HE allows computations to be performed directly on encrypted data. While HE offers strong privacy guarantees, it is computationally intensive, leading to significant latency and scalability issues, particularly for large-scale models like BERT. In my research, I aimed to address this inefficiency problem. My research introduces a novel algorithm to address these inefficiencies while maintaining robust privacy guarantees. I integrated several mathematical techniques such as selective parameter encryption, sensitivity maps, and differential privacy noise within my algorithms, which has already improved its efficiency. I have also conducted rigorous mathematical proofs to validate the correctness and robustness of the approach. I implemented this algorithm by coding it in C++, simulating the environment of federated learning on large-scale models, and verified that the efficiency of my algorithm is $3$ times the efficiency of the state-of-the-art method. This research has significant implications for machine learning because its ability to improve efficiency while balancing privacy makes it a practical solution! It would enable federated learning to be used very efficiently and deployed in various resource-constrained environments, as this research provides a novel solution to one of the key challenges in federated learning: the inefficiency of homomorphic encryption, as my new algorithm is able to enhance the scalability and resource efficiency of FL while maintaining robust privacy guarantees. </p> </div> </dd> <dt> <a name='item4'>[4]</a> <a href ="/abs/2504.03077" title="Abstract" id="2504.03077"> arXiv:2504.03077 </a> [<a href="/pdf/2504.03077" title="Download PDF" id="pdf-2504.03077" aria-labelledby="pdf-2504.03077">pdf</a>, <a href="https://arxiv.org/html/2504.03077v1" title="View HTML" id="html-2504.03077" aria-labelledby="html-2504.03077" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.03077" title="Other formats" id="oth-2504.03077" aria-labelledby="oth-2504.03077">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Integrating Identity-Based Identification against Adaptive Adversaries in Federated Learning </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Szelag,+J+K">Jakub Kacper Szelag</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Chin,+J">Ji-Jian Chin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ansell,+L">Lauren Ansell</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yip,+S">Sook-Chin Yip</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 10 pages, 5 figures, research article, IEEE possible publication (in submission) </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Federated Learning (FL) has recently emerged as a promising paradigm for privacy-preserving, distributed machine learning. However, FL systems face significant security threats, particularly from adaptive adversaries capable of modifying their attack strategies to evade detection. One such threat is the presence of Reconnecting Malicious Clients (RMCs), which exploit FLs open connectivity by reconnecting to the system with modified attack strategies. To address this vulnerability, we propose integration of Identity-Based Identification (IBI) as a security measure within FL environments. By leveraging IBI, we enable FL systems to authenticate clients based on cryptographic identity schemes, effectively preventing previously disconnected malicious clients from re-entering the system. Our approach is implemented using the TNC-IBI (Tan-Ng-Chin) scheme over elliptic curves to ensure computational efficiency, particularly in resource-constrained environments like Internet of Things (IoT). Experimental results demonstrate that integrating IBI with secure aggregation algorithms, such as Krum and Trimmed Mean, significantly improves FL robustness by mitigating the impact of RMCs. We further discuss the broader implications of IBI in FL security, highlighting research directions for adaptive adversary detection, reputation-based mechanisms, and the applicability of identity-based cryptographic frameworks in decentralized FL architectures. Our findings advocate for a holistic approach to FL security, emphasizing the necessity of proactive defence strategies against evolving adaptive adversarial threats. </p> </div> </dd> <dt> <a name='item5'>[5]</a> <a href ="/abs/2504.03111" title="Abstract" id="2504.03111"> arXiv:2504.03111 </a> [<a href="/pdf/2504.03111" title="Download PDF" id="pdf-2504.03111" aria-labelledby="pdf-2504.03111">pdf</a>, <a href="/format/2504.03111" title="Other formats" id="oth-2504.03111" aria-labelledby="oth-2504.03111">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Les Dissonances: Cross-Tool Harvesting and Polluting in Multi-Tool Empowered LLM Agents </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+Z">Zichuan Li</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Cui,+J">Jian Cui</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liao,+X">Xiaojing Liao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xing,+L">Luyi Xing</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span> </div> <p class='mathjax'> Large Language Model (LLM) agents are autonomous systems powered by LLMs, capable of reasoning and planning to solve problems by leveraging a set of tools. However, the integration of multi-tool capabilities in LLM agents introduces challenges in securely managing tools, ensuring their compatibility, handling dependency relationships, and protecting control flows within LLM agent workflows. In this paper, we present the first systematic security analysis of task control flows in multi-tool-enabled LLM agents. We identify a novel threat, Cross-Tool Harvesting and Polluting (XTHP), which includes multiple attack vectors to first hijack the normal control flows of agent tasks, and then collect and pollute confidential or private information within LLM agent systems. To understand the impact of this threat, we developed Chord, a dynamic scanning tool designed to automatically detect real-world agent tools susceptible to XTHP attacks. Our evaluation of 73 real-world tools from the repositories of two major LLM agent development frameworks, LangChain and LlamaIndex, revealed a significant security concern: 80% of the tools are vulnerable to hijacking attacks, 78% to XTH attacks, and 41% to XTP attacks, highlighting the prevalence of this threat. </p> </div> </dd> <dt> <a name='item6'>[6]</a> <a href ="/abs/2504.03173" title="Abstract" id="2504.03173"> arXiv:2504.03173 </a> [<a href="/pdf/2504.03173" title="Download PDF" id="pdf-2504.03173" aria-labelledby="pdf-2504.03173">pdf</a>, <a href="https://arxiv.org/html/2504.03173v1" title="View HTML" id="html-2504.03173" aria-labelledby="html-2504.03173" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.03173" title="Other formats" id="oth-2504.03173" aria-labelledby="oth-2504.03173">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> PPFPL: Cross-silo Privacy-preserving Federated Prototype Learning Against Data Poisoning Attacks on Non-IID Data </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+H">Hongliang Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yu,+J">Jiguo Yu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Xu,+F">Fenghua Xu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Hu,+C">Chunqiang Hu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+Y">Yongzhao Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wang,+X">Xiaofen Wang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yu,+Z">Zhongyuan Yu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+X">Xiaosong Zhang</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span>; Distributed, Parallel, and Cluster Computing (cs.DC) </div> <p class='mathjax'> Privacy-Preserving Federated Learning (PPFL) allows multiple clients to collaboratively train a deep learning model by submitting hidden model updates. Nonetheless, PPFL is vulnerable to data poisoning attacks due to the distributed training nature of clients. Existing solutions have struggled to improve the performance of cross-silo PPFL in poisoned Non-IID data. To address the issues, this paper proposes a privacy-preserving federated prototype learning framework, named PPFPL, which enhances the cross-silo FL performance in poisoned Non-IID data while effectively resisting data poisoning attacks. Specifically, we adopt prototypes as client-submitted model updates to eliminate the impact of tampered data distribution on federated learning. Moreover, we utilize two servers to achieve Byzantine-robust aggregation by secure aggregation protocol, which greatly reduces the impact of malicious clients. Theoretical analyses confirm the convergence of PPFPL, and experimental results on publicly available datasets show that PPFPL is effective for resisting data poisoning attacks with Non-IID conditions. </p> </div> </dd> <dt> <a name='item7'>[7]</a> <a href ="/abs/2504.03238" title="Abstract" id="2504.03238"> arXiv:2504.03238 </a> [<a href="/pdf/2504.03238" title="Download PDF" id="pdf-2504.03238" aria-labelledby="pdf-2504.03238">pdf</a>, <a href="https://arxiv.org/html/2504.03238v1" title="View HTML" id="html-2504.03238" aria-labelledby="html-2504.03238" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.03238" title="Other formats" id="oth-2504.03238" aria-labelledby="oth-2504.03238">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Malware Detection in Docker Containers: An Image is Worth a Thousand Logs </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Nousias,+A">Akis Nousias</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Katsaros,+E">Efklidis Katsaros</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Syrmos,+E">Evangelos Syrmos</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Radoglou-Grammatikis,+P">Panagiotis Radoglou-Grammatikis</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lagkas,+T">Thomas Lagkas</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Argyriou,+V">Vasileios Argyriou</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Moscholios,+I">Ioannis Moscholios</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Markakis,+E">Evangelos Markakis</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Goudos,+S">Sotirios Goudos</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sarigiannidis,+P">Panagiotis Sarigiannidis</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> Accepted at ICC-W </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span>; Artificial Intelligence (cs.AI); Computer Vision and Pattern Recognition (cs.CV) </div> <p class='mathjax'> Malware detection is increasingly challenged by evolving techniques like obfuscation and polymorphism, limiting the effectiveness of traditional methods. Meanwhile, the widespread adoption of software containers has introduced new security challenges, including the growing threat of malicious software injection, where a container, once compromised, can serve as entry point for further cyberattacks. In this work, we address these security issues by introducing a method to identify compromised containers through machine learning analysis of their file systems. We cast the entire software containers into large RGB images via their tarball representations, and propose to use established Convolutional Neural Network architectures on a streaming, patch-based manner. To support our experiments, we release the COSOCO dataset--the first of its kind--containing 3364 large-scale RGB images of benign and compromised software containers at <a href="https://huggingface.co/datasets/k3ylabs/cosoco-image-dataset" rel="external noopener nofollow" class="link-external link-https">this https URL</a>. Our method detects more malware and achieves higher F1 and Recall scores than all individual and ensembles of VirusTotal engines, demonstrating its effectiveness and setting a new standard for identifying malware-compromised software containers. </p> </div> </dd> <dt> <a name='item8'>[8]</a> <a href ="/abs/2504.03347" title="Abstract" id="2504.03347"> arXiv:2504.03347 </a> [<a href="/pdf/2504.03347" title="Download PDF" id="pdf-2504.03347" aria-labelledby="pdf-2504.03347">pdf</a>, <a href="/format/2504.03347" title="Other formats" id="oth-2504.03347" aria-labelledby="oth-2504.03347">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Optimizing Password Cracking for Digital Investigations </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Hachem,+M">Mohamad Hachem</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Lanfranchi,+A">Adam Lanfranchi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Clarke,+N">Nathan Clarke</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kavrestad,+J">Joakim Kavrestad</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span> </div> <p class='mathjax'> Efficient password cracking is a critical aspect of digital forensics, enabling investigators to decrypt protected content during criminal investigations. Traditional password cracking methods, including brute-force, dictionary and rule-based attacks face challenges in balancing efficiency with increasing computational complexity. This study explores rule based optimisation strategies to enhance the effectiveness of password cracking while minimising resource consumption. By analysing publicly available password datasets, we propose an optimised rule set that reduces computational iterations by approximately 40%, significantly improving the speed of password recovery. Additionally, the impact of national password recommendations were examined, specifically, the UK National Cyber Security Centre's three word password guideline on password security and forensic recovery. Through user generated password surveys, we evaluate the crackability of three word passwords using dictionaries of varying common word proportions. Results indicate that while three word passwords provide improved memorability and usability, they remain vulnerable when common word combinations are used, with up to 77.5% of passwords cracked using a 30% common word dictionary subset. The study underscores the importance of dynamic password cracking strategies that account for evolving user behaviours and policy driven password structures. Findings contribution to both forensic efficiency and cyber security awareness, highlight the dual impact of password policies on security and investigative capabilities. Future work will focus upon refining rule based cracking techniques and expanding research on password composition trends. </p> </div> </dd> <dt> <a name='item9'>[9]</a> <a href ="/abs/2504.03363" title="Abstract" id="2504.03363"> arXiv:2504.03363 </a> [<a href="/pdf/2504.03363" title="Download PDF" id="pdf-2504.03363" aria-labelledby="pdf-2504.03363">pdf</a>, <a href="/format/2504.03363" title="Other formats" id="oth-2504.03363" aria-labelledby="oth-2504.03363">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> SoK: Attacks on Modern Card Payments </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Hofmeier,+X">Xenia Hofmeier</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Basin,+D">David Basin</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Sasse,+R">Ralf Sasse</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Toro-Pozo,+J">Jorge Toro-Pozo</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span> </div> <p class='mathjax'> EMV is the global standard for smart card payments and its NFC-based version, EMV contactless, is widely used, also for mobile payments. In this systematization of knowledge, we examine attacks on the EMV contactless protocol. We provide a comprehensive framework encompassing its desired security properties and adversary models. We also identify and categorize a comprehensive collection of protocol flaws and show how different subsets thereof can be combined into attacks. In addition to this systematization, we examine the underlying reasons for the many attacks against EMV and point to a better way forward. </p> </div> </dd> </dl> <dl id='articles'> <h3>Cross submissions (showing 1 of 1 entries)</h3> <dt> <a name='item10'>[10]</a> <a href ="/abs/2504.03307" title="Abstract" id="2504.03307"> arXiv:2504.03307 </a> (cross-list from math.AC) [<a href="/pdf/2504.03307" title="Download PDF" id="pdf-2504.03307" aria-labelledby="pdf-2504.03307">pdf</a>, <a href="https://arxiv.org/html/2504.03307v1" title="View HTML" id="html-2504.03307" aria-labelledby="html-2504.03307" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2504.03307" title="Other formats" id="oth-2504.03307" aria-labelledby="oth-2504.03307">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> On the algebraic degree stability of vectorial Boolean functions when restricted to affine subspaces </div> <div class='list-authors'><a href="https://arxiv.org/search/math?searchtype=author&query=Carlet,+C">Claude Carlet</a>, <a href="https://arxiv.org/search/math?searchtype=author&query=Feukoua,+S">Serge Feukoua</a>, <a href="https://arxiv.org/search/math?searchtype=author&query=Salagean,+A">Ana Salagean</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 25 pages </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Commutative Algebra (math.AC)</span>; Cryptography and Security (cs.CR) </div> <p class='mathjax'> We study the behaviour of the algebraic degree of vectorial Boolean functions when their inputs are restricted to an affine subspace of their domain. Functions which maintain their degree on all subspaces of as high a codimension as possible are particularly interesting for cryptographic applications. <br>For functions which are power functions $x^d$ in their univariate representation, we fully characterize the exponents $d$ for which the algebraic degree of the function stays unchanged when the input is restricted to spaces of codimension 1 or 2. For codimensions $k\ge 3$, we give a sufficient condition for the algebraic degree to stay unchanged. We apply these results to the multiplicative inverse function, as well as to the Kasami functions. We define an optimality notion regarding the stability of the degree on subspaces, and determine a number of optimal functions, including the multiplicative inverse function and the quadratic APN functions. <br>We also give an explicit formula for counting the functions that keep their algebraic degree unchanged when restricted to hyperplanes. </p> </div> </dd> </dl> <dl id='articles'> <h3>Replacement submissions (showing 10 of 10 entries)</h3> <dt> <a name='item11'>[11]</a> <a href ="/abs/2203.14358" title="Abstract" id="2203.14358"> arXiv:2203.14358 </a> (replaced) [<a href="/pdf/2203.14358" title="Download PDF" id="pdf-2203.14358" aria-labelledby="pdf-2203.14358">pdf</a>, <a href="/format/2203.14358" title="Other formats" id="oth-2203.14358" aria-labelledby="oth-2203.14358">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Memristive Based Design of a Core Digital Circuit for Elliptic Curve Cryptography </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Alammari,+K">Khalid Alammari</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ahmadi,+M">Majid Ahmadi</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Ahmadi,+A">Arash Ahmadi</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> We are in the process of revising our paper while getting required licenses for some of the figures used in our manuscript </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span>; Emerging Technologies (cs.ET) </div> <p class='mathjax'> The new emerging non-volatile memory (NVM) devices known as memristors could be the promising candidate for future digital architecture, owing to their nanoscale size and its ability to integrate with the exciting CMOS technology. In this paper, a combination of memristor devices and CMOS transistors are working together to form a hybrid CMOS-memristor circuit for XAX- Module, a core element for the finite field multiplier. The proposed design was implemented using Pt /TaOx/Ta memristor device and simulated in Cadence Virtuoso. The simulation results demonstrate the design functionality. The proposed module appears to be efficient in terms of layout area, delay and power consumption since the design utilizes the hybrid CMOS/memristor gates. </p> </div> </dd> <dt> <a name='item12'>[12]</a> <a href ="/abs/2406.13221" title="Abstract" id="2406.13221"> arXiv:2406.13221 </a> (replaced) [<a href="/pdf/2406.13221" title="Download PDF" id="pdf-2406.13221" aria-labelledby="pdf-2406.13221">pdf</a>, <a href="https://arxiv.org/html/2406.13221v4" title="View HTML" id="html-2406.13221" aria-labelledby="html-2406.13221" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2406.13221" title="Other formats" id="oth-2406.13221" aria-labelledby="oth-2406.13221">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Privacy-Preserving Logistic Regression Training on Large Datasets </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Chiang,+J">John Chiang</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span> </div> <p class='mathjax'> Privacy-preserving machine learning is one class of cryptographic methods that aim to analyze private and sensitive data while keeping privacy, such as homomorphic logistic regression training over large encrypted data. In this paper, we propose an efficient algorithm for logistic regression training on large encrypted data using Homomorphic Encryption (HE), which is the mini-batch version of recent methods using a faster gradient variant called $\texttt{quadratic gradient}$. It is claimed that $\texttt{quadratic gradient}$ can integrate curve information (Hessian matrix) into the gradient and therefore can effectively accelerate the first-order gradient (descent) algorithms. We also implement the full-batch version of their method when the encrypted dataset is so large that it has to be encrypted in the mini-batch manner. We compare our mini-batch algorithm with our full-batch implementation method on real financial data consisting of 422,108 samples with 200 freatures. %Our experiments show that Nesterov's accelerated gradient (NAG) Given the inefficiency of HEs, our results are inspiring and demonstrate that the logistic regression training on large encrypted dataset is of practical feasibility, marking a significant milestone in our understanding. </p> </div> </dd> <dt> <a name='item13'>[13]</a> <a href ="/abs/2409.09794" title="Abstract" id="2409.09794"> arXiv:2409.09794 </a> (replaced) [<a href="/pdf/2409.09794" title="Download PDF" id="pdf-2409.09794" aria-labelledby="pdf-2409.09794">pdf</a>, <a href="https://arxiv.org/html/2409.09794v2" title="View HTML" id="html-2409.09794" aria-labelledby="html-2409.09794" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2409.09794" title="Other formats" id="oth-2409.09794" aria-labelledby="oth-2409.09794">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Federated Learning in Adversarial Environments: Testbed Design and Poisoning Resilience in Cybersecurity </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Huang,+H+J">Hao Jian Huang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Otal,+H+T">Hakan T. Otal</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Canbaz,+M+A">M. Abdullah Canbaz</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 6 pages, 4 figures </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span>; Distributed, Parallel, and Cluster Computing (cs.DC); Machine Learning (cs.LG) </div> <p class='mathjax'> This paper presents the design and implementation of a Federated Learning (FL) testbed, focusing on its application in cybersecurity and evaluating its resilience against poisoning attacks. Federated Learning allows multiple clients to collaboratively train a global model while keeping their data decentralized, addressing critical needs for data privacy and security, particularly in sensitive fields like cybersecurity. Our testbed, built using Raspberry Pi and Nvidia Jetson hardware by running the Flower framework, facilitates experimentation with various FL frameworks, assessing their performance, scalability, and ease of integration. Through a case study on federated intrusion detection systems, the testbed's capabilities are shown in detecting anomalies and securing critical infrastructure without exposing sensitive network data. Comprehensive poisoning tests, targeting both model and data integrity, evaluate the system's robustness under adversarial conditions. The results show that while federated learning enhances data privacy and distributed learning, it remains vulnerable to poisoning attacks, which must be mitigated to ensure its reliability in real-world applications. </p> </div> </dd> <dt> <a name='item14'>[14]</a> <a href ="/abs/2409.13723" title="Abstract" id="2409.13723"> arXiv:2409.13723 </a> (replaced) [<a href="/pdf/2409.13723" title="Download PDF" id="pdf-2409.13723" aria-labelledby="pdf-2409.13723">pdf</a>, <a href="https://arxiv.org/html/2409.13723v3" title="View HTML" id="html-2409.13723" aria-labelledby="html-2409.13723" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2409.13723" title="Other formats" id="oth-2409.13723" aria-labelledby="oth-2409.13723">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Explainable Artificial Intelligence (XAI) for Malware Analysis: A Survey of Techniques, Applications, and Open Challenges </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Manthena,+H">Harikha Manthena</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shajarian,+S">Shaghayegh Shajarian</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kimmell,+J">Jeffrey Kimmell</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Abdelsalam,+M">Mahmoud Abdelsalam</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Khorsandroo,+S">Sajad Khorsandroo</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Gupta,+M">Maanak Gupta</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Machine learning (ML) has rapidly advanced in recent years, revolutionizing fields such as finance, medicine, and cybersecurity. In malware detection, ML-based approaches have demonstrated high accuracy; however, their lack of transparency poses a significant challenge. Traditional black-box models often fail to provide interpretable justifications for their predictions, limiting their adoption in security-critical environments where understanding the reasoning behind a detection is essential for threat mitigation and response. Explainable AI (XAI) addresses this gap by enhancing model interpretability while maintaining strong detection capabilities. This survey presents a comprehensive review of state-of-the-art ML techniques for malware analysis, with a specific focus on explainability methods. We examine existing XAI frameworks, their application in malware classification and detection, and the challenges associated with making malware detection models more interpretable. Additionally, we explore recent advancements and highlight open research challenges in the field of explainable malware analysis. By providing a structured overview of XAI-driven malware detection approaches, this survey serves as a valuable resource for researchers and practitioners seeking to bridge the gap between ML performance and explainability in cybersecurity. </p> </div> </dd> <dt> <a name='item15'>[15]</a> <a href ="/abs/2409.14729" title="Abstract" id="2409.14729"> arXiv:2409.14729 </a> (replaced) [<a href="/pdf/2409.14729" title="Download PDF" id="pdf-2409.14729" aria-labelledby="pdf-2409.14729">pdf</a>, <a href="/format/2409.14729" title="Other formats" id="oth-2409.14729" aria-labelledby="oth-2409.14729">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> PROMPTFUZZ: Harnessing Fuzzing Techniques for Robust Testing of Prompt Injection in LLMs </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Yu,+J">Jiahao Yu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shao,+Y">Yangguang Shao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Miao,+H">Hanwen Miao</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Shi,+J">Junzheng Shi</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span>; Artificial Intelligence (cs.AI) </div> <p class='mathjax'> Large Language Models (LLMs) have gained widespread use in various applications due to their powerful capability to generate human-like text. However, prompt injection attacks, which involve overwriting a model's original instructions with malicious prompts to manipulate the generated text, have raised significant concerns about the security and reliability of LLMs. Ensuring that LLMs are robust against such attacks is crucial for their deployment in real-world applications, particularly in critical tasks. <br>In this paper, we propose PROMPTFUZZ, a novel testing framework that leverages fuzzing techniques to systematically assess the robustness of LLMs against prompt injection attacks. Inspired by software fuzzing, PROMPTFUZZ selects promising seed prompts and generates a diverse set of prompt injections to evaluate the target LLM's resilience. PROMPTFUZZ operates in two stages: the prepare phase, which involves selecting promising initial seeds and collecting few-shot examples, and the focus phase, which uses the collected examples to generate diverse, high-quality prompt injections. Using PROMPTFUZZ, we can uncover more vulnerabilities in LLMs, even those with strong defense prompts. <br>By deploying the generated attack prompts from PROMPTFUZZ in a real-world competition, we achieved the 7th ranking out of over 4000 participants (top 0.14%) within 2 hours. Additionally, we construct a dataset to fine-tune LLMs for enhanced robustness against prompt injection attacks. While the fine-tuned model shows improved robustness, PROMPTFUZZ continues to identify vulnerabilities, highlighting the importance of robust testing for LLMs. Our work emphasizes the critical need for effective testing tools and provides a practical framework for evaluating and improving the robustness of LLMs against prompt injection attacks. </p> </div> </dd> <dt> <a name='item16'>[16]</a> <a href ="/abs/2410.02099" title="Abstract" id="2410.02099"> arXiv:2410.02099 </a> (replaced) [<a href="/pdf/2410.02099" title="Download PDF" id="pdf-2410.02099" aria-labelledby="pdf-2410.02099">pdf</a>, <a href="/format/2410.02099" title="Other formats" id="oth-2410.02099" aria-labelledby="oth-2410.02099">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> A Watermark for Black-Box Language Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Bahri,+D">Dara Bahri</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Wieting,+J">John Wieting</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Cryptography and Security (cs.CR)</span>; Computation and Language (cs.CL); Machine Learning (cs.LG) </div> <p class='mathjax'> Watermarking has recently emerged as an effective strategy for detecting the outputs of large language models (LLMs). Most existing schemes require white-box access to the model's next-token probability distribution, which is typically not accessible to downstream users of an LLM API. In this work, we propose a principled watermarking scheme that requires only the ability to sample sequences from the LLM (i.e. black-box access), boasts a distortion-free property, and can be chained or nested using multiple secret keys. We provide performance guarantees, demonstrate how it can be leveraged when white-box access is available, and show when it can outperform existing white-box schemes via comprehensive experiments. </p> </div> </dd> <dt> <a name='item17'>[17]</a> <a href ="/abs/2311.02757" title="Abstract" id="2311.02757"> arXiv:2311.02757 </a> (replaced) [<a href="/pdf/2311.02757" title="Download PDF" id="pdf-2311.02757" aria-labelledby="pdf-2311.02757">pdf</a>, <a href="https://arxiv.org/html/2311.02757v2" title="View HTML" id="html-2311.02757" aria-labelledby="html-2311.02757" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2311.02757" title="Other formats" id="oth-2311.02757" aria-labelledby="oth-2311.02757">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Certified Defense on the Fairness of Graph Neural Networks </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Dong,+Y">Yushun Dong</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Zhang,+B">Binchi Zhang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Tong,+H">Hanghang Tong</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Li,+J">Jundong Li</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Machine Learning (cs.LG)</span>; Cryptography and Security (cs.CR); Machine Learning (stat.ML) </div> <p class='mathjax'> Graph Neural Networks (GNNs) have emerged as a prominent graph learning model in various graph-based tasks over the years. Nevertheless, due to the vulnerabilities of GNNs, it has been empirically proved that malicious attackers could easily corrupt the fairness level of their predictions by adding perturbations to the input graph data. In this paper, we take crucial steps to study a novel problem of certifiable defense on the fairness level of GNNs. Specifically, we propose a principled framework named ELEGANT and present a detailed theoretical certification analysis for the fairness of GNNs. ELEGANT takes any GNNs as its backbone, and the fairness level of such a backbone is theoretically impossible to be corrupted under certain perturbation budgets for attackers. Notably, ELEGANT does not have any assumption over the GNN structure or parameters, and does not require re-training the GNNs to realize certification. Hence it can serve as a plug-and-play framework for any optimized GNNs ready to be deployed. We verify the satisfactory effectiveness of ELEGANT in practice through extensive experiments on real-world datasets across different backbones of GNNs, where ELEGANT is also demonstrated to be beneficial for GNN debiasing. Open-source code can be found at <a href="https://github.com/yushundong/ELEGANT" rel="external noopener nofollow" class="link-external link-https">this https URL</a>. </p> </div> </dd> <dt> <a name='item18'>[18]</a> <a href ="/abs/2411.04730" title="Abstract" id="2411.04730"> arXiv:2411.04730 </a> (replaced) [<a href="/pdf/2411.04730" title="Download PDF" id="pdf-2411.04730" aria-labelledby="pdf-2411.04730">pdf</a>, <a href="/format/2411.04730" title="Other formats" id="oth-2411.04730" aria-labelledby="oth-2411.04730">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> Cloning Games, Black Holes and Cryptography </div> <div class='list-authors'><a href="https://arxiv.org/search/quant-ph?searchtype=author&query=Poremba,+A">Alexander Poremba</a>, <a href="https://arxiv.org/search/quant-ph?searchtype=author&query=Ragavan,+S">Seyoon Ragavan</a>, <a href="https://arxiv.org/search/quant-ph?searchtype=author&query=Vaikuntanathan,+V">Vinod Vaikuntanathan</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Quantum Physics (quant-ph)</span>; Cryptography and Security (cs.CR); High Energy Physics - Theory (hep-th) </div> <p class='mathjax'> Quantum no-cloning is one of the most fundamental properties of quantum information. In this work, we introduce a new toolkit for analyzing cloning games; these games capture more quantitative versions of no-cloning and are central to unclonable cryptography. Previous works rely on the framework laid out by Tomamichel, Fehr, Kaniewski and Wehner to analyze both the $n$-qubit BB84 game and the subspace coset game. Their constructions and analysis face the following inherent limitations: <br>- The existing bounds on the values of these games are at least $2^{-0.25n}$; on the other hand, the trivial adversarial strategy wins with probability $2^{-n}$. Not only that, the BB84 game does in fact admit a highly nontrivial winning strategy. This raises the natural question: are there cloning games which admit no non-trivial winning strategies? <br>- The existing constructions are not multi-copy secure; the BB84 game is not even $2 \mapsto 3$ secure, and the subspace coset game is not $t \mapsto t+1$ secure for a polynomially large $t$. Moreover, we provide evidence that the existing technical tools do not suffice to prove multi-copy security of even completely different constructions. This raises the natural question: can we design new cloning games that achieve multi-copy security, possibly by developing a new analytic toolkit? <br>We study a new cloning game based on binary phase states and show that it is $t$-copy secure when $t=o(n/\log n)$. Moreover, for constant $t$, we obtain the first asymptotically optimal bounds of $O(2^{-n})$. We also show a worst-case to average-case reduction for a large class of cloning games, which allows us to show the same quantitative results for Haar cloning games. These technical ingredients together enable two new applications which have previously been out of reach; one in black hole physics, and one in unclonable cryptography. </p> </div> </dd> <dt> <a name='item19'>[19]</a> <a href ="/abs/2501.03544" title="Abstract" id="2501.03544"> arXiv:2501.03544 </a> (replaced) [<a href="/pdf/2501.03544" title="Download PDF" id="pdf-2501.03544" aria-labelledby="pdf-2501.03544">pdf</a>, <a href="https://arxiv.org/html/2501.03544v2" title="View HTML" id="html-2501.03544" aria-labelledby="html-2501.03544" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2501.03544" title="Other formats" id="oth-2501.03544" aria-labelledby="oth-2501.03544">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> PromptGuard: Soft Prompt-Guided Unsafe Content Moderation for Text-to-Image Models </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Yuan,+L">Lingzhi Yuan</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Jia,+X">Xiaojun Jia</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Huang,+Y">Yihao Huang</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Dong,+W">Wei Dong</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Liu,+Y">Yang Liu</a></div> <div class='list-comments mathjax'><span class='descriptor'>Comments:</span> 16 pages, 8 figures, 10 tables </div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computer Vision and Pattern Recognition (cs.CV)</span>; Artificial Intelligence (cs.AI); Cryptography and Security (cs.CR) </div> <p class='mathjax'> Text-to-image (T2I) models have been shown to be vulnerable to misuse, particularly in generating not-safe-for-work (NSFW) content, raising serious ethical concerns. In this work, we present PromptGuard, a novel content moderation technique that draws inspiration from the system prompt mechanism in large language models (LLMs) for safety alignment. Unlike LLMs, T2I models lack a direct interface for enforcing behavioral guidelines. Our key idea is to optimize a safety soft prompt that functions as an implicit system prompt within the T2I model's textual embedding space. This universal soft prompt (P*) directly moderates NSFW inputs, enabling safe yet realistic image generation without altering the inference efficiency or requiring proxy models. Extensive experiments across three datasets demonstrate that PromptGuard effectively mitigates NSFW content generation while preserving high-quality benign outputs. PromptGuard achieves 7.8 times faster than prior content moderation methods, surpassing eight state-of-the-art defenses with an optimal unsafe ratio down to 5.84%. </p> </div> </dd> <dt> <a name='item20'>[20]</a> <a href ="/abs/2503.20279" title="Abstract" id="2503.20279"> arXiv:2503.20279 </a> (replaced) [<a href="/pdf/2503.20279" title="Download PDF" id="pdf-2503.20279" aria-labelledby="pdf-2503.20279">pdf</a>, <a href="https://arxiv.org/html/2503.20279v2" title="View HTML" id="html-2503.20279" aria-labelledby="html-2503.20279" rel="noopener noreferrer" target="_blank">html</a>, <a href="/format/2503.20279" title="Other formats" id="oth-2503.20279" aria-labelledby="oth-2503.20279">other</a>] </dt> <dd> <div class='meta'> <div class='list-title mathjax'><span class='descriptor'>Title:</span> sudo rm -rf agentic_security </div> <div class='list-authors'><a href="https://arxiv.org/search/cs?searchtype=author&query=Lee,+S">Sejin Lee</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Kim,+J">Jian Kim</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Park,+H">Haon Park</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yousefpour,+A">Ashkan Yousefpour</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Yu,+S">Sangyoon Yu</a>, <a href="https://arxiv.org/search/cs?searchtype=author&query=Song,+M">Min Song</a></div> <div class='list-subjects'><span class='descriptor'>Subjects:</span> <span class="primary-subject">Computation and Language (cs.CL)</span>; Artificial Intelligence (cs.AI); Cryptography and Security (cs.CR) </div> <p class='mathjax'> Large Language Models (LLMs) are increasingly deployed as computer-use agents, autonomously performing tasks within real desktop or web environments. While this evolution greatly expands practical use cases for humans, it also creates serious security exposures. We present SUDO (Screen-based Universal Detox2Tox Offense), a novel attack framework that systematically bypasses refusal trained safeguards in commercial computer-use agents, such as Claude Computer Use. The core mechanism, Detox2Tox, transforms harmful requests (that agents initially reject) into seemingly benign requests via detoxification, secures detailed instructions from advanced vision language models (VLMs), and then reintroduces malicious content via toxification just before execution. Unlike conventional jailbreaks, SUDO iteratively refines its attacks based on a built-in refusal feedback, making it increasingly effective against robust policy filters. In extensive tests spanning 50 real-world tasks and multiple state-of-the-art VLMs, SUDO achieves a stark attack success rate of 24% (with no refinement), and up to 41% (by its iterative refinement) in Claude Computer Use. By revealing these vulnerabilities and demonstrating the ease with which they can be exploited in real-world computing environments, this paper highlights an immediate need for robust, context-aware safeguards. WARNING: This paper includes harmful or offensive model outputs Our code is available at: <a href="https://github.com/AIM-Intelligence/SUDO.git" rel="external noopener nofollow" class="link-external link-https">this https URL</a> </p> </div> </dd> </dl> <div class='paging'>Total of 20 entries </div> <div class='morefewer'>Showing up to 2000 entries per page: <a href=/list/cs.CR/new?skip=0&show=1000 rel="nofollow"> fewer</a> | <span style="color: #454545">more</span> | <span style="color: #454545">all</span> </div> </div> </div> </div> </main> <footer style="clear: both;"> <div class="columns is-desktop" role="navigation" aria-label="Secondary" style="margin: -0.75em -0.75em 0.75em -0.75em"> <!-- Macro-Column 1 --> <div class="column" style="padding: 0;"> <div class="columns"> <div class="column"> <ul style="list-style: none; line-height: 2;"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul style="list-style: none; line-height: 2;"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- End Macro-Column 1 --> <!-- Macro-Column 2 --> <div class="column" style="padding: 0;"> <div class="columns"> <div class="column"> <ul style="list-style: none; line-height: 2;"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul style="list-style: none; line-height: 2;"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> <!-- End Macro-Column 2 --> </div> </footer> </div> <script src="/static/base/1.0.1/js/member_acknowledgement.js"></script> </body> </html>