CINXE.COM

Elastic Advances LLM Security with Standardized Fields and Integrations — Elastic Security Labs

<!DOCTYPE html><html lang="en"><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><title>Elastic Advances LLM Security with Standardized Fields and Integrations — Elastic Security Labs</title><meta name="description" content="Discover Elastic’s latest advancements in LLM security, focusing on standardized field integrations and enhanced detection capabilities. Learn how adopting these standards can safeguard your systems."/><meta property="og:title" content="Elastic Advances LLM Security with Standardized Fields and Integrations — Elastic Security Labs"/><meta property="og:description" content="Discover Elastic’s latest advancements in LLM security, focusing on standardized field integrations and enhanced detection capabilities. Learn how adopting these standards can safeguard your systems."/><meta property="og:image" content="https://www.elastic.co/security-labs/assets/images/elastic-advances-llm-security/Security Labs Images 4.jpg?302b96e28f6ebcce1bc02f66a81af30d"/><meta property="og:image:alt" content="Discover Elastic’s latest advancements in LLM security, focusing on standardized field integrations and enhanced detection capabilities. Learn how adopting these standards can safeguard your systems."/><meta property="og:site_name"/><meta property="og:url" content="https://www.elastic.co/security-labs/elastic-advances-llm-security"/><meta property="og:type" content="website"/><meta name="twitter:card" content="summary_large_image"/><meta name="twitter:title" content="Elastic Advances LLM Security with Standardized Fields and Integrations — Elastic Security Labs"/><meta name="twitter:description" content="Discover Elastic’s latest advancements in LLM security, focusing on standardized field integrations and enhanced detection capabilities. Learn how adopting these standards can safeguard your systems."/><meta name="twitter:image" content="https://www.elastic.co/security-labs/assets/images/elastic-advances-llm-security/Security Labs Images 4.jpg?302b96e28f6ebcce1bc02f66a81af30d"/><meta name="twitter:image:alt" content="Discover Elastic’s latest advancements in LLM security, focusing on standardized field integrations and enhanced detection capabilities. Learn how adopting these standards can safeguard your systems."/><link rel="canonical" href="https://www.elastic.co/security-labs/elastic-advances-llm-security"/><link rel="preload" href="/security-labs/logo.svg" as="image" fetchpriority="high"/><link rel="preload" as="image" imageSrcSet="/security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=640&amp;q=75 640w, /security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=750&amp;q=75 750w, /security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=828&amp;q=75 828w, /security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=1080&amp;q=75 1080w, /security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=1200&amp;q=75 1200w, /security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=1920&amp;q=75 1920w, /security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=2048&amp;q=75 2048w, /security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=3840&amp;q=75 3840w" imageSizes="100vw" fetchpriority="high"/><meta name="next-head-count" content="19"/><script src="https://play.vidyard.com/embed/v4.js" type="text/javascript" async=""></script><link rel="icon" href="/security-labs/favicon.svg"/><link rel="mask-icon" href="/security-labs/favicon.svg" color="#1C1E23"/><link rel="apple-touch-icon" href="/security-labs/favicon.svg"/><meta name="theme-color" content="#1C1E23"/><link rel="preload" href="/security-labs/_next/static/media/6d93bde91c0c2823-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><link rel="preload" href="/security-labs/_next/static/media/a34f9d1faa5f3315-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><link rel="preload" href="/security-labs/_next/static/media/369c6e283c5acc6e-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><link rel="preload" href="/security-labs/_next/static/media/92f44bb82993d879-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><link rel="preload" href="/security-labs/_next/static/media/ee71530a747ff30b-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><link rel="preload" href="/security-labs/_next/static/media/9fac010bc1f02be0-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><link rel="preload" href="/security-labs/_next/static/media/cbf5fbad4d73afac-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><script id="google-tag-manager" data-nscript="beforeInteractive"> (function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start': new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0], j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src= 'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f); })(window,document,'script','dataLayer','GTM-KNJMG2M'); </script><link rel="preload" href="/security-labs/_next/static/css/265ed7605fd03477.css" as="style"/><link rel="stylesheet" href="/security-labs/_next/static/css/265ed7605fd03477.css" data-n-g=""/><link rel="preload" href="/security-labs/_next/static/css/1007ff9e696f6f88.css" as="style"/><link rel="stylesheet" href="/security-labs/_next/static/css/1007ff9e696f6f88.css" data-n-p=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/security-labs/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js"></script><script src="/security-labs/_next/static/chunks/webpack-7987c6fda769d510.js" defer=""></script><script src="/security-labs/_next/static/chunks/framework-7a7e500878b44665.js" defer=""></script><script src="/security-labs/_next/static/chunks/main-ebd33a9f1cae5951.js" defer=""></script><script src="/security-labs/_next/static/chunks/pages/_app-cb8664d1d3df2511.js" defer=""></script><script src="/security-labs/_next/static/chunks/fec483df-43ee602fabdfe3a4.js" defer=""></script><script src="/security-labs/_next/static/chunks/877-34f408271ef44c22.js" defer=""></script><script src="/security-labs/_next/static/chunks/511-d08fe0fdd6f8a984.js" defer=""></script><script src="/security-labs/_next/static/chunks/683-a5053c37fe5bd0c9.js" defer=""></script><script src="/security-labs/_next/static/chunks/402-4378a3e3b84d79cd.js" defer=""></script><script src="/security-labs/_next/static/chunks/616-0b017b9cfa597392.js" defer=""></script><script src="/security-labs/_next/static/chunks/pages/%5Bslug%5D-b0c191de1a3710e4.js" defer=""></script><script src="/security-labs/_next/static/wTIynxBm98ujmQxLsgK6X/_buildManifest.js" defer=""></script><script src="/security-labs/_next/static/wTIynxBm98ujmQxLsgK6X/_ssgManifest.js" defer=""></script></head><body><noscript><iframe src="https://www.googletagmanager.com/ns.html?id=GTM-KNJMG2M" height="0" width="0" style="display:none;visibility:hidden"></iframe></noscript><div id="__next"><main class="__variable_0351a5 __variable_1f211e __variable_a5b5f5 flex flex-col min-h-screen"><div class="scroll-percentage-container"><div class="scroll-percentage-bar" style="width:0%"></div></div><nav class="fixed w-full z-40" data-headlessui-state=""><div class="bg-gradient-to-b from-zinc-900 from-20% h-[200%] to-transparent absolute inset-0 z-0 pointer-events-none"></div><div class="container relative z-10"><div class="flex h-16 items-center justify-between"><div class="flex items-center justify-start w-full"><div><a class="hover:opacity-50 transition" href="/security-labs"><img alt="elastic security labs logo" fetchpriority="high" width="200" height="30" decoding="async" data-nimg="1" style="color:transparent" src="/security-labs/logo.svg"/></a></div><div class="hidden lg:ml-6 lg:block"><div class="flex space-x-4"><a class="flex lg:inline-flex font-light my-1 py-1 px-2 font-display font-semibold lg:text-sm xl:text-base items-center transition hover:hover-link hover:text-white focus:accessible-link-focus" href="/security-labs/about"><span>About</span></a><div class="relative" data-headlessui-state=""><div><button class="flex lg:inline-flex font-light my-1 py-1 px-2 font-display font-semibold lg:text-sm xl:text-base items-center transition hover:hover-link hover:text-white focus:accessible-link-focus" id="headlessui-menu-button-:R2kpm:" type="button" aria-haspopup="menu" aria-expanded="false" data-headlessui-state="">Topics<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" fill="currentColor" aria-hidden="true" class="ml-1 -mr-1 h-4 w-4 text-zinc-400 relative top-[1px]"><path fill-rule="evenodd" d="M5.23 7.21a.75.75 0 011.06.02L10 11.168l3.71-3.938a.75.75 0 111.08 1.04l-4.25 4.5a.75.75 0 01-1.08 0l-4.25-4.5a.75.75 0 01.02-1.06z" clip-rule="evenodd"></path></svg></button></div></div><a class="flex lg:inline-flex font-light my-1 py-1 px-2 font-display font-semibold lg:text-sm xl:text-base items-center transition hover:hover-link hover:text-white focus:accessible-link-focus" href="/security-labs/category/vulnerability-updates"><span>Vulnerability updates</span></a><a class="flex lg:inline-flex font-light my-1 py-1 px-2 font-display font-semibold lg:text-sm xl:text-base items-center transition hover:hover-link hover:text-white focus:accessible-link-focus" href="/security-labs/category/reports"><span>Reports</span></a><a class="flex lg:inline-flex font-light my-1 py-1 px-2 font-display font-semibold lg:text-sm xl:text-base items-center transition hover:hover-link hover:text-white focus:accessible-link-focus" href="/security-labs/category/tools"><span>Tools</span></a></div></div><div class="hidden lg:ml-auto lg:block"><div class="flex items-center space-x-4"><a class="rounded flex items-center p-4 text-white focus:outline-none focus:ring-0 focus:ring-offset-1 focus:ring-offset-zinc-600 group" href="https://search.elastic.co/?location%5B0%5D=Security%20Labs&amp;referrer=https://www.elastic.co/security-labs/elastic-advances-llm-security"><div class="flex items-center relative font-display"><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" aria-hidden="true" class="h-6 w-6"><path stroke-linecap="round" stroke-linejoin="round" d="M21 21l-5.197-5.197m0 0A7.5 7.5 0 105.196 5.196a7.5 7.5 0 0010.607 10.607z"></path></svg></div></a><a class="flex lg:inline-flex font-light my-1 py-1 px-2 font-display font-semibold lg:text-sm xl:text-base items-center transition hover:hover-link hover:text-white focus:accessible-link-focus" href="https://www.elastic.co/security-labs/rss/feed.xml"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" fill="currentColor" aria-hidden="true" class="h-4 w-4 mr-1"><path d="M3.75 3a.75.75 0 00-.75.75v.5c0 .414.336.75.75.75H4c6.075 0 11 4.925 11 11v.25c0 .414.336.75.75.75h.5a.75.75 0 00.75-.75V16C17 8.82 11.18 3 4 3h-.25z"></path><path d="M3 8.75A.75.75 0 013.75 8H4a8 8 0 018 8v.25a.75.75 0 01-.75.75h-.5a.75.75 0 01-.75-.75V16a6 6 0 00-6-6h-.25A.75.75 0 013 9.25v-.5zM7 15a2 2 0 11-4 0 2 2 0 014 0z"></path></svg><span class="hidden xl:block">Subscribe</span></a><a class="font-display inline-flex items-center justify-center rounded font-semibold disabled:!select-none disabled:!bg-gray-400 bg-blue-600 text-white hover:bg-blue-500 enabled:hover:text-white/80 transition-colors px-4 py-2 text-sm flex-1 lg:flex-auto" href="https://cloud.elastic.co/registration?cta=cloud-registration&amp;tech=trial&amp;plcmt=navigation&amp;pg=security-labs">Start free trial</a><a class="font-display inline-flex items-center justify-center rounded font-semibold text-white disabled:!select-none disabled:!bg-gray-400 button px-4 py-2 text-sm flex-1 lg:flex-auto" href="https://www.elastic.co/contact">Contact sales</a></div></div></div><div class="-mr-2 flex lg:hidden"><a class="rounded flex items-center p-4 text-white focus:outline-none focus:ring-0 focus:ring-offset-1 focus:ring-offset-zinc-600 group" href="https://search.elastic.co/?location%5B0%5D=Security%20Labs&amp;referrer=https://www.elastic.co/security-labs/elastic-advances-llm-security"><div class="flex items-center relative font-display"><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" aria-hidden="true" class="h-6 w-6"><path stroke-linecap="round" stroke-linejoin="round" d="M21 21l-5.197-5.197m0 0A7.5 7.5 0 105.196 5.196a7.5 7.5 0 0010.607 10.607z"></path></svg></div></a><button class="inline-flex items-center justify-center rounded-md p-2 text-gray-400 hover:bg-gray-700 hover:text-white focus:outline-none focus:ring-2 focus:ring-inset focus:ring-white" id="headlessui-disclosure-button-:R59m:" type="button" aria-expanded="false" data-headlessui-state=""><span class="sr-only">Open navigation menu</span><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" aria-hidden="true" class="block h-6 w-6"><path stroke-linecap="round" stroke-linejoin="round" d="M3.75 6.75h16.5M3.75 12h16.5m-16.5 5.25h16.5"></path></svg></button></div></div></div></nav><main class="mb-20 flex-1 flex flex-col"><div class="h-48 md:h-64"><div class="after:absolute after:block after:bg-blue-400 after:blur-3xl after:content-[&#x27; &#x27;] after:h-96 after:opacity-5 after:right-0 after:rounded-full after:top-20 after:w-1/2 after:z-0 before:absolute before:block before:blur-3xl before:bg-orange-400 before:content-[&#x27; &#x27;] before:h-96 before:left-0 before:opacity-5 before:rounded-full before:w-1/2 before:z-0 w-full h-full relative"><div class="relative z-10 w-full h-[125%] -top-[25%] bg-no-repeat bg-cover bg-bottom flex items-center justify-center" style="background-image:url(/security-labs/grid.svg)"></div></div></div><article class="px-4"><div class="max-w-7xl mx-auto relative z-10 flex flex-col space-y-4"><div class="eyebrow break-words"><time class="block mb-2 md:mb-0 md:inline-block article-published-date" dateTime="2024-05-06T00:00:00.000Z">6 May 2024</time><span class="hidden md:inline-block md:mx-2">•</span><a class="hover:text-blue-400 text-xs md:text-sm whitespace-nowrap author-name" href="/security-labs/author/mika-ayenson">Mika Ayenson, PhD</a><span class="mx-2">•</span><a class="hover:text-blue-400 text-xs md:text-sm whitespace-nowrap author-name" href="/security-labs/author/dan-kortschak">Dan Kortschak, PhD</a><span class="mx-2">•</span><a class="hover:text-blue-400 text-xs md:text-sm whitespace-nowrap author-name" href="/security-labs/author/jake-king">Jake King</a><span class="mx-2">•</span><a class="hover:text-blue-400 text-xs md:text-sm whitespace-nowrap author-name" href="/security-labs/author/susan-chang">Susan Chang</a><span class="mx-2">•</span><a class="hover:text-blue-400 text-xs md:text-sm whitespace-nowrap author-name" href="/security-labs/author/andrew-kroh">Andrew Kroh</a></div><h1 class="font-bold leading-tighter text-3xl md:text-5xl"><span>Elastic Advances LLM Security with Standardized Fields and&nbsp;Integrations</span></h1><p class="text-zinc-200 text-base md:text-xl">Explore How Elastic&#x27;s New LLM Security Strategies Enhance Detection, Standardization, and Protection Across the LLM Ecosystem</p><div class="flex items-center mt-4 text-zinc-200 text-sm space-x-4 border-t border-white/25 pt-4"><span class="flex items-center space-x-1"><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" aria-hidden="true" class="h-4 w-4 text-zinc-400"><path stroke-linecap="round" stroke-linejoin="round" d="M12 6v6h4.5m4.5 0a9 9 0 11-18 0 9 9 0 0118 0z"></path></svg><span>21 min read</span></span><span class="flex items-center space-x-1"><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" aria-hidden="true" class="h-4 w-4 text-zinc-400"><path stroke-linecap="round" stroke-linejoin="round" d="M9.568 3H5.25A2.25 2.25 0 003 5.25v4.318c0 .597.237 1.17.659 1.591l9.581 9.581c.699.699 1.78.872 2.607.33a18.095 18.095 0 005.223-5.223c.542-.827.369-1.908-.33-2.607L11.16 3.66A2.25 2.25 0 009.568 3z"></path><path stroke-linecap="round" stroke-linejoin="round" d="M6 6h.008v.008H6V6z"></path></svg><span><a class="hover:text-blue-400 whitespace-nowrap" href="/security-labs/category/detection-science">Detection science</a>, </span><span><a class="hover:text-blue-400 whitespace-nowrap" href="/security-labs/category/machine-learning">Machine learning</a>, </span><span><a class="hover:text-blue-400 whitespace-nowrap" href="/security-labs/category/generative-ai">Generative AI</a></span></span></div></div><div class="max-w-7xl mx-auto"><div class="bg-zinc-900 border border-zinc-800 drop-shadow-lg p-5 sm:p-8 md:p-10 rounded-3xl mt-5 md:mt-10"><div class="relative w-full rounded-lg overflow-hidden aspect-video"><img alt="Elastic Advances LLM Security with Standardized Fields and Integrations" fetchpriority="high" decoding="async" data-nimg="fill" class="object-cover absolute h-full w-full" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="/security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=640&amp;q=75 640w, /security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=750&amp;q=75 750w, /security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=828&amp;q=75 828w, /security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=1080&amp;q=75 1080w, /security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=1200&amp;q=75 1200w, /security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=1920&amp;q=75 1920w, /security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=2048&amp;q=75 2048w, /security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=3840&amp;q=75 3840w" src="/security-labs/_next/image?url=%2Fsecurity-labs%2Fassets%2Fimages%2Felastic-advances-llm-security%2FSecurity%20Labs%20Images%204.jpg&amp;w=3840&amp;q=75"/><div class="absolute border border-white/50 inset-0 mix-blend-overlay rounded-lg z-10"></div></div></div></div><div class="lg:max-w-7xl mx-auto relative mt-12 lg:grid lg:grid-cols-4 lg:gap-8 items-start"><div class="flex justify-center lg:col-span-3"><div class="prose lg:prose-lg prose-invert w-full article-content"><div><h2 class="font-bold text-2xl md:text-4xl relative"><span id="introduction" class="absolute -top-32"></span>Introduction</h2> <p>Last week, security researcher Mika Ayenson <a href="https://www.elastic.co/security-labs/embedding-security-in-llm-workflows">authored a publication</a> highlighting potential detection strategies and an LLM content auditing prototype solution via a proxy implemented during Elastic’s OnWeek event series. This post highlighted the importance of research pertaining to the safety of LLM technology implemented in different environments, and the research focus we’ve taken at Elastic Security Labs.</p> <p>Given Elastic&#x27;s unique vantage point leveraging LLM technology in our platform to power capabilities such as the Security <a href="https://www.elastic.co/guide/en/security/current/security-assistant.html">AI Assistant</a>, our desire for more formal detection rules, integrations, and research content has been growing. This publication highlights some of the recent advancements we’ve made in LLM integrations, our thoughts around detections aligned with industry standards, and ECS field mappings.</p> <p>We are committed to a comprehensive security strategy that protects not just the direct user-based LLM interactions but also the broader ecosystem surrounding them. This approach involves layers of security detection engineering opportunities to address not only the LLM requests/responses but also the underlying systems and integrations used by the models.</p> <p>These detection opportunities collectively help to secure the LLM ecosystem and can be broadly grouped into five categories:</p> <ol> <li><strong>Prompt and Response</strong>: Detection mechanisms designed to identify and mitigate threats based on the growing variety of LLM interactions to ensure that all communications are securely audited.</li> <li><strong>Infrastructure and Platform</strong>: Implementing detections to protect the infrastructure hosting LLMs (including wearable AI Pin devices), including detecting threats against the data stored, processing activities, and server communication.</li> <li><strong>API and Integrations</strong>: Detecting threats when interacting with LLM APIs and protecting integrations with other applications that ingest model output.</li> <li><strong>Operational Processes and Data</strong>: Monitoring operational processes (including in AI agents) and data flows while protecting data throughout its lifecycle.</li> <li><strong>Compliance and Ethical</strong>: Aligning detection strategies with well-adopted industry regulations and ethical standards.</li> </ol> <p> Securing the LLM Ecosystem: five categories</p> <p>Another important consideration for these categories expands into who can best address risks or who is responsible for each category of risk pertaining to LLM systems.</p> <p>Similar to existing <a href="https://www.cisecurity.org/insights/blog/shared-responsibility-cloud-security-what-you-need-to-know">Shared Security Responsibility</a> models, Elastic has assessed four broad categories, which will eventually be expanded upon further as we continue our research into detection engineering strategies and integrations. Broadly, this publication considers security protections that involve the following responsibility owners:</p> <ul> <li><strong>LLM Creators</strong>: Organizations who are building, designing, hosting, and training LLMs, such as OpenAI, Amazon Web Services, or Google</li> <li><strong>LLM Integrators</strong>: Organizations and individuals who integrate existing LLM technologies produced by LLM Creators into other applications</li> <li><strong>LLM Maintainers</strong>: Individuals who monitor operational LLMs for performance, reliability, security, and integrity use-cases and remain directly involved in the maintenance of the codebase, infrastructure, and software architecture</li> <li><strong>Security Users</strong>: People who are actively looking for vulnerabilities in systems through traditional testing mechanisms and means. This may expand beyond the traditional risks discussed in <a href="https://llmtop10.com/">OWASP’s LLM Top 10</a> into risks associated with software and infrastructure surrounding these systems</li> </ul> <p>This broader perspective showcases a unified approach to LLM detection engineering that begins with ingesting data using native Elastic <a href="https://www.elastic.co/integrations">integrations</a>; in this example, we highlight the AWS Bedrock Model Invocation use case.</p> <h2 class="font-bold text-2xl md:text-4xl relative"><span id="integrating-llm-logs-into-elastic" class="absolute -top-32"></span>Integrating LLM logs into Elastic</h2> <p>Elastic integrations simplify data ingestion into Elastic from various sources, ultimately enhancing our security solution. These integrations are managed through Fleet in Kibana, allowing users to easily deploy and manage data within the Elastic Agent. Users can quickly adapt Elastic to new data sources by selecting and configuring integrations through Fleet. For more details, see Elastic’s <a href="https://www.elastic.co/blog/elastic-agent-and-fleet-make-it-easier-to-integrate-your-systems-with-elastic">blog</a> on making it easier to integrate your systems with Elastic.</p> <p>The initial ONWeek work undertaken by the team involved a simple proxy solution that extracted fields from interactions with the Elastic Security AI Assistant. This prototype was deployed alongside the Elastic Stack and consumed data from a vendor solution that lacked security auditing capabilities. While this initial implementation proved conceptually interesting, it prompted the team to invest time in assessing existing Elastic integrations from one of our cloud provider partners, <a href="https://docs.elastic.co/integrations/aws">Amazon Web Services</a>. This methodology guarantees streamlined accessibility for our users, offering seamless, one-click integrations for data ingestion. All ingest pipelines conform to ECS/OTel normalization standards, encompassing comprehensive content, including dashboards, within a unified package. Furthermore, this strategy positions us to leverage additional existing integrations, such as Azure and GCP, for future LLM-focused integrations.</p> <h3 class="font-bold leading-tight text-xl md:text-3xl relative"><span id="vendor-selection-and-api-capabilities" class="absolute -top-32"></span>Vendor selection and API capabilities</h3> <p>When selecting which LLM providers to create integrations for, we looked at the types of fields we need to ingest for our security use cases. For the starting set of rules detailed here, we needed information such as timestamps and token counts; we found that vendors such as Azure OpenAI provided content moderation filtering on the prompts and generated content. LangSmith (part of the LangChain tooling) was also a top contender, as the data contains the type of vendor used (e.g., OpenAI, Bedrock, etc.) and all the respective metadata. However, this required that the user also have LangSmith set up. For this implementation, we decided to go with first-party supported logs from a vendor that provides LLMs.</p> <p>As we went deeper into potential integrations, we decided to land with AWS Bedrock, for a few specific reasons. Firstly, Bedrock logging has <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/model-invocation-logging.html">first-party support</a> to Amazon CloudWatch Logs and Amazon S3. Secondly, the logging is built specifically for model invocation, including data specific to LLMs (as opposed to other operations and machine learning models), including prompts and responses, and guardrail/content filtering. Thirdly, Elastic already has a <a href="https://www.elastic.co/integrations/data-integrations?solution=all-solutions&amp;category=aws">robust catalog</a> of integrations with AWS, so we were able to quickly create a new integration for AWS Bedrock model invocation logs specifically. The next section will dive into this new integration, which you can use to capture your Bedrock model invocation logs in the Elastic stack.</p> <h3 class="font-bold leading-tight text-xl md:text-3xl relative"><span id="elastic-aws-bedrock-model-integration" class="absolute -top-32"></span>Elastic AWS Bedrock model integration</h3> <h4 class="font-bold leading-tight text-lg md:text-2xl relative"><span id="overview" class="absolute -top-32"></span>Overview</h4> <p>The new Elastic <a href="https://docs.elastic.co/integrations/aws_bedrock">AWS Bedrock</a> integration for model invocation logs provides a way to collect and analyze data from AWS services quickly, specifically focusing on the model. This integration provides two primary methods for log collection: Amazon S3 buckets and Amazon CloudWatch. Each method is optimized to offer robust data retrieval capabilities while considering cost-effectiveness and performance efficiency. We use these LLM-specific fields collected for detection engineering purposes.</p> <p>Note: While this integration does not cover every proposed field, it does standardize existing AWS Bedrock fields into the gen_ai category. This approach makes it easier to maintain detection rules across various data sources, minimizing the need for separate rules for each LLM vendor.</p> <h3 class="font-bold leading-tight text-xl md:text-3xl relative"><span id="configuring-integration-data-collection-method" class="absolute -top-32"></span>Configuring integration data collection method</h3> <h4 class="font-bold leading-tight text-lg md:text-2xl relative"><span id="collecting-logs-from-s3-buckets" class="absolute -top-32"></span>Collecting logs from S3 buckets</h4> <p>This integration allows for efficient log collection from S3 buckets using two distinct methods:</p> <ul> <li><strong>SQS Notification</strong>: This is the preferred method for collecting. It involves reading S3 notification events from an AWS Simple Queue Service (SQS) queue. This method is less costly and provides better performance compared to direct polling.</li> <li><strong>Direct S3 Bucket Polling</strong>: This method directly polls a list of S3 objects within an S3 bucket and is recommended only when SQS notifications cannot be configured. This approach is more resource-intensive, but it provides an alternative when SQS is not feasible.</li> </ul> <h4 class="font-bold leading-tight text-lg md:text-2xl relative"><span id="collecting-logs-from-cloudwatch" class="absolute -top-32"></span>Collecting logs from CloudWatch</h4> <p>Logs can also be collected directly from CloudWatch, where the integration taps into all log streams within a specified log group using the filterLogEvents AWS API. This method is an alternative to using S3 buckets altogether.</p> <h4 class="font-bold leading-tight text-lg md:text-2xl relative"><span id="integration-installation" class="absolute -top-32"></span>Integration installation</h4> <p>The integration can be set up within the Elastic Agent by following normal Elastic <a href="https://www.elastic.co/guide/en/fleet/current/add-integration-to-policy.html">installation steps</a>.</p> <ol> <li>Navigate to the AWS Bedrock integration</li> <li>Configure the <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">queue_url</code> for SQS or <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">bucket_arn</code> for direct S3 polling.</li> </ol> <p></p> <h3 class="font-bold leading-tight text-xl md:text-3xl relative"><span id="configuring-bedrock-guardrails" class="absolute -top-32"></span>Configuring Bedrock Guardrails</h3> <p>AWS Bedrock <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html">Guardrails</a> enable organizations to enforce security by setting policies that limit harmful or undesirable content in LLM interactions. These guardrails can be customized to include denied topics to block specific subjects and content filters to moderate the severity of content in prompts and responses. Additionally, word and sensitive information filters block profanity and mask personally identifiable information (PII), ensuring interactions comply with privacy and ethical standards. This feature helps control the content generated and consumed by LLMs and, ideally, reduces the risk associated with malicious prompts.</p> <p>Note: other guardrail examples include Azure OpenAI’s <a href="https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/content-filter?tabs=warning%2Cpython-new">content and response</a> filters, which we aim to capture in our proposed LLM standardized fields for vendor-agnostic logging.</p> <p></p> <p>When LLM interaction content triggers these filters, the response objects are populated with <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">amazon-bedrock-trace</code> and <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">amazon-bedrock-guardrailAction</code> fields, providing details about the Guardrails outcome, and nested fields indicating whether the input matched the content filter. This response object enrichment with detailed filter outcomes improves the overall data quality, which becomes particularly effective when these nested fields are aligned with ECS mappings.</p> <h3 class="font-bold leading-tight text-xl md:text-3xl relative"><span id="the-importance-of-ecs-mappings" class="absolute -top-32"></span>The importance of ECS mappings</h3> <p>Field mapping is a critical part of the process for integration development, primarily to improve our ability to write broadly scoped and widely compatible detection rules. By standardizing how data is ingested and analyzed, organizations can more effectively detect, investigate, and respond to potential threats or anomalies in logs ingested into Elastic, and in this specific case, LLM logs.</p> <p>Our initial mapping begins by investigating fields provided by the vendor and existing gaps, leading to the establishment of a comprehensive schema tailored to the nuances of LLM operations. We then reconciled the fields to align with our OpenTelemetry <a href="https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/llm-spans.md">semantic conventions</a>. These mappings shown in the table cover various aspects:</p> <ul> <li><strong>General LLM Interaction Fields</strong>: These include basic but critical information such as the content of requests and responses, token counts, timestamps, and user identifiers, which are foundational for understanding the context and scope of interactions.</li> <li><strong>Text Quality and Relevance Metric Fields</strong>: Fields measuring text readability, complexity, and similarity scores help assess the quality and relevance of model outputs, ensuring that responses are not only accurate but also user-appropriate.</li> <li><strong>Security Metric Fields</strong>: This class of metrics is important for identifying and quantifying potential security risks, including regex pattern matches and scores related to jailbreak attempts, prompt injections, and other security concerns such as hallucination consistency and refusal responses.</li> <li><strong>Policy Enforcement Fields</strong>: These fields capture details about specific policy enforcement actions taken during interactions, such as blocking or modifying content, and provide insights into the confidence levels of these actions, enhancing security and compliance measures.</li> <li><strong>Threat Analysis Fields</strong>: Focused on identifying and quantifying potential threats, these fields provide a detailed analysis of risk scores, types of detected threats, and the measures taken to mitigate these threats.</li> <li><strong>Compliance Fields</strong>: These fields help ensure that interactions comply with various regulatory standards, detailing any compliance violations detected and the specific rules that were triggered during the interaction.</li> <li><strong>OWASP Top Ten Specific Fields</strong>: These fields map directly to the OWASP Top 10 risks for LLM applications, helping to align security measures with recognized industry standards.</li> <li><strong>Sentiment and Toxicity Analysis Fields</strong>: These analyses are essential to gauge the tone and detect any harmful content in the response, ensuring that outputs align with ethical guidelines and standards. This includes sentiment scores, toxicity levels, and identification of inappropriate or sensitive content.</li> <li><strong>Performance Metric Fields</strong>: These fields measure the performance aspects of LLM interactions, including response times and sizes of requests and responses, which are critical for optimizing system performance and ensuring efficient operations.</li> </ul> <p></p> <p></p> <p>Note: See the <a href="https://gist.github.com/Mikaayenson/cf03f6d3998e16834c1274f007f2666c">gist</a> for an extended table of fields proposed.</p> <p>These fields are mapped by our LLM integrations and ultimately used within our detections. As we continue to understand the threat landscape, we will continue to refine these fields to ensure additional fields populated by other LLM vendors are standardized and conceptually reflected within the mapping.</p> <h3 class="font-bold leading-tight text-xl md:text-3xl relative"><span id="broader-implications-and-benefits-of-standardization" class="absolute -top-32"></span>Broader Implications and Benefits of Standardization</h3> <p>Standardizing security fields within the LLM ecosystem (e.g., user interaction and application integration) facilitates a unified approach to the security domain. Elastic endeavors to lead the charge by defining and promoting a set of standard fields. This effort not only enhances the security posture of individual organizations but also fosters a safer industry.</p> <p><strong>Integration with Security Tools</strong>: By standardizing responses from LLM-related security tools, it enriches security analysis fields that can be shipped with the original LLM vendor content to a security solution. If operationally chained together in the LLM application’s ecosystem, security tools can audit each invocation request and response. Security teams can then leverage these fields to build complex detection mechanisms that can identify subtle signs of misuse or vulnerabilities within LLM interactions.</p> <p><strong>Consistency Across Vendors</strong>: Insisting that all LLM vendors adopt these standard fields drives a singular goal to effectively protect applications, but in a way that establishes a baseline that all industry users can adhere to. Users are encouraged to align to a common schema regardless of the platform or tool.</p> <p><strong>Enhanced Detection Engineering</strong>: With these standard fields, detection engineering becomes more robust and the change of false positives is decreased. Security engineers can create effective rules that identify potential threats across different models, interactions, and ecosystems. This consistency is especially important for organizations that rely on multiple LLMs or security tools and need to maintain a unified platform.</p> <h4 class="font-bold leading-tight text-lg md:text-2xl relative"><span id="sample-llm-specific-fields-aws-bedrock-use-case" class="absolute -top-32"></span>Sample LLM-specific fields: AWS Bedrock use case</h4> <p>Based on the integration’s ingestion pipeline, field mappings, and processors, the AWS Bedrock data is cleaned up, standardized, and mapped to Elastic Common Schema (<a href="https://www.elastic.co/guide/en/ecs/current/ecs-reference.html">ECS</a>) fields. The core Bedrock fields are then introduced under the <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">aws.bedrock</code> group which includes details about the model invocation like requests, responses, and token counts. The integration populates additional fields tailored for the LLM to provide deeper insights into the model’s interactions which are later used in our detections.</p> <h3 class="font-bold leading-tight text-xl md:text-3xl relative"><span id="llm-detection-engineering-examples" class="absolute -top-32"></span>LLM detection engineering examples</h3> <p>With the standardized fields and the Elastic AWS Bedrock integration, we can begin crafting detection engineering rules that showcase the proposed capability with varying complexity. The below examples are written using <a href="https://www.elastic.co/guide/en/security/8.13/rules-ui-create.html#create-esql-rule">ES|QL</a>.</p> <p>Note: Check out the detection-rules <a href="https://github.com/elastic/detection-rules/tree/main/hunting">hunting</a> directory and <a href="https://github.com/elastic/detection-rules/tree/main/rules/integrations/aws_bedrock"><code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">aws_bedrock</code></a> rules for more details about these queries.</p> <h4 class="font-bold leading-tight text-lg md:text-2xl relative"><span id="basic-detection-of-sensitive-content-refusal" class="absolute -top-32"></span>Basic detection of sensitive content refusal</h4> <p>With current policies and standards on sensitive topics within the organization, it is important to have mechanisms in place to ensure LLMs also adhere to compliance and ethical standards. Organizations have an opportunity to monitor and capture instances where an LLM directly refuses to respond to sensitive topics.</p> <p><strong>Sample Detection</strong>:</p> <pre><code>from logs-aws_bedrock.invocation-* | WHERE @timestamp &gt; NOW() - 1 DAY AND ( gen_ai.completion LIKE &quot;*I cannot provide any information about*&quot; AND gen_ai.response.finish_reasons LIKE &quot;*end_turn*&quot; ) | STATS user_request_count = count() BY gen_ai.user.id | WHERE user_request_count &gt;= 3</code></pre> <p><strong>Detection Description</strong>: This query is used to detect instances where the model explicitly refuses to provide information on potentially sensitive or restricted topics multiple times. Combined with predefined formatted outputs, the use of specific phrases like &quot;I cannot provide any information about&quot; within the output content indicates that the model has been triggered by a user prompt to discuss something it&#x27;s programmed to treat as confidential or inappropriate.</p> <p><strong>Security Relevance</strong>: Monitoring LLM refusals helps to identify attempts to probe the model for sensitive data or to exploit it in a manner that could lead to the leakage of proprietary or restricted information. By analyzing the patterns and frequency of these refusals, security teams can investigate if there are targeted attempts to breach information security policies.</p> <h3 class="font-bold leading-tight text-xl md:text-3xl relative"><span id="potential-denial-of-service-or-resource-exhaustion-attacks" class="absolute -top-32"></span>Potential denial of service or resource exhaustion attacks</h3> <p>Due to the engineering design of LLMs being highly computational and data-intensive, they are susceptible to resource exhaustion and denial of service (DoS) attacks. High usage patterns may indicate abuse or malicious activities designed to degrade the LLM’s availability. Due to the ambiguity of correlating prompt request size directly with token count, it is essential to consider the implications of high token counts in prompts which may not always result from larger requests bodies. Token count and character counts depend on the specific model, where each can be different and is related to how embeddings are generated.</p> <p><strong>Sample Detection</strong>:</p> <pre><code>from logs-aws_bedrock.invocation-* | WHERE @timestamp &gt; NOW() - 1 DAY AND ( gen_ai.usage.prompt_tokens &gt; 8000 OR gen_ai.usage.completion_tokens &gt; 8000 OR gen_ai.performance.request_size &gt; 8000 ) | STATS max_prompt_tokens = max(gen_ai.usage.prompt_tokens), max_request_tokens = max(gen_ai.performance.request_size), max_completion_tokens = max(gen_ai.usage.completion_tokens), request_count = count() BY cloud.account.id | WHERE request_count &gt; 1 | SORT max_prompt_tokens, max_request_tokens, max_completion_tokens DESC</code></pre> <p><strong>Detection Description</strong>: This query identifies high-volume token usage which could be indicative of abuse or an attempted denial of service (DoS) attack. Monitoring for unusually high token counts (input or output) helps detect patterns that could slow down or overwhelm the system, potentially leading to service disruptions. Given each application may leverage a different token volume, we’ve chosen a simple threshold based on our existing experience that should cover basic use cases.</p> <p><strong>Security Relevance</strong>: This form of monitoring helps detect potential concerns with system availability and performance. It helps in the early detection of DoS attacks or abusive behavior that could degrade service quality for legitimate users. By aggregating and analyzing token usage by account, security teams can pinpoint sources of potentially malicious traffic and take appropriate measures.</p> <h4 class="font-bold leading-tight text-lg md:text-2xl relative"><span id="monitoring-for-latency-anomalies" class="absolute -top-32"></span>Monitoring for latency anomalies</h4> <p>Latency-based metrics can be a key indicator of underlying performance issues or security threats that overload the system. By monitoring processing delays, organizations can ensure that servers are operating as efficiently as expected.</p> <p><strong>Sample Detection</strong>:</p> <pre><code>from logs-aws_bedrock.invocation-* | WHERE @timestamp &gt; NOW() - 1 DAY | EVAL response_delay_seconds = gen_ai.performance.start_response_time / 1000 | WHERE response_delay_seconds &gt; 5 | STATS max_response_delay = max(response_delay_seconds), request_count = count() BY gen_ai.user.id | WHERE request_count &gt; 3 | SORT max_response_delay DESC</code></pre> <p><strong>Detection Description</strong>: This updated query monitors the time it takes for an LLM to start sending a response after receiving a request, focusing on the initial response latency. It identifies significant delays by comparing the actual start of the response to typical response times, highlighting instances where these delays may be abnormally long.</p> <p><strong>Security Relevance</strong>: Anomalous latencies can be symptomatic of issues such as network attacks (e.g., DDoS) or system inefficiencies that need to be addressed. By tracking and analyzing latency metrics, organizations can ensure that their systems are running efficiently and securely, and can quickly respond to potential threats that might manifest as abnormal delays.</p> <h2 class="font-bold text-2xl md:text-4xl relative"><span id="advanced-llm-detection-engineering-use-cases" class="absolute -top-32"></span>Advanced LLM detection engineering use cases</h2> <p>This section explores potential use cases that could be addressed with an Elastic Security integration. It assumes that these fields are fully populated and that necessary security auditing enrichment features (e.g., Guardrails) have been implemented, either within AWS Bedrock or via a similar approach provided by the LLM vendor. In combination with the available data source and Elastic integration, detection rules can be built on top of these Guardrail requests and responses to detect misuse of LLMs in deployment.</p> <h3 class="font-bold leading-tight text-xl md:text-3xl relative"><span id="malicious-model-uploads-and-cross-tenant-escalation" class="absolute -top-32"></span>Malicious model uploads and cross-tenant escalation</h3> <p>A recent investigation into the Hugging Face Interface API revealed a significant risk where attackers could upload a maliciously crafted model to perform arbitrary code execution. This was achieved by using a Python Pickle file that, when deserialized, executed embedded malicious code. These vulnerabilities highlight the need for rigorous security measures to inspect and sanitize all inputs in AI-as-a-Service (AIAAS) platforms from the LLM, to the infrastructure that hosts the model, and the application API integration. Refer to <a href="https://www.wiz.io/blog/wiz-and-hugging-face-address-risks-to-ai-infrastructure">this article</a> for more details.</p> <p><strong>Potential Detection Opportunity</strong>: Use fields like <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">gen_ai.request.model.id</code>, <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">gen_ai.request.model.version</code>, and prompt <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">gen_ai.completion</code> to detect interactions with anomalous models. Monitoring unusual values or patterns in the model identifiers and version numbers along with inspecting the requested content (e.g., looking for typical Python Pickle serialization techniques) may indicate suspicious behavior. Similarly, a check prior to uploading the model using similar fields may block the upload. Cross-referencing additional fields like <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">gen_ai.user.id</code> can help identify malicious cross-tenant operations performing these types of activities.</p> <h3 class="font-bold leading-tight text-xl md:text-3xl relative"><span id="unauthorized-urls-and-external-communication" class="absolute -top-32"></span>Unauthorized URLs and external communication</h3> <p>As LLMs become more integrated into operational ecosystems, their ability to interact with external capabilities like email or webhooks can be exploited by attackers. To protect against these interactions, it’s important to implement detection rules that can identify suspicious or unauthorized activities based on the model’s outputs and subsequent integrations.</p> <p><strong>Potential Detection Opportunity</strong>: Use fields like <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">gen_ai.completion</code>, and <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">gen_ai.security.regex_pattern_count</code> to triage malicious external URLs and webhooks. These regex patterns need to be predefined based on well-known suspicious patterns.</p> <h4 class="font-bold leading-tight text-lg md:text-2xl relative"><span id="hierarchical-instruction-prioritization" class="absolute -top-32"></span>Hierarchical instruction prioritization</h4> <p>LLMs are increasingly used in environments where they receive instructions from various sources (e.g., <a href="https://openai.com/blog/custom-instructions-for-chatgpt">ChatGPT Custom Instructions</a>), which may not always have benign intentions. This build-your-own model workflow can lead to a range of potential security vulnerabilities, if the model treats all instructions with equal importance, and they go unchecked. Reference <a href="https://arxiv.org/pdf/2404.13208.pdf">here</a>.</p> <p><strong>Potential Detection Opportunity</strong>: Monitor fields like <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">gen_ai.model.instructions</code> and <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">gen_ai.completion</code> to identify discrepancies between given instructions and the models responses which may indicate cases where models treat all instructions with equal importance. Additionally, analyze the <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">gen_ai.similarity_score</code>, to discern how similar the response is from the original request.</p> <h3 class="font-bold leading-tight text-xl md:text-3xl relative"><span id="extended-detections-featuring-additional-elastic-rule-types" class="absolute -top-32"></span>Extended detections featuring additional Elastic rule types</h3> <p>This section introduces additional detection engineering techniques using some of Elastic’s rule types, Threshold, Indicator Match, and New Terms to provide a more nuanced and robust security posture.</p> <ul> <li><strong>Threshold Rules</strong>: Identify high frequency of denied requests over a short period of time grouped by <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">gen_ai.user.id</code> that could be indicative of abuse attempts. (e.g. OWASP’s LLM04)</li> <li><strong>Indicator Match Rules</strong>: Match known malicious threat intel provided indicators such as the LLM user ID like the <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">gen_ai.user.id</code> which contain these user attributes. (e.g. <code class="px-1.5 py-1 rounded not-prose bg-[var(--tw-prose-invert-pre-bg)] whitespace-break-spaces text-[85%] text-emerald-600">arn:aws:iam::12345678912:user/thethreatactor</code>)</li> <li><strong>New Terms Rules</strong>: Detect new or unusual terms in user prompts that could indicate usual activity outside of the normal usage for the user’s role, potentially indicating new malicious behaviors.</li> </ul> <h2 class="font-bold text-2xl md:text-4xl relative"><span id="summary" class="absolute -top-32"></span>Summary</h2> <p>Elastic is pioneering the standardization of LLM-based fields across the generative AI landscape to enable security detections across the ecosystem. This initiative not only aligns with our ongoing enhancements in LLM integration and security strategies but also supports our broad security framework that safeguards both direct user interactions and the underlying system architectures. By promoting a uniform language among LLM vendors for enhanced detection and response capabilities, we aim to protect the entire ecosystem, making it more secure and dependable. Elastic invites all stakeholders within the industry, creators, maintainers, integrators and users, to adopt these standardized practices, thereby strengthening collective security measures and advancing industry-wide protections.</p> <p>As we continue to add and enhance our integrations, starting with AWS Bedrock, we are strategizing to align other LLM-based integrations to the new standards we’ve set, paving the way for a unified experience across the Elastic ecosystem. The seamless overlap with existing Elasticsearch capabilities empowers users to leverage sophisticated search and analytics directly on the LLM data, driving existing workflows back to tools users are most comfortable with.</p> <p>Check out the <a href="https://www.elastic.co/security/llm-safety-report">LLM Safety Assessment</a>, which delves deeper into these topics.</p> <p><strong>The release and timing of any features or functionality described in this post remain at Elastic&#x27;s sole discretion. Any features or functionality not currently available may not be delivered on time or at all.</strong></p></div></div></div><div class="hidden lg:flex lg:col-span-1 text-sm lg:flex-col lg:space-y-6"><div class="toc"><h4 class="font-bold leading-tight text-lg md:text-2xl mb-3">Jump to section</h4><ul class="flex flex-col space-y-2"><li><a class="flex items-center space-x-1 hover:text-white" href="/security-labs/elastic-advances-llm-security#introduction"><span>Introduction</span></a></li><li><a class="flex items-center space-x-1 hover:text-white" href="/security-labs/elastic-advances-llm-security#integrating-llm-logs-into-elastic"><span>Integrating LLM logs into&nbsp;Elastic</span></a></li><li><a class="flex items-center space-x-1 hover:text-white ml-4" href="/security-labs/elastic-advances-llm-security#vendor-selection-and-api-capabilities"><span>Vendor selection and API&nbsp;capabilities</span></a></li><li><a class="flex items-center space-x-1 hover:text-white ml-4" href="/security-labs/elastic-advances-llm-security#elastic-aws-bedrock-model-integration"><span>Elastic AWS Bedrock model&nbsp;integration</span></a></li><li><a class="flex items-center space-x-1 hover:text-white ml-8" href="/security-labs/elastic-advances-llm-security#overview"><span>Overview</span></a></li><li><a class="flex items-center space-x-1 hover:text-white ml-4" href="/security-labs/elastic-advances-llm-security#configuring-integration-data-collection-method"><span>Configuring integration data collection&nbsp;method</span></a></li><li><a class="flex items-center space-x-1 hover:text-white ml-8" href="/security-labs/elastic-advances-llm-security#collecting-logs-from-s3-buckets"><span>Collecting logs from S3&nbsp;buckets</span></a></li><li><a class="flex items-center space-x-1 hover:text-white ml-8" href="/security-labs/elastic-advances-llm-security#collecting-logs-from-cloudwatch"><span>Collecting logs from&nbsp;CloudWatch</span></a></li><li><a class="flex items-center space-x-1 hover:text-white ml-8" href="/security-labs/elastic-advances-llm-security#integration-installation"><span>Integration&nbsp;installation</span></a></li><li><a class="flex items-center space-x-1 hover:text-white ml-4" href="/security-labs/elastic-advances-llm-security#configuring-bedrock-guardrails"><span>Configuring Bedrock&nbsp;Guardrails</span></a></li></ul><button class="border-t border-white/20 w-full mt-3 py-2 flex items-center space-x-1 text-xs font-medium uppercase tracking-wide hover:text-white"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" fill="currentColor" aria-hidden="true" class="w-3 h-3"><path d="M10.75 4.75a.75.75 0 00-1.5 0v4.5h-4.5a.75.75 0 000 1.5h4.5v4.5a.75.75 0 001.5 0v-4.5h4.5a.75.75 0 000-1.5h-4.5v-4.5z"></path></svg><span>Show more</span></button></div><div class="bg-zinc-900 border border-zinc-800 drop-shadow-lg p-5 md:p-2 sm:p-4 md:px-6 md:py-4 rounded-xl"><h4 class="font-bold leading-tight text-lg md:text-2xl mb-3">Elastic Security Labs Newsletter</h4><div><a target="_blank" class="button inline-flex" href="https://www.elastic.co/elastic-security-labs/newsletter?utm_source=security-labs">Sign Up</a></div></div></div></div><div class="bg-zinc-900 border border-zinc-800 drop-shadow-lg p-5 md:p-2 sm:p-4 md:px-6 md:py-4 rounded-xl my-5 md:my-10 max-w-3xl mx-auto flex flex-col items-center shadow-2xl"><h4 class="font-bold leading-tight text-lg md:text-2xl">Share this article</h4><div class="flex flex-wrap items-center justify-center mt-4 space-x-4"><a class="flex items-center space-x-2 button" href="https://twitter.com/intent/tweet?text=Elastic Advances LLM Security with Standardized Fields and Integrations&amp;url=https://www.elastic.co/security-labs/elastic-advances-llm-security" target="_blank" rel="noopener noreferrer" aria-label="Share this article on Twitter" title="Share this article on Twitter"><svg class="w-4 h-4" viewBox="0 0 24 24"><path fill="currentColor" d="M23.954 4.569c-.885.389-1.83.653-2.825.772a4.98 4.98 0 002.187-2.746 9.955 9.955 0 01-3.157 1.204 4.98 4.98 0 00-8.49 4.54A14.128 14.128 0 011.69 3.05a4.98 4.98 0 001.54 6.638A4.94 4.94 0 011.2 8.62v.06a4.98 4.98 0 004 4.87 4.94 4.94 0 01-2.24.086 4.98 4.98 0 004.64 3.45A9.97 9.97 0 010 20.35a14.075 14.075 0 007.59 2.22c9.16 0 14.17-7.583 14.17-14.17 0-.217-.005-.434-.015-.65a10.128 10.128 0 002.485-2.58l-.001-.001z"></path></svg><span>Twitter</span></a><a class="flex items-center space-x-2 button" href="https://www.facebook.com/sharer/sharer.php?u=https://www.elastic.co/security-labs/elastic-advances-llm-security" target="_blank" rel="noopener noreferrer" aria-label="Share this article on Facebook" title="Share this article on Facebook"><svg class="w-4 h-4" viewBox="0 0 24 24"><path fill="currentColor" d="M22.5 12c0-5.799-4.701-10.5-10.5-10.5S1.5 6.201 1.5 12c0 5.301 3.901 9.699 9 10.401V14.4h-2.7v-2.7h2.7v-2.1c0-2.7 1.8-4.2 4.2-4.2 1.2 0 2.1.1 2.4.2v2.4h-1.5c-1.2 0-1.5.6-1.5 1.5v1.8h3l-.3 2.7h-2.7V22C18.599 21.3 22.5 17.301 22.5 12z"></path></svg><span>Facebook</span></a><a class="flex items-center space-x-2 button" href="https://www.linkedin.com/shareArticle?mini=true&amp;url=https://www.elastic.co/security-labs/elastic-advances-llm-security&amp;title=Elastic Advances LLM Security with Standardized Fields and Integrations" target="_blank" rel="noopener noreferrer" aria-label="Share this article on LinkedIn" title="Share this article on LinkedIn"><svg class="w-4 h-4" viewBox="0 0 24 24"><path fill="currentColor" d="M19 0h-14c-2.761 0-5 2.239-5 5v14c0 2.761 2.239 5 5 5h14c2.762 0 5-2.239 5-5v-14c0-2.761-2.238-5-5-5zm-11 19h-3v-11h3v11zm-1.5-12.268c-.966 0-1.75-.79-1.75-1.764s.784-1.764 1.75-1.764 1.75.79 1.75 1.764-.783 1.764-1.75 1.764zm13.5 12.268h-3v-5.604c0-3.368-4-3.113-4 0v5.604h-3v-11h3v1.765c1.396-2.586 7-2.777 7 2.476v6.759z"></path></svg><span>LinkedIn</span></a><a class="flex items-center space-x-2 button" href="https://reddit.com/submit?url=https://www.elastic.co/security-labs/elastic-advances-llm-security&amp;title=Elastic Advances LLM Security with Standardized Fields and Integrations" target="_blank" rel="noopener noreferrer" aria-label="Share this article on Reddit" title="Share this article on Reddit"><svg class="w-4 h-4" viewBox="0 0 24 24"><path fill-rule="evenodd" clip-rule="evenodd" d="M24 12C24 18.6274 18.6274 24 12 24C5.37258 24 0 18.6274 0 12C0 5.37258 5.37258 0 12 0C18.6274 0 24 5.37258 24 12ZM19.6879 11.0584C19.8819 11.3352 19.9916 11.6622 20.004 12C20.0091 12.3306 19.9205 12.656 19.7485 12.9384C19.5765 13.2208 19.3281 13.4488 19.032 13.596C19.0455 13.7717 19.0455 13.9483 19.032 14.124C19.032 16.812 15.9 18.996 12.036 18.996C8.172 18.996 5.04 16.812 5.04 14.124C5.02649 13.9483 5.02649 13.7717 5.04 13.596C4.80919 13.49 4.6042 13.335 4.43923 13.1419C4.27427 12.9487 4.15327 12.722 4.08462 12.4775C4.01598 12.2329 4.00133 11.9764 4.04169 11.7256C4.08205 11.4748 4.17646 11.2358 4.31837 11.0251C4.46028 10.8145 4.6463 10.6372 4.86354 10.5056C5.08078 10.3739 5.32404 10.2911 5.57646 10.2629C5.82889 10.2346 6.08444 10.2616 6.32541 10.3419C6.56638 10.4222 6.78701 10.5539 6.972 10.728C8.35473 9.79023 9.98146 9.27718 11.652 9.252L12.54 5.088C12.55 5.03979 12.5694 4.99405 12.5972 4.95341C12.625 4.91277 12.6605 4.87805 12.7018 4.85127C12.7431 4.82448 12.7894 4.80615 12.8378 4.79735C12.8862 4.78855 12.9359 4.78945 12.984 4.8L15.924 5.388C16.0676 5.14132 16.2944 4.9539 16.5637 4.85937C16.833 4.76484 17.1272 4.7694 17.3934 4.87222C17.6597 4.97505 17.8806 5.1694 18.0164 5.42041C18.1523 5.67141 18.1942 5.96262 18.1348 6.24177C18.0753 6.52092 17.9182 6.76972 17.6918 6.94352C17.4654 7.11732 17.1845 7.20473 16.8995 7.19006C16.6144 7.1754 16.3439 7.05962 16.1366 6.8635C15.9292 6.66738 15.7985 6.40378 15.768 6.12L13.2 5.58L12.42 9.324C14.0702 9.3594 15.6749 9.87206 17.04 10.8C17.2839 10.566 17.5902 10.4074 17.9221 10.3436C18.254 10.2797 18.5973 10.3132 18.9106 10.4401C19.2239 10.5669 19.4939 10.7817 19.6879 11.0584ZM8.20624 12.5333C8.07438 12.7307 8.004 12.9627 8.004 13.2C8.004 13.5183 8.13043 13.8235 8.35547 14.0485C8.58051 14.2736 8.88574 14.4 9.204 14.4C9.44134 14.4 9.67335 14.3296 9.87068 14.1978C10.068 14.0659 10.2218 13.8785 10.3127 13.6592C10.4035 13.4399 10.4272 13.1987 10.3809 12.9659C10.3346 12.7331 10.2204 12.5193 10.0525 12.3515C9.8847 12.1836 9.67089 12.0694 9.43811 12.0231C9.20533 11.9768 8.96405 12.0005 8.74478 12.0913C8.52551 12.1822 8.33809 12.336 8.20624 12.5333ZM12.012 17.424C13.0771 17.4681 14.1246 17.1416 14.976 16.5V16.548C15.0075 16.5173 15.0327 16.4806 15.05 16.4402C15.0674 16.3997 15.0766 16.3563 15.0772 16.3122C15.0777 16.2682 15.0696 16.2245 15.0533 16.1837C15.0369 16.1428 15.0127 16.1055 14.982 16.074C14.9513 16.0425 14.9146 16.0173 14.8742 16C14.8337 15.9826 14.7903 15.9734 14.7462 15.9728C14.7022 15.9723 14.6585 15.9804 14.6177 15.9967C14.5768 16.0131 14.5395 16.0373 14.508 16.068C13.7797 16.5904 12.895 16.8487 12 16.8C11.1061 16.8399 10.2255 16.5732 9.504 16.044C9.44182 15.993 9.36289 15.9669 9.28256 15.9708C9.20222 15.9748 9.12622 16.0085 9.06935 16.0653C9.01247 16.1222 8.97879 16.1982 8.97484 16.2786C8.97089 16.3589 8.99697 16.4378 9.048 16.5C9.89937 17.1416 10.9469 17.4681 12.012 17.424ZM14.0933 14.2458C14.2907 14.3776 14.5227 14.448 14.76 14.448L14.748 14.496C14.9107 14.4978 15.0721 14.4664 15.2223 14.4038C15.3725 14.3413 15.5084 14.2488 15.6218 14.1321C15.7352 14.0154 15.8236 13.8768 15.8818 13.7248C15.9399 13.5728 15.9665 13.4106 15.96 13.248C15.96 13.0107 15.8896 12.7787 15.7578 12.5813C15.6259 12.384 15.4385 12.2302 15.2192 12.1393C14.9999 12.0485 14.7587 12.0248 14.5259 12.0711C14.2931 12.1174 14.0793 12.2316 13.9115 12.3995C13.7436 12.5673 13.6294 12.7811 13.5831 13.0139C13.5368 13.2467 13.5605 13.4879 13.6513 13.7072C13.7422 13.9265 13.896 14.1139 14.0933 14.2458Z" fill="currentColor"></path></svg><span>Reddit</span></a></div></div></article></main><footer class="mt-auto text-xs md:text-sm"><div class="container py-6 flex flex-col md:flex-row gap-2 md:gap-0 justify-between items-center"><div class="text-zinc-300"><nav><ul class="flex space-x-4"><li><a class="hover:text-white font-medium" href="/security-labs/sitemap.xml">Sitemap</a></li><li><a class="hover:text-white font-medium flex items-center space-x-1" href="https://elastic.co?utm_source=elastic-search-labs&amp;utm_medium=referral&amp;utm_campaign=search-labs&amp;utm_content=footer"><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" aria-hidden="true" class="inline-block w-3 h-3"><path stroke-linecap="round" stroke-linejoin="round" d="M13.5 6H5.25A2.25 2.25 0 003 8.25v10.5A2.25 2.25 0 005.25 21h10.5A2.25 2.25 0 0018 18.75V10.5m-10.5 6L21 3m0 0h-5.25M21 3v5.25"></path></svg><span>Elastic.co</span></a></li><li><a class="hover:text-white font-medium flex items-center space-x-1" href="https://twitter.com/elasticseclabs"><svg class="w-4 h-4 inline-block w-3 h-3" viewBox="0 0 24 24"><path fill="currentColor" d="M23.954 4.569c-.885.389-1.83.653-2.825.772a4.98 4.98 0 002.187-2.746 9.955 9.955 0 01-3.157 1.204 4.98 4.98 0 00-8.49 4.54A14.128 14.128 0 011.69 3.05a4.98 4.98 0 001.54 6.638A4.94 4.94 0 011.2 8.62v.06a4.98 4.98 0 004 4.87 4.94 4.94 0 01-2.24.086 4.98 4.98 0 004.64 3.45A9.97 9.97 0 010 20.35a14.075 14.075 0 007.59 2.22c9.16 0 14.17-7.583 14.17-14.17 0-.217-.005-.434-.015-.65a10.128 10.128 0 002.485-2.58l-.001-.001z"></path></svg><span>@elasticseclabs</span></a></li></ul></nav></div><div class="flex flex-col space-y-1 text-zinc-300"><p>© <!-- -->2025<!-- -->. Elasticsearch B.V. All Rights Reserved.</p></div></div></footer></main></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{"article":{"title":"Elastic Advances LLM Security with Standardized Fields and Integrations","slug":"elastic-advances-llm-security","date":"2024-05-06","description":"Discover Elastic’s latest advancements in LLM security, focusing on standardized field integrations and enhanced detection capabilities. Learn how adopting these standards can safeguard your systems.","image":"Security Labs Images 4.jpg","subtitle":"Explore How Elastic's New LLM Security Strategies Enhance Detection, Standardization, and Protection Across the LLM Ecosystem","body":{"raw":"\n## Introduction\n\nLast week, security researcher Mika Ayenson [authored a publication](https://www.elastic.co/security-labs/embedding-security-in-llm-workflows) highlighting potential detection strategies and an LLM content auditing prototype solution via a proxy implemented during Elastic’s OnWeek event series. This post highlighted the importance of research pertaining to the safety of LLM technology implemented in different environments, and the research focus we’ve taken at Elastic Security Labs.\n\nGiven Elastic's unique vantage point leveraging LLM technology in our platform to power capabilities such as the Security [AI Assistant](https://www.elastic.co/guide/en/security/current/security-assistant.html), our desire for more formal detection rules, integrations, and research content has been growing. This publication highlights some of the recent advancements we’ve made in LLM integrations, our thoughts around detections aligned with industry standards, and ECS field mappings.\n\nWe are committed to a comprehensive security strategy that protects not just the direct user-based LLM interactions but also the broader ecosystem surrounding them. This approach involves layers of security detection engineering opportunities to address not only the LLM requests/responses but also the underlying systems and integrations used by the models.\n\nThese detection opportunities collectively help to secure the LLM ecosystem and can be broadly grouped into five categories:\n\n 1. **Prompt and Response**: Detection mechanisms designed to identify and mitigate threats based on the growing variety of LLM interactions to ensure that all communications are securely audited.\n 2. **Infrastructure and Platform**: Implementing detections to protect the infrastructure hosting LLMs (including wearable AI Pin devices), including detecting threats against the data stored, processing activities, and server communication.\n 3. **API and Integrations**: Detecting threats when interacting with LLM APIs and protecting integrations with other applications that ingest model output.\n 4. **Operational Processes and Data**: Monitoring operational processes (including in AI agents) and data flows while protecting data throughout its lifecycle.\n 5. **Compliance and Ethical**: Aligning detection strategies with well-adopted industry regulations and ethical standards. \n\n![Securing the LLM Ecosystem: five categories](/assets/images/elastic-advances-llm-security/image4.png)\nSecuring the LLM Ecosystem: five categories\n\nAnother important consideration for these categories expands into who can best address risks or who is responsible for each category of risk pertaining to LLM systems. \n\nSimilar to existing [Shared Security Responsibility](https://www.cisecurity.org/insights/blog/shared-responsibility-cloud-security-what-you-need-to-know) models, Elastic has assessed four broad categories, which will eventually be expanded upon further as we continue our research into detection engineering strategies and integrations. Broadly, this publication considers security protections that involve the following responsibility owners:\n\n - **LLM Creators**: Organizations who are building, designing, hosting, and training LLMs, such as OpenAI, Amazon Web Services, or Google\n - **LLM Integrators**: Organizations and individuals who integrate existing LLM technologies produced by LLM Creators into other applications\n - **LLM Maintainers**: Individuals who monitor operational LLMs for performance, reliability, security, and integrity use-cases and remain directly involved in the maintenance of the codebase, infrastructure, and software architecture\n - **Security Users**: People who are actively looking for vulnerabilities in systems through traditional testing mechanisms and means. This may expand beyond the traditional risks discussed in [OWASP’s LLM Top 10](https://llmtop10.com/) into risks associated with software and infrastructure surrounding these systems\n\nThis broader perspective showcases a unified approach to LLM detection engineering that begins with ingesting data using native Elastic [integrations](https://www.elastic.co/integrations); in this example, we highlight the AWS Bedrock Model Invocation use case. \n\n## Integrating LLM logs into Elastic\n\nElastic integrations simplify data ingestion into Elastic from various sources, ultimately enhancing our security solution. These integrations are managed through Fleet in Kibana, allowing users to easily deploy and manage data within the Elastic Agent. Users can quickly adapt Elastic to new data sources by selecting and configuring integrations through Fleet. For more details, see Elastic’s [blog](https://www.elastic.co/blog/elastic-agent-and-fleet-make-it-easier-to-integrate-your-systems-with-elastic) on making it easier to integrate your systems with Elastic.\n\nThe initial ONWeek work undertaken by the team involved a simple proxy solution that extracted fields from interactions with the Elastic Security AI Assistant. This prototype was deployed alongside the Elastic Stack and consumed data from a vendor solution that lacked security auditing capabilities. While this initial implementation proved conceptually interesting, it prompted the team to invest time in assessing existing Elastic integrations from one of our cloud provider partners, [Amazon Web Services](https://docs.elastic.co/integrations/aws). This methodology guarantees streamlined accessibility for our users, offering seamless, one-click integrations for data ingestion. All ingest pipelines conform to ECS/OTel normalization standards, encompassing comprehensive content, including dashboards, within a unified package. Furthermore, this strategy positions us to leverage additional existing integrations, such as Azure and GCP, for future LLM-focused integrations.\n\n### Vendor selection and API capabilities\n\nWhen selecting which LLM providers to create integrations for, we looked at the types of fields we need to ingest for our security use cases. For the starting set of rules detailed here, we needed information such as timestamps and token counts; we found that vendors such as Azure OpenAI provided content moderation filtering on the prompts and generated content. LangSmith (part of the LangChain tooling) was also a top contender, as the data contains the type of vendor used (e.g., OpenAI, Bedrock, etc.) and all the respective metadata. However, this required that the user also have LangSmith set up. For this implementation, we decided to go with first-party supported logs from a vendor that provides LLMs. \n\nAs we went deeper into potential integrations, we decided to land with AWS Bedrock, for a few specific reasons. Firstly, Bedrock logging has [first-party support](https://docs.aws.amazon.com/bedrock/latest/userguide/model-invocation-logging.html) to Amazon CloudWatch Logs and Amazon S3. Secondly, the logging is built specifically for model invocation, including data specific to LLMs (as opposed to other operations and machine learning models), including prompts and responses, and guardrail/content filtering. Thirdly, Elastic already has a [robust catalog](https://www.elastic.co/integrations/data-integrations?solution=all-solutions\u0026category=aws) of integrations with AWS, so we were able to quickly create a new integration for AWS Bedrock model invocation logs specifically. The next section will dive into this new integration, which you can use to capture your Bedrock model invocation logs in the Elastic stack.\n\n### Elastic AWS Bedrock model integration\n\n#### Overview\n\nThe new Elastic [AWS Bedrock](https://docs.elastic.co/integrations/aws_bedrock) integration for model invocation logs provides a way to collect and analyze data from AWS services quickly, specifically focusing on the model. This integration provides two primary methods for log collection: Amazon S3 buckets and Amazon CloudWatch. Each method is optimized to offer robust data retrieval capabilities while considering cost-effectiveness and performance efficiency. We use these LLM-specific fields collected for detection engineering purposes.\n\nNote: While this integration does not cover every proposed field, it does standardize existing AWS Bedrock fields into the gen_ai category. This approach makes it easier to maintain detection rules across various data sources, minimizing the need for separate rules for each LLM vendor.\n\n### Configuring integration data collection method\n\n#### Collecting logs from S3 buckets\n\nThis integration allows for efficient log collection from S3 buckets using two distinct methods:\n\n - **SQS Notification**: This is the preferred method for collecting. It involves reading S3 notification events from an AWS Simple Queue Service (SQS) queue. This method is less costly and provides better performance compared to direct polling. \n - **Direct S3 Bucket Polling**: This method directly polls a list of S3 objects within an S3 bucket and is recommended only when SQS notifications cannot be configured. This approach is more resource-intensive, but it provides an alternative when SQS is not feasible.\n\n#### Collecting logs from CloudWatch\n\nLogs can also be collected directly from CloudWatch, where the integration taps into all log streams within a specified log group using the filterLogEvents AWS API. This method is an alternative to using S3 buckets altogether. \n\n#### Integration installation\n\nThe integration can be set up within the Elastic Agent by following normal Elastic [installation steps](https://www.elastic.co/guide/en/fleet/current/add-integration-to-policy.html). \n\n 1. Navigate to the AWS Bedrock integration\n 2. Configure the ```queue_url``` for SQS or ```bucket_arn``` for direct S3 polling.\n\n![New AWS Bedrock Elastic Integration](/assets/images/elastic-advances-llm-security/image2.png)\n\n### Configuring Bedrock Guardrails\n\nAWS Bedrock [Guardrails](https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html) enable organizations to enforce security by setting policies that limit harmful or undesirable content in LLM interactions. These guardrails can be customized to include denied topics to block specific subjects and content filters to moderate the severity of content in prompts and responses. Additionally, word and sensitive information filters block profanity and mask personally identifiable information (PII), ensuring interactions comply with privacy and ethical standards. This feature helps control the content generated and consumed by LLMs and, ideally, reduces the risk associated with malicious prompts.\n\nNote: other guardrail examples include Azure OpenAI’s [content and response](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/content-filter?tabs=warning%2Cpython-new) filters, which we aim to capture in our proposed LLM standardized fields for vendor-agnostic logging. \n\n![AWS Bedrock Guardrails](/assets/images/elastic-advances-llm-security/image1.png)\n\nWhen LLM interaction content triggers these filters, the response objects are populated with ```amazon-bedrock-trace``` and ```amazon-bedrock-guardrailAction``` fields, providing details about the Guardrails outcome, and nested fields indicating whether the input matched the content filter. This response object enrichment with detailed filter outcomes improves the overall data quality, which becomes particularly effective when these nested fields are aligned with ECS mappings.\n\n### The importance of ECS mappings\n\nField mapping is a critical part of the process for integration development, primarily to improve our ability to write broadly scoped and widely compatible detection rules. By standardizing how data is ingested and analyzed, organizations can more effectively detect, investigate, and respond to potential threats or anomalies in logs ingested into Elastic, and in this specific case, LLM logs.\n\nOur initial mapping begins by investigating fields provided by the vendor and existing gaps, leading to the establishment of a comprehensive schema tailored to the nuances of LLM operations. We then reconciled the fields to align with our OpenTelemetry [semantic conventions](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/llm-spans.md). These mappings shown in the table cover various aspects:\n\n - **General LLM Interaction Fields**: These include basic but critical information such as the content of requests and responses, token counts, timestamps, and user identifiers, which are foundational for understanding the context and scope of interactions.\n - **Text Quality and Relevance Metric Fields**: Fields measuring text readability, complexity, and similarity scores help assess the quality and relevance of model outputs, ensuring that responses are not only accurate but also user-appropriate. \n - **Security Metric Fields**: This class of metrics is important for identifying and quantifying potential security risks, including regex pattern matches and scores related to jailbreak attempts, prompt injections, and other security concerns such as hallucination consistency and refusal responses.\n - **Policy Enforcement Fields**: These fields capture details about specific policy enforcement actions taken during interactions, such as blocking or modifying content, and provide insights into the confidence levels of these actions, enhancing security and compliance measures.\n - **Threat Analysis Fields**: Focused on identifying and quantifying potential threats, these fields provide a detailed analysis of risk scores, types of detected threats, and the measures taken to mitigate these threats.\n - **Compliance Fields**: These fields help ensure that interactions comply with various regulatory standards, detailing any compliance violations detected and the specific rules that were triggered during the interaction.\n - **OWASP Top Ten Specific Fields**: These fields map directly to the OWASP Top 10 risks for LLM applications, helping to align security measures with recognized industry standards.\n - **Sentiment and Toxicity Analysis Fields**: These analyses are essential to gauge the tone and detect any harmful content in the response, ensuring that outputs align with ethical guidelines and standards. This includes sentiment scores, toxicity levels, and identification of inappropriate or sensitive content.\n - **Performance Metric Fields**: These fields measure the performance aspects of LLM interactions, including response times and sizes of requests and responses, which are critical for optimizing system performance and ensuring efficient operations.\n\n![General, quality, security, policy, and threat analysis fields](/assets/images/elastic-advances-llm-security/image5.png)\n\n![Compliance, OWASP top 10, security tools analysis, sentiment and toxicity analysis, and performance fields](/assets/images/elastic-advances-llm-security/image6.png)\n\nNote: See the [gist](https://gist.github.com/Mikaayenson/cf03f6d3998e16834c1274f007f2666c) for an extended table of fields proposed.\n\nThese fields are mapped by our LLM integrations and ultimately used within our detections. As we continue to understand the threat landscape, we will continue to refine these fields to ensure additional fields populated by other LLM vendors are standardized and conceptually reflected within the mapping.\n\n### Broader Implications and Benefits of Standardization\n\nStandardizing security fields within the LLM ecosystem (e.g., user interaction and application integration) facilitates a unified approach to the security domain. Elastic endeavors to lead the charge by defining and promoting a set of standard fields. This effort not only enhances the security posture of individual organizations but also fosters a safer industry. \n\n**Integration with Security Tools**: By standardizing responses from LLM-related security tools, it enriches security analysis fields that can be shipped with the original LLM vendor content to a security solution. If operationally chained together in the LLM application’s ecosystem, security tools can audit each invocation request and response. Security teams can then leverage these fields to build complex detection mechanisms that can identify subtle signs of misuse or vulnerabilities within LLM interactions. \n\n**Consistency Across Vendors**: Insisting that all LLM vendors adopt these standard fields drives a singular goal to effectively protect applications, but in a way that establishes a baseline that all industry users can adhere to. Users are encouraged to align to a common schema regardless of the platform or tool. \n\n**Enhanced Detection Engineering**: With these standard fields, detection engineering becomes more robust and the change of false positives is decreased. Security engineers can create effective rules that identify potential threats across different models, interactions, and ecosystems. This consistency is especially important for organizations that rely on multiple LLMs or security tools and need to maintain a unified platform.\n\n#### Sample LLM-specific fields: AWS Bedrock use case\n\nBased on the integration’s ingestion pipeline, field mappings, and processors, the AWS Bedrock data is cleaned up, standardized, and mapped to Elastic Common Schema ([ECS](https://www.elastic.co/guide/en/ecs/current/ecs-reference.html)) fields. The core Bedrock fields are then introduced under the ```aws.bedrock``` group which includes details about the model invocation like requests, responses, and token counts. The integration populates additional fields tailored for the LLM to provide deeper insights into the model’s interactions which are later used in our detections. \n\n### LLM detection engineering examples\n\nWith the standardized fields and the Elastic AWS Bedrock integration, we can begin crafting detection engineering rules that showcase the proposed capability with varying complexity. The below examples are written using [ES|QL](https://www.elastic.co/guide/en/security/8.13/rules-ui-create.html#create-esql-rule).\n\nNote: Check out the detection-rules [hunting](https://github.com/elastic/detection-rules/tree/main/hunting) directory and [```aws_bedrock```](https://github.com/elastic/detection-rules/tree/main/rules/integrations/aws_bedrock) rules for more details about these queries.\n\n#### Basic detection of sensitive content refusal\n\nWith current policies and standards on sensitive topics within the organization, it is important to have mechanisms in place to ensure LLMs also adhere to compliance and ethical standards. Organizations have an opportunity to monitor and capture instances where an LLM directly refuses to respond to sensitive topics.\n\n**Sample Detection**:\n\n```\nfrom logs-aws_bedrock.invocation-*\n | WHERE @timestamp \u003e NOW() - 1 DAY\n AND (\n gen_ai.completion LIKE \"*I cannot provide any information about*\"\n AND gen_ai.response.finish_reasons LIKE \"*end_turn*\"\n )\n | STATS user_request_count = count() BY gen_ai.user.id\n | WHERE user_request_count \u003e= 3\n```\n\n**Detection Description**: This query is used to detect instances where the model explicitly refuses to provide information on potentially sensitive or restricted topics multiple times. Combined with predefined formatted outputs, the use of specific phrases like \"I cannot provide any information about\" within the output content indicates that the model has been triggered by a user prompt to discuss something it's programmed to treat as confidential or inappropriate. \n\n**Security Relevance**: Monitoring LLM refusals helps to identify attempts to probe the model for sensitive data or to exploit it in a manner that could lead to the leakage of proprietary or restricted information. By analyzing the patterns and frequency of these refusals, security teams can investigate if there are targeted attempts to breach information security policies.\n\n### Potential denial of service or resource exhaustion attacks\n\nDue to the engineering design of LLMs being highly computational and data-intensive, they are susceptible to resource exhaustion and denial of service (DoS) attacks. High usage patterns may indicate abuse or malicious activities designed to degrade the LLM’s availability. Due to the ambiguity of correlating prompt request size directly with token count, it is essential to consider the implications of high token counts in prompts which may not always result from larger requests bodies. Token count and character counts depend on the specific model, where each can be different and is related to how embeddings are generated. \n\n**Sample Detection**:\n\n```\nfrom logs-aws_bedrock.invocation-*\n | WHERE @timestamp \u003e NOW() - 1 DAY\n AND (\n gen_ai.usage.prompt_tokens \u003e 8000 OR\n gen_ai.usage.completion_tokens \u003e 8000 OR\n gen_ai.performance.request_size \u003e 8000\n )\n | STATS max_prompt_tokens = max(gen_ai.usage.prompt_tokens),\n max_request_tokens = max(gen_ai.performance.request_size),\n max_completion_tokens = max(gen_ai.usage.completion_tokens),\n request_count = count() BY cloud.account.id\n | WHERE request_count \u003e 1\n | SORT max_prompt_tokens, max_request_tokens, max_completion_tokens DESC\n```\n\n**Detection Description**: This query identifies high-volume token usage which could be indicative of abuse or an attempted denial of service (DoS) attack. Monitoring for unusually high token counts (input or output) helps detect patterns that could slow down or overwhelm the system, potentially leading to service disruptions. Given each application may leverage a different token volume, we’ve chosen a simple threshold based on our existing experience that should cover basic use cases.\n\n**Security Relevance**: This form of monitoring helps detect potential concerns with system availability and performance. It helps in the early detection of DoS attacks or abusive behavior that could degrade service quality for legitimate users. By aggregating and analyzing token usage by account, security teams can pinpoint sources of potentially malicious traffic and take appropriate measures.\n\n#### Monitoring for latency anomalies\n\nLatency-based metrics can be a key indicator of underlying performance issues or security threats that overload the system. By monitoring processing delays, organizations can ensure that servers are operating as efficiently as expected.\n\n**Sample Detection**:\n\n```\nfrom logs-aws_bedrock.invocation-*\n | WHERE @timestamp \u003e NOW() - 1 DAY\n | EVAL response_delay_seconds = gen_ai.performance.start_response_time / 1000\n | WHERE response_delay_seconds \u003e 5\n | STATS max_response_delay = max(response_delay_seconds),\n request_count = count() BY gen_ai.user.id\n | WHERE request_count \u003e 3\n | SORT max_response_delay DESC\n```\n\n**Detection Description**: This updated query monitors the time it takes for an LLM to start sending a response after receiving a request, focusing on the initial response latency. It identifies significant delays by comparing the actual start of the response to typical response times, highlighting instances where these delays may be abnormally long.\n\n**Security Relevance**: Anomalous latencies can be symptomatic of issues such as network attacks (e.g., DDoS) or system inefficiencies that need to be addressed. By tracking and analyzing latency metrics, organizations can ensure that their systems are running efficiently and securely, and can quickly respond to potential threats that might manifest as abnormal delays.\n\n## Advanced LLM detection engineering use cases\n\nThis section explores potential use cases that could be addressed with an Elastic Security integration. It assumes that these fields are fully populated and that necessary security auditing enrichment features (e.g., Guardrails) have been implemented, either within AWS Bedrock or via a similar approach provided by the LLM vendor. In combination with the available data source and Elastic integration, detection rules can be built on top of these Guardrail requests and responses to detect misuse of LLMs in deployment.\n\n### Malicious model uploads and cross-tenant escalation\n\nA recent investigation into the Hugging Face Interface API revealed a significant risk where attackers could upload a maliciously crafted model to perform arbitrary code execution. This was achieved by using a Python Pickle file that, when deserialized, executed embedded malicious code. These vulnerabilities highlight the need for rigorous security measures to inspect and sanitize all inputs in AI-as-a-Service (AIAAS) platforms from the LLM, to the infrastructure that hosts the model, and the application API integration. Refer to [this article](https://www.wiz.io/blog/wiz-and-hugging-face-address-risks-to-ai-infrastructure) for more details.\n\n**Potential Detection Opportunity**: Use fields like ```gen_ai.request.model.id```, ```gen_ai.request.model.version```, and prompt ```gen_ai.completion``` to detect interactions with anomalous models. Monitoring unusual values or patterns in the model identifiers and version numbers along with inspecting the requested content (e.g., looking for typical Python Pickle serialization techniques) may indicate suspicious behavior. Similarly, a check prior to uploading the model using similar fields may block the upload. Cross-referencing additional fields like ```gen_ai.user.id``` can help identify malicious cross-tenant operations performing these types of activities.\n\n### Unauthorized URLs and external communication\n\nAs LLMs become more integrated into operational ecosystems, their ability to interact with external capabilities like email or webhooks can be exploited by attackers. To protect against these interactions, it’s important to implement detection rules that can identify suspicious or unauthorized activities based on the model’s outputs and subsequent integrations.\n\n**Potential Detection Opportunity**: Use fields like ```gen_ai.completion```, and ```gen_ai.security.regex_pattern_count``` to triage malicious external URLs and webhooks. These regex patterns need to be predefined based on well-known suspicious patterns.\n\n#### Hierarchical instruction prioritization\n\nLLMs are increasingly used in environments where they receive instructions from various sources (e.g., [ChatGPT Custom Instructions](https://openai.com/blog/custom-instructions-for-chatgpt)), which may not always have benign intentions. This build-your-own model workflow can lead to a range of potential security vulnerabilities, if the model treats all instructions with equal importance, and they go unchecked. Reference [here](https://arxiv.org/pdf/2404.13208.pdf). \n\n**Potential Detection Opportunity**: Monitor fields like ```gen_ai.model.instructions``` and ```gen_ai.completion``` to identify discrepancies between given instructions and the models responses which may indicate cases where models treat all instructions with equal importance. Additionally, analyze the ```gen_ai.similarity_score```, to discern how similar the response is from the original request.\n\n### Extended detections featuring additional Elastic rule types\n\nThis section introduces additional detection engineering techniques using some of Elastic’s rule types, Threshold, Indicator Match, and New Terms to provide a more nuanced and robust security posture. \n\n - **Threshold Rules**: Identify high frequency of denied requests over a short period of time grouped by ```gen_ai.user.id``` that could be indicative of abuse attempts. (e.g. OWASP’s LLM04) \n - **Indicator Match Rules**: Match known malicious threat intel provided indicators such as the LLM user ID like the ```gen_ai.user.id``` which contain these user attributes. (e.g. ``arn:aws:iam::12345678912:user/thethreatactor``) \n - **New Terms Rules**: Detect new or unusual terms in user prompts that could indicate usual activity outside of the normal usage for the user’s role, potentially indicating new malicious behaviors.\n\n## Summary\n\nElastic is pioneering the standardization of LLM-based fields across the generative AI landscape to enable security detections across the ecosystem. This initiative not only aligns with our ongoing enhancements in LLM integration and security strategies but also supports our broad security framework that safeguards both direct user interactions and the underlying system architectures. By promoting a uniform language among LLM vendors for enhanced detection and response capabilities, we aim to protect the entire ecosystem, making it more secure and dependable. Elastic invites all stakeholders within the industry, creators, maintainers, integrators and users, to adopt these standardized practices, thereby strengthening collective security measures and advancing industry-wide protections.\n\nAs we continue to add and enhance our integrations, starting with AWS Bedrock, we are strategizing to align other LLM-based integrations to the new standards we’ve set, paving the way for a unified experience across the Elastic ecosystem. The seamless overlap with existing Elasticsearch capabilities empowers users to leverage sophisticated search and analytics directly on the LLM data, driving existing workflows back to tools users are most comfortable with. \n\nCheck out the [LLM Safety Assessment](https://www.elastic.co/security/llm-safety-report), which delves deeper into these topics.\n\n__The release and timing of any features or functionality described in this post remain at Elastic's sole discretion. Any features or functionality not currently available may not be delivered on time or at all.__\n","code":"var Component=(()=\u003e{var h=Object.create;var o=Object.defineProperty;var u=Object.getOwnPropertyDescriptor;var g=Object.getOwnPropertyNames;var p=Object.getPrototypeOf,m=Object.prototype.hasOwnProperty;var f=(i,e)=\u003e()=\u003e(e||i((e={exports:{}}).exports,e),e.exports),y=(i,e)=\u003e{for(var n in e)o(i,n,{get:e[n],enumerable:!0})},r=(i,e,n,a)=\u003e{if(e\u0026\u0026typeof e==\"object\"||typeof e==\"function\")for(let s of g(e))!m.call(i,s)\u0026\u0026s!==n\u0026\u0026o(i,s,{get:()=\u003ee[s],enumerable:!(a=u(e,s))||a.enumerable});return i};var w=(i,e,n)=\u003e(n=i!=null?h(p(i)):{},r(e||!i||!i.__esModule?o(n,\"default\",{value:i,enumerable:!0}):n,i)),b=i=\u003er(o({},\"__esModule\",{value:!0}),i);var l=f((A,c)=\u003e{c.exports=_jsx_runtime});var S={};y(S,{default:()=\u003ek,frontmatter:()=\u003ev});var t=w(l()),v={title:\"Elastic Advances LLM Security with Standardized Fields and Integrations\",slug:\"elastic-advances-llm-security\",date:\"2024-05-06\",subtitle:\"Explore How Elastic's New LLM Security Strategies Enhance Detection, Standardization, and Protection Across the LLM Ecosystem\",description:\"Discover Elastic\\u2019s latest advancements in LLM security, focusing on standardized field integrations and enhanced detection capabilities. Learn how adopting these standards can safeguard your systems.\",author:[{slug:\"mika-ayenson\"},{slug:\"dan-kortschak\"},{slug:\"jake-king\"},{slug:\"susan-chang\"},{slug:\"andrew-kroh\"}],image:\"Security Labs Images 4.jpg\",category:[{slug:\"detection-science\"},{slug:\"machine-learning\"},{slug:\"generative-ai\"}]};function d(i){let e=Object.assign({h2:\"h2\",p:\"p\",a:\"a\",ol:\"ol\",li:\"li\",strong:\"strong\",img:\"img\",ul:\"ul\",h3:\"h3\",h4:\"h4\",code:\"code\",pre:\"pre\"},i.components);return(0,t.jsxs)(t.Fragment,{children:[(0,t.jsx)(e.h2,{id:\"introduction\",children:\"Introduction\"}),`\n`,(0,t.jsxs)(e.p,{children:[\"Last week, security researcher Mika Ayenson \",(0,t.jsx)(e.a,{href:\"https://www.elastic.co/security-labs/embedding-security-in-llm-workflows\",rel:\"nofollow\",children:\"authored a publication\"}),\" highlighting potential detection strategies and an LLM content auditing prototype solution via a proxy implemented during Elastic\\u2019s OnWeek event series. This post highlighted the importance of research pertaining to the safety of LLM technology implemented in different environments, and the research focus we\\u2019ve taken at Elastic Security Labs.\"]}),`\n`,(0,t.jsxs)(e.p,{children:[\"Given Elastic's unique vantage point leveraging LLM technology in our platform to power capabilities such as the Security \",(0,t.jsx)(e.a,{href:\"https://www.elastic.co/guide/en/security/current/security-assistant.html\",rel:\"nofollow\",children:\"AI Assistant\"}),\", our desire for more formal detection rules, integrations, and research content has been growing. This publication highlights some of the recent advancements we\\u2019ve made in LLM integrations, our thoughts around detections aligned with industry standards, and ECS field mappings.\"]}),`\n`,(0,t.jsx)(e.p,{children:\"We are committed to a comprehensive security strategy that protects not just the direct user-based LLM interactions but also the broader ecosystem surrounding them. This approach involves layers of security detection engineering opportunities to address not only the LLM requests/responses but also the underlying systems and integrations used by the models.\"}),`\n`,(0,t.jsx)(e.p,{children:\"These detection opportunities collectively help to secure the LLM ecosystem and can be broadly grouped into five categories:\"}),`\n`,(0,t.jsxs)(e.ol,{children:[`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Prompt and Response\"}),\": Detection mechanisms designed to identify and mitigate threats based on the growing variety of LLM interactions to ensure that all communications are securely audited.\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Infrastructure and Platform\"}),\": Implementing detections to protect the infrastructure hosting LLMs (including wearable AI Pin devices), including detecting threats against the data stored, processing activities, and server communication.\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"API and Integrations\"}),\": Detecting threats when interacting with LLM APIs and protecting integrations with other applications that ingest model output.\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Operational Processes and Data\"}),\": Monitoring operational processes (including in AI agents) and data flows while protecting data throughout its lifecycle.\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Compliance and Ethical\"}),\": Aligning detection strategies with well-adopted industry regulations and ethical standards.\"]}),`\n`]}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.img,{src:\"/assets/images/elastic-advances-llm-security/image4.png\",alt:\"Securing the LLM Ecosystem: five categories\",width:\"1440\",height:\"253\"}),`\nSecuring the LLM Ecosystem: five categories`]}),`\n`,(0,t.jsx)(e.p,{children:\"Another important consideration for these categories expands into who can best address risks or who is responsible for each category of risk pertaining to LLM systems.\"}),`\n`,(0,t.jsxs)(e.p,{children:[\"Similar to existing \",(0,t.jsx)(e.a,{href:\"https://www.cisecurity.org/insights/blog/shared-responsibility-cloud-security-what-you-need-to-know\",rel:\"nofollow\",children:\"Shared Security Responsibility\"}),\" models, Elastic has assessed four broad categories, which will eventually be expanded upon further as we continue our research into detection engineering strategies and integrations. Broadly, this publication considers security protections that involve the following responsibility owners:\"]}),`\n`,(0,t.jsxs)(e.ul,{children:[`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"LLM Creators\"}),\": Organizations who are building, designing, hosting, and training LLMs, such as OpenAI, Amazon Web Services, or Google\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"LLM Integrators\"}),\": Organizations and individuals who integrate existing LLM technologies produced by LLM Creators into other applications\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"LLM Maintainers\"}),\": Individuals who monitor operational LLMs for performance, reliability, security, and integrity use-cases and remain directly involved in the maintenance of the codebase, infrastructure, and software architecture\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Security Users\"}),\": People who are actively looking for vulnerabilities in systems through traditional testing mechanisms and means. This may expand beyond the traditional risks discussed in \",(0,t.jsx)(e.a,{href:\"https://llmtop10.com/\",rel:\"nofollow\",children:\"OWASP\\u2019s LLM Top 10\"}),\" into risks associated with software and infrastructure surrounding these systems\"]}),`\n`]}),`\n`,(0,t.jsxs)(e.p,{children:[\"This broader perspective showcases a unified approach to LLM detection engineering that begins with ingesting data using native Elastic \",(0,t.jsx)(e.a,{href:\"https://www.elastic.co/integrations\",rel:\"nofollow\",children:\"integrations\"}),\"; in this example, we highlight the AWS Bedrock Model Invocation use case.\"]}),`\n`,(0,t.jsx)(e.h2,{id:\"integrating-llm-logs-into-elastic\",children:\"Integrating LLM logs into Elastic\"}),`\n`,(0,t.jsxs)(e.p,{children:[\"Elastic integrations simplify data ingestion into Elastic from various sources, ultimately enhancing our security solution. These integrations are managed through Fleet in Kibana, allowing users to easily deploy and manage data within the Elastic Agent. Users can quickly adapt Elastic to new data sources by selecting and configuring integrations through Fleet. For more details, see Elastic\\u2019s \",(0,t.jsx)(e.a,{href:\"https://www.elastic.co/blog/elastic-agent-and-fleet-make-it-easier-to-integrate-your-systems-with-elastic\",rel:\"nofollow\",children:\"blog\"}),\" on making it easier to integrate your systems with Elastic.\"]}),`\n`,(0,t.jsxs)(e.p,{children:[\"The initial ONWeek work undertaken by the team involved a simple proxy solution that extracted fields from interactions with the Elastic Security AI Assistant. This prototype was deployed alongside the Elastic Stack and consumed data from a vendor solution that lacked security auditing capabilities. While this initial implementation proved conceptually interesting, it prompted the team to invest time in assessing existing Elastic integrations from one of our cloud provider partners, \",(0,t.jsx)(e.a,{href:\"https://docs.elastic.co/integrations/aws\",rel:\"nofollow\",children:\"Amazon Web Services\"}),\". This methodology guarantees streamlined accessibility for our users, offering seamless, one-click integrations for data ingestion. All ingest pipelines conform to ECS/OTel normalization standards, encompassing comprehensive content, including dashboards, within a unified package. Furthermore, this strategy positions us to leverage additional existing integrations, such as Azure and GCP, for future LLM-focused integrations.\"]}),`\n`,(0,t.jsx)(e.h3,{id:\"vendor-selection-and-api-capabilities\",children:\"Vendor selection and API capabilities\"}),`\n`,(0,t.jsx)(e.p,{children:\"When selecting which LLM providers to create integrations for, we looked at the types of fields we need to ingest for our security use cases. For the starting set of rules detailed here, we needed information such as timestamps and token counts; we found that vendors such as Azure OpenAI provided content moderation filtering on the prompts and generated content. LangSmith (part of the LangChain tooling) was also a top contender, as the data contains the type of vendor used (e.g., OpenAI, Bedrock, etc.) and all the respective metadata. However, this required that the user also have LangSmith set up. For this implementation, we decided to go with first-party supported logs from a vendor that provides LLMs.\"}),`\n`,(0,t.jsxs)(e.p,{children:[\"As we went deeper into potential integrations, we decided to land with AWS Bedrock, for a few specific reasons. Firstly, Bedrock logging has \",(0,t.jsx)(e.a,{href:\"https://docs.aws.amazon.com/bedrock/latest/userguide/model-invocation-logging.html\",rel:\"nofollow\",children:\"first-party support\"}),\" to Amazon CloudWatch Logs and Amazon S3. Secondly, the logging is built specifically for model invocation, including data specific to LLMs (as opposed to other operations and machine learning models), including prompts and responses, and guardrail/content filtering. Thirdly, Elastic already has a \",(0,t.jsx)(e.a,{href:\"https://www.elastic.co/integrations/data-integrations?solution=all-solutions\u0026category=aws\",rel:\"nofollow\",children:\"robust catalog\"}),\" of integrations with AWS, so we were able to quickly create a new integration for AWS Bedrock model invocation logs specifically. The next section will dive into this new integration, which you can use to capture your Bedrock model invocation logs in the Elastic stack.\"]}),`\n`,(0,t.jsx)(e.h3,{id:\"elastic-aws-bedrock-model-integration\",children:\"Elastic AWS Bedrock model integration\"}),`\n`,(0,t.jsx)(e.h4,{id:\"overview\",children:\"Overview\"}),`\n`,(0,t.jsxs)(e.p,{children:[\"The new Elastic \",(0,t.jsx)(e.a,{href:\"https://docs.elastic.co/integrations/aws_bedrock\",rel:\"nofollow\",children:\"AWS Bedrock\"}),\" integration for model invocation logs provides a way to collect and analyze data from AWS services quickly, specifically focusing on the model. This integration provides two primary methods for log collection: Amazon S3 buckets and Amazon CloudWatch. Each method is optimized to offer robust data retrieval capabilities while considering cost-effectiveness and performance efficiency. We use these LLM-specific fields collected for detection engineering purposes.\"]}),`\n`,(0,t.jsx)(e.p,{children:\"Note: While this integration does not cover every proposed field, it does standardize existing AWS Bedrock fields into the gen_ai category. This approach makes it easier to maintain detection rules across various data sources, minimizing the need for separate rules for each LLM vendor.\"}),`\n`,(0,t.jsx)(e.h3,{id:\"configuring-integration-data-collection-method\",children:\"Configuring integration data collection method\"}),`\n`,(0,t.jsx)(e.h4,{id:\"collecting-logs-from-s3-buckets\",children:\"Collecting logs from S3 buckets\"}),`\n`,(0,t.jsx)(e.p,{children:\"This integration allows for efficient log collection from S3 buckets using two distinct methods:\"}),`\n`,(0,t.jsxs)(e.ul,{children:[`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"SQS Notification\"}),\": This is the preferred method for collecting. It involves reading S3 notification events from an AWS Simple Queue Service (SQS) queue. This method is less costly and provides better performance compared to direct polling.\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Direct S3 Bucket Polling\"}),\": This method directly polls a list of S3 objects within an S3 bucket and is recommended only when SQS notifications cannot be configured. This approach is more resource-intensive, but it provides an alternative when SQS is not feasible.\"]}),`\n`]}),`\n`,(0,t.jsx)(e.h4,{id:\"collecting-logs-from-cloudwatch\",children:\"Collecting logs from CloudWatch\"}),`\n`,(0,t.jsx)(e.p,{children:\"Logs can also be collected directly from CloudWatch, where the integration taps into all log streams within a specified log group using the filterLogEvents AWS API. This method is an alternative to using S3 buckets altogether.\"}),`\n`,(0,t.jsx)(e.h4,{id:\"integration-installation\",children:\"Integration installation\"}),`\n`,(0,t.jsxs)(e.p,{children:[\"The integration can be set up within the Elastic Agent by following normal Elastic \",(0,t.jsx)(e.a,{href:\"https://www.elastic.co/guide/en/fleet/current/add-integration-to-policy.html\",rel:\"nofollow\",children:\"installation steps\"}),\".\"]}),`\n`,(0,t.jsxs)(e.ol,{children:[`\n`,(0,t.jsx)(e.li,{children:\"Navigate to the AWS Bedrock integration\"}),`\n`,(0,t.jsxs)(e.li,{children:[\"Configure the \",(0,t.jsx)(e.code,{children:\"queue_url\"}),\" for SQS or \",(0,t.jsx)(e.code,{children:\"bucket_arn\"}),\" for direct S3 polling.\"]}),`\n`]}),`\n`,(0,t.jsx)(e.p,{children:(0,t.jsx)(e.img,{src:\"/assets/images/elastic-advances-llm-security/image2.png\",alt:\"New AWS Bedrock Elastic Integration\",width:\"1151\",height:\"611\"})}),`\n`,(0,t.jsx)(e.h3,{id:\"configuring-bedrock-guardrails\",children:\"Configuring Bedrock Guardrails\"}),`\n`,(0,t.jsxs)(e.p,{children:[\"AWS Bedrock \",(0,t.jsx)(e.a,{href:\"https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html\",rel:\"nofollow\",children:\"Guardrails\"}),\" enable organizations to enforce security by setting policies that limit harmful or undesirable content in LLM interactions. These guardrails can be customized to include denied topics to block specific subjects and content filters to moderate the severity of content in prompts and responses. Additionally, word and sensitive information filters block profanity and mask personally identifiable information (PII), ensuring interactions comply with privacy and ethical standards. This feature helps control the content generated and consumed by LLMs and, ideally, reduces the risk associated with malicious prompts.\"]}),`\n`,(0,t.jsxs)(e.p,{children:[\"Note: other guardrail examples include Azure OpenAI\\u2019s \",(0,t.jsx)(e.a,{href:\"https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/content-filter?tabs=warning%2Cpython-new\",rel:\"nofollow\",children:\"content and response\"}),\" filters, which we aim to capture in our proposed LLM standardized fields for vendor-agnostic logging.\"]}),`\n`,(0,t.jsx)(e.p,{children:(0,t.jsx)(e.img,{src:\"/assets/images/elastic-advances-llm-security/image1.png\",alt:\"AWS Bedrock Guardrails\",width:\"1440\",height:\"1213\"})}),`\n`,(0,t.jsxs)(e.p,{children:[\"When LLM interaction content triggers these filters, the response objects are populated with \",(0,t.jsx)(e.code,{children:\"amazon-bedrock-trace\"}),\" and \",(0,t.jsx)(e.code,{children:\"amazon-bedrock-guardrailAction\"}),\" fields, providing details about the Guardrails outcome, and nested fields indicating whether the input matched the content filter. This response object enrichment with detailed filter outcomes improves the overall data quality, which becomes particularly effective when these nested fields are aligned with ECS mappings.\"]}),`\n`,(0,t.jsx)(e.h3,{id:\"the-importance-of-ecs-mappings\",children:\"The importance of ECS mappings\"}),`\n`,(0,t.jsx)(e.p,{children:\"Field mapping is a critical part of the process for integration development, primarily to improve our ability to write broadly scoped and widely compatible detection rules. By standardizing how data is ingested and analyzed, organizations can more effectively detect, investigate, and respond to potential threats or anomalies in logs ingested into Elastic, and in this specific case, LLM logs.\"}),`\n`,(0,t.jsxs)(e.p,{children:[\"Our initial mapping begins by investigating fields provided by the vendor and existing gaps, leading to the establishment of a comprehensive schema tailored to the nuances of LLM operations. We then reconciled the fields to align with our OpenTelemetry \",(0,t.jsx)(e.a,{href:\"https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/llm-spans.md\",rel:\"nofollow\",children:\"semantic conventions\"}),\". These mappings shown in the table cover various aspects:\"]}),`\n`,(0,t.jsxs)(e.ul,{children:[`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"General LLM Interaction Fields\"}),\": These include basic but critical information such as the content of requests and responses, token counts, timestamps, and user identifiers, which are foundational for understanding the context and scope of interactions.\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Text Quality and Relevance Metric Fields\"}),\": Fields measuring text readability, complexity, and similarity scores help assess the quality and relevance of model outputs, ensuring that responses are not only accurate but also user-appropriate.\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Security Metric Fields\"}),\": This class of metrics is important for identifying and quantifying potential security risks, including regex pattern matches and scores related to jailbreak attempts, prompt injections, and other security concerns such as hallucination consistency and refusal responses.\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Policy Enforcement Fields\"}),\": These fields capture details about specific policy enforcement actions taken during interactions, such as blocking or modifying content, and provide insights into the confidence levels of these actions, enhancing security and compliance measures.\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Threat Analysis Fields\"}),\": Focused on identifying and quantifying potential threats, these fields provide a detailed analysis of risk scores, types of detected threats, and the measures taken to mitigate these threats.\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Compliance Fields\"}),\": These fields help ensure that interactions comply with various regulatory standards, detailing any compliance violations detected and the specific rules that were triggered during the interaction.\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"OWASP Top Ten Specific Fields\"}),\": These fields map directly to the OWASP Top 10 risks for LLM applications, helping to align security measures with recognized industry standards.\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Sentiment and Toxicity Analysis Fields\"}),\": These analyses are essential to gauge the tone and detect any harmful content in the response, ensuring that outputs align with ethical guidelines and standards. This includes sentiment scores, toxicity levels, and identification of inappropriate or sensitive content.\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Performance Metric Fields\"}),\": These fields measure the performance aspects of LLM interactions, including response times and sizes of requests and responses, which are critical for optimizing system performance and ensuring efficient operations.\"]}),`\n`]}),`\n`,(0,t.jsx)(e.p,{children:(0,t.jsx)(e.img,{src:\"/assets/images/elastic-advances-llm-security/image5.png\",alt:\"General, quality, security, policy, and threat analysis fields\",width:\"1014\",height:\"870\"})}),`\n`,(0,t.jsx)(e.p,{children:(0,t.jsx)(e.img,{src:\"/assets/images/elastic-advances-llm-security/image6.png\",alt:\"Compliance, OWASP top 10, security tools analysis, sentiment and toxicity analysis, and performance fields\",width:\"1014\",height:\"638\"})}),`\n`,(0,t.jsxs)(e.p,{children:[\"Note: See the \",(0,t.jsx)(e.a,{href:\"https://gist.github.com/Mikaayenson/cf03f6d3998e16834c1274f007f2666c\",rel:\"nofollow\",children:\"gist\"}),\" for an extended table of fields proposed.\"]}),`\n`,(0,t.jsx)(e.p,{children:\"These fields are mapped by our LLM integrations and ultimately used within our detections. As we continue to understand the threat landscape, we will continue to refine these fields to ensure additional fields populated by other LLM vendors are standardized and conceptually reflected within the mapping.\"}),`\n`,(0,t.jsx)(e.h3,{id:\"broader-implications-and-benefits-of-standardization\",children:\"Broader Implications and Benefits of Standardization\"}),`\n`,(0,t.jsx)(e.p,{children:\"Standardizing security fields within the LLM ecosystem (e.g., user interaction and application integration) facilitates a unified approach to the security domain. Elastic endeavors to lead the charge by defining and promoting a set of standard fields. This effort not only enhances the security posture of individual organizations but also fosters a safer industry.\"}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Integration with Security Tools\"}),\": By standardizing responses from LLM-related security tools, it enriches security analysis fields that can be shipped with the original LLM vendor content to a security solution. If operationally chained together in the LLM application\\u2019s ecosystem, security tools can audit each invocation request and response. Security teams can then leverage these fields to build complex detection mechanisms that can identify subtle signs of misuse or vulnerabilities within LLM interactions.\"]}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Consistency Across Vendors\"}),\": Insisting that all LLM vendors adopt these standard fields drives a singular goal to effectively protect applications, but in a way that establishes a baseline that all industry users can adhere to. Users are encouraged to align to a common schema regardless of the platform or tool.\"]}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Enhanced Detection Engineering\"}),\": With these standard fields, detection engineering becomes more robust and the change of false positives is decreased. Security engineers can create effective rules that identify potential threats across different models, interactions, and ecosystems. This consistency is especially important for organizations that rely on multiple LLMs or security tools and need to maintain a unified platform.\"]}),`\n`,(0,t.jsx)(e.h4,{id:\"sample-llm-specific-fields-aws-bedrock-use-case\",children:\"Sample LLM-specific fields: AWS Bedrock use case\"}),`\n`,(0,t.jsxs)(e.p,{children:[\"Based on the integration\\u2019s ingestion pipeline, field mappings, and processors, the AWS Bedrock data is cleaned up, standardized, and mapped to Elastic Common Schema (\",(0,t.jsx)(e.a,{href:\"https://www.elastic.co/guide/en/ecs/current/ecs-reference.html\",rel:\"nofollow\",children:\"ECS\"}),\") fields. The core Bedrock fields are then introduced under the \",(0,t.jsx)(e.code,{children:\"aws.bedrock\"}),\" group which includes details about the model invocation like requests, responses, and token counts. The integration populates additional fields tailored for the LLM to provide deeper insights into the model\\u2019s interactions which are later used in our detections.\"]}),`\n`,(0,t.jsx)(e.h3,{id:\"llm-detection-engineering-examples\",children:\"LLM detection engineering examples\"}),`\n`,(0,t.jsxs)(e.p,{children:[\"With the standardized fields and the Elastic AWS Bedrock integration, we can begin crafting detection engineering rules that showcase the proposed capability with varying complexity. The below examples are written using \",(0,t.jsx)(e.a,{href:\"https://www.elastic.co/guide/en/security/8.13/rules-ui-create.html#create-esql-rule\",rel:\"nofollow\",children:\"ES|QL\"}),\".\"]}),`\n`,(0,t.jsxs)(e.p,{children:[\"Note: Check out the detection-rules \",(0,t.jsx)(e.a,{href:\"https://github.com/elastic/detection-rules/tree/main/hunting\",rel:\"nofollow\",children:\"hunting\"}),\" directory and \",(0,t.jsx)(e.a,{href:\"https://github.com/elastic/detection-rules/tree/main/rules/integrations/aws_bedrock\",rel:\"nofollow\",children:(0,t.jsx)(e.code,{children:\"aws_bedrock\"})}),\" rules for more details about these queries.\"]}),`\n`,(0,t.jsx)(e.h4,{id:\"basic-detection-of-sensitive-content-refusal\",children:\"Basic detection of sensitive content refusal\"}),`\n`,(0,t.jsx)(e.p,{children:\"With current policies and standards on sensitive topics within the organization, it is important to have mechanisms in place to ensure LLMs also adhere to compliance and ethical standards. Organizations have an opportunity to monitor and capture instances where an LLM directly refuses to respond to sensitive topics.\"}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Sample Detection\"}),\":\"]}),`\n`,(0,t.jsx)(e.pre,{children:(0,t.jsx)(e.code,{children:`from logs-aws_bedrock.invocation-*\n | WHERE @timestamp \u003e NOW() - 1 DAY\n AND (\n gen_ai.completion LIKE \"*I cannot provide any information about*\"\n AND gen_ai.response.finish_reasons LIKE \"*end_turn*\"\n )\n | STATS user_request_count = count() BY gen_ai.user.id\n | WHERE user_request_count \u003e= 3\n`})}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Detection Description\"}),`: This query is used to detect instances where the model explicitly refuses to provide information on potentially sensitive or restricted topics multiple times. Combined with predefined formatted outputs, the use of specific phrases like \"I cannot provide any information about\" within the output content indicates that the model has been triggered by a user prompt to discuss something it's programmed to treat as confidential or inappropriate.`]}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Security Relevance\"}),\": Monitoring LLM refusals helps to identify attempts to probe the model for sensitive data or to exploit it in a manner that could lead to the leakage of proprietary or restricted information. By analyzing the patterns and frequency of these refusals, security teams can investigate if there are targeted attempts to breach information security policies.\"]}),`\n`,(0,t.jsx)(e.h3,{id:\"potential-denial-of-service-or-resource-exhaustion-attacks\",children:\"Potential denial of service or resource exhaustion attacks\"}),`\n`,(0,t.jsx)(e.p,{children:\"Due to the engineering design of LLMs being highly computational and data-intensive, they are susceptible to resource exhaustion and denial of service (DoS) attacks. High usage patterns may indicate abuse or malicious activities designed to degrade the LLM\\u2019s availability. Due to the ambiguity of correlating prompt request size directly with token count, it is essential to consider the implications of high token counts in prompts which may not always result from larger requests bodies. Token count and character counts depend on the specific model, where each can be different and is related to how embeddings are generated.\"}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Sample Detection\"}),\":\"]}),`\n`,(0,t.jsx)(e.pre,{children:(0,t.jsx)(e.code,{children:`from logs-aws_bedrock.invocation-*\n | WHERE @timestamp \u003e NOW() - 1 DAY\n AND (\n gen_ai.usage.prompt_tokens \u003e 8000 OR\n gen_ai.usage.completion_tokens \u003e 8000 OR\n gen_ai.performance.request_size \u003e 8000\n )\n | STATS max_prompt_tokens = max(gen_ai.usage.prompt_tokens),\n max_request_tokens = max(gen_ai.performance.request_size),\n max_completion_tokens = max(gen_ai.usage.completion_tokens),\n request_count = count() BY cloud.account.id\n | WHERE request_count \u003e 1\n | SORT max_prompt_tokens, max_request_tokens, max_completion_tokens DESC\n`})}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Detection Description\"}),\": This query identifies high-volume token usage which could be indicative of abuse or an attempted denial of service (DoS) attack. Monitoring for unusually high token counts (input or output) helps detect patterns that could slow down or overwhelm the system, potentially leading to service disruptions. Given each application may leverage a different token volume, we\\u2019ve chosen a simple threshold based on our existing experience that should cover basic use cases.\"]}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Security Relevance\"}),\": This form of monitoring helps detect potential concerns with system availability and performance. It helps in the early detection of DoS attacks or abusive behavior that could degrade service quality for legitimate users. By aggregating and analyzing token usage by account, security teams can pinpoint sources of potentially malicious traffic and take appropriate measures.\"]}),`\n`,(0,t.jsx)(e.h4,{id:\"monitoring-for-latency-anomalies\",children:\"Monitoring for latency anomalies\"}),`\n`,(0,t.jsx)(e.p,{children:\"Latency-based metrics can be a key indicator of underlying performance issues or security threats that overload the system. By monitoring processing delays, organizations can ensure that servers are operating as efficiently as expected.\"}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Sample Detection\"}),\":\"]}),`\n`,(0,t.jsx)(e.pre,{children:(0,t.jsx)(e.code,{children:`from logs-aws_bedrock.invocation-*\n | WHERE @timestamp \u003e NOW() - 1 DAY\n | EVAL response_delay_seconds = gen_ai.performance.start_response_time / 1000\n | WHERE response_delay_seconds \u003e 5\n | STATS max_response_delay = max(response_delay_seconds),\n request_count = count() BY gen_ai.user.id\n | WHERE request_count \u003e 3\n | SORT max_response_delay DESC\n`})}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Detection Description\"}),\": This updated query monitors the time it takes for an LLM to start sending a response after receiving a request, focusing on the initial response latency. It identifies significant delays by comparing the actual start of the response to typical response times, highlighting instances where these delays may be abnormally long.\"]}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Security Relevance\"}),\": Anomalous latencies can be symptomatic of issues such as network attacks (e.g., DDoS) or system inefficiencies that need to be addressed. By tracking and analyzing latency metrics, organizations can ensure that their systems are running efficiently and securely, and can quickly respond to potential threats that might manifest as abnormal delays.\"]}),`\n`,(0,t.jsx)(e.h2,{id:\"advanced-llm-detection-engineering-use-cases\",children:\"Advanced LLM detection engineering use cases\"}),`\n`,(0,t.jsx)(e.p,{children:\"This section explores potential use cases that could be addressed with an Elastic Security integration. It assumes that these fields are fully populated and that necessary security auditing enrichment features (e.g., Guardrails) have been implemented, either within AWS Bedrock or via a similar approach provided by the LLM vendor. In combination with the available data source and Elastic integration, detection rules can be built on top of these Guardrail requests and responses to detect misuse of LLMs in deployment.\"}),`\n`,(0,t.jsx)(e.h3,{id:\"malicious-model-uploads-and-cross-tenant-escalation\",children:\"Malicious model uploads and cross-tenant escalation\"}),`\n`,(0,t.jsxs)(e.p,{children:[\"A recent investigation into the Hugging Face Interface API revealed a significant risk where attackers could upload a maliciously crafted model to perform arbitrary code execution. This was achieved by using a Python Pickle file that, when deserialized, executed embedded malicious code. These vulnerabilities highlight the need for rigorous security measures to inspect and sanitize all inputs in AI-as-a-Service (AIAAS) platforms from the LLM, to the infrastructure that hosts the model, and the application API integration. Refer to \",(0,t.jsx)(e.a,{href:\"https://www.wiz.io/blog/wiz-and-hugging-face-address-risks-to-ai-infrastructure\",rel:\"nofollow\",children:\"this article\"}),\" for more details.\"]}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Potential Detection Opportunity\"}),\": Use fields like \",(0,t.jsx)(e.code,{children:\"gen_ai.request.model.id\"}),\", \",(0,t.jsx)(e.code,{children:\"gen_ai.request.model.version\"}),\", and prompt \",(0,t.jsx)(e.code,{children:\"gen_ai.completion\"}),\" to detect interactions with anomalous models. Monitoring unusual values or patterns in the model identifiers and version numbers along with inspecting the requested content (e.g., looking for typical Python Pickle serialization techniques) may indicate suspicious behavior. Similarly, a check prior to uploading the model using similar fields may block the upload. Cross-referencing additional fields like \",(0,t.jsx)(e.code,{children:\"gen_ai.user.id\"}),\" can help identify malicious cross-tenant operations performing these types of activities.\"]}),`\n`,(0,t.jsx)(e.h3,{id:\"unauthorized-urls-and-external-communication\",children:\"Unauthorized URLs and external communication\"}),`\n`,(0,t.jsx)(e.p,{children:\"As LLMs become more integrated into operational ecosystems, their ability to interact with external capabilities like email or webhooks can be exploited by attackers. To protect against these interactions, it\\u2019s important to implement detection rules that can identify suspicious or unauthorized activities based on the model\\u2019s outputs and subsequent integrations.\"}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Potential Detection Opportunity\"}),\": Use fields like \",(0,t.jsx)(e.code,{children:\"gen_ai.completion\"}),\", and \",(0,t.jsx)(e.code,{children:\"gen_ai.security.regex_pattern_count\"}),\" to triage malicious external URLs and webhooks. These regex patterns need to be predefined based on well-known suspicious patterns.\"]}),`\n`,(0,t.jsx)(e.h4,{id:\"hierarchical-instruction-prioritization\",children:\"Hierarchical instruction prioritization\"}),`\n`,(0,t.jsxs)(e.p,{children:[\"LLMs are increasingly used in environments where they receive instructions from various sources (e.g., \",(0,t.jsx)(e.a,{href:\"https://openai.com/blog/custom-instructions-for-chatgpt\",rel:\"nofollow\",children:\"ChatGPT Custom Instructions\"}),\"), which may not always have benign intentions. This build-your-own model workflow can lead to a range of potential security vulnerabilities, if the model treats all instructions with equal importance, and they go unchecked. Reference \",(0,t.jsx)(e.a,{href:\"https://arxiv.org/pdf/2404.13208.pdf\",rel:\"nofollow\",children:\"here\"}),\".\"]}),`\n`,(0,t.jsxs)(e.p,{children:[(0,t.jsx)(e.strong,{children:\"Potential Detection Opportunity\"}),\": Monitor fields like \",(0,t.jsx)(e.code,{children:\"gen_ai.model.instructions\"}),\" and \",(0,t.jsx)(e.code,{children:\"gen_ai.completion\"}),\" to identify discrepancies between given instructions and the models responses which may indicate cases where models treat all instructions with equal importance. Additionally, analyze the \",(0,t.jsx)(e.code,{children:\"gen_ai.similarity_score\"}),\", to discern how similar the response is from the original request.\"]}),`\n`,(0,t.jsx)(e.h3,{id:\"extended-detections-featuring-additional-elastic-rule-types\",children:\"Extended detections featuring additional Elastic rule types\"}),`\n`,(0,t.jsx)(e.p,{children:\"This section introduces additional detection engineering techniques using some of Elastic\\u2019s rule types, Threshold, Indicator Match, and New Terms to provide a more nuanced and robust security posture.\"}),`\n`,(0,t.jsxs)(e.ul,{children:[`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Threshold Rules\"}),\": Identify high frequency of denied requests over a short period of time grouped by \",(0,t.jsx)(e.code,{children:\"gen_ai.user.id\"}),\" that could be indicative of abuse attempts. (e.g. OWASP\\u2019s LLM04)\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"Indicator Match Rules\"}),\": Match known malicious threat intel provided indicators such as the LLM user ID like the \",(0,t.jsx)(e.code,{children:\"gen_ai.user.id\"}),\" which contain these user attributes. (e.g. \",(0,t.jsx)(e.code,{children:\"arn:aws:iam::12345678912:user/thethreatactor\"}),\")\"]}),`\n`,(0,t.jsxs)(e.li,{children:[(0,t.jsx)(e.strong,{children:\"New Terms Rules\"}),\": Detect new or unusual terms in user prompts that could indicate usual activity outside of the normal usage for the user\\u2019s role, potentially indicating new malicious behaviors.\"]}),`\n`]}),`\n`,(0,t.jsx)(e.h2,{id:\"summary\",children:\"Summary\"}),`\n`,(0,t.jsx)(e.p,{children:\"Elastic is pioneering the standardization of LLM-based fields across the generative AI landscape to enable security detections across the ecosystem. This initiative not only aligns with our ongoing enhancements in LLM integration and security strategies but also supports our broad security framework that safeguards both direct user interactions and the underlying system architectures. By promoting a uniform language among LLM vendors for enhanced detection and response capabilities, we aim to protect the entire ecosystem, making it more secure and dependable. Elastic invites all stakeholders within the industry, creators, maintainers, integrators and users, to adopt these standardized practices, thereby strengthening collective security measures and advancing industry-wide protections.\"}),`\n`,(0,t.jsx)(e.p,{children:\"As we continue to add and enhance our integrations, starting with AWS Bedrock, we are strategizing to align other LLM-based integrations to the new standards we\\u2019ve set, paving the way for a unified experience across the Elastic ecosystem. The seamless overlap with existing Elasticsearch capabilities empowers users to leverage sophisticated search and analytics directly on the LLM data, driving existing workflows back to tools users are most comfortable with.\"}),`\n`,(0,t.jsxs)(e.p,{children:[\"Check out the \",(0,t.jsx)(e.a,{href:\"https://www.elastic.co/security/llm-safety-report\",rel:\"nofollow\",children:\"LLM Safety Assessment\"}),\", which delves deeper into these topics.\"]}),`\n`,(0,t.jsx)(e.p,{children:(0,t.jsx)(e.strong,{children:\"The release and timing of any features or functionality described in this post remain at Elastic's sole discretion. Any features or functionality not currently available may not be delivered on time or at all.\"})})]})}function L(i={}){let{wrapper:e}=i.components||{};return e?(0,t.jsx)(e,Object.assign({},i,{children:(0,t.jsx)(d,i)})):d(i)}var k=L;return b(S);})();\n;return Component;"},"_id":"articles/elastic-advances-llm-security.mdx","_raw":{"sourceFilePath":"articles/elastic-advances-llm-security.mdx","sourceFileName":"elastic-advances-llm-security.mdx","sourceFileDir":"articles","contentType":"mdx","flattenedPath":"articles/elastic-advances-llm-security"},"type":"Article","imageUrl":"/assets/images/elastic-advances-llm-security/Security Labs Images 4.jpg","readingTime":"21 min read","series":"","url":"/elastic-advances-llm-security","headings":[{"level":2,"title":"Introduction","href":"#introduction"},{"level":2,"title":"Integrating LLM logs into Elastic","href":"#integrating-llm-logs-into-elastic"},{"level":3,"title":"Vendor selection and API capabilities","href":"#vendor-selection-and-api-capabilities"},{"level":3,"title":"Elastic AWS Bedrock model integration","href":"#elastic-aws-bedrock-model-integration"},{"level":4,"title":"Overview","href":"#overview"},{"level":3,"title":"Configuring integration data collection method","href":"#configuring-integration-data-collection-method"},{"level":4,"title":"Collecting logs from S3 buckets","href":"#collecting-logs-from-s3-buckets"},{"level":4,"title":"Collecting logs from CloudWatch","href":"#collecting-logs-from-cloudwatch"},{"level":4,"title":"Integration installation","href":"#integration-installation"},{"level":3,"title":"Configuring Bedrock Guardrails","href":"#configuring-bedrock-guardrails"},{"level":3,"title":"The importance of ECS mappings","href":"#the-importance-of-ecs-mappings"},{"level":3,"title":"Broader Implications and Benefits of Standardization","href":"#broader-implications-and-benefits-of-standardization"},{"level":4,"title":"Sample LLM-specific fields: AWS Bedrock use case","href":"#sample-llm-specific-fields-aws-bedrock-use-case"},{"level":3,"title":"LLM detection engineering examples","href":"#llm-detection-engineering-examples"},{"level":4,"title":"Basic detection of sensitive content refusal","href":"#basic-detection-of-sensitive-content-refusal"},{"level":3,"title":"Potential denial of service or resource exhaustion attacks","href":"#potential-denial-of-service-or-resource-exhaustion-attacks"},{"level":4,"title":"Monitoring for latency anomalies","href":"#monitoring-for-latency-anomalies"},{"level":2,"title":"Advanced LLM detection engineering use cases","href":"#advanced-llm-detection-engineering-use-cases"},{"level":3,"title":"Malicious model uploads and cross-tenant escalation","href":"#malicious-model-uploads-and-cross-tenant-escalation"},{"level":3,"title":"Unauthorized URLs and external communication","href":"#unauthorized-urls-and-external-communication"},{"level":4,"title":"Hierarchical instruction prioritization","href":"#hierarchical-instruction-prioritization"},{"level":3,"title":"Extended detections featuring additional Elastic rule types","href":"#extended-detections-featuring-additional-elastic-rule-types"},{"level":2,"title":"Summary","href":"#summary"}],"author":[{"title":"Mika Ayenson, PhD","slug":"mika-ayenson","body":{"raw":"","code":"var Component=(()=\u003e{var x=Object.create;var a=Object.defineProperty;var f=Object.getOwnPropertyDescriptor;var _=Object.getOwnPropertyNames;var g=Object.getPrototypeOf,j=Object.prototype.hasOwnProperty;var l=(t,n)=\u003e()=\u003e(n||t((n={exports:{}}).exports,n),n.exports),M=(t,n)=\u003e{for(var e in n)a(t,e,{get:n[e],enumerable:!0})},i=(t,n,e,s)=\u003e{if(n\u0026\u0026typeof n==\"object\"||typeof n==\"function\")for(let o of _(n))!j.call(t,o)\u0026\u0026o!==e\u0026\u0026a(t,o,{get:()=\u003en[o],enumerable:!(s=f(n,o))||s.enumerable});return t};var d=(t,n,e)=\u003e(e=t!=null?x(g(t)):{},i(n||!t||!t.__esModule?a(e,\"default\",{value:t,enumerable:!0}):e,t)),p=t=\u003ei(a({},\"__esModule\",{value:!0}),t);var m=l((F,c)=\u003e{c.exports=_jsx_runtime});var k={};M(k,{default:()=\u003eh,frontmatter:()=\u003ey});var r=d(m()),y={title:\"Mika Ayenson, PhD\",slug:\"mika-ayenson\"};function u(t){return(0,r.jsx)(r.Fragment,{})}function D(t={}){let{wrapper:n}=t.components||{};return n?(0,r.jsx)(n,Object.assign({},t,{children:(0,r.jsx)(u,t)})):u(t)}var h=D;return p(k);})();\n;return Component;"},"_id":"authors/mika-ayenson.mdx","_raw":{"sourceFilePath":"authors/mika-ayenson.mdx","sourceFileName":"mika-ayenson.mdx","sourceFileDir":"authors","contentType":"mdx","flattenedPath":"authors/mika-ayenson"},"type":"Author","imageUrl":"","url":"/authors/mika-ayenson"},{"title":"Dan Kortschak, PhD","slug":"dan-kortschak","body":{"raw":"","code":"var Component=(()=\u003e{var x=Object.create;var a=Object.defineProperty;var f=Object.getOwnPropertyDescriptor;var _=Object.getOwnPropertyNames;var d=Object.getPrototypeOf,g=Object.prototype.hasOwnProperty;var h=(t,n)=\u003e()=\u003e(n||t((n={exports:{}}).exports,n),n.exports),j=(t,n)=\u003e{for(var e in n)a(t,e,{get:n[e],enumerable:!0})},c=(t,n,e,s)=\u003e{if(n\u0026\u0026typeof n==\"object\"||typeof n==\"function\")for(let o of _(n))!g.call(t,o)\u0026\u0026o!==e\u0026\u0026a(t,o,{get:()=\u003en[o],enumerable:!(s=f(n,o))||s.enumerable});return t};var l=(t,n,e)=\u003e(e=t!=null?x(d(t)):{},c(n||!t||!t.__esModule?a(e,\"default\",{value:t,enumerable:!0}):e,t)),D=t=\u003ec(a({},\"__esModule\",{value:!0}),t);var i=h((X,u)=\u003e{u.exports=_jsx_runtime});var C={};j(C,{default:()=\u003eM,frontmatter:()=\u003ek});var r=l(i()),k={title:\"Dan Kortschak, PhD\",slug:\"dan-kortschak\"};function m(t){return(0,r.jsx)(r.Fragment,{})}function p(t={}){let{wrapper:n}=t.components||{};return n?(0,r.jsx)(n,Object.assign({},t,{children:(0,r.jsx)(m,t)})):m(t)}var M=p;return D(C);})();\n;return Component;"},"_id":"authors/dan-kortschak.mdx","_raw":{"sourceFilePath":"authors/dan-kortschak.mdx","sourceFileName":"dan-kortschak.mdx","sourceFileDir":"authors","contentType":"mdx","flattenedPath":"authors/dan-kortschak"},"type":"Author","imageUrl":"","url":"/authors/dan-kortschak"},{"title":"Jake King","slug":"jake-king","description":"Elastic Security Intelligence Team Lead","image":"jake-king.jpg","body":{"raw":"","code":"var Component=(()=\u003e{var u=Object.create;var i=Object.defineProperty;var j=Object.getOwnPropertyDescriptor;var l=Object.getOwnPropertyNames;var x=Object.getPrototypeOf,d=Object.prototype.hasOwnProperty;var f=(t,e)=\u003e()=\u003e(e||t((e={exports:{}}).exports,e),e.exports),k=(t,e)=\u003e{for(var n in e)i(t,n,{get:e[n],enumerable:!0})},c=(t,e,n,o)=\u003e{if(e\u0026\u0026typeof e==\"object\"||typeof e==\"function\")for(let a of l(e))!d.call(t,a)\u0026\u0026a!==n\u0026\u0026i(t,a,{get:()=\u003ee[a],enumerable:!(o=j(e,a))||o.enumerable});return t};var p=(t,e,n)=\u003e(n=t!=null?u(x(t)):{},c(e||!t||!t.__esModule?i(n,\"default\",{value:t,enumerable:!0}):n,t)),_=t=\u003ec(i({},\"__esModule\",{value:!0}),t);var g=f((L,s)=\u003e{s.exports=_jsx_runtime});var D={};k(D,{default:()=\u003eC,frontmatter:()=\u003eM});var r=p(g()),M={title:\"Jake King\",description:\"Elastic Security Intelligence Team Lead\",slug:\"jake-king\",image:\"jake-king.jpg\"};function m(t){return(0,r.jsx)(r.Fragment,{})}function y(t={}){let{wrapper:e}=t.components||{};return e?(0,r.jsx)(e,Object.assign({},t,{children:(0,r.jsx)(m,t)})):m(t)}var C=y;return _(D);})();\n;return Component;"},"_id":"authors/jake-king.mdx","_raw":{"sourceFilePath":"authors/jake-king.mdx","sourceFileName":"jake-king.mdx","sourceFileDir":"authors","contentType":"mdx","flattenedPath":"authors/jake-king"},"type":"Author","imageUrl":"/assets/images/authors/jake-king.jpg","url":"/authors/jake-king"},{"title":"Susan Chang","slug":"susan-chang","body":{"raw":"","code":"var Component=(()=\u003e{var x=Object.create;var o=Object.defineProperty;var g=Object.getOwnPropertyDescriptor;var f=Object.getOwnPropertyNames;var _=Object.getPrototypeOf,j=Object.prototype.hasOwnProperty;var l=(t,n)=\u003e()=\u003e(n||t((n={exports:{}}).exports,n),n.exports),d=(t,n)=\u003e{for(var e in n)o(t,e,{get:n[e],enumerable:!0})},u=(t,n,e,s)=\u003e{if(n\u0026\u0026typeof n==\"object\"||typeof n==\"function\")for(let a of f(n))!j.call(t,a)\u0026\u0026a!==e\u0026\u0026o(t,a,{get:()=\u003en[a],enumerable:!(s=g(n,a))||s.enumerable});return t};var h=(t,n,e)=\u003e(e=t!=null?x(_(t)):{},u(n||!t||!t.__esModule?o(e,\"default\",{value:t,enumerable:!0}):e,t)),p=t=\u003eu(o({},\"__esModule\",{value:!0}),t);var i=l((b,c)=\u003e{c.exports=_jsx_runtime});var F={};d(F,{default:()=\u003eD,frontmatter:()=\u003eC});var r=h(i()),C={title:\"Susan Chang\",slug:\"susan-chang\"};function m(t){return(0,r.jsx)(r.Fragment,{})}function M(t={}){let{wrapper:n}=t.components||{};return n?(0,r.jsx)(n,Object.assign({},t,{children:(0,r.jsx)(m,t)})):m(t)}var D=M;return p(F);})();\n;return Component;"},"_id":"authors/susan-chang.mdx","_raw":{"sourceFilePath":"authors/susan-chang.mdx","sourceFileName":"susan-chang.mdx","sourceFileDir":"authors","contentType":"mdx","flattenedPath":"authors/susan-chang"},"type":"Author","imageUrl":"","url":"/authors/susan-chang"},{"title":"Andrew Kroh","slug":"andrew-kroh","description":"Principal Software Engineer II, Elastic","body":{"raw":"","code":"var Component=(()=\u003e{var x=Object.create;var a=Object.defineProperty;var d=Object.getOwnPropertyDescriptor;var f=Object.getOwnPropertyNames;var l=Object.getPrototypeOf,g=Object.prototype.hasOwnProperty;var p=(t,n)=\u003e()=\u003e(n||t((n={exports:{}}).exports,n),n.exports),_=(t,n)=\u003e{for(var e in n)a(t,e,{get:n[e],enumerable:!0})},s=(t,n,e,i)=\u003e{if(n\u0026\u0026typeof n==\"object\"||typeof n==\"function\")for(let o of f(n))!g.call(t,o)\u0026\u0026o!==e\u0026\u0026a(t,o,{get:()=\u003en[o],enumerable:!(i=d(n,o))||i.enumerable});return t};var j=(t,n,e)=\u003e(e=t!=null?x(l(t)):{},s(n||!t||!t.__esModule?a(e,\"default\",{value:t,enumerable:!0}):e,t)),w=t=\u003es(a({},\"__esModule\",{value:!0}),t);var u=p((F,c)=\u003e{c.exports=_jsx_runtime});var D={};_(D,{default:()=\u003eC,frontmatter:()=\u003eh});var r=j(u()),h={title:\"Andrew Kroh\",description:\"Principal Software Engineer II, Elastic\",slug:\"andrew-kroh\"};function m(t){return(0,r.jsx)(r.Fragment,{})}function M(t={}){let{wrapper:n}=t.components||{};return n?(0,r.jsx)(n,Object.assign({},t,{children:(0,r.jsx)(m,t)})):m(t)}var C=M;return w(D);})();\n;return Component;"},"_id":"authors/andrew-kroh.mdx","_raw":{"sourceFilePath":"authors/andrew-kroh.mdx","sourceFileName":"andrew-kroh.mdx","sourceFileDir":"authors","contentType":"mdx","flattenedPath":"authors/andrew-kroh"},"type":"Author","imageUrl":"","url":"/authors/andrew-kroh"}],"category":[{"title":"Detection science","slug":"detection-science","body":{"raw":"","code":"var Component=(()=\u003e{var x=Object.create;var c=Object.defineProperty;var f=Object.getOwnPropertyDescriptor;var _=Object.getOwnPropertyNames;var d=Object.getPrototypeOf,g=Object.prototype.hasOwnProperty;var j=(t,e)=\u003e()=\u003e(e||t((e={exports:{}}).exports,e),e.exports),l=(t,e)=\u003e{for(var n in e)c(t,n,{get:e[n],enumerable:!0})},i=(t,e,n,s)=\u003e{if(e\u0026\u0026typeof e==\"object\"||typeof e==\"function\")for(let o of _(e))!g.call(t,o)\u0026\u0026o!==n\u0026\u0026c(t,o,{get:()=\u003ee[o],enumerable:!(s=f(e,o))||s.enumerable});return t};var p=(t,e,n)=\u003e(n=t!=null?x(d(t)):{},i(e||!t||!t.__esModule?c(n,\"default\",{value:t,enumerable:!0}):n,t)),D=t=\u003ei(c({},\"__esModule\",{value:!0}),t);var u=j((h,a)=\u003e{a.exports=_jsx_runtime});var X={};l(X,{default:()=\u003eF,frontmatter:()=\u003eM});var r=p(u()),M={title:\"Detection science\",slug:\"detection-science\"};function m(t){return(0,r.jsx)(r.Fragment,{})}function C(t={}){let{wrapper:e}=t.components||{};return e?(0,r.jsx)(e,Object.assign({},t,{children:(0,r.jsx)(m,t)})):m(t)}var F=C;return D(X);})();\n;return Component;"},"_id":"categories/detection-science.mdx","_raw":{"sourceFilePath":"categories/detection-science.mdx","sourceFileName":"detection-science.mdx","sourceFileDir":"categories","contentType":"mdx","flattenedPath":"categories/detection-science"},"type":"Category","url":"/categories/detection-science"},{"title":"Machine learning","slug":"machine-learning","body":{"raw":"","code":"var Component=(()=\u003e{var x=Object.create;var o=Object.defineProperty;var g=Object.getOwnPropertyDescriptor;var l=Object.getOwnPropertyNames;var f=Object.getPrototypeOf,_=Object.prototype.hasOwnProperty;var j=(t,n)=\u003e()=\u003e(n||t((n={exports:{}}).exports,n),n.exports),M=(t,n)=\u003e{for(var e in n)o(t,e,{get:n[e],enumerable:!0})},c=(t,n,e,i)=\u003e{if(n\u0026\u0026typeof n==\"object\"||typeof n==\"function\")for(let a of l(n))!_.call(t,a)\u0026\u0026a!==e\u0026\u0026o(t,a,{get:()=\u003en[a],enumerable:!(i=g(n,a))||i.enumerable});return t};var d=(t,n,e)=\u003e(e=t!=null?x(f(t)):{},c(n||!t||!t.__esModule?o(e,\"default\",{value:t,enumerable:!0}):e,t)),h=t=\u003ec(o({},\"__esModule\",{value:!0}),t);var m=j((b,s)=\u003e{s.exports=_jsx_runtime});var F={};M(F,{default:()=\u003eD,frontmatter:()=\u003ep});var r=d(m()),p={title:\"Machine learning\",slug:\"machine-learning\"};function u(t){return(0,r.jsx)(r.Fragment,{})}function C(t={}){let{wrapper:n}=t.components||{};return n?(0,r.jsx)(n,Object.assign({},t,{children:(0,r.jsx)(u,t)})):u(t)}var D=C;return h(F);})();\n;return Component;"},"_id":"categories/machine-learning.mdx","_raw":{"sourceFilePath":"categories/machine-learning.mdx","sourceFileName":"machine-learning.mdx","sourceFileDir":"categories","contentType":"mdx","flattenedPath":"categories/machine-learning"},"type":"Category","url":"/categories/machine-learning"},{"title":"Generative AI","slug":"generative-ai","body":{"raw":"","code":"var Component=(()=\u003e{var x=Object.create;var o=Object.defineProperty;var f=Object.getOwnPropertyDescriptor;var g=Object.getOwnPropertyNames;var _=Object.getPrototypeOf,j=Object.prototype.hasOwnProperty;var l=(t,e)=\u003e()=\u003e(e||t((e={exports:{}}).exports,e),e.exports),d=(t,e)=\u003e{for(var n in e)o(t,n,{get:e[n],enumerable:!0})},s=(t,e,n,i)=\u003e{if(e\u0026\u0026typeof e==\"object\"||typeof e==\"function\")for(let a of g(e))!j.call(t,a)\u0026\u0026a!==n\u0026\u0026o(t,a,{get:()=\u003ee[a],enumerable:!(i=f(e,a))||i.enumerable});return t};var p=(t,e,n)=\u003e(n=t!=null?x(_(t)):{},s(e||!t||!t.__esModule?o(n,\"default\",{value:t,enumerable:!0}):n,t)),M=t=\u003es(o({},\"__esModule\",{value:!0}),t);var u=l((b,c)=\u003e{c.exports=_jsx_runtime});var F={};d(F,{default:()=\u003eD,frontmatter:()=\u003ev});var r=p(u()),v={title:\"Generative AI\",slug:\"generative-ai\"};function m(t){return(0,r.jsx)(r.Fragment,{})}function C(t={}){let{wrapper:e}=t.components||{};return e?(0,r.jsx)(e,Object.assign({},t,{children:(0,r.jsx)(m,t)})):m(t)}var D=C;return M(F);})();\n;return Component;"},"_id":"categories/generative-ai.mdx","_raw":{"sourceFilePath":"categories/generative-ai.mdx","sourceFileName":"generative-ai.mdx","sourceFileDir":"categories","contentType":"mdx","flattenedPath":"categories/generative-ai"},"type":"Category","url":"/categories/generative-ai"}]},"seriesArticles":null},"__N_SSG":true},"page":"/[slug]","query":{"slug":"elastic-advances-llm-security"},"buildId":"wTIynxBm98ujmQxLsgK6X","assetPrefix":"/security-labs","isFallback":false,"gsp":true,"scriptLoader":[]}</script></body></html>

Pages: 1 2 3 4 5 6 7 8 9 10