<!-- NOTE(review): replaced pre-doctype scrape artifacts (a stray "CINXE.COM" banner
     line and a duplicated page title) with this comment. Stray text before the
     doctype is invalid HTML and forces parsers into quirks-mode error recovery;
     a comment in this position is permitted by the HTML Standard. -->
<!DOCTYPE html><html lang="en"><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><title>How Everdian delivers “life-saving” real-time critical insights, via AI<!-- --> <!-- --> | Scaleway Blog</title><meta name="description" content="AI startup Everdian chose Scaleway not just for its powerful GPU clusters, but also because Scaleway’s simplicity means new team members can be onboarded in just a few weeks. More inside!"/><meta property="og:url" content="https://www.scaleway.com/en/blog/how-everdian-delivers-life-saving-real-time-critical-insights-via-ai/"/><meta property="og:type" content="article"/><meta property="og:title" content="How Everdian delivers “life-saving” real-time critical insights, via AI"/><meta property="og:description" content="AI startup Everdian chose Scaleway not just for its powerful GPU clusters, but also because Scaleway’s simplicity means new team members can be onboarded in just a few weeks. More inside!"/><meta property="article:author" content="https://www.scaleway.com/en/blog/author/jean-baptiste-fourmont"/><meta property="og:image" content="https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp"/><meta content="https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp" name="twitter:image"/><meta content="summary" name="twitter:card"/><meta content="@Scaleway" name="twitter:creator"/><meta content="How Everdian delivers “life-saving” real-time critical insights, via AI" name="twitter:title"/><meta content="AI startup Everdian chose Scaleway not just for its powerful GPU clusters, but also because Scaleway’s simplicity means new team members can be onboarded in just a few weeks. More inside!" 
name="twitter:description"/><link href="https://www.scaleway.com/en/blog/how-everdian-delivers-life-saving-real-time-critical-insights-via-ai/" rel="canonical"/><meta name="next-head-count" content="16"/><link rel="preload" href="/_next/static/media/a34f9d1faa5f3315-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><link rel="preload" href="/_next/static/media/2d141e1a38819612-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><link rel="preload" href="/_next/static/css/5a4aac22989312df.css" as="style"/><link rel="stylesheet" href="/_next/static/css/5a4aac22989312df.css" data-n-g=""/><link rel="preload" href="/_next/static/css/c609e7b393629430.css" as="style"/><link rel="stylesheet" href="/_next/static/css/c609e7b393629430.css" data-n-p=""/><link rel="preload" href="/_next/static/css/1ca77b1ad9949237.css" as="style"/><link rel="stylesheet" href="/_next/static/css/1ca77b1ad9949237.css" data-n-p=""/><link rel="preload" href="/_next/static/css/4ad6f1eee4386756.css" as="style"/><link rel="stylesheet" href="/_next/static/css/4ad6f1eee4386756.css" data-n-p=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/_next/static/chunks/polyfills-42372ed130431b0a.js"></script><script src="/_next/static/chunks/webpack-7ff391b0025b4fe3.js" defer=""></script><script src="/_next/static/chunks/framework-2de211cc2591dcf8.js" defer=""></script><script src="/_next/static/chunks/main-387e329be5509820.js" defer=""></script><script src="/_next/static/chunks/pages/_app-1496642abd45d16f.js" defer=""></script><script src="/_next/static/chunks/675-283f369b69dc812e.js" defer=""></script><script src="/_next/static/chunks/395-a7842d01211b5e87.js" defer=""></script><script src="/_next/static/chunks/278-6df85a5cbf7eb789.js" defer=""></script><script src="/_next/static/chunks/830-01fd7ba2b0b77b8e.js" defer=""></script><script src="/_next/static/chunks/102-27040e86297157d6.js" 
defer=""></script><script src="/_next/static/chunks/854-ca4cfc03ee0da0c6.js" defer=""></script><script src="/_next/static/chunks/367-337657c830ee0244.js" defer=""></script><script src="/_next/static/chunks/51-873605722a81cdf5.js" defer=""></script><script src="/_next/static/chunks/pages/blog/%5Bslug%5D-574c558d3f8ed907.js" defer=""></script><script src="/_next/static/85BYxc5vA-nbO8Fs_1Ijf/_buildManifest.js" defer=""></script><script src="/_next/static/85BYxc5vA-nbO8Fs_1Ijf/_ssgManifest.js" defer=""></script></head><body><div id="__next"><style data-emotion="css-global 0"></style><div class="__variable_375d66 __variable_f77ac8 container"><div class="blog"><header class="HeaderBlog_headerContainer__n3f6s full-width"><div class="container"><div class="HeaderBlog_header__CTV5V"><div class="HeaderBlog_logo__kbnMY"><a href="/en/blog/"><img alt="Scaleway Blog" loading="lazy" width="240" height="40" decoding="async" data-nimg="1" style="color:transparent" srcSet="/_next/static/media/logo-blog.49246fc4.svg 1x, /_next/static/media/logo-blog.49246fc4.svg 2x" src="/_next/static/media/logo-blog.49246fc4.svg"/></a><a href="#main" class="SkipLink_link__wUma3">Skip to main content</a><a href="#footer" class="SkipLink_link__wUma3">Skip to footer section</a><button class="HeaderBlog_menuButton__PP1O7" type="button"><style data-emotion="css 3sqif5">.css-3sqif5{vertical-align:middle;fill:currentColor;height:1em;width:1em;min-width:1em;min-height:1em;}.css-3sqif5 .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-3sqif5 e1gt4cfo0"><path fill-rule="evenodd" d="M2 4.75A.75.75 0 0 1 2.75 4h14.5a.75.75 0 0 1 0 1.5H2.75A.75.75 0 0 1 2 4.75M2 10a.75.75 0 0 1 .75-.75h14.5a.75.75 0 0 1 0 1.5H2.75A.75.75 0 0 1 2 10m0 5.25a.75.75 0 0 1 .75-.75h14.5a.75.75 0 0 1 0 1.5H2.75a.75.75 0 0 1-.75-.75" clip-rule="evenodd"></path></svg></button></div><nav class="HeaderBlog_topNav__cNrI_ font-body-small-regular"><ul 
class="HeaderBlog_links__1jfH4"><li><a href="/en/blog/incidents/">Incidents</a></li><li><a href="https://www.scaleway.com/en/docs/">Docs</a></li><li><a href="https://www.scaleway.com/en/contact/">Contact</a></li></ul><ul class="HeaderBlog_language__IixQV"><li><span class="sr-only">English</span><span>en</span></li></ul></nav><nav class="HeaderBlog_bottomNav__wIZob"><a class="cta-primary cta-size-small" href="/en/">Discover Scaleway</a><div class="HeaderBlog_socials__eZU_7"><a href="https://x.com/Scaleway/"><style data-emotion="css 1fr75dr">.css-1fr75dr{vertical-align:middle;fill:currentColor;height:20px;width:20px;min-width:20px;min-height:20px;}.css-1fr75dr .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-1fr75dr e1gt4cfo0"><path d="M15.203 1.875h2.757l-6.023 6.883 7.085 9.367h-5.547l-4.345-5.68-4.972 5.68H1.4l6.442-7.363-6.797-8.887h5.688l3.928 5.193zm-.967 14.6h1.527L5.903 3.438H4.264z"></path></svg><span class="sr-only">X</span></a><a href="https://slack.scaleway.com/"><style data-emotion="css 1fr75dr">.css-1fr75dr{vertical-align:middle;fill:currentColor;height:20px;width:20px;min-width:20px;min-height:20px;}.css-1fr75dr .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-1fr75dr e1gt4cfo0"><path fill-rule="evenodd" d="M6.056 3.419a1.75 1.75 0 0 0 1.75 1.751H9.39a.167.167 0 0 0 .167-.166V3.419a1.75 1.75 0 1 0-3.501 0m3.5 4.392a1.75 1.75 0 0 0-1.75-1.751H3.417a1.75 1.75 0 0 0-1.75 1.751 1.75 1.75 0 0 0 1.75 1.752h4.39a1.75 1.75 0 0 0 1.75-1.752m-6.123 6.142a1.75 1.75 0 0 0 1.75-1.752v-1.585a.167.167 0 0 0-.167-.166H3.433a1.75 1.75 0 0 0-1.75 1.751 1.75 1.75 0 0 0 1.75 1.752m4.376-3.503a1.75 1.75 0 0 0-1.75 1.751v4.38a1.75 1.75 0 1 0 3.5 0V12.2a1.75 1.75 0 0 0-1.75-1.751m7.01-2.639a1.75 1.75 0 1 1 3.501 0 1.75 1.75 0 0 1-1.75 1.752h-1.584a.167.167 0 0 1-.167-.167zm-.875 0a1.75 1.75 0 1 1-3.5 0V3.42a1.75 1.75 0 1 1 3.5 0zm0 
8.77a1.75 1.75 0 0 0-1.75-1.752H10.61a.167.167 0 0 0-.167.167v1.585a1.75 1.75 0 1 0 3.501 0m-3.5-4.38a1.75 1.75 0 0 0 1.75 1.752h4.39a1.75 1.75 0 0 0 1.75-1.752 1.75 1.75 0 0 0-1.75-1.751h-4.39a1.75 1.75 0 0 0-1.75 1.751" clip-rule="evenodd"></path></svg><span class="sr-only">Slack</span></a><a href="/en/blog/rss.xml"><style data-emotion="css 1fr75dr">.css-1fr75dr{vertical-align:middle;fill:currentColor;height:20px;width:20px;min-width:20px;min-height:20px;}.css-1fr75dr .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-1fr75dr e1gt4cfo0"><path d="M3.75 3a.75.75 0 0 0-.75.75v.5c0 .414.336.75.75.75H4c6.075 0 11 4.925 11 11v.25c0 .414.336.75.75.75h.5a.75.75 0 0 0 .75-.75V16C17 8.82 11.18 3 4 3z"></path><path d="M3 8.75A.75.75 0 0 1 3.75 8H4a8 8 0 0 1 8 8v.25a.75.75 0 0 1-.75.75h-.5a.75.75 0 0 1-.75-.75V16a6 6 0 0 0-6-6h-.25A.75.75 0 0 1 3 9.25zM7 15a2 2 0 1 1-4 0 2 2 0 0 1 4 0"></path></svg><span class="sr-only">RSS</span></a></div></nav></div></div></header><main class="main" id="main"><nav class="TopBar_navBar__jEc9M"><a class="TopBar_link__c_MXa" href="/en/blog/"><svg width="16" height="16" aria-hidden="true"><use xlink:href="/svg/sprite.svg#all-items"></use></svg>all</a><a class="TopBar_link__c_MXa" href="/en/blog/build/">build</a><a class="TopBar_link__c_MXa" href="/en/blog/deploy/">deploy</a><a class="TopBar_link__c_MXa TopBar_isActive__bqGIp" href="/en/blog/scale/">scale</a></nav><section class="Hero_wrapper__l0O5u"><div class="Hero_content__WhyjP"><h1 class="font-heading-secondary-title Hero_title__64Z8x">How Everdian delivers “life-saving” real-time critical insights, via AI - interview with Cedric Milinaire</h1><div class="Hero_footer__KFZYB"><div class="blogCategory"><a class="cta-inline cta-size-big" href="/en/blog/scale/">Scale</a></div><span class="blogDot Hero_dot__OjyBJ" aria-hidden="true">•</span><address class="blogAuthor"><a class="cta-inline cta-size-big" 
href="/en/blog/author/jean-baptiste-fourmont/">Jean-Baptiste Fourmont</a></address><span class="blogDot Hero_dot__OjyBJ" aria-hidden="true">•</span><div><time dateTime="2024-07-22">22/07/24</time><span class="blogDot" aria-hidden="true">•</span><span>4 min read</span></div></div></div><div class="Hero_imageWrapper__tMCgD"><img alt="" loading="lazy" width="512" height="320" decoding="async" data-nimg="1" style="color:transparent" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp 1x, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp 2x" src="https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp"/></div></section><div class="PostPage_post__sMook"><div class="TableOfContent_tableOfContent__e65l6"><nav aria-describedby="tableofcontent-main"><h2 class="font-body-small-bold">Table of contents</h2><ul><li class="TableOfContent_selected__VR351"><a href="#main">Introduction</a></li><li class=""><a href="#how-it-works">How it works</a></li><li class=""><a href="#why-they-chose-scaleway">Why they chose Scaleway</a></li><li class=""><a href="#building-the-architecture">Building the architecture</a></li><li class=""><a href="#preparing-the-next-stages-of-growth-by-faster-onboarding">Preparing the next stages of growth by faster onboarding</a></li></ul></nav></div><div class="BlogRichText_blogRichText__zXeTD"><p><em>Why does the new generation of European AI startups increasingly turn to Scaleway? It’s not just to access the European cloud’s most powerful GPU cluster. As Cedric Milinaire, Director General &amp; CTO of France’s <a href="https://everdian.com/">Everdian</a> explains, it’s also to accelerate growth, as Scaleway’s simplicity means new team members can be onboarded in just a few weeks. 
Find out more below!</em></p> <p>Everdian is an AI startup specialized in real-time decision making. Its main differentiator is that it <strong>uses multiple proprietary AI models capable of analyzing large streams of data in real time</strong>, to alert strategic decision makers about key ongoing events. Users can build custom dashboards to visualize results and generate their own alerts.</p> <p>Based on algorithms that could broadly be classified as NLP (Natural Language Processing), its activity covers:</p> <ul> <li>Real-time security alerts (for example, if there’s a fire alert in a building, the AI helps to get the news out there, and show relevant videos to facilitate fast localization of the fire)</li> <li>Multi-source monitoring, to detect fast-spreading topics, and for fact checking</li> <li>In the financial sector, predicting market shares and stock evaluations, as well as sentiment detection with regards to big announcements</li> <li>Many other use cases, such as finding a client’s stolen assets on second hand reseller platforms.</li> </ul> <p>Everdian uses all types of data, including text, images and videos. For training, the team annotates real world data, then adds synthetic data to improve it. <strong>Today, the metadata is often more important than the data itself. So Everdian needs to tweak the datasets to optimize its effects</strong>. This can lead to significant improvements in the fields of privacy and energy efficiency.</p> <h2 id="how-it-works">How it works</h2> <p>AI startups are everywhere right now, as are hype-fueled funding rounds. But Everdian’s objective is to make a difference in the real world.</p> <p><strong>“When you handle use cases with human lives at stake, ten seconds is really important,”</strong> says Milinaire. “For example, we’re used by search and rescue teams to alert them about the occurrence of fire incidents. We provide context with live video feeds and various information posted online. 
Without us, the only information they may have is that the fire’s in the building. <strong>We can tell them - based on data posted online - it’s on the 5th floor and not the 6th. And that saves lives.”</strong></p> <p>To perform such a feat, Everdian collects data streams into large graphs and analyzes the multiple data points; the level of filtering depends on the services and use cases.</p> <p>For instance, image analysis services provide more accurate reports than public opinions (often blurry and contradicting). Then feedback correlation and source comparison will provide a clear idea of any situation and enable Everdian to share the most relevant information.</p> <p>The startup’s proprietary clustering algorithm and AI models analyze image and video similarity, in order to only keep relevant ones. Naturally, the larger the dataset, the harder it is to filter through the noise.</p> <p>Indeed, the most frequent challenge is understanding the different data points. <strong>When Everdian detects critical events, it only wants images of that event, not of people giving their opinion about it</strong>. And it needs to select the one best video - not several - that gives the clearest idea of what’s happening. In short, to be able to share only the most relevant and critical information first.</p> <h2 id="why-they-chose-scaleway">Why they chose Scaleway</h2> <p>Everdian’s number one need is GPUs, “because we analyze millions of texts and images”, says Milinaire, “<strong>so we need access to a whole cluster of GPUs in order to optimize our models, syncing them to the hardware. So <a href="https://www.scaleway.com/en/h100-pcie-try-it-now/">Scaleway’s H100s</a> are really useful for us</strong>.”</p> <p>They also need highly efficient storage; this is important when handling large amounts of data. For this, Everdian uses Elasticsearch, as it allows for archiving that lets clients “dig through data”, as Milinaire puts it. 
Everdian uses snapshots on Scaleway Block Storage here.</p> <p>So the startup’s main pain points were:</p> <ol> <li>Cost, as GPUs are expensive</li> <li>Availability: H100 only available as spot instances at first</li> <li>Variety: Everdian’s work requires a wide variety of GPUs <em>(we’re working on it!)</em></li> </ol> <p>When searching for a cloud provider, Scaleway’s offering and tools largely matched Everdian’s requirements. The main drawback was the security part, as Scaleway was less advanced than other CSPs at that time. Security is a key factor for Everdian, as all new customers demand comprehensive documentation and guarantees on this front.</p> <p><strong>In the end, the tradeoff was positive, as Everdian’s choice meant they could access advanced cloud features and considerable quantities of GPUs</strong>. Individual NVIDIA H100s, as well as entire clusters, are required to analyze millions of texts and images. After that, models are optimized, in sync with the hardware capabilities of each machine.</p> <h2 id="building-the-architecture">Building the architecture</h2> <p>To provide a solution able to auto-scale, auto-heal and auto-upgrade, the decision was made to <strong>containerize everything and always build on Kubernetes</strong> (via Scaleway’s <a href="https://www.scaleway.com/en/kubernetes-kapsule/">Kapsule</a> product). Then, due to the complexity of data sources, services and customers it has to manage, Everdian opted for a microservices-focused approach.</p> <p>Their main feature request was for dedicated control planes (in general availability since Autumn 2023) to enable higher levels of resilience and controls. 
Then, they built everything around those Kubernetes clusters: backups, data and videos, all hosted on Object Storage.</p> <p>Everdian’s tech teams have notably praised the simplicity and efficiency of Scaleway Kapsule, especially compared with larger CSPs’ equivalent products.</p> <p>They were also reassured by <a href="https://www.scaleway.com/en/vpc/">VPC</a>, where the ability to communicate between different zones, thanks to Scaleway’s Multi-AZ offering, where data is redundant across several availability zones, was perceived as a great advantage. Everdian started in the PAR 1 data center region, then extended to PAR 2 to access those lovely new GPUs, whilst accessing a better level of resilience and reliability.</p> <p>One missing feature is still the VPN, that Everdian completed themselves for their internal tooling. Their feedback has been noted and Scaleway’s team is working on it.</p> <p>Milinaire’s current wishlist now includes managed Elasticsearch: a wish Scaleway heard, and so is now looking for others’ points of view in its product discovery approach.</p> <h2 id="preparing-the-next-stages-of-growth-by-faster-onboarding">Preparing the next stages of growth by faster onboarding</h2> <p>Everdian found that Scaleway was the ideal cloud provider to ramp up their teams’ technical expertise quickly. “<strong>On Scaleway’s platform, our tech teams were operational in a matter of weeks; much faster than with hyperscaler cloud providers</strong>,” says Milinaire, who adds:</p> <p><strong>“We hired a DevSecOps. I didn’t explain anything about Scaleway to him. I just said ‘this is in the Scaleway console, figure it out. 
You can do it!’ Not long afterwards, he was creating VPCs everywhere!”</strong></p> <p>Another example was remote employees, who require quick and autonomous onboarding to use other services in a matter of days, without any mentoring or further explanations.</p> <p>Compared with hyperscalers, this accessibility helps Everdian’s teams be more productive and enables the company to welcome new tech staff more quickly, thereby boosting their impact. With other providers, a non-knowledgeable team member would take weeks to onboard, after reading documentation before being able to start using their first cloud products.</p> <p>Everdian also cites the <strong>proximity of Scaleway’s support staff as a key differentiator: “my feedback is always taken into consideration”</strong>, says Milinaire.</p> <p>This will be critical for Everdian’s next stages of growth, given its ambitious roadmap. Such as reworking the organization and project leveraging new features, along with the always improving IAM and network capabilities of Scaleway.</p> <p>Another area of improvement will be the AI model optimization - as Everdian grows, their consumption of compute power grows exponentially - needing detailed attention of their AI scientists and technology teams.</p></div></div><section class="ExtraPosts_container__0fO7Q"><h2 class="font-heading-highlighted ExtraPosts_title__hqJSu">Recommended articles</h2><div class="ExtraPosts_articles__4oTri"><article class="RecommendedArticleCard_articleCard__L95dV"><div class="blogImage RecommendedArticleCard_img__lFn5u"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f.webp 640w, 
https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f.webp"/></div><div class="RecommendedArticleCard_contentContainer__83Lgz"><h2 class="font-heading-title blogArticleTitle RecommendedArticleCard_heading___OIAO"><a class="breakout-link" href="/en/blog/ai-and-quality-giskard-combessie/">AI and the quality conundrum, with Giskard.AI’s Alexandre Combessie</a></h2><div class="RecommendedArticleCard_excerpt__Gsphk" role="doc-subtitle"><div class="RichText_scwRichtextStyle__xoOiq"><p class="font-body-regular">How can AI remain innovative whilst complying with regulations and standards? 
French startup and ai-PULSE exhibitor Giskard.AI has the answer...</p></div></div><div class="RecommendedArticleCard_footer__avFIY"><div class="blogCategory"><a href="/en/blog/build/">Build</a></div><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><address class="blogAuthor"><a href="/en/blog/author/james-martin/">James Martin</a></address><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><div><time dateTime="2023-12-18">18/12/23</time><span class="blogDot" aria-hidden="true">•</span><span>4 min read</span></div></div><div aria-label="Tags list. Click to choose as filter." class="Tags_tags__UDbwl"><span class="Tag_tag__JS3kY">AI</span><span class="Tag_tag__JS3kY">ai-PULSE</span></div></div></article></div><div class="ExtraPosts_articles__4oTri"><article class="RecommendedArticleCard_articleCard__L95dV"><div class="blogImage RecommendedArticleCard_img__lFn5u"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 1200w, 
https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp"/></div><div class="RecommendedArticleCard_contentContainer__83Lgz"><h2 class="font-heading-title blogArticleTitle RecommendedArticleCard_heading___OIAO"><a class="breakout-link" href="/en/blog/get-started-ai-cost-emissions-mindmatch/">How to get started in AI without excessive cost, or emissions! - MindMatch guest post</a></h2><div class="RecommendedArticleCard_excerpt__Gsphk" role="doc-subtitle"><div class="RichText_scwRichtextStyle__xoOiq"><p class="font-body-regular">How can startups take their first steps with Large Language Models (LLMs)? Leveraging AI needn&#x27;t cost the earth, explains MindMatch&#x27;s Zofia Smoleń</p></div></div><div class="RecommendedArticleCard_footer__avFIY"><div class="blogCategory"><a href="/en/blog/build/">Build</a></div><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><address class="blogAuthor"><a href="/en/blog/author/zofia-smolen/">Zofia Smoleń</a></address><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><div><time dateTime="2024-02-26">26/02/24</time><span class="blogDot" aria-hidden="true">•</span><span>7 min read</span></div></div><div aria-label="Tags list. Click to choose as filter." 
class="Tags_tags__UDbwl"><span class="Tag_tag__JS3kY">AI</span><span class="Tag_tag__JS3kY">Startups</span><span class="Tag_tag__JS3kY">Sustainability</span></div></div></article></div><div class="ExtraPosts_articles__4oTri"><article class="RecommendedArticleCard_articleCard__L95dV"><div class="blogImage RecommendedArticleCard_img__lFn5u"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Composer_c2bc5a6a84.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Composer_c2bc5a6a84.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Composer_c2bc5a6a84.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Composer_c2bc5a6a84.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Composer_c2bc5a6a84.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Composer_c2bc5a6a84.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Composer_c2bc5a6a84.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Composer_c2bc5a6a84.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Composer_c2bc5a6a84.webp"/></div><div class="RecommendedArticleCard_contentContainer__83Lgz"><h2 class="font-heading-title blogArticleTitle RecommendedArticleCard_heading___OIAO"><a class="breakout-link" href="/en/blog/data-powering-tech-efficiency-privacy-value/">How Data is powering tech efficiency, privacy and value(s) </a></h2><div class="RecommendedArticleCard_excerpt__Gsphk" role="doc-subtitle"><div class="RichText_scwRichtextStyle__xoOiq"><p class="font-body-regular">Data management has never been 
more critical to business success. But how can it be handled efficiently, whilst respecting privacy, and generate value... in line with a company&#x27;s core values?</p></div></div><div class="RecommendedArticleCard_footer__avFIY"><div class="blogCategory"><a href="/en/blog/scale/">Scale</a></div><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><address class="blogAuthor"><a href="/en/blog/author/james-martin/">James Martin</a></address><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><div><time dateTime="2023-04-21">21/04/23</time><span class="blogDot" aria-hidden="true">•</span><span>4 min read</span></div></div><div aria-label="Tags list. Click to choose as filter." class="Tags_tags__UDbwl"><span class="Tag_tag__JS3kY">Data</span><span class="Tag_tag__JS3kY">Startup</span></div></div></article></div></section></main><footer id="footer" class="Footer_footer__dXXGl full-width"><div class="container"><div class="Footer_categories__GKzcP"><div><div class="Footer_title__SsUPi">Products</div><ul><li><a class="cta-inline cta-size-big" href="/en/all-products/">All Products</a></li><li><a class="cta-inline cta-size-big" href="/en/betas/">Betas</a></li><li><a class="cta-inline cta-size-big" href="/en/bare-metal/">Bare Metal</a></li><li><a class="cta-inline cta-size-big" href="/en/dedibox/">Dedibox</a></li><li><a class="cta-inline cta-size-big" href="/en/elastic-metal/">Elastic Metal</a></li><li><a class="cta-inline cta-size-big" href="/en/virtual-instances/">Compute Instances</a></li><li><a class="cta-inline cta-size-big" href="/en/gpu-instances/">GPU</a></li><li><a class="cta-inline cta-size-big" href="/en/containers/">Containers</a></li><li><a class="cta-inline cta-size-big" href="/en/object-storage/">Object Storage</a></li><li><a class="cta-inline cta-size-big" href="/en/block-storage/">Block Storage</a></li></ul></div><div><div class="Footer_title__SsUPi">Resources</div><ul><li><a 
href="https://www.scaleway.com/en/docs/" class="cta-inline cta-size-big">Documentation</a></li><li><a href="https://www.scaleway.com/en/docs/changelog/" class="cta-inline cta-size-big">Changelog</a></li><li><a class="cta-inline cta-size-big" href="https://www.scaleway.com/en/blog/">Blog</a></li><li><a href="https://feature-request.scaleway.com/" class="cta-inline cta-size-big">Feature Requests</a></li><li><a href="https://slack.scaleway.com/" class="cta-inline cta-size-big">Slack Community</a></li></ul></div><div><div class="Footer_title__SsUPi">Contact</div><ul><li><a href="https://console.scaleway.com/support/create/" class="cta-inline cta-size-big">Create a ticket</a></li><li><a href="https://console.scaleway.com/support/abuses/create/" class="cta-inline cta-size-big">Report Abuse</a></li><li><a href="https://status.scaleway.com/" class="cta-inline cta-size-big">Status</a></li><li><a href="https://console.online.net/fr/login" class="cta-inline cta-size-big">Dedibox Console online.net</a></li><li><a class="cta-inline cta-size-big" href="/en/assistance/">Support plans</a></li><li><a href="https://ultraviolet.scaleway.com/6dd9b5c45/p/62b4e2-ultraviolet" class="cta-inline cta-size-big">Brand resources</a></li></ul></div><div><div class="Footer_title__SsUPi">Company</div><ul><li><a class="cta-inline cta-size-big" href="/en/about-us/">About us</a></li><li><a class="cta-inline cta-size-big" href="/en/events/">Events</a></li><li><a href="https://www.scaleway.com/en/marketplace/" class="cta-inline cta-size-big">Marketplace</a></li><li><a class="cta-inline cta-size-big" href="/en/environmental-leadership/">Environment </a></li><li><a class="cta-inline cta-size-big" href="/en/social-responsibility/">Social Responsibility</a></li><li><a class="cta-inline cta-size-big" href="/en/security-and-resilience/">Security</a></li><li><a class="cta-inline cta-size-big" href="/en/shared-responsibility-model/">Shared Responsibility Model</a></li><li><a class="cta-inline cta-size-big" 
href="/en/news/">News</a></li><li><a class="cta-inline cta-size-big" href="/en/careers/">Careers</a></li><li><a class="cta-inline cta-size-big" href="/en/scaleway-learning/">Scaleway Learning</a></li><li><a class="cta-inline cta-size-big" href="/en/customer-testimonials/">Client Success Stories</a></li><li><style data-emotion="css je8g23">.css-je8g23{pointer-events:none;}</style><style data-emotion="css 1ra7yv3">.css-1ra7yv3{background-color:transparent;border:none;padding:0;color:#34a8ff;-webkit-text-decoration:underline;text-decoration:underline;text-decoration-thickness:1px;text-underline-offset:2px;text-decoration-color:transparent;display:-webkit-inline-box;display:-webkit-inline-flex;display:-ms-inline-flexbox;display:inline-flex;-webkit-flex-direction:row;-ms-flex-direction:row;flex-direction:row;-webkit-align-items:center;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-transition:text-decoration-color 250ms ease-out;transition:text-decoration-color 250ms ease-out;gap:8px;position:relative;cursor:pointer;width:-webkit-fit-content;width:-moz-fit-content;width:fit-content;font-size:16px;font-family:Inter,Asap,sans-serif;font-weight:500;letter-spacing:0;line-height:24px;paragraph-spacing:0;text-case:none;}.css-1ra7yv3 .e1afnb7a2{-webkit-transition:-webkit-transform 250ms ease-out;transition:transform 250ms ease-out;}.css-1ra7yv3 >*{pointer-events:none;}.css-1ra7yv3:hover,.css-1ra7yv3:focus{outline:none;-webkit-text-decoration:underline;text-decoration:underline;text-decoration-thickness:1px;color:#6fc2ff;text-decoration-color:#6fc2ff;}.css-1ra7yv3:hover .e1afnb7a2,.css-1ra7yv3:focus .e1afnb7a2{-webkit-transform:translate(-4px, 0);-moz-transform:translate(-4px, 0);-ms-transform:translate(-4px, 0);transform:translate(-4px, 
0);}.css-1ra7yv3[data-variant='inline']{-webkit-text-decoration:underline;text-decoration:underline;text-decoration-thickness:1px;}.css-1ra7yv3:hover::after,.css-1ra7yv3:focus::after{background-color:#34a8ff;}.css-1ra7yv3:active{text-decoration-thickness:2px;}</style><a href="https://labs.scaleway.com/en/" target="_blank" rel="noopener noreferrer" class="css-1ra7yv3 e1afnb7a0" variant="bodyStrong" data-variant="standalone">Labs<style data-emotion="css ajnoa3">.css-ajnoa3{display:-webkit-inline-box;display:-webkit-inline-flex;display:-ms-inline-flexbox;display:inline-flex;padding-bottom:4px;}</style><span class="css-ajnoa3 e1afnb7a1"><style data-emotion="css 1udvifh">.css-1udvifh{vertical-align:middle;fill:currentColor;height:14px;width:14px;min-width:14px;min-height:14px;}.css-1udvifh .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="e1afnb7a2 css-1udvifh e1gt4cfo0"><path fill-rule="evenodd" d="M4.25 6.5a.75.75 0 0 0-.75.75v8.5c0 .414.336.75.75.75h8.5a.75.75 0 0 0 .75-.75v-4a.75.75 0 0 1 1.5 0v4A2.25 2.25 0 0 1 12.75 18h-8.5A2.25 2.25 0 0 1 2 15.75v-8.5A2.25 2.25 0 0 1 4.25 5h5a.75.75 0 0 1 0 1.5z" clip-rule="evenodd"></path><path fill-rule="evenodd" d="M6.194 13.753a.75.75 0 0 0 1.06.053L16.5 5.44v2.81a.75.75 0 0 0 1.5 0v-4.5a.75.75 0 0 0-.75-.75h-4.5a.75.75 0 0 0 0 1.5h2.553l-9.056 8.194a.75.75 0 0 0-.053 1.06" clip-rule="evenodd"></path></svg></span></a></li></ul></div></div><div class="Footer_socialsContainer__FuhFv"><a href="/en/"><img alt="Scaleway" loading="lazy" width="166" height="32" decoding="async" data-nimg="1" style="color:transparent" srcSet="/_next/static/media/logo.7e2996cb.svg 1x, /_next/static/media/logo.7e2996cb.svg 2x" src="/_next/static/media/logo.7e2996cb.svg"/></a><div><p>Follow us</p><a class="Footer_socialLink__9UK2B" href="https://x.com/Scaleway/"><style data-emotion="css 
1fr75dr">.css-1fr75dr{vertical-align:middle;fill:currentColor;height:20px;width:20px;min-width:20px;min-height:20px;}.css-1fr75dr .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-1fr75dr e1gt4cfo0"><path d="M15.203 1.875h2.757l-6.023 6.883 7.085 9.367h-5.547l-4.345-5.68-4.972 5.68H1.4l6.442-7.363-6.797-8.887h5.688l3.928 5.193zm-.967 14.6h1.527L5.903 3.438H4.264z"></path></svg><span class="sr-only">x</span></a><a class="Footer_socialLink__9UK2B" href="https://slack.scaleway.com/"><style data-emotion="css 1fr75dr">.css-1fr75dr{vertical-align:middle;fill:currentColor;height:20px;width:20px;min-width:20px;min-height:20px;}.css-1fr75dr .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-1fr75dr e1gt4cfo0"><path fill-rule="evenodd" d="M6.056 3.419a1.75 1.75 0 0 0 1.75 1.751H9.39a.167.167 0 0 0 .167-.166V3.419a1.75 1.75 0 1 0-3.501 0m3.5 4.392a1.75 1.75 0 0 0-1.75-1.751H3.417a1.75 1.75 0 0 0-1.75 1.751 1.75 1.75 0 0 0 1.75 1.752h4.39a1.75 1.75 0 0 0 1.75-1.752m-6.123 6.142a1.75 1.75 0 0 0 1.75-1.752v-1.585a.167.167 0 0 0-.167-.166H3.433a1.75 1.75 0 0 0-1.75 1.751 1.75 1.75 0 0 0 1.75 1.752m4.376-3.503a1.75 1.75 0 0 0-1.75 1.751v4.38a1.75 1.75 0 1 0 3.5 0V12.2a1.75 1.75 0 0 0-1.75-1.751m7.01-2.639a1.75 1.75 0 1 1 3.501 0 1.75 1.75 0 0 1-1.75 1.752h-1.584a.167.167 0 0 1-.167-.167zm-.875 0a1.75 1.75 0 1 1-3.5 0V3.42a1.75 1.75 0 1 1 3.5 0zm0 8.77a1.75 1.75 0 0 0-1.75-1.752H10.61a.167.167 0 0 0-.167.167v1.585a1.75 1.75 0 1 0 3.501 0m-3.5-4.38a1.75 1.75 0 0 0 1.75 1.752h4.39a1.75 1.75 0 0 0 1.75-1.752 1.75 1.75 0 0 0-1.75-1.751h-4.39a1.75 1.75 0 0 0-1.75 1.751" clip-rule="evenodd"></path></svg><span class="sr-only">slack</span></a><a class="Footer_socialLink__9UK2B" href="https://www.instagram.com/scaleway/"><style data-emotion="css 
1fr75dr">.css-1fr75dr{vertical-align:middle;fill:currentColor;height:20px;width:20px;min-width:20px;min-height:20px;}.css-1fr75dr .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-1fr75dr e1gt4cfo0"><path fill-rule="evenodd" d="M1.667 9.719c0-2.848 0-4.272.563-5.356A5 5 0 0 1 4.362 2.23c1.084-.563 2.507-.563 5.355-.563h.566c2.848 0 4.272 0 5.355.563a5 5 0 0 1 2.132 2.133c.563 1.084.563 2.508.563 5.356v.566c0 2.848 0 4.272-.562 5.356a5 5 0 0 1-2.133 2.133c-1.083.563-2.507.563-5.355.563h-.566c-2.848 0-4.271 0-5.355-.563a5 5 0 0 1-2.132-2.133c-.563-1.084-.563-2.508-.563-5.356zm3.67.284a4.668 4.668 0 1 0 9.336 0 4.668 4.668 0 0 0-9.336 0m7.697 0a3.03 3.03 0 1 1-6.06 0 3.03 3.03 0 1 1 6.06 0m2.912-4.854a1.09 1.09 0 1 1-2.18 0 1.09 1.09 0 0 1 2.18 0" clip-rule="evenodd"></path></svg><span class="sr-only">instagram</span></a><a class="Footer_socialLink__9UK2B" href="https://www.linkedin.com/company/scaleway/"><style data-emotion="css 1fr75dr">.css-1fr75dr{vertical-align:middle;fill:currentColor;height:20px;width:20px;min-width:20px;min-height:20px;}.css-1fr75dr .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-1fr75dr e1gt4cfo0"><path fill-rule="evenodd" d="M18.332 18.166a.167.167 0 0 1-.167.167h-3.09a.167.167 0 0 1-.167-.167V12.5c0-1.599-.608-2.492-1.874-2.492-1.377 0-2.096.93-2.096 2.492v5.666a.167.167 0 0 1-.167.167H7.804a.167.167 0 0 1-.166-.167V7.39c0-.092.074-.167.166-.167h2.967c.092 0 .167.075.167.167v.67c0 .174.275.26.39.131a3.88 3.88 0 0 1 2.96-1.307c2.357 0 4.044 1.439 4.044 4.415zM3.7 5.767a2.043 2.043 0 0 1-2.035-2.05c0-1.132.91-2.05 2.035-2.05s2.034.918 2.034 2.05-.91 2.05-2.034 2.05m-1.704 12.4c0 .091.074.166.166.166H5.27a.167.167 0 0 0 .167-.167V7.39a.167.167 0 0 0-.167-.167H2.163a.167.167 0 0 0-.166.167z" clip-rule="evenodd"></path></svg><span class="sr-only">linkedIn</span></a></div></div><ul 
class="Footer_sublinks__Mjpw0"><li><a href="/en/contracts/">Contracts</a></li><li><a href="/en/legal-notice/">Legal Notice</a></li><li><a href="/en/privacy-policy/">Privacy Policy</a></li><li><a href="/en/cookie/">Cookie</a></li><li><a href="https://security.scaleway.com">Security Measures</a></li></ul><span class="Footer_brand__qv1gM">© 1999-<!-- -->2024<!-- --> - Scaleway SAS</span></div></footer></div><div id="portal"></div></div></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{"post":{"id":447,"attributes":{"title":"How Everdian delivers “life-saving” real-time critical insights, via AI","path":"how-everdian-delivers-life-saving-real-time-critical-insights-via-ai/","description":"_Why does the new generation of European AI startups increasingly turn to Scaleway? It’s not just to access the European cloud’s most powerful GPU cluster. As Cedric Milinaire, Director General \u0026 CTO of France’s [Everdian](https://everdian.com/) explains, it’s also to accelerate growth, as Scaleway’s simplicity means new team members can be onboarded in just a few weeks. Find out more below!_\n\nEverdian is an AI startup specialized in real-time decision making. Its main differentiator is that it **uses multiple proprietary AI models capable of analyzing large streams of data in real time**, to alert strategic decision makers about key ongoing events. 
Users can build custom dashboards to visualize results and generate their own alerts.\n\nBased on algorithms that could broadly be classified as NLP (Natural Language Processing), its activity covers:\n\n-\tReal-time security alerts (for example, if there’s a fire alert in a building, the AI helps to get the news out there, and show relevant videos to facilitate fast localization of the fire)\n-\tMulti-source monitoring, to detect fast-spreading topics, and for fact checking\n-\tIn the financial sector, predicting market shares and stock evaluations, as well as sentiment detection with regards to big announcements\n-\tMany other use cases, such as finding a client’s stolen assets on second hand reseller platforms.\n\n\nEverdian uses all types of data, including text, images and videos. For training, the team annotates real world data, then adds synthetic data to improve it. **Today, the metadata is often more important than the data itself. So Everdian needs to tweak the datasets to optimize its effects**. This can lead to significant improvements in the fields of privacy and energy efficiency.\n\n\n## How it works\n\nAI startups are everywhere right now, as are hype-fueled funding rounds. But Everdian’s objective is to make a difference in the real world.\n\n**“When you handle use cases with human lives at stake, ten seconds is really important,”** says Milinaire. “For example, we’re used by search and rescue teams to alert them about the occurrence of fire incidents. We provide context with live video feeds and various information posted online. Without us, the only information they may have is that the fire’s in the building. **We can tell them - based on data posted online - it’s on the 5th floor and not the 6th. 
And that saves lives.”**\n\nTo perform such a feat, Everdian collects data streams into large graphs and analyzes the multiple data points; the level of filtering depends on the services and use cases.\n\nFor instance, image analysis services provide more accurate reports than public opinions (often blurry and contradicting). Then feedback correlation and source comparison will provide a clear idea of any situation and enable Everdian to share the most relevant information. \n\nThe startup’s proprietary clustering algorithm and AI models analyze image and video similarity, in order to only keep relevant ones. Naturally, the larger the dataset, the harder it is to filter through the noise. \n\nIndeed, the most frequent challenge is understanding the different data points. **When Everdian detects critical events, it only wants images of that event, not of people giving their opinion about it**. And it needs to select the one best video - not several - that gives the clearest idea of what’s happening. In short, to be able to share only the most relevant and critical information first.\n\n\n## Why they chose Scaleway\n\nEverdian’s number one need is GPUs, “because we analyze millions of texts and images”, says Milinaire, “**so we need access to a whole cluster of GPUs in order to optimize our models, syncing them to the hardware. So [Scaleway’s H100s](https://www.scaleway.com/en/h100-pcie-try-it-now/) are really useful for us**.”\n\nThey also need highly efficient storage; this is important when handling large amounts of data. For this, Everdian uses Elasticsearch, as it allows for archiving that lets clients “dig through data”, as Milinaire puts it. 
Everdian uses snapshots on Scaleway Block Storage here.\n\nSo the startup’s main pain points were:\n\n1.\tCost, as GPUs are expensive\n2.\tAvailability: H100 only available as spot instances at first\n3.\tVariety: Everdian’s work requires a wide variety of GPUs _(we’re working on it!)_\n\n\nWhen searching for a cloud provider, Scaleway’s offering and tools largely matched Everdian’s requirements. The main drawback was the security part, as Scaleway was less advanced than other CSPs at that time. Security is a key factor for Everdian, as all new customers demand comprehensive documentation and guarantees on this front.\n\n**In the end, the tradeoff was positive, as Everdian’s choice meant they could access advanced cloud features and considerable quantities of GPUs**. Individual NVIDIA H100s, as well as entire clusters, are required to analyze millions of texts and images. After that, models are optimized, in sync with the hardware capabilities of each machine.\n\n\n## Building the architecture\n\nTo provide a solution able to auto-scale, auto-heal and auto-upgrade, the decision was made to **containerize everything and always build on Kubernetes** (via Scaleway’s [Kapsule](https://www.scaleway.com/en/kubernetes-kapsule/) product). Then, due to the complexity of data sources, services and customers it has to manage, Everdian opted for a microservices-focused approach. \n\nTheir main feature request was for dedicated control planes (in general availability since Autumn 2023) to enable higher levels of resilience and controls. 
Then, they built everything around those Kubernetes clusters: backups, data and videos, all hosted on Object Storage.\n\nEverdian’s tech teams have notably praised the simplicity and efficiency of Scaleway Kapsule, especially compared with larger CSPs’ equivalent products.\n\nThey were also reassured by [VPC](https://www.scaleway.com/en/vpc/), where the ability to communicate between different zones, thanks to Scaleway’s Multi-AZ offering, where data is redundant across several availability zones, was perceived as a great advantage. Everdian started in the PAR 1 data center region, then extended to PAR 2 to access those lovely new GPUs, whilst accessing a better level of resilience and reliability.\n\nOne missing feature is still the VPN, that Everdian completed themselves for their internal tooling. Their feedback has been noted and Scaleway’s team is working on it.\n\nMilinaire’s current wishlist now includes managed Elasticsearch: a wish Scaleway heard, and so is now looking for others’ points of view in its product discovery approach.\n\n\n## Preparing the next stages of growth by faster onboarding\n\nEverdian found that Scaleway was the ideal cloud provider to ramp up their teams’ technical expertise quickly. “**On Scaleway’s platform, our tech teams were operational in a matter of weeks; much faster than with hyperscaler cloud providers**,” says Milinaire, who adds:\n\n**“We hired a DevSecOps. I didn’t explain anything about Scaleway to him. I just said ‘this is in the Scaleway console, figure it out. You can do it!’ Not long afterwards, he was creating VPCs everywhere!”**\n\nAnother example was remote employees, who require quick and autonomous onboarding to use other services in a matter of days, without any mentoring or further explanations.\n\nCompared with hyperscalers, this accessibility helps Everdian’s teams be more productive and enables the company to welcome new tech staff more quickly, thereby boosting their impact. 
With other providers, a non-knowledgeable team member would take weeks to onboard, after reading documentation before being able to start using their first cloud products. \n\nEverdian also cites the **proximity of Scaleway’s support staff as a key differentiator: “my feedback is always taken into consideration”**, says Milinaire.\n\nThis will be critical for Everdian’s next stages of growth, given its ambitious roadmap. Such as reworking the organization and project leveraging new features, along with the always improving IAM and network capabilities of Scaleway. \n\nAnother area of improvement will be the AI model optimization - as Everdian grows, their consumption of compute power grows exponentially - needing detailed attention of their AI scientists and technology teams. \n","createdAt":"2024-07-22T12:58:29.370Z","updatedAt":"2024-08-08T12:47:00.644Z","publishedAt":"2024-07-22T13:09:56.966Z","locale":"en","tags":"AI\nTestimonial","popular":false,"articleOfTheMonth":false,"category":"Scale","timeToRead":4,"excerpt":"AI startup Everdian chose Scaleway not just for its powerful GPU clusters, but also because Scaleway’s simplicity means new team members can be onboarded in just a few weeks. 
More inside!","author":"Jean-Baptiste Fourmont","h1":"How Everdian delivers “life-saving” real-time critical insights, via AI - interview with Cedric Milinaire","createdOn":"2024-07-22","image":{"data":{"id":3241,"attributes":{"name":"Natural-Language-Processing-AI-Illustration-Blog.webp","alternativeText":null,"caption":null,"width":1216,"height":752,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp","hash":"large_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","mime":"image/webp","name":"large_Natural-Language-Processing-AI-Illustration-Blog.webp","path":null,"size":"284.79","width":1000,"height":618},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp","hash":"small_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","mime":"image/webp","name":"small_Natural-Language-Processing-AI-Illustration-Blog.webp","path":null,"size":"108.87","width":500,"height":309},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp","hash":"medium_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","mime":"image/webp","name":"medium_Natural-Language-Processing-AI-Illustration-Blog.webp","path":null,"size":"194.75","width":750,"height":464},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp","hash":"thumbnail_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","mime":"image/webp","name":"thumbnail_Natural-Language-Processing-AI-Illustration-Blog.webp","path":null,"size":"38.57","width":245,"height":152}},"hash":"Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","ext":".webp","mime":"image/webp","size":366.43,"url":"https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_B
log_1f9c21fa87.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2024-02-15T13:43:21.487Z","updatedAt":"2024-10-14T14:18:30.813Z"}}},"recommendedArticles":{"data":[{"id":407,"attributes":{"title":"ai-and-quality-giskard-combessie","path":"ai-and-quality-giskard-combessie/","description":"In a world where AI has become more and more a common presence in our lives, the quest for quality AI solutions has taken center stage. We sat down with Alexandre Combessie, co-founder and CEO of [Giskard.AI](https://www.giskard.ai/) - which notably exhibited at ai-PULSE last month - to delve into the challenges around ethics and quality faced by AI solutions and their users. \n\nWith a background steeped in AI expertise, Alexandre brings a wealth of experience to the table. Before creating Giskard.AI, he spent five years at Dataiku, focusing on building models for various industries, particularly in NLP (natural language processing) and time series. His experience in crafting models for large-scale enterprises, including in healthcare and financial services, laid the foundations for Giskard.AI’s later innovative work. \n\nToday, Giskard is a French firm that specializes in AI quality, which Combessie co-founded in 2021. It ensures AI professionals maximize Machine Learning (ML) algorithm quality, minimizing errors, biases and vulnerabilities. Giskard is as such notably establishing itself as the leading software platform for aiding compliance with upcoming AI regulations and standards.\n\n\n## Quality: The multifaceted essence of AI\n\nNow that conversing with AI has become commonplace, the distinction between a run-of-the-mill AI and a quality-driven one has never been more important. Combessie emphasizes that quality in results spans multiple dimensions, with two key factors standing out:\n\n1. 
**Generative AI's hallucinations**\nAt the heart of generative AI lies the ability to create and construct, often leading to intriguing \"hallucinations,\" whereby the AI conjures up information that is false, leading to a range of significant issues. Such fabrications could contribute to the spread of fake news, error of diagnosis, and heighten the risk of poor human decision-making. Moreover, the possibility of errors in critical areas like medical diagnoses due to AI-generated inaccuracies is a particularly concerning aspect. Alexandre encourages us to explore hallucinations even further than has been established to date, to understand both their potential and limitations.\n\n2. **The ethical challenge**\nThe ethical dimension of AI looms large: the algorithms that fuel AI models are derived from existing datasets, potentially perpetuating biases and prejudices. The crucial question arises: could an algorithm be toxic, or offensive? This challenge of ethics and bias calls for profound scrutiny. Even before generative AI’s recent exponential growth, quality concerns were evident, spanning ethical biases in scoring algorithms, lack of transparency regarding AI models’ decisions, and performance issues in production. \n \nEthical biases in consumer applications like facial recognition have already been unearthed, and in the industrial sphere, predictive maintenance or fraud detection could prove particularly sensitive to AI’s potential mistakes. To investigate such cases, drawing on two years of dedicated quality work pre-ChatGPT, Giskard.AI was able to formulate and test diverse solutions that extend beyond chatbots to various business applications of AI, such as tabular data.\n\n\n## Stepping into a maturing market: ethics, risk, and performance\n\nA key hurdle in AI's journey toward quality and ethics is the market's maturity. 
Concepts like risk, ethics, and performance are relatively new to the AI world, demanding both internal team education and external regulation. The importance of evangelization is at the center of those changes, and not just within a company, but also in terms of regulatory compliance. The objective is clear: minimize errors, offense, and legal concerns for high-risk AI models, which are starting to impact all we see and read.\n\nCombessie's engineering background parallels his dedication to ensuring quality in AI. Drawing a captivating analogy to civil engineering, he emphasizes the high standards that underlie his work. He envisions building a bridge between data scientists and those who grasp the significance of AI's ethical and quality dimensions. \n\nBeyond the accuracy of a result lies the model metrics. Which metrics truly matter for a model to be seen as a great one? Combessie rejects the notion of relying on one single KPI. Such an approach may provide limited accuracy and overlook important aspects of model performance.\n\nThe concept of \"robustness metrics\" emerges as a vital topic, especially for models deployed in production environments. Combessie shares a compelling example from the real estate sector, where AI-driven decisions led to catastrophic financial losses. Zillow deployed an AI algorithm to predict the prices of the homes they would sell. After having put the model in autopilot mode, they lost over $500 million in six months. They stopped trading, and fired their entire data science team. Ensuring AI models do not lead to such disastrous outcomes is a critical aspect of maintaining robustness.\n\n\n## Shaping Ethical AI \n\nThe responsibility for ethical AI lies with the companies that develop and deploy it. If a company's AI lacks ethics or perpetuates discrimination, that company is legally accountable. 
In high-risk AI scenarios, failure to adhere to ethical standards could result in fines of up to 6% of a company's revenue, according to the upcoming EU AI Act. \n\nAs stated on [Giskard.AI’s blog](https://www.giskard.ai/knowledge/the-eu-ai-act-what-can-you-expect), under the AI Act (which was passed early December), generative foundation models like ChatGPT will be subject to strict controls. These include transparency, or public disclosure of which content was created by AI; declaring what copyrighted data was used in training; and barriers to stop such models generating illegal content.\n\nWith these constraints in mind, Giskard.AI empowers companies to adhere to legislation by simplifying and measuring their compliance efficiently. As such, Giskard.AI has taken the lead in advancing ethical AI. It provides the tools to assess discriminatory biases in models, particularly concerning attributes like age, gender, and ethnicity. Collaborating with organizations like AFNOR, Giskard.AI contributes to setting standards that safeguard against biases.\n\n\n## Conclusion\n\nFor Combessie and Giskard, moving forwards, the key will be finding the ideal balance between innovation and regulation. \"Having testing systems for ML models which are easy to integrate by data scientists are key to making the compliance process to the regulation as easy as possible,\" says Combessie, \"so that regulation is possible without slowing down innovation, but also respecting the rights of citizens.\"\n\nFurthermore, as Giskard does AI \"in an open-source way\", he adds, \"our methods are transparent and auditable\".\n\n\n### About Giskard\n\n_Giskard is a French software publisher specializing in Artificial Intelligence (AI) quality. Founded in 2021 by three AI experts, including two former engineers from Dataiku and a former data scientist from Thales, Giskard's mission is to help AI professionals ensure the quality of their algorithms. 
It assists in avoiding the risks of errors, biases, and vulnerabilities in AI algorithms._\n\n_Giskard is backed by renowned investors in the AI field, including Elaia and the CTO of Hugging Face. In August 2023, Giskard received a strategic investment from the European Commission to establish itself as the leading software platform for facilitating compliance with the European AI regulation._\n\n_Learn more: [Giskard.AI](https://www.giskard.ai/)_\n\n","createdAt":"2023-12-18T09:19:03.924Z","updatedAt":"2023-12-18T09:20:36.590Z","publishedAt":"2023-12-18T09:20:36.576Z","locale":"en","tags":"AI\nai-PULSE","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":4,"excerpt":"How can AI remain innovative whilst complying with regulations and standards? French startup and ai-PULSE exhibitor Giskard.AI has the answer...","author":"James Martin","h1":"AI and the quality conundrum, with Giskard.AI’s Alexandre Combessie","createdOn":"2023-12-18","image":{"data":{"id":3098,"attributes":{"name":"Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2.webp","alternativeText":null,"caption":null,"width":1216,"height":752,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f.webp","hash":"large_Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f","mime":"image/webp","name":"large_Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2.webp","path":null,"size":22.33,"width":1000,"height":618},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f.webp","hash":"small_Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f","mime":"image/webp","name":"small_Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2.webp","path":null,"size":9.88,"width":500,"height":309},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Brainstorm_Header_Blog_Generic6_post_image_d50
f7bcba2_b0d794848f.webp","hash":"medium_Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f","mime":"image/webp","name":"medium_Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2.webp","path":null,"size":16.43,"width":750,"height":464},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f.webp","hash":"thumbnail_Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f","mime":"image/webp","name":"thumbnail_Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2.webp","path":null,"size":3.64,"width":245,"height":152}},"hash":"Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f","ext":".webp","mime":"image/webp","size":33.04,"url":"https://www-uploads.scaleway.com/Brainstorm_Header_Blog_Generic6_post_image_d50f7bcba2_b0d794848f.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2023-12-01T09:27:59.551Z","updatedAt":"2023-12-01T09:27:59.551Z"}}},"recommendedArticles":{"data":[]},"meta":{"id":1285,"title":"AI and the quality conundrum, with Giskard.AI’s Alexandre Combessie","description":"How can AI remain innovative whilst complying with regulations and standards? French startup and ai-PULSE exhibitor Giskard.AI has the answer...","ogtype":null,"ogtitle":null,"ogdescription":"How can AI remain innovative whilst complying with regulations and standards? French startup and ai-PULSE exhibitor Giskard.AI has the answer...","noindex":false},"localizations":{"data":[]}}},{"id":430,"attributes":{"title":"get-started-AI-cost-emissions-mindmatch","path":"get-started-ai-cost-emissions-mindmatch/","description":"_This a guest post by Zofia Smoleń, Founder of Polish startup [MindMatch](https://mindmatch.pl/), a member of Scaleway's [Startup Program](https://www.scaleway.com/en/startup-program/) 🚀_\n\nOne of the greatest developments of recent years was making computers speak our language. 
Scientists have been working on language models (which are basically models predicting next sequence of letters) for some time already, but only recently they came up with models that actually work - Large Language Models (LLMs). The biggest issue with them is that they are… Large.\n\nLLMs have billions of parameters. In order to run them, you have to own quite a lot of computer power and use a significant amount of energy. For instance, OpenAI spends $700 000 daily on ChatGPT, and their model is highly optimized. For the rest of us, this kind of spending is neither good for your wallet, nor for the climate.\n\nSo in order to limit your spending and carbon footprint, you cannot just use whatever OpenAI or even Hugging Face provides. You have to dedicate some time and thought to come up with more frugal methods of getting the job done. That is exactly what [Scaleway Startup Program member] MindMatch has been doing lately.\n\nMindMatch is providing a place where Polish patients can seek mental help from specialists. Using an open-source LLM from Hugging Face, MindMatch recognizes their patients’ precise needs based on a description of their feelings. With that knowledge, MindMatch can find the right therapy for their patients. It is a Polish-only website, but you can type in English (or any other language) and the chatbot ([here](https://mindmatch.pl/chat)) will understand you and give you its recommendation. In this article, we wrap their thoughts on dealing with speed and memory problems in production.\n\n\n## 1. Define your needs\n\nWhat do you need to do exactly? Do you need to reply to messages in a human-like manner? Or do you just need to classify your text? Is it only topic extraction? \n\nRead your bibliography. Check how people approached your task. Obviously, start from the latest papers, because in AI (and especially Natural Language Processing), all the work becomes obsolete and outdated very quickly. 
But… taking a quick look at what people did before Transformers (the state-of-the-art model architecture behind ChatGPT) can do no harm. Moreover, you may find solutions that resolve your task almost as well as any modern model would (if your task is comparatively easy) and are simpler, faster and lighter.\n\nYou could start by simply looking at articles on Towards data science, but we also encourage you to browse through Google Scholar. A lot of work in data science is documented only in research papers so it actually makes sense to read them (as opposed to papers in social science).\n\nWhy does this matter? You don’t need a costly ChatGPT-like solution just to tell you whether your patient is talking about depression or anxiety. Defining your needs and scouring the internet in search of all solutions applied so far might give you a better view on your options, and help select those that make sense in terms of performance and model size.\n\n\n## 2. Set up your directory so that you can easily switch between different models and architectures\n\nThis is probably the most obvious step for all developers, but make sure that you store all the models, classes and functions (and obviously constants - for example labels that you want to classify) in a way that allows you to quickly iterate, without needing to dig deep into code. This will make it easier for you, but also for all non-technical people that will want to understand and work on the model. \n\nWhat worked well for MindMatch was even storing all the dictionaries in an external database that was modifiable via Content Management Systems. One of those dictionaries was a list of classes used by the model. This way non-technical people were able to test the model. Obviously, to reduce the database costs, MindMatch had to make sure that they only pull those classes when necessary.\n\nAlso, the right documentation will make it easier for you to use MLOps tools such as Mlflow. 
Even if it is just a prototype yet, it is better for you to prepare for the bright future of your product and further iterations.\n\nThere is a lot of information and guidance about how to set the directory so that it is neat and tidy. Browse Medium and other portals until you find enough inspiration for your purpose.\n\n\n## 3. Choose the right deployment model\n\nNow you’ve defined your needs, it’s time to choose the right solution. Since you want to use LLMs, you will most likely not even think about training your own model from scratch (unless you are a multi-billion company or a unicorn startup with high aspirations). So your options are limited to pre-trained models.\n\nFor the pre-trained models, there are basically two options. You can either call them through an API and get results generated on an external computer instance (what OpenAI offers), or you can install the model on your computer and run it there as well (that is what Hugging Face offers, for example).\n\nThe first option is usually more expensive, but that makes sense - you are using the computer power of another company, and it should come with a price. This way, you don’t have to worry about scalability. Usually, proprietary models like OpenAI’s work like that, so on top of that you also pay a fee for just using the model. But some companies producing open source models, like Mistral, also provide APIs. \n\nThe second option (installing the model on your computer) comes only with open source models. So you don’t pay for the model itself, but you have to run it on your computer. This option is often chosen by companies who don’t want to be dependent on proprietary models and prefer to have more control over their solution. It comes with a cost: that of storage and computing power. 
It is pretty rare for organizations to own physical instances with memory sufficient for running LLM models, so most companies (like MindMatch) choose to use cloud services for that purpose.\n\nThe choice between proprietary and open-source models depends on various factors, including the specific needs of the project, budget constraints, desired level of control and customization, and the importance of transparency and community support. In many cases it also depends on the level of domain knowledge within the organization. Proprietary models are usually easier to deploy.\n\n\n## 4. Fit the model to your purpose\n\nThe simpler the better. You should look for models that exactly match your needs. Assuming that you defined your needs already and did your research on Google Scholar, you should already know what solutions you are looking for. What now, then? Chances are, there are already at least a dozen of models that can solve your problem.\n\nWe strongly advise you to have a look at Hugging Face’s “Models” section. Choose the model type; and then, starting from the most popular (it usually makes the most sense), try those models on your data. Pay particular attention to the accuracy and size of the model. The smaller the model is, the cheaper it is. As for accuracy, remember that your data is different from what the model was trained on. So if you want to use your solution for medical applications, you might want to try models that were trained on medical data.\n\nAlso, remember that the pre-trained models are just language models. They don’t have any specialist knowledge. In fact, they rarely see any domain-specific words in training data. So don’t expect the model to talk easily about Euphyllophytes plants without any additional fine-tuning, Retrieval Augmented Generation (RAG) or at least prompt engineering. Any of those augmentations come with higher computing power cost.\n\nSo you need to be smart about what exactly you make your model do. 
For example, when MindMatch tried to use zero-shot classification to recognize ADHD (a phrase rarely seen in training datasets), they decided to make it recognize Hyperactivity instead. Hyperactivity being a more frequent keyword that could easily act as a proxy for ADHD, allowed MindMatch to improve accuracy without deteriorating speed.\n\n\n## 5. Run it on the right machine\n\nGPU or CPU? Many would assume that the answer lies simply between the speed and the price, as GPUs are generally more expensive and faster. That is usually true, but not always. Here are a few things to consider.\n\n\n### Model Size, Complexity and Parallelisation\n\nLarge and complex models, like GPT-4, benefit significantly from the processing power of GPUs, especially for tasks like training or running multiple instances simultaneously. GPUs have many more computing cores than CPUs, making them adept at parallel processing. This is particularly useful for the matrix and vector computations common in deep learning.\nBut in order to start up GPU processing data must be transferred from RAM to GPU memory (GRAM), which can be costly. If the data is large and amenable to parallel processing, this overhead is offset by faster processing on the GPU.\n\nGPUs may not perform as well with tasks that require sequential processing, such as those involving Recurrent Neural Networks (RNNs) or Long Short-Term Memory (LSTM) networks (this applies to some implementations of Natural Language Processing). The sequential computation in LSTM layers, for instance, doesn't align well with the GPU's parallel processing capabilities, leading to underutilization (10% - 20% GPU load).\n\nDespite their limitations in sequential computation, GPUs can be highly effective during the backpropagation phase of LSTM, where derivative computations can be parallelized, leading to higher GPU utilization (around 80%). \n\n\n### Inference vs. 
Training\n\nFor training large models, GPUs are almost essential due to their speed and efficiency (not in all cases, as mentioned above). However, for inference (especially with smaller models or less frequent requests), [CPUs can be sufficient and more cost-effective](https://www.scaleway.com/en/blog/why-cpus-also-make-sense-for-ai-inference/). If you are using a pre-trained model (you most probably are), you only care about inference, so don’t assume that GPU will be better - compare it with CPUs.\n\n\n### Scalability, Budget and Resources\n\nIf you need to scale up your operations (e.g., serving a large number of requests simultaneously), GPUs offer better scalability options compared to CPUs. \nGPUs are more expensive and consume more power. If budget and resources are limited, starting with CPUs and then scaling up to GPUs as needed can be a practical approach.\n\n\n## 6. Optimize it even further (for readers with technical backgrounds)\n\nAre all of the above obvious to you? Here are other techniques (that often require you to dig a little deeper) that allow for optimized runtime and memory.\n\n\n### Quantization\n\nQuantization is a technique used to optimize Large Language Models (LLMs) by reducing the precision of the model’s weights and activations. Typically, LLMs use 32 or 16 bits for each parameter, consuming significant memory. Quantization aims to represent these values with fewer bits, often as low as eight bits, without greatly sacrificing performance.\n\nThe process involves two key steps: rounding and clipping. Rounding adjusts the values to fit into the lower bit format, while clipping manages the range of values to prevent extremes. 
This reduction in precision and range enables the model to operate in a more compact format, saving memory space.\n\nBy quantizing a model, several benefits arise:\n- Reduced Memory Footprint: The model occupies less space, allowing larger models to fit into the same hardware\n- Enhanced Transfer Efficiency: It speeds up the model, especially in scenarios where bandwidth limits performance.\n\nHowever, quantizing LLMs comes with challenges:\n- Quantizing weights is straightforward as they are fixed post-training. But quantizing activations (input of transformer blocks) is more complex due to their varying range and outliers\n- In many GPUs, quantized weights (INT8) need to be converted back to higher precision (like FP16) for calculations, affecting efficiency\n- Managing the dynamic range of activations is crucial, as they often contain outliers. Techniques like selective precision (using higher precision for certain activations) or borrowing the dynamic range from weights are used.\n\n\n### Pruning\n\nPruning involves identifying and removing parameters in a model that are either negligible or redundant. One common method of pruning is sparsity, where values close to zero are set to zero, leading to a more condensed matrix representation that only includes non-zero values and their indices. This approach reduces the overall space occupied by the matrix compared to a fully populated, dense matrix.\n\nPruning can be categorized into two types:\n\n- Structured Pruning: This method reduces the model's size by eliminating entire structural elements like neurons, channels, or layers. Structured pruning effectively decreases the model size while preserving the general structure of the Large Language Model (LLM). It is more scalable and manageable for larger models compared to unstructured pruning\n- Unstructured Pruning: In this approach, individual weights or neurons are targeted independently, often by setting a threshold and zeroing out parameters that fall below it. 
It results in a sparser, irregular model structure that may require specialized handling. Unstructured pruning typically needs further fine-tuning or retraining to restore model accuracy. In large models with billions of parameters, this can become a complex and time-consuming process. To address this, techniques such as iterative fine-tuning, combining parameter-efficient tuning with pruning, and the implementation of SparseGPT are employed.\n\nSparseGPT, specifically, adopts a one-shot pruning strategy that bypasses the need for retraining. It approaches pruning as a sparse regression task, using an approximate solver that seeks a sufficiently good solution rather than an exact one. This approach significantly enhances the efficiency of SparseGPT.\n\nIn practice, SparseGPT has been successful in achieving high levels of unstructured sparsity in large GPT models, such as OPT-175B and BLOOM-176B. It can attain over 60% sparsity - a higher rate than what is typically achieved with structured pruning - with only a minimal increase in perplexity, which measures the model's predictive accuracy.\n\n\n### Distillation\n\nDistillation is a method of transferring knowledge from a larger model (teacher) to a smaller one (student). This is done by training the student model to mimic the teacher’s behavior, focusing on matching either the final layer outputs (logits) or intermediate layer activations. An example of this is DistilBERT, which retains most of BERT's capabilities but at a reduced size and increased speed. Distillation is especially useful when training data is scarce.\nHowever, be careful if you want to distill a model! Many state-of-the-art LLMs have restrictive licenses that prohibit using their outputs to train other LLMs. It is usually ok though, to use open-source models to train other LLMs.\n\n\n### Model serving techniques\n\nModel serving techniques aim to maximize the use of memory bandwidth during model execution. 
Key strategies include:\n- In-flight Batching: Processing multiple requests simultaneously, continuously replacing finished sequences with new requests to optimize GPU utilization.\n- Speculative Inference: Generating multiple future tokens based on a draft model, and then verifying or rejecting these predictions in parallel. This approach allows for faster text generation compared to the traditional token-by-token method.\n\n\n## Conclusion\n\nThere are many ways to optimize model performance, leading not only to lower costs but also to less waste and lower carbon footprint. Start from a high-level definition of your needs, test different solutions and then dig into details, reducing the cost even further. MindMatch is still testing different options for reaching satisfactory accuracy with lower computational costs - it is a never-ending process.\n","createdAt":"2024-02-26T14:20:53.327Z","updatedAt":"2024-02-26T14:25:12.462Z","publishedAt":"2024-02-26T14:25:12.395Z","locale":"en","tags":"AI\nStartups\nSustainability","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":7,"excerpt":"How can startups take their first steps with Large Language Models (LLMs)? Leveraging AI needn't cost the earth, explains MindMatch's Zofia Smoleń","author":"Zofia Smoleń","h1":"How to get started in AI without excessive cost, or emissions! 
- MindMatch guest post","createdOn":"2024-02-26","image":{"data":{"id":3240,"attributes":{"name":"Automatic-Speech-Recognition-AI-Illustration-Blog.webp","alternativeText":null,"caption":null,"width":1216,"height":752,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp","hash":"large_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451","mime":"image/webp","name":"large_Automatic-Speech-Recognition-AI-Illustration-Blog.webp","path":null,"size":75.83,"width":1000,"height":618},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp","hash":"small_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451","mime":"image/webp","name":"small_Automatic-Speech-Recognition-AI-Illustration-Blog.webp","path":null,"size":28.21,"width":500,"height":309},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp","hash":"medium_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451","mime":"image/webp","name":"medium_Automatic-Speech-Recognition-AI-Illustration-Blog.webp","path":null,"size":51,"width":750,"height":464},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp","hash":"thumbnail_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451","mime":"image/webp","name":"thumbnail_Automatic-Speech-Recognition-AI-Illustration-Blog.jpg","path":null,"size":8.66,"width":245,"height":152}},"hash":"Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451","ext":".webp","mime":"image/webp","size":528.03,"url":"https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"
2024-02-15T13:43:21.303Z","updatedAt":"2024-02-26T14:23:17.313Z"}}},"recommendedArticles":{"data":[{"id":403,"attributes":{"title":"ai-in-practice-generating-video-subtitles","path":"ai-in-practice-generating-video-subtitles/","description":"Scaleway is a French company with an international vision, so it is imperative that we provide information to our 550+ employees in both English and French, to ensure clear understanding and information flow. We create a diverse set of training videos for internal usage, with some being originally voiced in English, and others in French. In all cases they should include subtitles for both languages.\n\nCreating subtitles is a time-consuming process that we quickly realized would not scale. Fortunately, we were able to harness the power of AI for this exact task. With the help of [OpenAI’s Whisper](https://github.com/openai/whisper), the University of [Helsinki’s Opus-MT](https://github.com/Helsinki-NLP/Opus-MT) and a bit of code, we were able to not only transcribe, and when required, translate our internal videos; but we could also generate subtitles in [the srt format](https://en.wikipedia.org/wiki/SubRip#:~:text=by%20that%20program.-,SubRip%20file%20format,-%5Bedit%5D), that we can simply import into a video editing software or feed to a video player.\n\n\n## OpenAI’s Whisper\n\nWhisper is an Open Source model created by OpenAI. It is a general-purpose speech recognition model that is able to identify and transcribe a wide variety of spoken languages. It is one of the most popular models around today and is released under MIT license.\n\nOpenAI provides a Python SDK that will interact with the model, which has a wide variety of “flavors” based on the accuracy of their results: tiny, base, small, medium, and large. 
Larger models have been trained with a greater amount of parameters or examples, which makes them larger in size, and more resource-hungry — the _tiny_ version of the model requires 1GB of VRAM (Video RAM) and the _large_ version requires around 10GB.\n\n\n## Helsinki-NLP’s Opus-MT\n\nThe University of Helsinki made its own Open Source text translation models available based on the Marian-MT framework used by Microsoft Translator. Opus-MT models are provided as language pairs: translation source, and translation target, meaning that the model Helsinki-NLP/opus-mt-fr-en will translate text in French (fr) to English (en), and the other way around with Helsinki-NLP/opus-mt-en-fr.\n\nOpus-MT can be used via the [Transformers Python library](https://huggingface.co/docs/transformers/index) from Hugging Face or using Docker. It is an Open Source project released under the MIT License and requires you to cite the OPUS-MT paper on your implementations:\n\n```\n@InProceedings{TiedemannThottingal:EAMT2020,\n author = {J{\\\"o}rg Tiedemann and Santhosh Thottingal},\n title = {{OPUS-MT} — {B}uilding open translation services for the {W}orld},\n booktitle = {Proceedings of the 22nd Annual Conferenec of the European Association for Machine Translation (EAMT)},\n year = {2020},\n address = {Lisbon, Portugal}\n }\n```\n\n## Generating subtitles\n\nCombining these two models into a subtitle-generating service is only a matter of adding some code to “glue” them together. But before diving into the code, let’s review our requirements:\n\nFirst, we need to create a Virtual Machine capable of running AI models without a hitch, and the [NVIDIA H100-1-80G GPU instance](https://www.scaleway.com/en/h100-pcie-try-it-now/) is a great choice.\n\nWith the type of instance clear, we can now focus on the functional requirements. We want to pass in a video file as input to Whisper to get a transcript. 
The second step will be to translate that transcript using OPUS-MT from a specific source language to a target language. Finally, we want to create a subtitle file in the target language that is in sync with the audio.\n\n\n### Setting up Whisper\n\nYou will find the latest information about setting it up on [their GitHub repository](https://github.com/openai/whisper), but in general, you can install the Python library using pip:\n\n```\npip install -U openai-whisper\n```\n\nWhisper relies heavily on the FFmpeg project for manipulating multimedia files. FFmpeg can be installed via APT:\n\n ```\n sudo apt install ffmpeg -y\n ```\n\n### The code\n\n\n#### 1. A simple text transcription\n\nThis basic example is the most straightforward way to transcribe audio into text. After importing the Whisper library, you load a _flavor_ of the model by passing [a string with its name](https://github.com/openai/whisper/#available-models-and-languages) to the load_model method. In this case, the _base_ model is accurate enough, but some use cases may require larger or smaller model flavors.\n\nAfter loading the model, you load the audio source by passing the file path. Notice that you can use both audio and video files, and in general, any file type with audio that is [supported by ](https://ffmpeg.org/ffmpeg-formats.html)FFmpeg.\n\nFinally, you make use of the transcribe method of the model by passing it the loaded audio. As a result, you get a dictionary that amongst other items, contains the whole transcription text.\n\n```python\n#main.py\n\nimport whisper\n\nmodel = whisper.load_model(\"base\")\naudio = whisper.load_audio(\"input_file.mp4\")\nresult = model.transcribe(audio)\n\nprint(result[\"text\"])\n```\n\nThis basic example gives you the main tools needed for the rest of the project: loading a model, loading an input audio file, and transcribing the audio using the model. 
This is already a big step forward and puts us closer to our goal of generating a subtitle file, however, you may have noticed that the resulting text doesn’t include any time references, it’s only text. Syncing this transcribed text with the audio would be a task that would require large amounts of manual work, but fortunately, Whisper’s transcription process also outputs _segments_ that are time-coded.\n\n\n#### 2. Segments\n\nHaving time-coded segments means you can pinpoint them to their specific start and end times during the clip. For instance, if the first speech segment in the clip is “We're no strangers” and it starts at 00:17:50 and ends at 00:18:30, you will get that information in the segment dictionary, giving you all you need to create an srt subtitle file, now all you have to do is to properly format it to conform with the appropriate syntax.\n\n```python\n#Getting the transcription segments\nfrom datetime import timedelta #For when getting the segment time\nimport os #For creating the srt file in the filesystem\nimport whisper\n\nmodel = whisper.load_model(\"base\")\naudio = whisper.load_audio(\"input_file.mp4\")\nresult = model.transcribe(audio)\n\nsegments = result[\"segments\"] #A list of segments\n\nfor segment in segments:\n\t#...\n```\n\n#### 3. An srt subtitle file\n\nSubtitle files in the srt format are divided into sequences that include the start and end timecodes — separated by the “ --\u003e \" string — followed by the caption text ending in a line break. Here’s an example:\n\n```\n1\n00:01:26,612 --\u003e 00:01:29,376\nTook you long enough!\nDid you find it? where is it?.\n\n2\n00:01:39,101 --\u003e 00:01:42,609\nI did. But I wish I didn't.\n\n3\n00:02:16,339 --\u003e 00:02:18,169\nWhat are you talking about?\n```\n\nEach segment contains an ID field that can be used as the sequence number. 
The start and end times — the moments during which the subtitle is supposed to be on screen — can be obtained by padding the `timedelta` of each of the corresponding fields with zeroes (we’re keeping things simple here, but note that more accurate subtitle syncing results have been achieved by projects such as [stable-ts](https://github.com/jianfch/stable-ts)). And the caption is the segment’s text. Here is the code that will generate each formatted subtitle sequence: \n\n```python\n#Getting segments transcription and formatting it as an srt subtitle\n\n#...\n\nfor segment in segments:\n\tstartTime = str(0)+str(timedelta(seconds=int(segment['start'])))+',000'\n\tendTime = str(0)+str(timedelta(seconds=int(segment['end'])))+',000'\n\ttext = segment['text']\n\n\tsubtitle_segment = f\"{segment['id'] + 1}\\n{startTime} --\u003e {endTime}\\n{ text }\\n\\n\"\n```\n\nAll that is left is to write each `subtitle_segment` to a new file:\n\n```python\n#Writing to the output subtitle file\n\twith open(\"subtitle.srt\", 'a', encoding='utf-8') as srtFile:\n \tsrtFile.write(subtitle_segment)\n```\n\nThe complete example code should look like this:\n\n```python\n#main.py\n\nfrom datetime import timedelta\nimport os\nimport whisper\n\nmodel = whisper.load_model(\"base\")\naudio = whisper.load_audio(\"input_file.mp4\")\nresult = model.transcribe(audio)\n\nsegments = result[\"segments\"]\n\nfor segment in segments:\n startTime = str(0)+str(timedelta(seconds=int(segment['start'])))+',000'\n endTime = str(0)+str(timedelta(seconds=int(segment['end'])))+',000'\n text = segment['text']\n\n subtitle_segment = f\"{segment['id'] + 1}\\n{startTime} --\u003e {endTime}\\n{ text }\\n\\n\"\n #Writing to the output subtitle file\n with open(\"subtitle.srt\", 'a', encoding='utf-8') as srtFile:\n \t srtFile.write(subtitle_segment)\n```\n\nNow to try it out you can download _this example file _— Or bring your own! 
—_ _with wget for instance:\n\n```sh\nwget https://scaleway.com/ai-book/examples/1/example.mp4 -O input_file.mp4\n```\n\nAnd then simply run the script:\n\n```sh\npython3 main.py\n```\n\nAfter only a few seconds — because you’re using [one of the fastest GPU instances on the planet](https://www.scaleway.com/en/h100-pcie-try-it-now/) —, the script will complete running and you will have a new `subtitle.srt` file that you can use during your video editing process or to load while playing the video file, great! But… the subtitle file is in the same language as the video. It is indeed useful as it is, but you probably want to reach a wider audience by translating it into different languages. We’ll explore that next.\n\n\n#### 4. Translating a segment’s text\n\nTranslating each segment’s text comes down to importing `MarianMTModel` and` MarianTokenizer` from Hugging Face’s Transformers library, passing the desired model name, and generating the translation. Install the dependencies by running the following command:\n\n```sh\npip install transformers SentencePiece\n```\n\nIn this example \"Helsinki-NLP/opus-mt-fr-en\" is used to translate from French to English. 
The `translate` abstracts the translation process by requiring a source string and returning a translated version of it.\n\n```python\nfrom transformers import MarianMTModel, MarianTokenizer\n# ...\n\nopus_mt_model_name = \"Helsinki-NLP/opus-mt-fr-en\"\ntokenizer = MarianTokenizer.from_pretrained(opus_mt_model_name)\nopus_mt_model = MarianMTModel.from_pretrained(opus_mt_model_name)\n\ndef translate(str):\n\ttranslated = opus_mt_model.generate(**tokenizer(str, return_tensors=\"pt\", padding=True))\n\tres = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]\n\treturn res[0]\n```\n\nThere’s no need to worry about the `**tokenizer` function for now, just know that it receives the source string and some additional parameters that we can leave untouched.\n\nThe complete code example looks like this:\n\n```python\nfrom datetime import timedelta\nimport os\nimport whisper\nfrom transformers import MarianMTModel, MarianTokenizer\n\nmodel = whisper.load_model(\"base\")\naudio = whisper.load_audio(\"input_file.mp4\")\nresult = model.transcribe(audio)\n\nsegments = result[\"segments\"]\n\nopus_mt_model_name = \"Helsinki-NLP/opus-mt-fr-en\"\ntokenizer = MarianTokenizer.from_pretrained(opus_mt_model_name)\nopus_mt_model = MarianMTModel.from_pretrained(opus_mt_model_name)\n\ndef translate(str):\n\ttranslated = opus_mt_model.generate(**tokenizer(str, return_tensors=\"pt\", padding=True))\n\tres = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]\n\treturn res[0]\n\nfor segment in segments:\n startTime = str(0)+str(timedelta(seconds=int(segment['start'])))+',000'\n endTime = str(0)+str(timedelta(seconds=int(segment['end'])))+',000'\n text = translate(segment['text'])\n\n\n subtitle_segment = f\"{segment['id'] + 1}\\n{startTime} --\u003e {endTime}\\n{ text }\\n\\n\"\n #Writting to the output subtitle file\n with open(\"subtitle.srt\", 'a', encoding='utf-8') as srtFile:\n \t srtFile.write(subtitle_segment)\n```\n\nThat’s it! 
Even though the results are not perfect, and you may need to make a few manual adjustments here and there, considering the rate at which AI is advancing, things can only get better in the time to come.\n\nYou can now extend and adapt this code to your own needs, how about making it dynamically accept a file path as an input parameter? Or what if you made it into a web service others can easily take advantage of? The choice is yours! just don’t forget to cite the OPUS-MT paper on your implementations if you’re using the translation feature.","createdAt":"2023-11-28T18:00:37.698Z","updatedAt":"2024-02-07T15:14:18.482Z","publishedAt":"2023-11-30T08:26:58.333Z","locale":"en","tags":"ai\nH100","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":5,"excerpt":"In this practical example, we roll up our sleeves and put Scaleway's H100 Instances to use by leveraging a couple of open source ML models to optimize our internal communication workflows.","author":"Diego Coy","h1":"AI in practice: Generating video subtitles","createdOn":"2023-12-01"}},{"id":425,"attributes":{"title":"how-sustainable-is-ai","path":"how-sustainable-is-ai/","description":"Just over a year after the ChatGPT-fuelled generative AI explosion, it’s hard to remember a time without these groundbreaking tools. However, it remains to be seen if the breakneck speed of change has given us enough time to fully assess generative AI’s true impact on the planet. So let’s take a look.\n\n\n## The impact, in figures\n\nFirst and foremost, it’s now well established that generative AI requires considerably more computing power than standard calculations. A key reason for this is that **generative AI model training calls for GPUs rather than CPUs. 
The former generally requires around four times more energy than the latter** (case in point: Ampere’s CPUs for AI consume [3-5 times less energy than the equivalent NVIDIA machines](https://www.scaleway.com/en/blog/why-cpus-also-make-sense-for-ai-inference/)).\n\nFurthermore, as **AI GPUs tend to generate 2.5x more heat than CPUs** (standard CPUs used in cloud computing are in the range of 250-350W TDP, whereas GPUs are in the 750-800W range, cf. [Intel](https://www.intel.com/content/www/us/en/secure/care/products/237263/intel-xeon-gold-6554s-processor-180m-cache-2-2-ghz.html), [AMD](https://www.amd.com/fr/products/cpu/amd-epyc-9534) [x2](https://www.amd.com/fr/products/accelerators/instinct/mi300/mi300a.html), \u0026 [NVIDIA](https://www.nvidia.com/fr-fr/data-center/h100/)), they require that much extra cooling power. So the processors needed for generative AI training and inference are considerably more power-hungry than pre-generative AI models.\n\nThen there’s the difference between training and inference. Looking at the former, or the process required to ‘educate’ a generative AI model by feeding it as much data as possible, the emissions generated by training vary hugely depending on the model:\n- **552 tCO2e** - GPT3.5, 1.3, 6 \u0026 175bn parameters ([source](https://dataforgood.fr/iagenerative/))\n- **284 tCO2e** - a medium-size LLM, 213m parameters ([source](https://arxiv.org/abs/1906.02243))\n- **30 tCO2e** - BLOOM, a frugal LLM, 175bn parameters ([source](https://arxiv.org/abs/1906.02243))\n\n_(tCO2e = tons of CO2 equivalent, namely CO2 + the 3 other most potent greenhouse gasses)_\n\n\nThis means that training a generative AI model can generate anything from the equivalent of three French people’s annual emissions (10 tCO2e), to 50.\n\nBut of course, training is a one-off occurrence. **Inference, or the everyday usage of a model, has its own impact, which has been estimated at 200 times higher than that of training**. 
According to French tech association [Data for Good](https://dataforgood.fr/iagenerative/), considering ChatGPT has [100m weekly users](https://techcrunch.com/2023/11/06/openais-chatgpt-now-has-100-million-weekly-active-users/), that’s 100,000 tCO2e/year for GPT-3.5.\n\nTo put it another way, **generating one image with generative AI can use as much energy as that required to fully recharge a smartphone**, according to the latest [white paper](https://arxiv.org/pdf/2311.16863.pdf) co-authored by Sasha Luccioni, Climate Lead and AI Researcher at Hugging Face. \"Can\" is the operative word here, however, as [The Verge](https://www.theverge.com/24066646/ai-electricity-energy-watts-generative-consumption) points out, given the huge variety of GenAI models already available. \n\nThen there’s **water**. Also linked to inference, it’s been established that [one conversation with ChatGPT uses half a liter of water](https://arxiv.org/pdf/2304.03271.pdf) in terms of the data center cooling resources required (cf. the considerable heat generated by GPUs, above). Not to mention GPT-3’s training, which required 5.4 million liters of water ([same source](https://arxiv.org/pdf/2304.03271.pdf)). That’s a bit more than one liter per training hour (training GPT-3 took 4.6 million GPU hours, according to… [ChatGPT](https://www.wholegraindigital.com/blog/social-environmental-impacts-of-ai/?utm_source=pocket_saves)!)\n\nGiven these elements, it’s not surprising that AI energy demand is set to outpace supply. \n\nIf Google were to use AI for its around 9 billion daily searches - which it [most likely will](https://www.theverge.com/2023/5/10/23717120/google-search-ai-results-generated-experience-io) - it would need 29.2 terawatt hours (TWh) of power each year, according to researcher Alex de Vries. 
As such, as de Vries told [Euronews last year](https://www.euronews.com/next/2023/10/10/demand-for-ai-could-mean-technology-consumes-same-energy-as-a-country-analysis-shows), **by 2027, AI could consume as much electricity as a medium-sized country like the Netherlands**.\n\nThe IEA (International Energy Agency) recently issued [a similar warning](https://iea.blob.core.windows.net/assets/6b2fd954-2017-408e-bf08-952fdd62118a/Electricity2024-Analysisandforecastto2026.pdf): **data centers’ energy consumption could more than double by 2026, to 1,000TWh, driven by AI** and cryptocurrency.\n\nOne of AI’s most influential leaders naturally saw this coming: at Davos in January 2024, **OpenAI CEO Sam Altman said AI will definitely need much more energy than initially thought**. “There’s no way to get there without a[n energy] breakthrough [like nuclear fusion]”, [Reuters](https://www.usnews.com/news/technology/articles/2024-01-16/openai-ceo-altman-says-at-davos-future-ai-depends-on-energy-breakthrough) reported him saying on a panel. This could well be why OpenAI’s most famous investor, Microsoft, just hired a new Director of Nuclear Development Acceleration: to “help power its own AI revolution”, according to [TechRadar Pro](https://www.techradar.com/pro/microsoft-goes-atomic-worlds-most-valuable-company-just-hired-a-director-of-nuclear-development-acceleration-to-help-power-its-very-own-ai-revolution). \n\nWhilst we’re a [long way off nuclear fusion](https://www.newsweek.com/nuclear-fusion-when-ready-electricity-technology-1773349) - versus current fission methods - a trend of nuclear-powered data centers is definitely bubbling up. \n\nAccording to [AMD CEO Lisa Su](https://www.theregister.com/2023/02/23/amd_zettaflop_systems_nuclear/), in around ten years’ time we may see zettaflop-class supercomputers, whose requirement for 500MW facilities will far outstrip today’s 20-50MW facilities. 
Such needs can only be powered by local, dedicated sources like nuclear SMRs (small modular reactors).\n\nThis is why [The Register](https://www.theregister.com/2023/09/27/datacenters_nuclear_power/) reports that last year, [Cumulus Data](https://cumulusinfra.com/) opened a 65MW nuclear data center, which it claims will ultimately reach a capacity of 950MW. In addition, SMR-powered facilities are currently being investigated by Green Energy Partners/IP3 (Virginia, USA) and Bahnhof (Sweden). \n\nGiven our current reliance on fossil fuels (e.g. with the US still dependent on them for 80% of its energy), could nuclear-powered emission-free data centers be a better option for the planet than current solutions? Time will tell, especially for future generations…\n\n\n## How to reduce that impact\n\nThe first rule of any sustainability strategy, especially in tech, should be to ask “do I really need this?”\n\nIndeed, generative AI is neither inevitable, nor adapted to all use cases. As we’ve [already explained here](https://www.scaleway.com/en/blog/symbolic-ai-is-dead-long-live-symbolic-ai/), **symbolic, or “good old-fashioned” AI, can do a lot more than what many of us expect, and with considerably less impact**. French startup Golem.ai has notably established that one of their [email-sorting symbolic AI models emits 1000 less CO2eq than GPT-3](https://golem.ai/fr/ia-frugalite-sobriete).\n\nThat said, if you do decide you absolutely must use generative AI, does it have to be on the scale of ChatGPT? Must it hoover up all of the world’s data, or can it just focus on a specialized dataset, like legal documents, for example?\n\nDo you have to use a supercomputer for training, or would a smaller, single [H100 GPU](https://www.scaleway.com/en/h100-pcie-try-it-now/) do the trick? Could you simultaneously prolong the life of old hardware and save money by using older generation GPUs?\n\n**For inference, could a less energy-hungry CPU, like Ampere’s, meet your needs** (cf. 
above)? \n\nNext, it can be inspiring to look into **the many ways generative AI is being used today to actively further sustainability; potentially, to an extent that may far outweigh its impact.**\n\nIndeed, a [McKinsey report](https://www.mckinsey.com/capabilities/quantumblack/our-insights/how-artificial-intelligence-can-deliver-real-value-to-companies) once estimated AI-based technologies could help companies to reduce their emissions by up to 10%, and their energy costs by 10-20%.\n\nOne clear example in tech is Google’s AI subsidiary DeepMind, which [declared](https://deepmind.google/discover/blog/deepmind-ai-reduces-google-data-centre-cooling-bill-by-40) as early as 2016 that its application of machine learning in GCP data centers has enabled said facilities to consume 40% less energy. How? By improving anticipation of key internal factors, like how different types of machinery interact with each other, and external ones like the weather, thanks to training data such as past temperatures, power, pump speeds and setpoints.\n\nAI’s impact in data centers can also be reduced by using alternative cooling systems. This is the case of the **DC5 data center, where Scaleway’s AI machines are housed, which consumes 30-40% less energy than standard facilities, because it uses no air conditioning**. Instead, it relies on free cooling most of the year, and, in warmer summer months, adiabatic cooling, a process which cools outside air by passing it through a moist membrane. This, plus French energy’s low carbon intensity, makes **DC5 one of the world’s least impactful AI installations**.\n\n\n## AI for good: We’re just getting started\n\nMachine learning can also help in broader contexts, although many of today's LLM-based solutions are based more on predictive than generative AI. 
For example, using past data to predict future demand for electricity, thereby optimizing smart grids; anticipating road traffic, which can make travel, deliveries way more efficient, thereby reducing pollution (Google claims its [Green Light initiative with Google Maps](https://blog.google/outreach-initiatives/sustainability/google-ai-reduce-greenhouse-emissions-project-greenlight/) can reduce emissions at intersections by 10%); fine-tuning energy consumption in buildings via temperature prediction; and the forecasting of extreme weather events or incidents, like [Pyronear](https://pyronear.org/en/), which uses AI-equipped towers to detect forest fires.\n\nAll of these examples and more - also covering societal impacts, public policy analysis, education and finance - are already happening thanks to generative AI. This [white paper](https://dl.acm.org/doi/10.1145/3485128) by leading academics and Google thought-leaders demonstrates how these diverse activities are accelerating sustainability as a whole.\n\nGenerative AI can also facilitate access to key information about sustainability. French national ecological agency (and Scaleway client) [ADEME](https://www.linkedin.com/posts/lydia-passet-787a43159_ademe-iagaeznaezrative-genai-activity-7155594529746669570-xm_F/) is currently experimenting with a text-based model trained on the agency's extensive documentation database, with the objective of extracting key data more quickly and understandably. If the experiment is successful, the model could be opened up to the general public. Spearheaded by **Ekimetrics**' \"AI for Sustainability\" team, the project is similar to \"[ClimateQ\u0026A](https://huggingface.co/spaces/Ekimetrics/climate-question-answering)\", a model trained on the IPCC reports, which is essentially a **ChatGPT for sustainability** (and not the only one, cf. this ChatGPT plugin, \"[IPCC Explainer](https://chat.openai.com/g/g-CXYs3qu1D-ipcc-explainer)\"). 
\n\nThen there are flag-waving applications, which warn us of potential sustainability emergencies. Data for Good notably enabled ocean protection ONG [Bloom](https://www.bloomassociation.org/en/) to detect illegal fishing using AI, and used AI to power its [Carbonbombs.org](http://Carbonbombs.org) website, which flags the world’s most-polluting projects, such as coal mines, and is now influencing global policy. Finally, [Climatetrace.org](http://Climatetrace.org) uses AI to highlight those countries that aren’t decarbonizing as quickly as they say they are.\n\nNot forgetting the AI models themselves: the smaller they are, the less energy they consume, which makes them better for everyone, including the planet. As **Mistral AI CEO and co-founder Arthur Mensch** [told ai-PULSE](https://www.scaleway.com/en/blog/best-quotes-ai-pulse-2023/) last November, “in the ‘Vanilla Attention’ version of Transformers, you need to keep the tokens in memory. “With Mistral AI’s ‘Sliding Window Attention’ model, there are four times less tokens in memory, reducing memory pressure and therefore saving money. Currently, **too much memory is used by generative AI**”. This is notably why the company’s latest model, Mistral-7B, can run locally on a (recent) smartphone.\n\nLooking ahead, this efficiency-first approach will apply to AI solution offerings too. **Scaleway aims to beta release by mid-2024 an inference service which will serve LLMs connected to clients’ private or sensitive data**. Based on LLMs like Llama or those of Mistral AI, such a service is cost- and energy-efficient because:\n- Compute resources can be sized up and down by users according to usage peaks\n- Using existing pre-trained and open source models avoids the emissions generated by training new models from scratch.\n\n\n## The conclusion: it’s too soon to say\n\nIf we consider that there are as many AI solutions as impacts, the jury is out at best. 
Most experts agree it’s too early in the generative AI revolution to measure its true impact on the planet.\n\nBut there is good news! Firstly, it is totally possible to assess impact before choosing the right AI model for your needs, namely:\n- What its emissions impact is, using tools like [Machine Learning Emissions Calculator](https://mlco2.github.io/impact/#compute)\n- Whether you can re-use, or fine-tune an existing model - nearly 500,000 different ones are available in repositories like [Hugging Face](https://huggingface.co/models) - as this will consume way less energy than creating a new one from scratch\n- Whether it’s hosted by a cloud provider that works to reduce its energy consumption, and whose data centers are in a low-carbon intensity country.\n\n\nFurthermore, the principles of [green IT](https://www.scaleway.com/en/why-shift-to-green-it/) apply just as much to AI as they do to ‘traditional’ computing:\n\n- **Data centers** should use renewable energy - and as little of it as possible - whilst radically limiting water usage. They should also use alternatives to air conditioning, to considerably reduce AI’s environmental impact (see above)\n- **Hardware** should be optimized to use as little energy as possible, and to last for as long as possible\n- **Software** solutions - think AI models in this case, or options like Inference as a Service - should be engineered to consume as few computational resources, and therefore energy, as possible.\n\n\nLast but not least, AI datasets, models and machines should only be as big or powerful as they need to be. Otherwise, tech’s eternal risk of falling into the rebound effect, or [Jevon’s paradox](https://en.wikipedia.org/wiki/Jevons_paradox) - using a service more, rather than less, as it gets more efficient - could have dire consequences. 
\n\nAs Ekimetrics’ Head of AI for Sustainability Theo Alves Da Costa [puts it](https://vert.eco/articles/lintelligence-artificielle-va-t-elle-donner-le-coup-de-grace-au-climat?utm_source=pocket_saves), “if we use the bulldozer of AI to knock a nail into a wall, the nail will go in, but we also run the risk of knocking the whole wall down. In cases like this, it’s better to just use a hammer”.\n\n\n_Special thanks to Ekimetrics’ [Theo Alves Da Costa](https://www.linkedin.com/in/th%C3%A9o-alves-da-costa-09397a82/) for many of the sources in this article. And to Hugging Face’s [Sasha Luccioni](https://www.sashaluccioni.com/) for the inspiration!_\n","createdAt":"2024-02-15T10:19:55.417Z","updatedAt":"2024-03-11T09:35:03.784Z","publishedAt":"2024-02-15T10:25:58.667Z","locale":"en","tags":"AI\nSustainability\nGreen IT","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":6,"excerpt":"Do generative AI's benefits for the planet outweigh its impacts? Let's try to find out...","author":"James Martin","h1":"How Sustainable is AI?","createdOn":"2024-02-15"}},{"id":428,"attributes":{"title":"infrastructures-for-llms-in-the-cloud","path":"infrastructures-for-llms-in-the-cloud/","description":"Open source makes LLMs (large language models) available to everyone. There are plenty of options available, especially for inference. You’ve probably heard of [Hugging Face’s inference library](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client), but there’s also [OpenLLM](https://github.com/bentoml/OpenLLM), [vLLM](https://docs.vllm.ai/en/latest/), and many others. \n\nThe main challenge, especially if you’re a company like Mistral AI building new LLMs, is that the architecture of your LLM has to be supported by all these solutions. They need to be able to talk to Hugging Face, to NVIDIA, to OpenLLM and so on.\n\nThe second challenge is the cost, especially that of the infrastructures you’ll need to scale your LLM deployment. 
For that, you have different solutions: \n\n1. Choosing the right GPUs (your LLM has to fit with them)\n2. Choosing the right techniques:\n- Quantization, which involves reducing the number of bytes used by the variables, so you can fit larger models into smaller memory constraints. That’s a give and take between the two, as that can have impacts on the accuracy of your model and its performance results\n- Fine-tuning methods, like parameter-efficient fine-tuning ([PEFT](https://github.com/huggingface/peft)). With PEFT methods, you can significantly decrease computational and memory cost by only fine-tuning a small number of (extra) model parameters instead of all the model's parameters. And you can combine PEFT methods with quantization too.\n\n\nThen you have to decide whether you host it yourself; you use a PaaS solution; or ready-to-use API endpoints, like what OpenAI does.\n\n\n## Choosing the right GPU\n\n![NVIDIA H100 - L4 - L40S](https://www-uploads.scaleway.com/NVIDIA_H100_L4_L40_S_b997aec7f9.webp)\n\nThe above is Scaleway’s offering, but similar offerings are currently being installed with most major cloud providers. \n\n- **H100 PCIe 5** is the flagship, NVIDIA’s most powerful GPU. It has interesting features like the Transformer Engine, a library for accelerating Transformer models on NVIDIA GPUs, including using 8-bit floating point (FP8) precision on Hopper and Ada Lovelace GPUs, to provide better performance with lower memory utilization in both training and inference. It speeds up training of Transformer models, meaning you can put twice the amount of variables in memory, in 8 bits instead of 16. Furthermore, NVIDIA’s Library helps make these changes simpler; plus a large amount of memory and memory bandwidth are key, as the faster you can load your memory, the faster your GPU will be\n- **L4 PCIe 4** can be seen as the modern successor to the NVIDIA T4, intended for inference, but perfectly capable of training smaller LLM models. 
Like H100, it can manage new data formats like FP8. It has less memory bandwidth than H100, but that may create some bottlenecks for certain use cases, like handling large batches of images for training computer vision models. In these cases, you may not see a significant performance boost compared with previous Ampere architecture for example. And unlike H100, this one has video and 3D rendering capabilities, so if you want to generate a synthetic dataset for computer vision with Blender, you can use this GPU\n- **L40S PCIe 4** is what NVIDIA considers as the new A100. It has twice the amount of memory as the L4, but with a larger memory bandwidth, and stronger compute performance too. For generative AI, according to NVIDIA, when you optimize your code with FP8 and so on, DGX with 8x A100 with 40 Gb NVlink can perform as well as 8 L40S PCIe 4 without NVLink, so that’s a powerful and interesting GPU.\n\n\n## Using GPU Instances tip 1: Docker images\n\n![NGC Catalog](https://www-uploads.scaleway.com/NGC_Catalog_6a93ebe2f5.webp)\n\nWhen using GPUs, use Docker images, and start with those offered by NVIDIA, which are free. This way, the code is portable, so it can run on your laptop, on a workstation, on a GPU Instance (whatever the cloud provider, so without lock-in), or on a powerful cluster (either with SLURM as the orchestrator if you’re in the HPC/AI world, or Kubernetes if you’re more in the AI/MLOps world).\n\nNVIDIA updates these images regularly, so you can benefit from performance improvements and bug/security fixes. A100 performance is significantly better now than it was at launch, and the same will apply to H100, L4 and so on. Also, there are a lot of time-saving features, which will allow you to make POCs more quickly, like framework and tools like NeMo, Riva and so on, which are available through the NGC catalog (above). 
\n\nThis also opens up the possibility to use an AI Enterprise license on supported hardware configurations (which is something typically only seen in cloud provider offers), which will give you support in case you meet bugs or performance issues, and even offers help from NVIDIA data scientists, to help you debug your code, and to get the best performance out of all of this software. And of course, you can choose your favorite platform, from PyTorch, TensorFlow, Jupyter Lab and so on.\n\n\n### Using Scaleway GPU Instances\n\nIn Scaleway’s GPU OS 12, we’ve already pre-installed Docker, so you can use it right out of the box. I’m often asked why there’s no CUDA or Anaconda preinstalled. The reason is that this software should be executed inside the containers, because not all users have the same requirements. They may not be using the same versions of CUDA, cuDNN or Pytorch, for example, so it really depends on the user requirements. And it’s easier to use a container built by NVIDIA than installing and maintaining a Python AI environment. 
Furthermore, doing so makes it easier to reproduce results within your trainings or experiments.\n\nSo basically, you do this:\n\n```bash\n## Connect to a GPU instance like H100-1-80G\n\nssh root@\u003creplace_with_instance_public_ip\u003e\n\n## Pull the Nvidia Pytorch docker image (or other image, with the software versions you need)\n\ndocker pull nvcr.io/nvidia/pytorch:24.01-py3\n[...]\n\n## Launch the Pytorch container\n\ndocker run --rm -it --runtime=nvidia \\\n-p 8888:8888 \\\n-p 6006:6006 \\\n-v /root/my-data/:/workspace \\\n-v /scratch/:/workspace/scratch \\\nnvcr.io/nvidia/pytorch:24.01-py3\n\n## You can work with Jupyter Lab, Pytorch etc…\n```\n\nIt’s much easier than trying to install your environment locally.\n\n\n\n## Using GPU Instances tip 2: MIG\n\n![MIG](https://www-uploads.scaleway.com/MIG_0309459e5f.webp)\n\nOne unique feature of the H100 is [MIG, or multi-instance GPU](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/index.html), which allows you to split your GPU into up to seven pieces. This is really useful when you want to optimize your workload. If you have workloads that don’t fully saturate GPUs, this is a nice way to have multiple workloads and maximize GPU utilization. It works with standalone VMs, and works really easily in Kubernetes. You request one GPU reference corresponding to the split you want to use for one GPU resource. \n\nIn Kubernetes, it’s as easy as replacing in your deployment file the classic resource limits\n**nvidia.com/gpu: '1'** 
by the desired MIG partition name, for example, **nvidia.com/mig-3g.40gb: 1**\n\n[Here’s the link](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/index.html) if you want to look into that.\n\n\n\n## Using GPU Instances tip 3: NVIDIA Transformer Engine \u0026 FP8\n\n![FP8](https://www-uploads.scaleway.com/FP_8_5f52cec619.webp)\n\nAll the latest generation of GPUs (available in the latest Nvidia GPU architecture, namely Hopper and Ada Lovelace) use the NVIDIA Transformer Engine, a library for accelerating Transformer models on NVIDIA GPUs, including using 8-bit floating point (FP8) precision on Hopper and Ada GPUs, to provide better performance with lower memory utilization in both training and inference.\n\nAs for their use of the FP8 data format, there are actually two kinds of FP8, which offer a tradeoff between the precision and the dynamic range of the numbers you can manipulate (cf. diagram). When training neural networks, both of these types may be utilized. Typically forward activations and weights require more precision, so the E4M3 datatype is best used during forward pass. In the backward pass, however, gradients flowing through the network typically are less susceptible to the loss of precision, but require higher dynamic range. Therefore they are best stored using E5M2 data format. This can even be managed automatically with the 'HYBRID' format ([more information here](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html#transformer_engine.common.recipe.Format)). \n\nThe Transformer Engine is not just for Transformers. As it can also optimize Linear operations, it can benefit other model architectures, like computer vision (cf. 
the [MNIST example](https://github.com/NVIDIA/TransformerEngine/tree/main/examples/pytorch/mnist)) So basically, you install the Transformer engine package with ‘pip’, load the package, and just test or replace certain operant modules (from your favorite deep learning frameworks) by the one provided in the Transformer engine package (cf. the MNIST example above). If you want to invest a bit of time in optimizing your code by using the Transformer Engine and the FP8 format in your code, you can. It’s good here to optimize, because you’ll use less memory, fit more variables, and speed up your inference and your training. So be sure to optimize your code!\n\n\n## Using LLMs in production: Creating an AI chatbot with RAG\n\n![RAG](https://www-uploads.scaleway.com/RAG_d213036576.webp)\n\nIf you want to do LLMs in production, you might want to create a chatbot, and to do that, you’ll probably want to fine-tune a model on your data for your specific use case. It’s easy with Hugging Face’s Transformers library in terms of code; but it can be hard to improve your results, as this takes lots of trial and error. \n\nAnother technique is to look at RAG, or [Retrieval Augmented Generation](https://blogs.nvidia.com/blog/what-is-retrieval-augmented-generation/), which you can do before fine-tuning, or instead of it. This way there’s less risk of breaking the model, as is a risk with fine-tuning. Plus with RAG, there’s no fine-tuning cost, as you don’t pay for the GPU usage of the multiple tries that fine-tuning requires; and you can keep your data private by hosting it locally. Furthermore, you reduce the risks of hallucinations, which are always a bad thing when you’re trying to [build an AI chatbot for your business](https://www.theguardian.com/world/2024/feb/16/air-canada-chatbot-lawsuit). So I’ve included the [documentation](https://docs.nvidia.com/ai-enterprise/workflows-generative-ai/0.1.0/index.html) that explains this system. 
NVIDIA even has a [GitHub project](https://github.com/NVIDIA/GenerativeAIExamples/blob/main/examples/README.md) to allow you to build your first AI chatbot with RAG in just five minutes. \n\n\n## What you need to train a foundational LLM\n\nFirstly, a lot of money! LLaMA’s [white paper](https://arxiv.org/abs/2302.13971) says it took 21 days to train LLaMa using 2048 A100 80GB GPUs. We can't possibly speculate on what that costs, but [someone else has here](https://www.reddit.com/r/LocalLLaMA/comments/15ggfjl/cost_of_training_llama_2_by_meta/?rdt=58095) (hint: it's a lot!)\n\nYou’ll also need a team of experts… but not necessarily hundreds! Mistral AI’s Mixtral beat GPT3.5 (according to [Mistral AI’s benchmark](https://mistral.ai/news/mixtral-of-experts/)) with a team of less than 20 people. \n\nLots of data will also be required: you may have to scrape the internet for that, or rely on a partnership to help you. Then the data will need to be prepared, i.e. cleaned and deduplicated.\n\nFinally, you’ll need lots of compute power! If we look at this NVIDIA graphic:\n\n![Time to train by LLM size](https://www-uploads.scaleway.com/Time_to_train_by_LLM_size_32baec3420.webp)\n\n…we see there’s a big leap between A100 and H100 (from one month to one week’s training time for the biggest models).\n\n\n## How to handle lots of data\n\nOur Superpod customers use Spark for the data preparation, which uses CPUs (in the range of 10 000 vCPUs), and around 100 TB of block storage, before the dataset is stored in Object Storage. 
Scaleway is currently working on a Spark managed cluster offer, by the way: watch this space!\n\nNVIDIA also provides tools like [NeMo data Curator](https://www.nvidia.com/en-us/ai-data-science/products/nemo/get-started/) (through NGC/Nvidia AI Enterprise, so we’re talking about containers), which has functions like data download and text extraction, text re-formatting and cleaning, quality filtering, document-level deduplication, multilingual downstream-task decontamination and more.\n\nEven with these tools, data preparation can take a long time, but it has to be done before you start the training.\n\n\n## How to start training\n\nTo start training, you’ll need more than one GPU, so the building blocks will be **NVIDIA DGX H100**, which are ready-to-use computers with a set maximal server configuration, so you’ve got the best of the best:\n\n- **8x NVIDIA H100 80GB GPUs With 640 Gigabytes of Total GPU Memory**\n- 18x NVIDIA® NVLink® connections per GPU\n- 900 gigabytes per second of bidirectional GPU-to-GPU bandwidth, thanks to NVLink\n- **4x NVIDIA NVSwitches™**\n- 7.2 terabytes per second of bidirectional GPU-to-GPU bandwidth\n- 1.5X more than previous generation\n- **10x NVIDIA ConnectX®-7 400 Gigabits-Per-Second Network Interface**\n- 1 terabyte per second of peak bidirectional network bandwidth\n- Dual Intel Xeon Platinum 8480C processors, 112 cores total, and 2 TB System Memory\n- 30 Terabytes NVMe SSD - High speed storage for maximum performance.\n\n\nTo build a Superpod, you take that server, then put 32 of them together, no more, no less. That's what NVIDIA calls a Scaleable Unit. If you scale up four scalable units, you have 128 nodes, and that’s the SuperPOD H100 system. 
Each of the four units is 1 ExaFLOPS of FP8 format for a total of up to 4 ExaFLOPS in FP8, and the cluster is orchestrated by NVIDIA Base Command Manager, so NVIDIA software, with a SLURM orchestrator, which can launch jobs across multiple computers to do the training.\n\nSo at Scaleway, we’ve got two [supercomputers](https://www.scaleway.com/en/ai-supercomputers/):\n\n**Jeroboam**, the smaller version of the cluster, which was intended to learn to write code that’s multi-GPU and multi-nodes:\n- **2 NVIDIA DGX H100 nodes (16 Nvidia H100 GPU)** \t\n- Up to 63,2 PFLOPS (FP8 Tensor Core)\n- 8 Nvidia H100 80GB SXM GPUs with NVlink up to 900 GB/s per node\n- Dual CPU Intel Xeon Platinum 8480C (112 cores total at 2GHz)\n- 2TB of RAM \n- 2x 1.92TB NVMe for OS\n- 30,72 TB NVMe for Scratch Storage\n\n- Throughput (for 2 DGX) : Up to 40 GB/s Read and 30 GB/s Write\n- Nvidia Infiniband GPU interconnect network up to 400 Gb/s (at cluster level)\n- 60TB of DDN high-performance, low latency storage.\n\n\n**Nabuchodonosor**, the ‘real thing’ for training, which is also built for people who’ll want to train LLMs with videos, not just text, thanks to the large amount of high-performance storage…\n- **127 NVIDIA DGX H100 nodes (1016 Nvidia H100 GPU)** \n- Up to 4 EFLOPS (FP8 Tensor Core)\n- 8 Nvidia H100 80GB SXM GPUs with NVlink up to 900 GB/s per node\n- Dual CPU Intel Xeon Platinum 8480C (112 cores total at 2GHz)\n- 2TB of RAM \n- 2x 1.92TB NVMe for OS\n- 30,72 TB NVMe for Scratch Storage\n\n- Nvidia Infiniband GPU interconnect network up to 400 Gb/s (at cluster level)\n- 1,8PB of DDN high-performance, low latency storage \n- Throughput (for 127 DGX) : Up to 2,7 TB/s Read and 1,95 TB/s Write\n\n\n## Training LLMs\n\n![Data parallelism](https://www-uploads.scaleway.com/Data_parallelism_d738f8be28.webp)\n\nThe challenge of training LLMs on Nabuchodonosor is that it’s an HPC user experience, which means SLURM jobs, not Kubernetes. 
It’s still containers, though, which you build on top of NVIDIA NGC container images (Pytorch, Tensorflow, Jax…). That’s why when you write your code with these NGC images, even with a single small GPU, your code will be able to scale more easily. One best practice: if you have, say, 100 nodes, don’t launch your jobs on all of them. Keep a few spare in case one or two GPUs fail (it happens!). That way, if you have any issues, you can relaunch your jobs by replacing the faulty nodes.\n\nYou’ll need to write your code in special ways, to maximize performance by using data parallelism and model parallelism (computing across multiple GPUs at the same time); you can use resources like [Deepspeed](https://www.deepspeed.ai/training/) for this.\n\nThen there’s the End-to-End framework [Nvidia NeMo](https://github.com/NVIDIA/NeMo), which will also help you build, finetune and deploy generative AI models.\n\n\n## Superpod challenges\n\n![Superpods in DC5](https://www-uploads.scaleway.com/Superpods_in_DC_5_c0492a8517.webp)\n\nScaleway’s supercomputers were built in just three to seven months, so it was quite a logistical challenge to make sure all the parts were received in time, and connected the right way… with more than 5000 cables! \n\nProviding power is also quite a challenge: the Nabuchodonosor Superpod system’s power usage is 1.2 MW, which means we can only put two DGX units in each rack, so it’s not a great usage of data center surface space. Then there’s the cost of electricity, which is five times more in France than in the USA, for example. But as French electricity’s carbon intensity is very low, it generates around seven times less emissions than in Germany, for example. Furthermore, as all of Scaleway’s AI machines are hosted in DC5, which has no air conditioning and therefore uses 30-40% less energy than standard data centers, we can say this is one of the world’s most sustainable AI installations. 
[More on AI and sustainability here](https://www.scaleway.com/en/blog/how-sustainable-is-ai/).\n\n\n## What’s next?\n\n![NVIDIA Grace Hopper Superchip](https://www-uploads.scaleway.com/NVIDIA_Grace_Hopper_Superchip_781c5ab894.webp)\n\nScaleway will launch this year the [NVIDIA GH200 Grace Hopper Superchip](https://resources.nvidia.com/en-us-grace-cpu/nvidia-grace-hopper-2), which combines Grace ARM CPUs with Hopper GPUs in the same device, which are linked at 900 GB/s. You can connect 256 of these devices together, which is much larger than what you can connect in the DGX configuration described above (the 8 GPUs connected at 900 GB/s with NVlink in a single DGX H100 server node). And if you need more, you can even connect several meshes of 256 GH200 via Infiniband at 400Gb/s. So it’s really for use cases where the memory is the bottleneck, so it’s really for HPC, and for inference of LLMs. When they’re all put together, it’s like a giant GPU, designed for the most demanding use cases, like healthcare and life sciences, for example. \n","createdAt":"2024-02-21T14:45:08.671Z","updatedAt":"2024-02-22T13:48:55.171Z","publishedAt":"2024-02-22T13:48:55.160Z","locale":"en","tags":"AI","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":6,"excerpt":"What do you need to know before getting started with state-of-the-art AI hardware like NVIDIA's H100 PCIe 5, or even Scaleway's Jeroboam or Nabuchodonosor supercomputers? Look no further...","author":"Fabien da Silva","h1":"Infrastructures for LLMs in the cloud","createdOn":"2024-02-21"}}]},"meta":{"id":1354,"title":"How to get started in AI without excessive cost, or emissions!","description":"How can startups take their first steps with Large Language Models (LLMs)? 
Leveraging AI needn't cost the earth, explains MindMatch's Zofia Smoleń","ogtype":null,"ogtitle":"How to get started in AI without excessive cost, or emissions!","ogdescription":"How can startups take their first steps with Large Language Models (LLMs)? Leveraging AI needn't cost the earth, explains MindMatch's Zofia Smoleń","noindex":false},"localizations":{"data":[]}}},{"id":347,"attributes":{"title":"data-powering-tech-efficiency-privacy-value","path":"data-powering-tech-efficiency-privacy-value/","description":"The adage “data is the new oil” may be verging on the prehistoric; and yet data has never been more essential to success in all business sectors, and in tech in particular. Just ask Google, Facebook \u0026 co… \n\nIn recent years, as our usage of digital tools has skyrocketed, so has the sheer production of data. Let’s not forget that in 2018, [IDC predicted that worldwide data would grow by 61%](https://www.networkworld.com/article/3325397/idc-expect-175-zettabytes-of-data-worldwide-by-2025.html), to 175 zettabytes (a zettabyte is a trillion gigabytes). And that was before the recent ChatGPT-fuelled AI explosion, which is currently generating countless times more data than the gazillions of web pages and images GPT-3 and 4 ingested during their training.\n\nThe key difference, of course, is that whilst most oil extracted has intrinsic value, the same cannot be said for data. A raw stream of information is useless if you don’t know what to do with it. Such knowledge is now critical. 
According to DELL’s “[Unlocking the Value of Data with Data Innovation Acceleration](https://www.dell.com/en-us/dt/technology-acceleration.htm#pdf-overlay=//www.delltechnologies.com/asset/en-us/solutions/business-solutions/industry-market/unlocking-the-value-of-data-with-data-innovation-acceleration.pdf)” report, better data management can improve product quality, application availability/predictability, customer service, productivity and more:\n![Source: Unlocking the Value of Data with Data Innovation Acceleration](https://www-uploads.scaleway.com/DELL_s_Unlocking_the_Value_of_Data_with_Data_Innovation_Acceleration_report_extract_57de0f2f37.webp)\n\nEven just taking the example of one industrial giant, global steel leader **ArcelorMittal** has “one of France’s biggest data lakes, because we have several sites and production lines, with countless sensors generating data each 10 milliseconds,” **David Glijer**, the company’s Digital Transformation Director, explained at France Digitale's [FD3](https://francedigitale.org/agenda/fd3) event in Paris, March 29. Making the most of this flood of information is precisely why ArcelorMittal has “a department whose goal is to create added value from data, and works with other teams to do that.” \n\nSo, why is it so important to optimize data usage?\n\n\n## 1. Efficiency\n\n“We produce 1 steel coil every 10 minutes. If we have the slightest quality deviation, we need to be able to react efficiently. So we need the data to be very close to the machine”, said Glijer, adding that the steel giant uses a **combination of on-premise, edge and cloud technologies in order to achieve this**.\n\nCloud technologies are, of course, key to efficiency. “**The cloud is frugal by design; it’s about sharing infrastructure and optimizing space,**” Scaleway’s Chief Operating Officer **Albane Bruyas** told the same panel. 
Central to that frugality, she explained, is using the cloud properly.\n\n“**Most of our Compute machines are made to be shared. When I joined Scaleway, I was surprised to discover how many people use these products like dedicated servers**. It’s important that the cloud be used how it was supposed to be used,” said Bruyas. This is particularly crucial considering most cloud instances are at rest 93% of the time.\n\nSo frugality isn’t just good for electricity bills, it’s also good for the planet. “We improve the impact of our data centers by analyzing where the biggest impact is,” said Bruyas. “We discovered the two most important parts of our impact are hardware and power. So we’re working to make sure that frugality is not only what we ask of our customers, but what we apply to what we do too.”\n\nThe notion of **responsibility** is another key to data efficiency, Bruyas added. “**You’re very sensible when you’re on premise, but when you move to the cloud, you forget to ask where the data is. And yet it’s crucial, because it’s about sovereignty and environmental impact**.\"\n\n\"If your cloud provider doesn’t give you that information, then there’s an issue, because the cloud is a strategic resource,\" Bruyas affirmed. \"This information will enable you to control your risks. A CTO should ask for information on power consumption, products, costs (now and in the long term), environmental impact… It’s really a client responsibility.”\n\n\n## 2. Privacy\n\n**GDPR** is another responsibility all companies handling data — i.e. all companies anywhere — have to deal with. 
But with some data restraints can come opportunities, as ‘deep data’ startup **XXII** reminded us at FD3.\n\nThe company, which uses AI with surveillance cameras to detect specific cases like traffic jams, garbage in the street, or even fires, has to focus on using **synthetic data** — information that’s artificially generated, rather than produced by real-world events — principally for privacy reasons.\n\n“Our main challenge is GDPR,” said Dam Mulhem, XXII’s Chief Data Officer. “All cameras we use are on the street, or in private entities. So there are issues of privacy, personal data, how we can collect data and how we can give it to the R\u0026D team to use. **We don’t want to use biometric data like facial recognition, nor to recognize gender**.\"\n\n\"We need thousands of images to train our model,\" he continued. \"Using synthetic data, we’re able to scale quicker, and attain operational effectiveness faster. Furthermore, with a sovereign cloud, we can provide that platform directly to our clients, so it’s easy to deploy.”\n\nMastering the data this way, explained Mulhem, is also positive for XXII’s future perspectives. “We build our own software internally, we input the datasets we already have, we have all the metadata, so we can generate on each image the scale, the field of view, the whole chromatic operation with great balance. To generate data, **as we already have an API internally, we can say, for example, ‘I need 10,000 fires’, then they can have the full dataset in just a few hours**.”\n\n\n## 3. Value(s)\n\nSo once efficiency and privacy are covered, how exactly do you go about **generating value from data**? Values are, fittingly, key to this equation. If prospects know their values are aligned with those of your company, they’re more likely to become customers.\n\nThis is one of the reasons XXII has an **ethical committee**, said Malhem, “so whenever we get a new request from a client, we discuss it first. 
Even if we know how to do facial recognition, we don’t want to provide it to [just] anyone. In airports, we’ve all agreed to have our biometric data available, through our passports. [But elsewhere] it’s not a good idea.”\n\nGlijer concurred that “it’s very important for us to have partners that share our values. **It makes no sense to work with people who aren’t in favor of decarbonation**, for example (ArcelorMittal has pledged to be carbon neutral by 2050). So we need to create a big ecosystem and a strong will in society, for example to use more ‘green IT’ computers. It needs to be a big movement that we want to continue.”\n\nData is equally key to generating future value for ArcelorMittal, said Glijer. “We want to be an Industry 4.0 leader in Europe. Next year, we’re starting our first digital-native plant, with new IT systems, automation and 5G by design.” All of which will be impossible without the right data.\n\n\nStill not convinced of data’s true value? A recent study on [Measuring the Effectiveness of Data](https://www.datascienceassn.org/sites/default/files/Measuring%20Business%20Impacts%20of%20Effective%20Data%20I.pdf) from the University of Texas found that if the median Fortune 1000 business increased the usability of their data by just 10%, it would translate to an increase of $2.01 billion in total revenue every year ([source](https://www.linkedin.com/pulse/extracting-value-from-data-successful-digital-tushar-gupta/)). And according to DELL’s aforementioned report, the companies who innovate the most when it comes to data increased their revenue by 19% and released around five more products than those just making a standard effort.\n\nIn a nutshell, investing in data always pays off. 
Especially with AI propelling us into a brave new world as we speak…\n\n\n","createdAt":"2023-04-19T15:10:14.609Z","updatedAt":"2023-04-21T08:14:31.022Z","publishedAt":"2023-04-21T08:14:31.010Z","locale":"en","tags":"Data\nStartup","popular":false,"articleOfTheMonth":false,"category":"Scale","timeToRead":4,"excerpt":"Data management has never been more critical to business success. But how can it be handled efficiently, whilst respecting privacy, and generate value... in line with a company's core values? ","author":"James Martin","h1":"How Data is powering tech efficiency, privacy and value(s) ","createdOn":"2023-04-21","image":{"data":{"id":2103,"attributes":{"name":"Composer.webp","alternativeText":null,"caption":null,"width":3146,"height":1762,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Composer_c2bc5a6a84.webp","hash":"large_Composer_c2bc5a6a84","mime":"image/webp","name":"large_Composer.webp","path":null,"size":"232.23","width":1000,"height":560},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Composer_c2bc5a6a84.webp","hash":"small_Composer_c2bc5a6a84","mime":"image/webp","name":"small_Composer.webp","path":null,"size":"76.37","width":500,"height":280},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Composer_c2bc5a6a84.webp","hash":"medium_Composer_c2bc5a6a84","mime":"image/webp","name":"medium_Composer.webp","path":null,"size":"150.67","width":750,"height":420},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Composer_c2bc5a6a84.webp","hash":"thumbnail_Composer_c2bc5a6a84","mime":"image/webp","name":"thumbnail_Composer.webp","path":null,"size":"26.30","width":245,"height":137}},"hash":"Composer_c2bc5a6a84","ext":".webp","mime":"image/webp","size":571.9,"url":"https://www-uploads.scaleway.com/Composer_c2bc5a6a84.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2023-04-19T15:30:20.
260Z","updatedAt":"2023-04-19T15:30:20.260Z"}}},"recommendedArticles":{"data":[{"id":73,"attributes":{"title":"changing-data-center-efficiency-practices-the-heat-is-on","path":"changing-data-center-efficiency-practices-the-heat-is-on/","description":"Over the last two years, Paris has been hit with heatwaves the likes of which have never been seen before in the capital. 2019 was record-breaking - with temperatures hitting 43°C on 24 July, and 38°C in August. When temperatures skyrocket like this, there are consequences, and global warming is forcing us to question the techniques ordinarily used to cool data centers, as they are responsible for a significant part of the digital economy’s energy consumption. Our DC5 data center once again proved its robustness and formidable technical capacities by getting through the sweltering heat **without using a single kilowatt of air conditioning!**\n\n\u003cblockquote class=\"twitter-tweet\" style=\"border: none; width: 550px; height: 410px;\"\u003e\n \u003ca href=\"https://twitter.com/a_bermingham/status/1292510024275824648?ref_src=twsrc%5Etfw\"\u003eAugust 9, 2020\u003c/a\u003e\n\u003c/blockquote\u003e \n\u003cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\"\u003e\u003c/script\u003e\n\nWith the adoption of the cloud, the world’s IT is shifting _en masse_ to data centers and cloud providers. Data centers are mission-critical infrastructure, meaning that the slightest weakness can have disastrous consequences, and a direct impact on our economy. However, many data centers have not evolved to keep up with the digital evolution we have seen over the last 20 years. There are several reasons behind this paralysis - clients who haven’t changed their SLAs since the tech bubble, and the fact that innovation is seen as a risk. \n\n### Innovation to drive changes\n\nFor some time now, most of the European tech companies have been bought and consolidated by a handful of American and Asian property funds. 
When the cost of a new data center alone amounts to 75-100 million euros, rather than innovate, they prefer to copy-paste the same structures that have already been tried and tested, and meet the unjustified specifications of major customers. Customers claim they face financial penalties if their servers are not kept at 20°C, but in accordance with ASHRAE standards, modern servers can withstand temperatures of 30°C. Air conditioning continues to be used on a massive scale, and so **30-40% of a data center’s energy is wasted on cooling alone.** \n\n### Investors need reassuring, those writing the specifications need informing, and customers need to turn towards virtuous practices in line with the capabilities of today’s technology. \n\nThere is a rush on the part of many data centers, the ones lauded for their efforts, to be able to use the marketing sheen of the terms “energy efficient”, but behind this slogan are some unspeakable practices which must disappear... **the waste of millions of cubic metres of water** in cooling towers to cool data centers, a process that is characterized by considerable environmental and health risk. This practice, which is nearly banned in France, persists in some European countries and needs to be globally regulated**. It’s time to face the unforgivable consequences of cooling towers and actually take tangible steps towards change and ban them in Europe.** \n\n## Air conditioning is not the solution\n\n![](https://www-uploads.scaleway.com/blog-181207-Online-DC3-031.webp)\n\nNot only do cooling units require **huge quantities of energy**, but they are also subject to technical limitations including the risk of unplanned downtime, spikes in energy consumption, and lower efficiency when the outside temperature soars. These are always anxious times for data center managers as most air conditioning systems are dimensioned for 40°C maximum outside temperature, and hot temperatures have an impact on their EER (Energy-Efficiency Ratio). 
\n\nHow did we get to a point where it’s acceptable for a data center to consume 30-40% of its energy for air conditioning in the middle of winter, when electricity grids are most solicited, and energy is carbon-intensive?\n\nUnfortunately, traditional data centers have generally made less progress in energy management. Out of the estimated 650 billion kWh of electricity consumed in 2020 by data centers, at least 240 billion kWhs are **wasted, avoidably, on air conditioning!** Yet, the solutions have been staring us in the face for decades. The trick to beating the heat without breaking the bank is less about cooling, and more about temperature and climate monitoring. There is so much scope for innovation in this sector, and not just any innovation, **innovation that makes sense**. It’s up to all of us as customers, and digital stakeholders, to make these changes. \n\n**Rather than planting trees or buying wind farms, we prefer to think outside of the box. Our sustainable actions have real impact because we have innovated where it counts most - energy and water use at the source.** \n\nDC5 is different from a traditional data center. To combine power, high availability and a scalable modular structure while **minimizing the energy footprint** as much as possible, Scaleway Datacenter’s engineering teams focused their efforts on optimizing all infrastructure parameters. Nothing was left to chance. Everything was clearly conceptualized and meticulously built down to the last detail to achieve an exceptionally energy and water efficient result that provides a **significant financial edge** for our clients. \n\n## Leaving outdated technology out in the cold\n\nInstead of air conditioning and cooling units, all it takes is a few grams of water evaporated in the air, a few hours per year, and the air coming in from the outside can be cooled by nearly 10°C. 
This process - adiabatic cooling, has been known to humankind since ancient times, and allows us to maintain stable, optimal conditions for the precious servers belonging to our customers and the Scaleway Elements public cloud. \n\nScaleway’s process is incredibly straightforward, and works with standard computing equipment without the need for proprietary modifications. We even built a climatic chamber to study the impact of all weather and humidity conditions on virtually all of the IT equipment on the market. \n\n## How does it work?\n\n2,200 sensors and measurement points are analyzed by a mathematical algorithm in real-time. Every 17 milliseconds the data center adapts, self-regulates and optimizes its processes so that each and every server gets exactly the right energy and cooling needed to function. What makes our system more **trustworthy** than any mechanical process is this **unique design**. Not only this, the water and energy savings are immense, year round. The system needs virtually **no maintenance and uses no refrigerants** \\- which are greenhouse gases. In other words, it’s **zero carbon**! \n\n**Insane risk-taking? Major innovation?**\n\nNo. We use technology that makes sense for each and every one of our Scalers who are deeply concerned by their environmental impact. We are a key digital stakeholder, and not only do we innovate, we also guarantee the utmost quality and reliability of service. \n\nThe results are in - after two unprecedented heatwaves and three years of 100% SLA - there’s no denying it’s been a success! \n\nAt a time when Europe and the States are calling the environmental and ecological impact of the digital sector into question, it has never been more urgent to change our habits and bring innovation back to the forefront of data centers. 
\n\nWe need to recognise that innovation is the only way to cut energy footprints and the use of water at the source, rather than compensating by throwing money at the problem.","createdAt":"2023-01-17T14:10:57.263Z","updatedAt":"2024-10-29T13:48:03.704Z","publishedAt":"2023-01-23T17:33:20.950Z","locale":"en","tags":"Datacenter\nStory\nSustainability\nBehind the Scene","popular":false,"articleOfTheMonth":false,"category":"Scale","timeToRead":5,"excerpt":"Global warming is forcing us to question the techniques ordinarily used to cool data centers, as they are responsible for a significant part of the digital economy’s energy consumption. ","author":"Arnaud de Bermingham","h1":"Changing data center efficiency practices - the heat is on!","createdOn":"2021-10-14"}},{"id":130,"attributes":{"title":"design-considerations-and-our-recommendations-for-data-protection","path":"design-considerations-and-our-recommendations-for-data-protection/","description":"When you build your infrastructure with Scaleway, **it’s important to take a few simple rules into account, to limit the risk of data loss**, whatever the cause. 
Data is a shared responsibility - between provider and customer.\n\nThe causes of data loss can, for example, be due to a hardware failure, a network failure, hacking, malicious acts or the destruction of physical infrastructure.\n\nCertain precautions need to be taken depending on the product type, namely for bare metal, Infrastructure as a Service or Platform as a Service.\n\n**In the interest of transparency,** we would like to clarify and elaborate on the means used by Scaleway - our design recommendations and the responsibility of each person with regard to the data we store and process.\n\n## The concept of regions and AZs\n\nRegarding the location of data, it is important to distinguish between three key concepts of the public cloud: the region, the availability zone and the data center.\n\n* A **region** includes several availability zones (AZ), ideally three within a geographical area of about 200 km. A region is also a unique network that is dissociated from (not interconnected) other regions with the exception of Amsterdam which for historical reasons is also a peering location for the Paris region. At Scaleway, Paris, Amsterdam or Warsaw are regions.\n* An **Availability Zone** (AZ) is made up of one or more data centers situated in a geographical area of about 5km with a maximum internal latency of 1.4 ms and situated at least 50km from another availability zone in the same region. At Scaleway, the fr-par-1 availability zone contains our DC2 and DC3 data centers, and the fr-par-2 availability zone contains our DC5 data center. The fr-par-3 availability zone will soon be made available with our DC4 data center.\n* A **data center** (DC) is the physical location of an availability zone.\n\nCustomers can choose the region and availability zone when ordering **infrastructure products** (IaaS). 
The physical fault domain is the availability zone and the network failure area is the region.\n\nAs a customer, you are responsible for the redundancy and the management of the services that run on top of your infrastructure products. The highest level of redundancy is obtained by developing your application across several distinct regions.\n\nCustomers can choose only the region when ordering **platform products** (PaaS). In this case, fault domain corresponds to the region and is therefore essentially linked to the network. Redundancy and service management are the responsibility of Scaleway. In other words, the cloud provider operating PaaS services in several AZs in the same region, is responsible for them.\n\nThis is why an ideal public cloud design is usually based on three availability zones in the same region. At Scaleway, we fully subscribe to this logic. Indeed, with three availability zones, the distribution of a PaaS product across different AZs allows for a high level of redundancy and availability.\n\n**In the interests of transparency - we can't claim perfect implementation of this ideal logic for the public cloud.**\n\nTo date, not all of our regions are made up of three availability zones. This has no impact on IaaS products. For PaaS products, the level of availability and disaster resilience is not as optimal as with a three-zone design. We have long been aware of this issue, but we have always categorically refused to compromise by having multiple availability zones, clusters or virtual data centers in the same physical data center.\n\nTo avoid misleading our customers, we systematically recommend that they build their infrastructure across multiple regions. This is the most elegant way to ensure a redundant, high availability service.\n\nIn 2021, we will add three new availability zones to our three current regions. This project has already been validated and investments secured. 
Our PaaS software stack is designed with this in mind, and will be redeployed accordingly by the end of the year.\n\n## Bare metal products\n\nWhen you use bare metal products, we do not, and cannot, have control of your infrastructure and data.\n\nNevertheless, here are our recommendations to minimize the risks:\n\n\"RAID\" storage is NOT a backup or a guarantee of data durability. Scaleway does not guarantee any backup of your data and cannot even physically do it for you.\n\nYou must, at the very least, set up a remote backup system, in accordance with basic IT security rules and standards. Moreover, we strongly recommend our customers distribute their sensitive data across several servers located in different data centers, or even different providers and with a DRP (Disaster Recovery Plan) or BCP (Business Continuity Plan) rationale.\n\n**Backup solutions:**\n\n* Solution 1: we offer an FTP [_Dedibackup_](https://www.scaleway.com/en/docs/dedibox/dedicated-servers/how-to/use-dedibackup-ftp-backup/) replicated backup space for all Dedibox server customers. There are two versions available: 100GB free of charge, and 750GB for 4.99 € excl. tax/month. At Scaleway, this data is stored in our Object Storage, with a high level of redundancy and durability (see Object Storage chapter). However, Dedibackup has very limited functionality and security, and should be considered as technologically outdated in 2021.\n* Solution 2: we strongly recommend the use of [_Object Storage_](https://www.scaleway.com/en/object-storage/) combined with long term regional archiving on [_C14 Cold Storage_](https://www.scaleway.com/en/c14-cold-storage/). For example, if your server is located in DC3 (in the Paris region), we recommend storing your backup datasets on Object Storage in the Amsterdam or Warsaw region. 
This solution is inexpensive, easy to implement and offers extremely high durability.\n* Solution 3: the perfect solution in a multi-cloud approach is to store your backup datasets with a different provider, in a geographical location that is sufficiently far away from your primary server.\n\n**Important design considerations:**\n\n* _Dedibackup_ is based on Scaleway _Object Storage_. Although _Object Storage_ is a regional product, due to the lack of three availability zones in the Paris region, data is currently mainly stored in the fr-par-2 availability zone (DC5). If your Bare Metal server is located in DC5, we recommend that you use solution 2 from this list. Also, for DC5 Bare Metal customers, we will soon allow you to choose the storage region of your _Dedibackup_.\n* To view the physical location of your server in our data centers, simply log in to the account management section of the console or contact our technical support team.\n\n**A note about RPN-SAN:**\n\n[RPN-SAN](https://www.scaleway.com/en/dedibox/storage/rpn-san/) is a turnkey block storage solution, managed by Scaleway and designed for Dedibox Bare Metal servers running on the RPN private network. The product is available in two versions - a \"Basic\" version and a \"High-Availability\" version.\n\n**Important design considerations:**\n\n* The \"Basic\" version of RPN-SAN has no redundancy and is therefore only suitable for customers with specific needs, who are aware of the related constraints. The data is stored on a single server, on a RAID hard disk array. **This storage should be considered temporary and used only as such.** The data is not duplicated across multiple servers or data centers, and there is no backup on Scaleway's side.\n* The “High-Availability” version of RPN-SAN has geographical redundancy. 
Data is synchronized between two different data centers via a dedicated fiber optic network.\n\n**The backup process:**\n\n* RPN-SAN requires a backup process just like a bare metal server, on the server side, following one of the three methods described above.\n* Even with the High Availability version, although unlikely, stored data can be irretrievably lost and therefore must be backed up just like the local storage of a bare metal server.\n\n## IaaS Products\n\n### Scaleway Instances and Block Storage\n\nScaleway Elements instances have local storage and can also benefit from optional remote storage with Block Storage.\n\nThere has long been confusion about Instances. In order to avoid incidents that we see all too often, let’s take a closer look at them.\n\nInstances **are not and will never be \"VPS\" (Virtual Private Server) products**, they do not work in the same way, and data storage does not follow the same logic. Remember that a state-of-the-art Public Cloud client infrastructure should normally be designed to scale horizontally, over a large number of instances. As each instance has a limited lifespan, the number of instances running simultaneously at a given time is based on an application's usage. 
Persistent data is normally stored mainly in Object Storage but also in Block Storage.\n\nThe public cloud is not suitable for monolithic and non-distributed applications, and only bare metal server products or the private cloud can meet their requirements.\n\nConfusion about the service level of instances has been around (for a long time) in the cloud world, but with [a little effort](https://aws.amazon.com/compute/sla/), it is possible to better understand the difference with older models (VPS):\n\n![](https://lh6.googleusercontent.com/PCPvjVcDfm1m_AHiv9afyuNbTHT1s4xTLHVx-b8YzPhVxCCdbbzK9Uvd70EYFLaUaSqQD9X0e5QGQRFetrS9XhjyV3efMj0R9jPb59V4JM4bRDkL0WPZsU0dc0HHjbZ4vklusNRm)\n\nThis translates to potentially **74.40 hours** of unavailability over a 1 month period.\n\nAnother source can be found [here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html).\n\n**Important design considerations:**\n\n* So-called **Local** storage is temporary. This storage has the same lifetime as the instance, and can be moved by Scaleway to our Object Storage for future use when the instance is destroyed (Power-Off / Stop functionality). This local storage can be backed up as a \"snapshot\" and duplicated as many times as you like. It is based on SSDs and NVMe using high-performance RAID arrays that have, by definition, a limited lifetime and write rate. It is neither redundant nor backed up by Scaleway, and should never be used to store persistent and/or important data.\n* Local storage therefore works precisely as its name suggests. It is physically attached to the hypervisor that hosts the instance, without any abstraction layer. We do not know how to perform hot migration on this type of storage, so in the event of maintenance or a software or hardware incident on the hypervisor, data may be permanently lost.\n* Block Storage is a high availability and high performance persistent storage. 
It is made up of multiple redundant clusters offering triple replication of your data. As an IaaS product, its use, resilience and lifespan are controlled by an availability zone. The same goes for the instance.\n* Block Storage cannot be used in any other availability zone than that of the instance due to latency issues.\n* \n**Best practices and backup techniques:**\n\nThe golden rule - each type of storage has its own use.\n\n* Local storage should be used for operating systems (the images), logs, temporary transactional data, temporary files, and datasets requiring high performance calculations.\n* Block Storage should be used for storing user data, databases and content.\n* Object Storage should be used for all data that needs to be delivered and that is persistent.\n* If local storage contains persistent data, although strongly discouraged, at the very least a snapshot should be taken regularly and saved using Object Storage in another region and kept in C14 cold storage.\n* Similarly, even though Block Storage is redundant and replicated three times, it is physically hosted in the same availability zone as the instance. This is why we strongly recommend performing regular snapshots and creating a dataset using Object Storage in another region and the C14 Cold Storage.\n\nA note about snapshots:\n\n* Scaleway keeps the snapshots of local volumes in Object Storage. Block volume snapshots are kept in the same cluster.\n* In the short term, we plan to give our customers direct access to these snapshots via an Object Storage bucket in order to facilitate backup or lifecycle actions by the customer.\n\n## PaaS Products\n\n### Object Storage\n\nObject Storage is an Amazon S3-compatible data storage system that offers a high level of functionality and resilience. 
This product is entirely managed by Scaleway, meaning we have full responsibility for the durability of the data stored in Object Storage.\n\n**Important design considerations:**\n\n* Object Storage is currently available across our three regions. We recommend using this product as primary storage for your cloud applications.\n* Our platform uses erasure coding 6+3.\n* Our regions currently do not consist of the three availability zones necessary for maximum resilience. In other words, data availability can be impacted in case of the total destruction of an availability zone. With the implementation of the fr-par-3 availability zone in the near future, Object Storage will be resilient to the total destruction of any availability zone in the same region.\n* Our Object Storage supports data life cycles as well as service classes, including Glacier class. Transferring stored data to our [C14 Cold Storage](https://www.scaleway.com/en/c14-cold-storage/) can be done very easily and quickly at extremely low costs.\n* Our C14 Cold Storage is physically hosted separately from other availability zones, in the DC4 Datacenter (fallout shelter), which benefits from extremely high physical and fire protection standards. It is currently only available in the Paris region.\n\n**Best practices and backup techniques:**\n\n* Ideally, your applications should be designed to store data using Object Storage in two different regions simultaneously. This is also the only technical solution that provides redundancy with respect to the region's network. This applies regardless of the public cloud provider you use. 
Besides being simple to use and requiring minimal development work during the build of your application, this solution is also extremely reliable in terms of both availability and durability.\n* Within a multi-cloud approach, the first solution is to also use a region of another Amazon S3-compatible public cloud provider that is far enough away from the Scaleway region you have chosen.\n* If the dual-region or multi-cloud solution cannot be implemented, we simply recommend backing up your buckets regularly. This is easy to do with Scaleway, just create a life cycle rule duplicating your Amazon S3 buckets in C14 Cold Storage. [This solution](https://www.scaleway.com/en/docs/object-storage-glacier/) is very simple, reliable and takes only a few minutes to set up.\n* C14 Cold Storage is the most reliable and resilient product on the market for your backups, delivering the highest market standards at a very low price. At Scaleway, we offer the first 75GB every month and additional gigabytes are charged at less than €0.002 per month. Once the data is written, the physical device is disconnected from the electricity supply, protecting your data from potential software or human error.\n\n### Scaleway Elements Database as a Service (DBaaS)\n\nOur Managed Database is based on Scaleway Elements instances. Like all the other cloud providers on the market, our product has the same scope as our instances, and therefore the same availability zone.\n\n**Important design considerations:**\n\n* Our standard DBaaS products offer Backup/Restore and database export capabilities through Scaleway Object Storage.\n* The HA version distributes the two nodes that make up your database cluster across different racks on different hypervisors, in the same availability zone. This option makes your database resilient to a hypervisor or instance crash.\n* We perform cross-region backups by default. Paris DBaaS are backed up in Amsterdam, Amsterdam in Paris, and Warsaw in Paris. 
Users can access these backups via the console or the API through a pre-signed link valid for 24 hours.\n\n**Best practices and backup techniques:**\n\n* Ideally, for a multi-cloud approach, we recommend the simultaneous or failover use of several database clusters, across several regions or with different cloud providers. However, in practice, this is almost impossible to implement with relational databases.\n* A good practice is to always have a DBaaS cluster on standby, configured and ready to use in another region, and ready to receive the most recent dump from the main cluster. Consider using FQDN names with a low TTL (60 seconds) from our domains service to easily and quickly change the destination cluster of your applications.\n* Don't forget to regularly (several times a day) archive your DBaaS database dump in C14 Cold Storage.\n\n### Kubernetes Kapsule\n\nKapsule is an instance orchestrator. As such, it has the same scope as an instance, and therefore the same availability zone.\n\nOur recommendations are identical to those of our Elements Instances. Ideally, we advise you use two Kubernetes clusters in two different regions, or even with two separate cloud providers, and use our multi-cloud load-balancing product for your different clusters.\n\n ","createdAt":"2023-01-18T10:58:14.830Z","updatedAt":"2024-10-28T16:20:21.274Z","publishedAt":"2023-01-18T10:59:45.539Z","locale":"en","tags":"Data\nSecurity\nScaleway","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":12,"excerpt":"When you build your infrastructure with Scaleway, it’s important to take a few simple rules into account, to limit the risk of data loss. 
Data is a shared responsibility: provider \u0026 customer.","author":"Arnaud de Bermingham","h1":"Design considerations and our recommendations for data protection","createdOn":"2021-04-20"}},{"id":136,"attributes":{"title":"how-we-protect-your-data","path":"how-we-protect-your-data/","description":"In light of recent events, many of you have asked us about the practices and methods we use to protect our data centers. We thank you for all your questions, and we will respond, as always, with maximum transparency.\n\n## Data centers: at the heart of data resilience strategies\n\nWe have always considered our data centers to be a selling point and a tangible reflection of the quality of our products. They are also an invaluable production tool, and a source of pride for us.\n\n**For almost 15 years, our data centers have been part of an uncompromising investment strategy**, which allows us to take full responsibility on our clients’ behalf. We have chosen to specialize in all aspects of this profession: from design to construction and implementation, basing our engineering on feedback. We have used our teams’ valuable knowledge to set up data centers which are more and more **resilient and innovative**.\n\nWe are one of the few companies with a fully integrated approach, from data center to software (as well as networks and hardware), all in full transparency. 
We are one of the last remaining cloud companies to master the highly specific area of data centers – almost all other companies in the market use large real estate actors’ sites for their international expansions.\n\nWe are the only triple play-type cloud supplier to provide all three services: [data center and private infrastructure colocation](https://www.scaleway.com/en/datacenter/), [dedicated high-end servers](https://www.scaleway.com/en/dedibox/) for maximum control and impact and a modern and flexible [public cloud](https://www.scaleway.com/en/elements/) ecosystem.\n\nOur command of the whole value chain allows us to offer competitive pricing and innovative services, while never compromising due to economic considerations.\n\nWith regard to data center colocation, we are one of the biggest French players and an important European player. For many years we have been hosting Gartner Magic Quadrant companies on our infrastructure, as well as many other companies which are well-known in Europe, thus demonstrating our stringency with regard to colocation.\n\n**The regulatory context: Regulation ensures that people and the environment are protected. The operator, along with the insurer, handles asset protection.**\n\nData centers in France are governed by labor law, and above a certain power, by ICPE-type decrees (establishments classified for environmental protection).\n\nRegulation on this is light-handed and mainly concerns safe evacuation of staff (emergency exits, smoke extraction, etc.) in case of incidents, and environmental protection. For example, it does not require the installation of fire detectors or fire suppression systems, nor any asset-protection measures.\n\nIt is important to understand that the technical design and inclusion of asset protection measures in a data center is therefore entirely dependent on the project owner and the operator, but also on the insurer’s conditions on the level of coverage and deductibles. 
To summarize: these regulations ensure the protection of people and the environment, the operator and the insurer ensure the protection of physical assets.\n\nThere has long been confusion between design resilience and certification such as ISO standards. The aim of certification is to standardize governmental practices and business processes, but this provides no guarantee to the client of proper design, rules, or implementation with regard to asset protection in a data center. The recent unfortunate incident, which affected infrastructure with SECNUMCloud (an initiative by the French National Cybersecurity Agency), ISO-27001 and even HDS certification, demonstrates this. Certification of compliance with an ISO, HDS or SECNUMCloud standard is by no means a guarantee of the physical security of a data center.\n\nRegarding asset protection, in France, APSAD certification delivered by insurance companies and the CNPP (National Center for Prevention and Protection) provides a guarantee of reliability and effectiveness for asset security. This certification is based on reference standards and requirements, stemming from experience of incidents, which apply to the design of the facility, desired results, training of staff, and maintenance. This voluntary certification is extremely strict, costly, and demanding. For us, it represents the minimum requirement for a data center, bearing in mind the sensitivity of the assets hosted. 
This APSAD certification is a guarantee of security.\n\n## Risks in data centers: we reveal all\n\nThe risk is very low if it is correctly addressed by measures built into the data center.\n\nIn our experience, inverters and batteries represent the highest risk.\n\nOver the past ten years, across its data centers in France using around 40MW, Scaleway has experienced one battery fire, on September 16, 2019.\n\u003cblockquote class=\"twitter-tweet\" style=\"border:none;\" width=\"550\" height=\"700\"\u003e\n \u003ca href=\"https://twitter.com/a_bermingham/status/1173556991895228416\"\u003e\u003c/a\u003e\n\u003c/blockquote\u003e \n\u003cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\"\u003e\u003c/script\u003e\n\nand four inverter explosions\n\n\u003cblockquote class=\"twitter-tweet\" style=\"width=\"550\" height=\"600\"\u003e\n \u003ca href=\"https://twitter.com/a_bermingham/status/597700890280251392\"\u003e\u003c/a\u003e\n\u003c/blockquote\u003e \n\u003cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\"\u003e\u003c/script\u003e\n\n\u003cblockquote class=\"twitter-tweet\" style=\"border:none;\" width=\"550\" height=\"800\"\u003e\n \u003ca href=\"https://twitter.com/a_bermingham/status/595261996330790912\"\u003e\u003c/a\u003e\n\u003c/blockquote\u003e \n\u003cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\"\u003e\u003c/script\u003e\n\n\u003cblockquote class=\"twitter-tweet\" style=\"border:none;\" width=\"550\" height=\"300\"\u003e\n\u003ca href=\"https://twitter.com/a_bermingham/status/491276637821210626\"\u003e\u003c/a\u003e\n\u003c/blockquote\u003e \n\u003cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\"\u003e\u003c/script\u003e\n\nOn June 27, 2013, the neighbor of our DC2 data center, specialized in recycling paper, burned down:\n\u003cblockquote class=\"twitter-tweet\" style=\"border:none;\" width=\"550\" height=\"300\"\u003e\n \u003ca 
href=\"https://twitter.com/a_bermingham/status/350298052558462976\"\u003e\u003c/a\u003e\n\u003c/blockquote\u003e \n\u003cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\"\u003e\u003c/script\u003e\n\nMore recently, on June 2, 2019, the neighbor of our DC5 data center, specialized in chemical processing, also burned down just a few meters away from our premises:\n\n\u003cblockquote class=\"twitter-tweet\" style=\"border:none;\" width=\"550\" height=\"750\"\u003e\n \u003ca href=\"https://twitter.com/a_bermingham/status/1135151776360013825\"\u003e\u003c/a\u003e\n\u003c/blockquote\u003e \n\u003cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\"\u003e\u003c/script\u003e\n\nTo be very clear, running a data center means anticipating and managing risk. This risk may be internal or external, and it is real.\n\nOur job is to plan for every eventuality, no matter how unlikely. Each time, the design of our data center and our automatic mechanisms worked perfectly and prevented your assets from being affected by a major incident, with no outages.\n\n![](https://www-uploads.scaleway.com/blog-image-7.webp)\n\n## Scaleway’s approach\n\nScaleway’s approach is based around three objectives:\n\n1. Isolate any incidents to stop them becoming bigger, using compartmentation methods.\n2. Control the incident, without interrupting production, using automatic mechanisms.\n3. Facilitate intervention by the emergency services and our staff.\n\nThis approach is applied by all data centers around the world. In this sector, not only asset protection but also business continuity is paramount, even in the unlikely event of a fire.\n\n**Passive protection (built-in)**\n\nWe divide each data center into compartments, all of which are fire-resistant. The walls, flooring, ceilings, doors, and windows are designed to resist fire and prevent it spreading to the rest of the building. 
The surface area and duration of this resistance depends on the risk involved.\n\nIn other words, if there is an incident within one compartment, it will not spread to the rest of the building for at least one or two hours.\n\nFor example:\n\n- We consider the places where inverters and transformers are located to be particularly at risk of fire (pursuant to the decree of June 25, 1980), due to high electrical power and/or presence of batteries. They have two-hour fire protection.\n- Redundant systems are isolated in separate fire-proof compartments with two-hour fire protection.\n- Computer rooms are split into 150 – 1700m2 compartments (depending on the data center). They have one-hour fire protection.\n\nVentilation ducts are shut off by valves which close automatically in case of fire to stop it spreading. Cable passages are [caulked](https://www.flamtec.fr/pdf/calfeutrement.pdf) and treated with mastic and intumescent paint.\n\nOur level of fireproofing is ensured by mineral wool sandwich panels with fire properties corresponding to APSAD standard D14-A, or concrete with a certain thickness, and specially designed doors.\n\nSmoke is just as dangerous as fire itself. 
Each compartment has a smoke extraction system which is able to work in 400ºC heat for two hours.\n\nWith regard to the neighbors of our data centers (which have caused problems on two occasions throughout our history), where we are not able to implement a 10-meter distance, we protect our centers with fire protection walls, heavy-duty roads and fire hydrants which allow the emergency services to safely intervene.\n\nFinally, since DC3, we install all high-risk equipment such as power generators and high-voltage transformers outside the building.\n\n**Active protection**\n\nOur data centers are all standard equipped with fire detection systems corresponding to APSAD DC7 or N7.\n\n- DC2 uses the DFHS multipoint system from [DEF](https://www.defonline.com/application/data-centers-telecoms/)\n- DC3 and DC4 use the [VESDA](https://xtralis.com/file/8185) multipoint system\n- DC5 uses both the [VESDA](https://xtralis.com/file/8185) system and OSID system from [Xtralis](https://xtralis.com/file/7136) due to the specific nature of the site.\n\nThese are highly advanced systems. They work by taking air samples and are unaffected by the significant air currents present in data centers. These are reliable early detection systems, which can detect a fire in under 40 seconds. The manufacturer carries out maintenance twice per year, which also has specific certification (APSAD D7).\n\nThe first intervention or check in the event of a potential detection is carried out by a fire safety agent specially trained in firefighting (SSIAP2), present 24/7 at all data centers, and our technicians. 
They use installed fire extinguishers which are certified APSAD N4 or fire hose cabinets installed in the storage spaces of DC2, DC3 and DC5.\n\n_**N.B.:** Our APSAD DC7, N7 and N4 certification and periodic maintenance certification (APSAD Q7 and Q4) are available by request from technical support._\n\n**Fire suppression**\n\nOur sector requires service and operating continuity even in the event of a fire. There are two main types of automatic fire suppression systems on the market which can put out a fire without interrupting services:\n\n- Gas systems (FM200, Novec, Inergen, Nitrogen, etc.)\n- Water mist systems (HiFog, Fogtec, Semco, etc.)\n\nAs for sprinkler systems, which are a specific requirement in the USA, these do not allow operating continuity.\n\nWe use both of the above systems:\n\n- DC2 uses the Semco water mist FM, VdS/OH1 and DIFT certified system\n- DC4 uses the 3M NOVEC 1230 gas fire suppression APSAD R13 certified system\n- DC3 and DC5 use the Marioff HiFog water mist FM and VdS/OH1 certified system (N.B.: currently being installed in DC5 as part of the site extension which is underway).\n\nWe avoid using gas systems due to a number of hard drive incidents which have occurred over the past few years, caused by the noise made by these systems(1)\n\nIn light of the unjustified disinclination of the market toward water mist systems, we tested them in real conditions in June 2012, in conjunction with the CNRS (French National Center for Scientific Research) and in the presence of our clients, to measure their effectiveness in extinguishing a fire without damaging IT equipment:\n\n\u003ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/0Ot4pby2D7o\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen\u003e\u003c/iframe\u003e\n\u003cbr\u003e\nWe also tested this approach on live transformers in September 
2012:\n![](https://www-uploads.scaleway.com/fire_check_472bc8adb9.webp)\nThis system was installed at Scaleway, with an autonomous water supply and electric power from a generator in case the electricity is deliberately switched off by firefighters.\n\nSince then, this automatic water mist fire suppression system has become widely used in almost all data centers around the world, and is recognized as the most effective system.\n\n_**N.B.:** Our FM, VdS/OH1, DIFT and APSAD R13 certification and periodic maintenance (APSAD Q13) certification is available by request from technical support._\n\n**Facilitating emergency services intervention**\n\nOur buildings and rooms are built with fire-resistance in mind, to allow safe intervention by the emergency services if an incident occurs. They are made of concrete or fire-resistant mineral wool sandwich panels.\n\nTo raise the alarm, DC3 is equipped with a specific priority telephone (TASAL – Automatically Monitored Line Telephone) installed by the fire department.\n\nAll our data centers have a limited height (maximum 11 meters), are equipped with fire hydrants, heavy-duty roads and a fire water run-off collection system in line with regulations.\n\n**Audit**\n\nWe are proud of our data centers and their security. We consider that we have implemented the best solutions to protect your most valuable asset: your data. We are well aware of the huge responsibility this represents. There can be no compromises when it comes to your data.\n\nFor this reason, and because of the high level of protection implemented in our data centers, we are covered by the best insurance on the market.\n\nOur four data centers are audited by our insurer at least once per year. They can also be audited by you, our client, accompanied by experts chosen by you.\n\nOur certification, risk analyses, and safety information can be consulted and audited upon request. 
We only charge for the time our teams spend assisting you and putting together the required technical files.\n\nWe regularly organize visits to our data centers, particularly on heritage days, and we would be delighted to welcome you into the heart of our infrastructure as soon as the health situation allows.\n\n**Scaleway’s approach in other data centers**\n\nAll our data centers in France belong to Scaleway, and we also have colocation data centers in the Netherlands and Poland.\n\nThese data centers have not been designed by, and are not run by Scaleway, rather we work with [Iron Mountain](https://www.ironmountain.com/data-centers/locations/amsterdam-data-center) and [Equinix](https://www.equinix.fr/data-centers/).\n\nWe have a long-term contract with these partners, and we regularly audit their sites to ensure they apply similar criteria as for our own sites in terms of infrastructure availability and asset security.\n\nThe APSAD certification and reference standard does not exist outside of France, but each country has similar technical reference standards that many data centers follow and adhere to, such as [VdS](https://vds.de/en/about-vds). \n \n_N.B.: The certification for our colocation partners is available by request from technical support._\n\n \n###### (1)(\u003chttps://www.silicon.fr/test-anti-incendie-sourd-datacenter-ing-157345.html\u003e \u0026 \u003chttp://www.availabilitydigest.com/public%5Farticles/0602/inergen%5Fnoise.pdf\u003e)","createdAt":"2023-01-18T11:30:00.702Z","updatedAt":"2024-10-29T13:45:59.793Z","publishedAt":"2023-01-18T11:31:45.040Z","locale":"en","tags":"Data\nSecurity","popular":false,"articleOfTheMonth":false,"category":"Deploy","timeToRead":11,"excerpt":"In light of recent events, many of you have asked us about the practices and methods we use to protect our data centers. 
We will respond with transparency in this article.\n","author":"Arnaud de Bermingham","h1":"How we protect your data","createdOn":"2021-05-26"}}]},"meta":{"id":962,"title":"How Data is powering tech efficiency, privacy and value(s) ","description":"Data management has never been more critical to business success. But how can it be handled efficiently, whilst respecting privacy, and generate value... in line with a company's core values? ","ogtype":null,"ogtitle":"How Data is powering tech efficiency, privacy and value(s) ","ogdescription":null,"noindex":false},"localizations":{"data":[]}}}]},"meta":{"id":1617,"title":"How Everdian delivers “life-saving” real-time critical insights, via AI","description":"AI startup Everdian chose Scaleway not just for its powerful GPU clusters, but also because Scaleway’s simplicity means new team members can be onboarded in just a few weeks. More inside!","ogtype":null,"ogtitle":"How Everdian delivers “life-saving” real-time critical insights, via AI","ogdescription":"AI startup Everdian chose Scaleway not just for its powerful GPU clusters, but also because Scaleway’s simplicity means new team members can be onboarded in just a few weeks. 
More inside!","noindex":false,"ogimage":{"data":[{"id":3241,"attributes":{"name":"Natural-Language-Processing-AI-Illustration-Blog.webp","alternativeText":null,"caption":null,"width":1216,"height":752,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp","hash":"large_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","mime":"image/webp","name":"large_Natural-Language-Processing-AI-Illustration-Blog.webp","path":null,"size":"284.79","width":1000,"height":618},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp","hash":"small_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","mime":"image/webp","name":"small_Natural-Language-Processing-AI-Illustration-Blog.webp","path":null,"size":"108.87","width":500,"height":309},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp","hash":"medium_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","mime":"image/webp","name":"medium_Natural-Language-Processing-AI-Illustration-Blog.webp","path":null,"size":"194.75","width":750,"height":464},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp","hash":"thumbnail_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","mime":"image/webp","name":"thumbnail_Natural-Language-Processing-AI-Illustration-Blog.webp","path":null,"size":"38.57","width":245,"height":152}},"hash":"Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","ext":".webp","mime":"image/webp","size":366.43,"url":"https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2024-02-15T13:4
3:21.487Z","updatedAt":"2024-10-14T14:18:30.813Z"}}]}},"localizations":{"data":[]}}},"_nextI18Next":{"initialI18nStore":{"en":{"common":{"open":"Open","close":"Close","backTo":"Back to {{page}}","seeMore":"See more","skip":"Skip {{to}}","toLogin":"to login","toMain":"to main content","toFooter":"to footer section","footer":{"followUs":"Follow us","subLinks":{"contracts":{"href":"/en/contracts/","title":"Contracts"},"legalNotice":{"href":"/en/legal-notice/","title":"Legal Notice"},"privacyPolicy":{"href":"/en/privacy-policy/","title":"Privacy Policy"},"cookie":{"href":"/en/cookie/","title":"Cookie"},"securityMeasures":{"href":"https://security.scaleway.com","title":"Security Measures"}}},"breadcrumb":{"homepageLink":{"home":{"href":"/","title":"Home"}}},"cookies":{"acceptAll":"Accept all","rejectAll":"Reject all","save":"Save settings","panelManagementTitle":"Manage cookies settings","panelConsent":{"title":"Cookie time!","description":"We use cookies in order to improve our website and to offer you a better experience. You can also consult our ","linkLabel":"Cookie policy","link":"/en/privacy-policy/","settings":"Manage your preferences"},"categories":{"functional":{"title":"Functional","subtitle":"Always active","description":"These cookies are required for the website to function properly and to allow you to use its services and features. Without these cookies, we would be unable to provide certain requested services or features."},"analytics":{"title":"Analytics","description":"These cookies are used to monitor the performance of our site and to enhance your browsing experience."},"marketing":{"title":"Marketing","description":"These cookies are used to understand user behavior in order to provide you with a more relevant browsing experience or personalize the content on our site."}}}},"blog":{"tagsAriaLabel":"Tags list. 
Click to choose as filter.","timeToRead":"{{min}} min read","recommendedArticles":"Recommended articles","pagination":{"next":"Forward to Next","previous":"Back to Previous","goToPage":"Go to page ","currentPage":"Current page: "},"copyButton":{"copied":"Copied!","defaultValue":"Copy","code":"Copy code"},"home":{"title":"Scaleway Blog - All posts","description":"Scaleway’s blog helps developers and startups to build, deploy and scale applications.","heading":"Scaleway Blog","articleOfMonth":"Must read","latestArticles":"Latest articles","popularArticles":"Most popular articles"},"categoryPage":{"build":{"title":"Build Projects with Scaleway","description":"Learn how to easily build and develop projects using Scaleway products."},"deploy":{"title":"Deploy Applications with Scaleway","description":"Discover how to deploy your applications smoothly with Scaleway."},"scale":{"title":"Scale Your Applications with Scaleway","description":"Find out how to efficiently scale your applications on Scaleway."},"incidents":{"title":"Incident Reports","description":"All the latest updates on Scaleway Cloud ecosystem incidents, and how they were resolved."}},"authorPage":{"title_one":"A {{author}}'s post","title_other":"All {{author}}'s posts","description_one":"Discover a blog post written by {{author}}.","description_other":"Discover all the blog posts written by {{author}}."}},"pages":{"available_zones":"Available zones:","city":"{{code}}:","AMS":"Amsterdam","PAR":"Paris","WAW":"Warsaw","yes":"Yes","no":"No","daily":"Daily","weekly":"Weekly","monthly":"Monthly","yearly":"Yearly","published":"Published on","seeMore":"See more","blocks":{"calculator":{"choose":"Choose your plan","availabilityZone":"Availability Zone","instanceType":"Instance Type","quantity":"Quantity","selectPlaceholder":"Select...","volumeSize":"Volume Size","volumeSizeHelper":"Min. 
10 GB","volumeType":"Volume Type","sizeUnit":"GB","flexibleIp":"Flexible IPv4","ipHelper":"You need a Flexible IP if you want to get an Instance with a public IPv4.\n Uncheck this box if you already have one available on your account, or if you don’t need an IPv4.","noOtherType":"No other type available with this Instance"},"productFaq":{"title":"Frequently asked questions"},"productTutorials":{"title":"Get started with tutorials"},"customerStories":{"defaultTitle":"Customer success stories"}},"templates":{"beta":{"discovery":{"title":"Discovery","description":"Discovery products are prototypical versions of a product. This phase aims to validate an idea and to prove there is interest in the product. During the Discovery phase, customers can be contacted by the Product team to ask them to share their thoughts on the product and to help with the development of the new solution.\nProducts in the Discovery phase are not guaranteed to be released. The duration of the Discovery phase may vary depending on the product."},"early-access":{"title":"Early Access","description":""},"private":{"title":"Private Beta","description":"Private Beta products are early versions of future products or features. This phase allows users to test, validate a product in development, and give feedback to the Product team.\nPrivate Beta is limited to users selected by the Product Development team. Users can request access to the product via a form and the development team will grant access rights. The Private Beta stage usually lasts three to six months."},"public":{"title":"Public Beta","description":"Public Beta products are ready to be tested by all customers. Public Beta products may not include all of the final product’s features.\nPublic Beta is the last stage of testing before a product is released in General Availability.\nThese products are intended to be used in test environments unless specified otherwise. 
The Public Beta phase lasts six months on average."}},"domains":{"register":"Register","registerInfo":"Price before tax\nFirst year registration's price.","transfer":"Transfer","transferInfo":"Price before tax\nTransfer price for domain during first year.","renewing":"Renewing","renewingInfo":"Price before tax\nSecond year registration's price.","restoration":"Restoration","restorationInfo":"Price before tax\nPrice for renewing after expiration and/or redemption period."},"contact":{"titleForm":"Your contact","firstName":"First Name","lastName":"Last Name","email":"Email","jobRole":"Job Role","tel":"Phone (format: 33600000000)","digits":"Only digits","acme":"ACME","8ball":"8ball","emailPlaceholder":"username@domain.tld","john":"John","doe":"Doe","SocietyTitle":"Your société","company":"Company","industry":"Industry","country":"Country","legal":"Your data will be processed by Scaleway S.A.S. in order to manage your request. To know more, visit our","privacy":"Privacy Policy","ctaLabel":"Be contacted","optional":"(optional)","countries":{"labelCountry":"Select your country","france":"France","germany":"Germany","austria":"Austria","belgium":"Belgium","czechia":"Czechia","denmark":"Denmark","estonia":"Estonia","finland":"Finland","greece":"Greece","hungary":"Hungary","ireland":"Ireland","italy":"Italy","latvia":"Latvia","lithuania":"Lithuania","norway":"Norway","netherlands":"Netherlands","poland":"Poland","portugal":"Portugal","romania":"Romania","slovenia":"Slovenia","spain":"Spain","sweden":"Sweden","switzerland":"Switzerland","gb":"United Kingdom of Great Britain and Northern Ireland","usa":"United States of America","other":"Other"},"industries":{"labelIndustry":"Select your industry","computerSoftware":"Computer Software","consulting":"Consulting","ecommerce":"E-commerce","education":"Education","energy":"Energy","finance":"Finance,","gaming":"Gaming","hospitalHealthcare":"Hospital \u0026 Healthcare","infoTechnoServices":"Information Technology \u0026 
Services","manufacturing":"Manufacturing","media":"Media","publicSector":"Public Sector","retail":"Retail","startups":"Startups","technology":"Technology","telecommunications":"Telecommunications","transportTruckRailroad":"Transportation/Trucking/Railroad"}},"contactPartner":{"partner":{"title":"Partner Identification","domain":"Your domain","placeholder":"example.com"},"opportunity":{"title":"Opportunity information","company":"Company name","project":"Project name","yourProject":"Your project name","name":"Contact name","firstName":"Contact first name","email":"Contact email","number":"Contact phone number","job":"Contact job title","onlyDigits":"Only digits","countries":{"title":"Country","labelCountry":"Select your country","france":"France","germany":"Germany","austria":"Austria","belgium":"Belgium","czechia":"Czechia","denmark":"Denmark","estonia":"Estonia","finland":"Finland","greece":"Greece","hungary":"Hungary","ireland":"Ireland","italy":"Italy","latvia":"Latvia","lithuania":"Lithuania","norway":"Norway","netherlands":"Netherlands","poland":"Poland","portugal":"Portugal","romania":"Romania","slovenia":"Slovenia","spain":"Spain","sweden":"Sweden","switzerland":"Switzerland","gb":"United Kingdom of Great Britain and Northern Ireland","usa":"United States of America","other":"Other"}},"product":{"title":"Product scope","label":"Description : Compute, Containers, AI …"},"timing":{"title":"Timing","timeScale":"Time scale","placeholder":"Select time scale","budget":"Budget","company":"Company name","event":"Compelling event","eventPlaceholder":"Reason to act","date":"Estimated close date"},"budget":{"title":"Budget","overallBudget":"Overall project budget","productBudget":"Product budget (MRR)","currency":"In euros €"},"legal":"Your data will be processed by Scaleway S.A.S. in order to manage your request. 
To know more, visit our","privacy":"Privacy Policy","ctaLabel":"Submit"},"testimonials":{"title":"Customer Success Story","readMore":"Read more"},"pricingPage":{"backButton":"Back to Pricing page","title":"All Range","legal":"Legal notice","backToProducts":"Return to products","openAll":"Open all","closeAll":"Close all","close":"Close","open":"Open","regions":"Regions","subtableToggleButtons":"Subtable open buttons","viewPricing":"View pricing"},"partnerProgram":{"showingPartners_zero":"No partners to show","showingPartners_one":"Showing {{count}} partner","showingPartners_other":"Showing {{count}} partners","type_one":"Partner type","type_other":"Partner types","expertise_one":"Expertise","expertise_other":"Expertises","industry_one":"Industry","industry_other":"Industries","location_one":"Location","location_other":"Locations","filtersTitle":"Filters","clearFiltersLabel":"Clear filters"},"partnerPage":{"partners":"Partners","details":"Details","activity":"Activity","contact":"Contact","viewWebsite":"View Website"}},"notFound":{"title":"Page not found","text":"It seems that the page you want to access does not exist. 
Please check your URL or renew your request later.","link":"Return to homepage"}}}},"initialLocale":"en","ns":["common","blog","pages"],"userConfig":{"i18n":{"locales":["default","en","fr"],"defaultLocale":"default","localeDetection":false},"default":{"i18n":{"locales":["default","en","fr"],"defaultLocale":"default","localeDetection":false}}}},"header":{"mainNavigationItems":[{"id":542,"title":"Dedibox and Bare Metal","menuAttached":false,"order":1,"path":"/DediboxBareMetal","type":"WRAPPER","uiRouterKey":"dedibox-and-bare-metal-1","slug":"dedibox-bare-metal","external":false,"items":[{"id":543,"title":"Dedibox - dedicated servers","menuAttached":false,"order":1,"path":"/DediboxBareMetal/Dedibox","type":"INTERNAL","uiRouterKey":"dedibox-dedicated-servers","slug":"dedibox-bare-metal-dedibox","external":false,"related":{"id":29,"title":"Dedibox","path":"/dedibox/","scheduledAt":null,"createdAt":"2022-04-19T15:29:02.488Z","updatedAt":"2024-11-22T10:20:26.181Z","publishedAt":"2022-04-28T17:05:07.122Z","locale":"en","__contentType":"api::page.page","navigationItemId":543,"__templateName":"Generic"},"items":[{"id":544,"title":"Start","menuAttached":false,"order":1,"path":"/DediboxBareMetal/Dedibox/Start","type":"INTERNAL","uiRouterKey":"start-2","slug":"dedibox-bare-metal-dedibox-start","external":false,"related":{"id":53,"title":"Start","path":"/dedibox/start/","scheduledAt":null,"createdAt":"2022-04-21T16:44:17.577Z","updatedAt":"2024-11-22T10:20:26.898Z","publishedAt":"2022-04-28T17:12:40.426Z","locale":"en","__contentType":"api::page.page","navigationItemId":544,"__templateName":"Generic"},"items":[],"description":"Affordable servers with the best price-performance ratio on the 
market"},{"id":545,"title":"Pro","menuAttached":false,"order":2,"path":"/DediboxBareMetal/Dedibox/Pro","type":"INTERNAL","uiRouterKey":"pro-4","slug":"dedibox-bare-metal-dedibox-pro","external":false,"related":{"id":9,"title":"Pro","path":"/dedibox/pro/","scheduledAt":null,"createdAt":"2022-04-07T13:51:48.537Z","updatedAt":"2024-11-22T10:20:26.820Z","publishedAt":"2022-04-28T17:04:00.983Z","locale":"en","__contentType":"api::page.page","navigationItemId":545,"__templateName":"Generic"},"items":[],"description":"Perfect balance of processing power, memory and storage"},{"id":546,"title":"Core","menuAttached":false,"order":3,"path":"/DediboxBareMetal/Dedibox/Core","type":"INTERNAL","uiRouterKey":"core-1","slug":"dedibox-bare-metal-dedibox-core","external":false,"related":{"id":14,"title":"Core","path":"/dedibox/core/","scheduledAt":null,"createdAt":"2022-04-11T09:05:58.588Z","updatedAt":"2024-11-22T10:20:26.821Z","publishedAt":"2022-04-28T17:04:22.560Z","locale":"en","__contentType":"api::page.page","navigationItemId":546,"__templateName":"Generic"},"items":[],"description":"The high performance backbone of your mission-critical infrastructure"},{"id":547,"title":"Store","menuAttached":false,"order":4,"path":"/DediboxBareMetal/Dedibox/Store","type":"INTERNAL","uiRouterKey":"store-2","slug":"dedibox-bare-metal-dedibox-store","external":false,"related":{"id":5,"title":"Store","path":"/dedibox/store/","scheduledAt":null,"createdAt":"2022-04-01T15:14:47.812Z","updatedAt":"2024-11-22T10:20:26.902Z","publishedAt":"2022-04-28T17:03:51.376Z","locale":"en","__contentType":"api::page.page","navigationItemId":547,"__templateName":"Generic"},"items":[],"description":"For mission-critical data, fast storage, backup and 
streaming"},{"id":832,"title":"GPU","menuAttached":false,"order":5,"path":"/DediboxBareMetal/Dedibox/GPU_ddx","type":"INTERNAL","uiRouterKey":"gpu-9","slug":"dedibox-bare-metal-dedibox-gpu-ddx","external":false,"related":{"id":1454,"title":"GPU","path":"/dedibox/gpu/","scheduledAt":null,"createdAt":"2024-10-31T10:01:24.876Z","updatedAt":"2024-11-22T10:20:26.906Z","publishedAt":"2024-11-07T07:38:37.573Z","locale":"en","__contentType":"api::page.page","navigationItemId":832,"__templateName":"Generic"},"items":[],"description":"Dedicated GPU power with reliable performance and stability"},{"id":548,"title":"Dedirack","menuAttached":false,"order":6,"path":"/DediboxBareMetal/Dedibox/Dedirack","type":"INTERNAL","uiRouterKey":"dedirack-1","slug":"dedibox-bare-metal-dedibox-dedirack","external":false,"related":{"id":155,"title":"Dedirack","path":"/dedibox/dedirack/","scheduledAt":null,"createdAt":"2022-05-02T10:08:21.002Z","updatedAt":"2024-11-22T10:20:26.900Z","publishedAt":"2022-05-02T10:46:06.212Z","locale":"en","__contentType":"api::page.page","navigationItemId":548,"__templateName":"Generic"},"items":[],"description":"Host your Hardware in our secured French datacenters"},{"id":742,"title":"Dedibox VPS","menuAttached":false,"order":7,"path":"/DediboxBareMetal/Dedibox/VPS","type":"INTERNAL","uiRouterKey":"dedibox-vps","slug":"dedibox-bare-metal-dedibox-vps","external":false,"related":{"id":1234,"title":"Dedibox VPS","path":"/dedibox-vps/","scheduledAt":null,"createdAt":"2024-05-08T16:42:21.258Z","updatedAt":"2024-08-28T09:39:00.756Z","publishedAt":"2024-05-14T16:28:25.184Z","locale":"en","__contentType":"api::page.page","navigationItemId":742,"__templateName":"Generic"},"items":[],"description":"60 locations worldwide, starting at €4,99/month"}],"description":""},{"id":553,"title":"Elastic Metal - bare metal 
cloud","menuAttached":false,"order":2,"path":"/DediboxBareMetal/elasticmetal","type":"INTERNAL","uiRouterKey":"elastic-metal-bare-metal-cloud-1","slug":"dedibox-bare-metal-elasticmetal","external":false,"related":{"id":87,"title":"Elastic Metal","path":"/elastic-metal/","scheduledAt":null,"createdAt":"2022-04-28T12:45:28.696Z","updatedAt":"2024-11-08T15:01:56.485Z","publishedAt":"2022-04-28T13:22:46.501Z","locale":"en","__contentType":"api::page.page","navigationItemId":553,"__templateName":"Generic"},"items":[{"id":554,"title":"Aluminium","menuAttached":false,"order":1,"path":"/DediboxBareMetal/elasticmetal/Aluminium","type":"INTERNAL","uiRouterKey":"aluminium-1","slug":"dedibox-bare-metal-elasticmetal-aluminium","external":false,"related":{"id":8,"title":"Aluminium","path":"/elastic-metal/aluminium/","scheduledAt":null,"createdAt":"2022-04-06T13:13:04.829Z","updatedAt":"2024-11-08T15:01:56.748Z","publishedAt":"2022-04-28T17:04:04.448Z","locale":"en","__contentType":"api::page.page","navigationItemId":554,"__templateName":"Generic"},"items":[],"description":"Fully dedicated bare metal servers with native cloud integration, at the best price"},{"id":557,"title":"Beryllium","menuAttached":false,"order":2,"path":"/DediboxBareMetal/elasticmetal/Beryllium","type":"INTERNAL","uiRouterKey":"beryllium-1","slug":"dedibox-bare-metal-elasticmetal-beryllium","external":false,"related":{"id":15,"title":"Beryllium","path":"/elastic-metal/beryllium/","scheduledAt":null,"createdAt":"2022-04-11T10:57:25.297Z","updatedAt":"2024-11-08T15:01:56.754Z","publishedAt":"2022-04-28T17:13:35.576Z","locale":"en","__contentType":"api::page.page","navigationItemId":557,"__templateName":"Generic"},"items":[],"description":"Powerful, balanced and reliable servers for production-grade 
applications"},{"id":556,"title":"Iridium","menuAttached":false,"order":3,"path":"/DediboxBareMetal/elasticmetal/Iridium","type":"INTERNAL","uiRouterKey":"iridium-1","slug":"dedibox-bare-metal-elasticmetal-iridium","external":false,"related":{"id":810,"title":"Iridium","path":"/elastic-metal/iridium/","scheduledAt":null,"createdAt":"2023-04-27T13:53:48.244Z","updatedAt":"2024-11-08T15:01:56.752Z","publishedAt":"2023-05-29T08:52:19.666Z","locale":"en","__contentType":"api::page.page","navigationItemId":556,"__templateName":"Generic"},"items":[],"description":"Powerful dedicated server designed to handle high-workload applications"},{"id":555,"title":"Lithium","menuAttached":false,"order":4,"path":"/DediboxBareMetal/elasticmetal/Lithium","type":"INTERNAL","uiRouterKey":"lithium-1","slug":"dedibox-bare-metal-elasticmetal-lithium","external":false,"related":{"id":16,"title":"Lithium","path":"/elastic-metal/lithium/","scheduledAt":null,"createdAt":"2022-04-11T11:15:36.538Z","updatedAt":"2024-11-08T15:01:56.753Z","publishedAt":"2022-04-28T17:13:30.074Z","locale":"en","__contentType":"api::page.page","navigationItemId":555,"__templateName":"Generic"},"items":[],"description":"Designed with huge local storage to keep, back up, and protect your data"},{"id":833,"title":"Titanium","menuAttached":false,"order":5,"path":"/DediboxBareMetal/elasticmetal/Titanium","type":"INTERNAL","uiRouterKey":"titanium","slug":"dedibox-bare-metal-elasticmetal-titanium","external":false,"related":{"id":1457,"title":"Titanium","path":"/elastic-metal/titanium/","scheduledAt":null,"createdAt":"2024-10-31T15:08:59.416Z","updatedAt":"2024-11-08T15:52:51.005Z","publishedAt":"2024-11-07T06:52:37.648Z","locale":"en","__contentType":"api::page.page","navigationItemId":833,"__templateName":"Generic"},"items":[],"description":"Power and stability of dedicated GPU hardware integrated into the Scaleway 
ecosystem"}],"description":""},{"id":558,"title":"Apple","menuAttached":false,"order":3,"path":"/DediboxBareMetal/Apple","type":"INTERNAL","uiRouterKey":"apple-2","slug":"dedibox-bare-metal-apple","external":false,"related":{"id":1088,"title":"Apple Mac mini","path":"/apple-mac-mini/","scheduledAt":null,"createdAt":"2024-01-31T15:28:49.276Z","updatedAt":"2024-11-06T08:30:29.831Z","publishedAt":"2024-08-02T07:56:22.454Z","locale":"en","__contentType":"api::page.page","navigationItemId":558,"__templateName":"Generic"},"items":[{"id":561,"title":"Mac mini M1","menuAttached":false,"order":1,"path":"/DediboxBareMetal/Apple/M1","type":"INTERNAL","uiRouterKey":"mac-mini-m1-1","slug":"dedibox-bare-metal-apple-m1","external":false,"related":{"id":91,"title":"Hello m1","path":"/hello-m1/","scheduledAt":null,"createdAt":"2022-04-28T15:24:50.963Z","updatedAt":"2024-11-06T08:29:13.324Z","publishedAt":"2023-10-16T14:15:59.310Z","locale":"en","__contentType":"api::page.page","navigationItemId":561,"__templateName":"Generic"},"items":[],"description":"Enjoy the Mac mini experience with great simplicity"},{"id":560,"title":"Mac mini M2","menuAttached":false,"order":2,"path":"/DediboxBareMetal/Apple/m2","type":"INTERNAL","uiRouterKey":"mac-mini-m2-2","slug":"dedibox-bare-metal-apple-m2","external":false,"related":{"id":1086,"title":"mac mini M2","path":"/mac-mini-m2/","scheduledAt":null,"createdAt":"2024-01-31T09:30:46.938Z","updatedAt":"2024-08-07T16:00:48.720Z","publishedAt":"2024-02-05T15:21:02.196Z","locale":"en","__contentType":"api::page.page","navigationItemId":560,"__templateName":"Generic"},"items":[],"description":"Perform your daily tasks with speed and efficiency"},{"id":559,"title":"Mac mini M2 Pro","menuAttached":false,"order":3,"path":"/DediboxBareMetal/Apple/M2pro","type":"INTERNAL","uiRouterKey":"mac-mini-m2-pro-1","slug":"dedibox-bare-metal-apple-m2pro","external":false,"related":{"id":991,"title":"mac mini M2 
pro","path":"/mac-mini-m2-pro/","scheduledAt":null,"createdAt":"2023-10-25T08:56:21.435Z","updatedAt":"2024-08-07T16:02:51.939Z","publishedAt":"2023-11-16T12:11:33.094Z","locale":"en","__contentType":"api::page.page","navigationItemId":559,"__templateName":"Generic"},"items":[],"description":"Realize your most ambitious projects thanks to a new level of power"}],"description":""}],"description":""},{"id":562,"title":"Compute","menuAttached":false,"order":2,"path":"/Compute","type":"WRAPPER","uiRouterKey":"compute-3","slug":"compute-4","external":false,"items":[{"id":563,"title":"Virtual Instances","menuAttached":false,"order":1,"path":"/Compute/VirtualInstances","type":"INTERNAL","uiRouterKey":"virtual-instances-1","slug":"compute-virtual-instances","external":false,"related":{"id":655,"title":"Virtual Instances","path":"/virtual-instances/","scheduledAt":null,"createdAt":"2023-02-20T10:48:52.279Z","updatedAt":"2024-08-28T07:01:50.413Z","publishedAt":"2023-02-28T08:32:03.960Z","locale":"en","__contentType":"api::page.page","navigationItemId":563,"__templateName":"Generic"},"items":[{"id":567,"title":"Production-Optimized","menuAttached":false,"order":1,"path":"/Compute/VirtualInstances/Prod","type":"INTERNAL","uiRouterKey":"production-optimized-2","slug":"compute-virtual-instances-prod","external":false,"related":{"id":657,"title":"Production-Optimized Instances","path":"/production-optimized-instances/","scheduledAt":null,"createdAt":"2023-02-20T15:13:14.415Z","updatedAt":"2024-10-30T14:59:58.375Z","publishedAt":"2023-02-28T08:34:34.739Z","locale":"en","__contentType":"api::page.page","navigationItemId":567,"__templateName":"Generic"},"items":[],"description":"Dedicated vCPU for the most demanding workloads 
(x86)"},{"id":566,"title":"Workload-Optimized","menuAttached":false,"order":2,"path":"/Compute/VirtualInstances/Workload-Optimized","type":"INTERNAL","uiRouterKey":"workload-optimized-1","slug":"compute-virtual-instances-workload-optimized","external":false,"related":{"id":802,"title":"Workload-Optimized Instances","path":"/workload-optimized-instances/","scheduledAt":null,"createdAt":"2023-04-25T12:38:13.577Z","updatedAt":"2024-08-28T12:05:29.294Z","publishedAt":"2023-05-26T13:36:52.797Z","locale":"en","__contentType":"api::page.page","navigationItemId":566,"__templateName":"Generic"},"items":[],"description":"Secure, scalable VMs, equipped for high memory and compute demands (x86)"},{"id":565,"title":"Cost-Optimized","menuAttached":false,"order":3,"path":"/Compute/VirtualInstances/Cost-Optimized","type":"INTERNAL","uiRouterKey":"cost-optimized-1","slug":"compute-virtual-instances-cost-optimized","external":false,"related":{"id":656,"title":"Cost-Optimized Instances","path":"/cost-optimized-instances/","scheduledAt":null,"createdAt":"2023-02-20T12:55:45.865Z","updatedAt":"2024-08-28T08:44:44.416Z","publishedAt":"2023-02-28T08:34:47.421Z","locale":"en","__contentType":"api::page.page","navigationItemId":565,"__templateName":"Generic"},"items":[],"description":"Highly reliable and priced affordably Instances with shared vCPUs (x86 and ARM)"},{"id":564,"title":"Learning","menuAttached":false,"order":4,"path":"/Compute/VirtualInstances/Learning","type":"INTERNAL","uiRouterKey":"learning-1","slug":"compute-virtual-instances-learning","external":false,"related":{"id":13,"title":"Stardust Instances","path":"/stardust-instances/","scheduledAt":null,"createdAt":"2022-04-11T09:03:33.397Z","updatedAt":"2024-05-15T13:51:19.969Z","publishedAt":"2022-04-28T17:04:10.708Z","locale":"en","__contentType":"api::page.page","navigationItemId":564,"__templateName":"Generic"},"items":[],"description":"A tiny instance to test and host your personal projects 
(x86)"}],"description":""},{"id":568,"title":"GPU","menuAttached":false,"order":2,"path":"/Compute/gpu","type":"INTERNAL","uiRouterKey":"gpu-8","slug":"compute-gpu","external":false,"related":{"id":1025,"title":"GPU Instances","path":"/gpu-instances/","scheduledAt":null,"createdAt":"2023-11-30T13:15:51.769Z","updatedAt":"2024-11-19T16:38:15.121Z","publishedAt":"2023-12-12T12:52:20.083Z","locale":"en","__contentType":"api::page.page","navigationItemId":568,"__templateName":"Generic"},"items":[{"id":571,"title":"L4 GPU Instance","menuAttached":false,"order":1,"path":"/Compute/gpu/L4","type":"INTERNAL","uiRouterKey":"l4-gpu-instance","slug":"compute-gpu-l4","external":false,"related":{"id":1108,"title":"L4 GPU Instance","path":"/l4-gpu-instance/","scheduledAt":null,"createdAt":"2024-02-28T16:20:43.240Z","updatedAt":"2024-11-20T14:49:27.542Z","publishedAt":"2024-03-04T13:37:45.809Z","locale":"en","__contentType":"api::page.page","navigationItemId":571,"__templateName":"Generic"},"items":[],"description":"Maximize your AI infrastructures with a versatile Instance"},{"id":572,"title":"L40S GPU Instance","menuAttached":false,"order":2,"path":"/Compute/gpu/L40s","type":"INTERNAL","uiRouterKey":"l40-s-gpu-instance","slug":"compute-gpu-l40s","external":false,"related":{"id":1221,"title":"L40S GPU Instance","path":"/l40s-gpu-instance/","scheduledAt":null,"createdAt":"2024-04-26T13:37:31.531Z","updatedAt":"2024-11-20T14:50:10.681Z","publishedAt":"2024-04-29T12:12:07.466Z","locale":"en","__contentType":"api::page.page","navigationItemId":572,"__templateName":"Generic"},"items":[],"description":"Universal Instance, faster than L4 and cheaper than H100 PCIe"},{"id":569,"title":"H100 PCIe GPU Instance","menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/h100-pcie-try-it-now/","type":"EXTERNAL","uiRouterKey":"h100-pc-ie-gpu-instance-4","slug":{},"external":true,"description":"Accelerate your model training with the most high-end AI chip"},{"id":570,"title":"GPU 3070 
Instances","menuAttached":false,"order":4,"path":"/Compute/gpu/3070","type":"INTERNAL","uiRouterKey":"gpu-3070-instances-1","slug":"compute-gpu-3070","external":false,"related":{"id":397,"title":"GPU 3070 Instances","path":"/gpu-3070-instances/","scheduledAt":null,"createdAt":"2022-05-30T11:52:26.506Z","updatedAt":"2023-11-16T16:38:12.184Z","publishedAt":"2022-05-30T12:33:10.212Z","locale":"en","__contentType":"api::page.page","navigationItemId":570,"__templateName":"Generic"},"items":[],"description":"Dedicated NVIDIA® RTX 3070 with the best price/performance ratio"},{"id":573,"title":"Render GPU Instances","menuAttached":false,"order":5,"path":"/Compute/gpu/render","type":"INTERNAL","uiRouterKey":"render-gpu-instances","slug":"compute-gpu-render","external":false,"related":{"id":52,"title":"GPU Render Instances","path":"/gpu-render-instances/","scheduledAt":null,"createdAt":"2022-04-21T16:00:29.592Z","updatedAt":"2024-09-25T09:40:12.404Z","publishedAt":"2022-04-28T17:12:46.136Z","locale":"en","__contentType":"api::page.page","navigationItemId":573,"__templateName":"Generic"},"items":[],"description":"Dedicated Tesla P100s for all your Machine Learning \u0026 Artificial Intelligence needs."}],"description":""},{"id":574,"title":"Serverless","menuAttached":false,"order":3,"path":"/Compute/Serverless","type":"WRAPPER","uiRouterKey":"serverless-7","slug":"compute-serverless","external":false,"items":[{"id":576,"title":"Serverless Functions","menuAttached":false,"order":1,"path":"/Compute/Serverless/Functions","type":"INTERNAL","uiRouterKey":"serverless-functions-1","slug":"compute-serverless-functions","external":false,"related":{"id":50,"title":"Serverless Functions","path":"/serverless-functions/","scheduledAt":null,"createdAt":"2022-04-21T15:28:10.687Z","updatedAt":"2024-07-05T11:44:44.356Z","publishedAt":"2022-04-28T17:12:49.569Z","locale":"en","__contentType":"api::page.page","navigationItemId":576,"__templateName":"Generic"},"items":[],"description":"Experience 
an easy way to run your code on the cloud"},{"id":575,"title":"Serverless Containers","menuAttached":false,"order":2,"path":"/Compute/Serverless/Containers","type":"INTERNAL","uiRouterKey":"serverless-containers-2","slug":"compute-serverless-containers","external":false,"related":{"id":7,"title":"Serverless Containers","path":"/serverless-containers/","scheduledAt":null,"createdAt":"2022-04-04T07:02:24.178Z","updatedAt":"2024-07-05T11:46:09.955Z","publishedAt":"2022-04-28T17:03:54.693Z","locale":"en","__contentType":"api::page.page","navigationItemId":575,"__templateName":"Generic"},"items":[],"description":"Easily run containers on the cloud with a single command"},{"id":579,"title":"Serverless Jobs","menuAttached":false,"order":3,"path":"/Compute/Serverless/Jobs","type":"INTERNAL","uiRouterKey":"serverless-jobs-1","slug":"compute-serverless-jobs","external":false,"related":{"id":980,"title":"Serverless Jobs","path":"/serverless-jobs/","scheduledAt":null,"createdAt":"2023-10-13T16:05:31.205Z","updatedAt":"2024-08-20T12:28:03.639Z","publishedAt":"2023-12-07T15:55:35.668Z","locale":"en","__contentType":"api::page.page","navigationItemId":579,"__templateName":"Generic"},"items":[],"description":"Run batches of tasks in the cloud"}],"description":""},{"id":580,"title":"Containers","menuAttached":false,"order":4,"path":"/Compute/Containers","type":"INTERNAL","uiRouterKey":"containers-4","slug":"compute-containers","external":false,"related":{"id":465,"title":"Containers","path":"/containers/","scheduledAt":null,"createdAt":"2022-07-29T15:09:20.535Z","updatedAt":"2024-08-28T07:05:23.005Z","publishedAt":"2023-02-27T13:53:48.270Z","locale":"en","__contentType":"api::page.page","navigationItemId":580,"__templateName":"Generic"},"items":[{"id":581,"title":"Kubernetes 
Kapsule","menuAttached":false,"order":1,"path":"/Compute/Containers/Kapsule","type":"INTERNAL","uiRouterKey":"kubernetes-kapsule-1","slug":"compute-containers-kapsule","external":false,"related":{"id":6,"title":"Kubernetes Kapsule","path":"/kubernetes-kapsule/","scheduledAt":null,"createdAt":"2022-04-01T15:40:18.523Z","updatedAt":"2024-04-30T14:13:12.823Z","publishedAt":"2022-11-02T17:14:27.738Z","locale":"en","__contentType":"api::page.page","navigationItemId":581,"__templateName":"Generic"},"items":[],"description":"Kubernetes exclusively for Scaleway products and resources"},{"id":582,"title":"Kubernetes Kosmos","menuAttached":false,"order":2,"path":"/Compute/Containers/Kosmos","type":"INTERNAL","uiRouterKey":"kubernetes-kosmos-1","slug":"compute-containers-kosmos","external":false,"related":{"id":43,"title":"Kubernetes Kosmos","path":"/kubernetes-kosmos/","scheduledAt":null,"createdAt":"2022-04-20T17:18:27.347Z","updatedAt":"2024-07-12T09:35:39.810Z","publishedAt":"2022-04-28T17:13:15.597Z","locale":"en","__contentType":"api::page.page","navigationItemId":582,"__templateName":"Generic"},"items":[],"description":"Multi-cloud Kubernetes for Scaleway and external providers resources"},{"id":583,"title":"Container Registry","menuAttached":false,"order":3,"path":"/Compute/Containers/containerregisrt","type":"INTERNAL","uiRouterKey":"container-registry-1","slug":"compute-containers-containerregisrt","external":false,"related":{"id":39,"title":"Container Registry","path":"/container-registry/","scheduledAt":null,"createdAt":"2022-04-20T14:07:31.417Z","updatedAt":"2023-11-15T08:49:34.191Z","publishedAt":"2022-04-28T17:06:10.179Z","locale":"en","__contentType":"api::page.page","navigationItemId":583,"__templateName":"Generic"},"items":[],"description":"An easy-to-use Docker 
repository"}],"description":""}],"description":""},{"id":584,"title":"AI","menuAttached":false,"order":3,"path":"/AI","type":"WRAPPER","uiRouterKey":"ai","slug":"ai-1","external":false,"items":[{"id":585,"title":"Clusters","menuAttached":false,"order":1,"path":"/AI/Clusters","type":"WRAPPER","uiRouterKey":"clusters-1","slug":"ai-clusters","external":false,"items":[{"id":588,"title":"Custom-built Clusters","menuAttached":false,"order":1,"path":"/AI/Clusters/AIsuper","type":"INTERNAL","uiRouterKey":"custom-built-clusters","slug":"ai-clusters-a-isuper","external":false,"related":{"id":953,"title":"Custom-built Clusters","path":"/custom-built-clusters/","scheduledAt":null,"createdAt":"2023-09-22T14:14:40.961Z","updatedAt":"2024-10-29T12:48:55.663Z","publishedAt":"2023-10-04T14:49:01.987Z","locale":"en","__contentType":"api::page.page","navigationItemId":588,"__templateName":"Generic"},"items":[],"description":"Build the next Foundation Model with one of the fastest and most energy-efficient supercomputers in the world"},{"id":776,"title":"On Demand Cluster","menuAttached":false,"order":2,"path":"/AI/Clusters/Clusterondemand","type":"INTERNAL","uiRouterKey":"on-demand-cluster","slug":"ai-clusters-clusterondemand","external":false,"related":{"id":1266,"title":"Cluster On Demand ","path":"/cluster-on-demand/","scheduledAt":null,"createdAt":"2024-05-16T15:00:19.723Z","updatedAt":"2024-11-08T08:52:40.598Z","publishedAt":"2024-05-21T14:10:00.511Z","locale":"en","__contentType":"api::page.page","navigationItemId":776,"__templateName":"Generic"},"items":[],"description":"Rent a GPU-cluster from 32 to more than a thousand GPUs to speed up distributed training"}],"description":""},{"id":592,"title":"Model-as-a-service","menuAttached":false,"order":2,"path":"/AI/ManagedServices","type":"WRAPPER","uiRouterKey":"model-as-a-service-1","slug":"ai-managed-services","external":false,"items":[{"id":593,"title":"Managed 
Inference","menuAttached":false,"order":1,"path":"/AI/ManagedServices/llm","type":"INTERNAL","uiRouterKey":"managed-inference-2","slug":"ai-managed-services-llm","external":false,"related":{"id":1303,"title":"Inference","path":"/inference/","scheduledAt":null,"createdAt":"2024-06-13T13:16:26.427Z","updatedAt":"2024-11-15T14:11:15.846Z","publishedAt":"2024-06-28T12:43:39.677Z","locale":"en","__contentType":"api::page.page","navigationItemId":593,"__templateName":"Generic"},"items":[],"description":"Deploy AI models in a dedicated inference infrastructure. Get tailored security and predictable throughput"},{"id":824,"title":"Generative APIs","menuAttached":false,"order":2,"path":"/AI/ManagedServices/GenerativeAPIs","type":"INTERNAL","uiRouterKey":"generative-ap-is-2","slug":"ai-managed-services-generative-ap-is","external":false,"related":{"id":1418,"title":"Generative APIs","path":"/generative-apis/","scheduledAt":null,"createdAt":"2024-10-10T16:23:00.732Z","updatedAt":"2024-11-20T17:52:03.232Z","publishedAt":"2024-10-11T12:17:56.286Z","locale":"en","__contentType":"api::page.page","navigationItemId":824,"__templateName":"Generic"},"items":[],"description":"Consume AI models instantly via a simple API call. 
All hosted in Europe"}],"description":""},{"id":586,"title":"GPU Instances","menuAttached":false,"order":3,"path":"/AI/gpu","type":"WRAPPER","uiRouterKey":"gpu-instances","slug":"ai-gpu","external":false,"items":[{"id":589,"title":"L40S GPU Instance","menuAttached":false,"order":1,"path":"https://www.scaleway.com/en/l40s-gpu-instance/","type":"EXTERNAL","uiRouterKey":"l40-s-gpu-instance-1","slug":{},"external":true,"description":"Accelerate the next generation of AI-enabled applications with the universal L40S GPU Instance, faster than L4 and cheaper than H100 PCIe"},{"id":590,"title":"L4 GPU Instance","menuAttached":false,"order":2,"path":"https://www.scaleway.com/en/l4-gpu-instance/","type":"EXTERNAL","uiRouterKey":"l4-gpu-instance-1","slug":{},"external":true,"description":"Maximize your AI infrastructure's potential with a versatile and cost-effective GPU Instance"},{"id":587,"title":"H100 PCIe GPU Instance","menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/h100-pcie-try-it-now/","type":"EXTERNAL","uiRouterKey":"h100-pc-ie-gpu-instance-2","slug":{},"external":true,"description":"Accelerate your model training with the most high-end AI chip"},{"id":591,"title":"Render GPU Instance","menuAttached":false,"order":4,"path":"https://www.scaleway.com/en/gpu-render-instances/","type":"EXTERNAL","uiRouterKey":"render-gpu-instance-1","slug":{},"external":true,"description":"Dedicated Tesla P100s for all your Machine Learning \u0026 Artificial Intelligence needs"}],"description":""}],"description":""},{"id":594,"title":"Storage","menuAttached":false,"order":4,"path":"/Storage","type":"WRAPPER","uiRouterKey":"storage-3","slug":"storage-2","external":false,"items":[{"id":602,"title":"Storage","menuAttached":false,"order":1,"path":"/Storage/storage","type":"WRAPPER","uiRouterKey":"storage-4","slug":"storage-storage","external":false,"items":[{"id":604,"title":"Object 
Storage","menuAttached":false,"order":1,"path":"/Storage/storage/ObjectStorage","type":"INTERNAL","uiRouterKey":"object-storage-4","slug":"storage-storage-object-storage","external":false,"related":{"id":652,"title":"Object Storage","path":"/object-storage/","scheduledAt":null,"createdAt":"2023-02-16T09:44:56.414Z","updatedAt":"2024-10-25T13:10:50.377Z","publishedAt":"2023-03-07T18:05:15.061Z","locale":"en","__contentType":"api::page.page","navigationItemId":604,"__templateName":"Generic"},"items":[],"description":"Amazon S3-compatible and Multi-AZ resilient object storage service. Ensuring high availability for your data"},{"id":605,"title":"Scaleway Glacier","menuAttached":false,"order":2,"path":"/Storage/storage/glacier","type":"INTERNAL","uiRouterKey":"scaleway-glacier-1","slug":"storage-storage-glacier","external":false,"related":{"id":17,"title":"Glacier Cold storage","path":"/glacier-cold-storage/","scheduledAt":null,"createdAt":"2022-04-11T11:58:13.079Z","updatedAt":"2024-10-25T13:13:55.154Z","publishedAt":"2022-04-28T17:13:24.608Z","locale":"en","__contentType":"api::page.page","navigationItemId":605,"__templateName":"Generic"},"items":[],"description":"Cold Storage class to secure long-term object storage. 
Ideal for deep archived data."},{"id":606,"title":"Block Storage","menuAttached":false,"order":3,"path":"/Storage/storage/BlockStorage","type":"INTERNAL","uiRouterKey":"block-storage-3","slug":"storage-storage-block-storage","external":false,"related":{"id":141,"title":"Block Storage","path":"/block-storage/","scheduledAt":null,"createdAt":"2022-05-02T08:20:39.280Z","updatedAt":"2024-10-30T16:13:44.480Z","publishedAt":"2022-05-02T08:28:12.783Z","locale":"en","__contentType":"api::page.page","navigationItemId":606,"__templateName":"Generic"},"items":[],"description":"Flexible and reliable storage for demanding workloads"}],"description":""}],"description":""},{"id":595,"title":"Network","menuAttached":false,"order":5,"path":"/Network","type":"WRAPPER","uiRouterKey":"network-3","slug":"network-4","external":false,"items":[{"id":603,"title":"Network","menuAttached":false,"order":1,"path":"/Network/Network","type":"WRAPPER","uiRouterKey":"network-4","slug":"network-network","external":false,"items":[{"id":607,"title":"Virtual Private Cloud","menuAttached":false,"order":1,"path":"/Network/Network/VPC","type":"INTERNAL","uiRouterKey":"virtual-private-cloud-1","slug":"network-network-vpc","external":false,"related":{"id":885,"title":"VPC","path":"/vpc/","scheduledAt":null,"createdAt":"2023-07-11T14:38:07.412Z","updatedAt":"2024-06-18T10:05:19.765Z","publishedAt":"2023-07-11T14:38:10.387Z","locale":"en","__contentType":"api::page.page","navigationItemId":607,"__templateName":"Generic"},"items":[],"description":"Secure your cloud resources with ease on a resilient regional network"},{"id":609,"title":"Public Gateway","menuAttached":false,"order":2,"path":"/Network/Network/public","type":"INTERNAL","uiRouterKey":"public-gateway-1","slug":"network-network-public","external":false,"related":{"id":54,"title":"Public 
Gateway","path":"/public-gateway/","scheduledAt":null,"createdAt":"2022-04-22T09:34:12.578Z","updatedAt":"2024-09-11T14:24:49.432Z","publishedAt":"2022-04-28T17:13:01.025Z","locale":"en","__contentType":"api::page.page","navigationItemId":609,"__templateName":"Generic"},"items":[],"description":" A single and secure entrance to your infrastructure"},{"id":608,"title":"Load Balancer","menuAttached":false,"order":3,"path":"/Network/Network/load","type":"INTERNAL","uiRouterKey":"load-balancer-1","slug":"network-network-load","external":false,"related":{"id":45,"title":"Load Balancer","path":"/load-balancer/","scheduledAt":null,"createdAt":"2022-04-21T07:46:46.140Z","updatedAt":"2024-07-24T14:48:37.806Z","publishedAt":"2022-11-18T08:58:30.309Z","locale":"en","__contentType":"api::page.page","navigationItemId":608,"__templateName":"Generic"},"items":[],"description":"Improve the performance of your services as you grow"},{"id":610,"title":"Domains and DNS","menuAttached":false,"order":4,"path":"/Network/Network/DomainsandDNS","type":"INTERNAL","uiRouterKey":"domains-and-dns-1","slug":"network-network-domainsand-dns","external":false,"related":{"id":44,"title":"Domains and DNS","path":"/domains-and-dns/","scheduledAt":null,"createdAt":"2022-04-21T07:26:18.059Z","updatedAt":"2024-03-05T17:01:32.782Z","publishedAt":"2022-04-28T17:13:12.082Z","locale":"en","__contentType":"api::page.page","navigationItemId":610,"__templateName":"Generic"},"items":[],"description":"Buy domain names and manage DNS. 
Find your favourite extensions at a fair price"},{"id":792,"title":"IPAM (IP Address Manager)","menuAttached":false,"order":5,"path":"/Network/Network/IPAM","type":"INTERNAL","uiRouterKey":"ipam-ip-address-manager","slug":"network-network-ipam","external":false,"related":{"id":1300,"title":"IPAM","path":"/ipam/","scheduledAt":null,"createdAt":"2024-06-07T13:07:18.728Z","updatedAt":"2024-07-12T10:47:10.965Z","publishedAt":"2024-07-10T07:39:07.627Z","locale":"en","__contentType":"api::page.page","navigationItemId":792,"__templateName":"Generic"},"items":[],"description":"Centralize and simplify your Scaleway IP address management"},{"id":820,"title":"Edge Services","menuAttached":false,"order":6,"path":"/Network/Network/EdgeServices","type":"INTERNAL","uiRouterKey":"edge-services","slug":"network-network-edge-services","external":false,"related":{"id":1399,"title":"Edge Services","path":"/edge-services/","scheduledAt":null,"createdAt":"2024-07-12T10:30:47.181Z","updatedAt":"2024-11-04T15:19:29.792Z","publishedAt":"2024-09-24T10:34:53.990Z","locale":"en","__contentType":"api::page.page","navigationItemId":820,"__templateName":"Generic"},"items":[],"description":"Expose your HTTP services to the internet with security, reliability, and efficiency by design."}],"description":""}],"description":""},{"id":596,"title":"Data \u0026 Tools","menuAttached":false,"order":6,"path":"/ManagedServices","type":"WRAPPER","uiRouterKey":"data-2","slug":"managed-services","external":false,"items":[{"id":611,"title":"Data","menuAttached":false,"order":1,"path":"/ManagedServices/Data","type":"WRAPPER","uiRouterKey":"data","slug":"managed-services-data","external":false,"items":[{"id":612,"title":"Managed Database for PostgreSQL \u0026 
MySQL","menuAttached":false,"order":1,"path":"/ManagedServices/Data/SQL","type":"INTERNAL","uiRouterKey":"managed-database-for-postgre-sql-2","slug":"managed-services-data-sql","external":false,"related":{"id":48,"title":"Database","path":"/database/","scheduledAt":null,"createdAt":"2022-04-21T14:06:34.262Z","updatedAt":"2024-07-02T15:50:10.807Z","publishedAt":"2022-04-28T17:12:57.201Z","locale":"en","__contentType":"api::page.page","navigationItemId":612,"__templateName":"Generic"},"items":[],"description":"New generation of Relational Databases designed to scale on-demand"},{"id":613,"title":"Managed Database for Redis™","menuAttached":false,"order":2,"path":"/ManagedServices/Data/Redis","type":"INTERNAL","uiRouterKey":"managed-database-for-redis-1","slug":"managed-services-data-redis","external":false,"related":{"id":427,"title":"Managed Database for Redis™","path":"/managed-database-for-redistm/","scheduledAt":null,"createdAt":"2022-06-10T13:30:28.356Z","updatedAt":"2024-06-18T10:05:41.869Z","publishedAt":"2022-07-27T15:29:59.282Z","locale":"en","__contentType":"api::page.page","navigationItemId":613,"__templateName":"Generic"},"items":[],"description":"Accelerate your web application with powerful caching of Memory Databases"},{"id":614,"title":"Managed MongoDB®","menuAttached":false,"order":3,"path":"/ManagedServices/Data/document","type":"INTERNAL","uiRouterKey":"managed-mongo-db","slug":"managed-services-data-document","external":false,"related":{"id":890,"title":"Managed MongoDB","path":"/managed-mongodb/","scheduledAt":null,"createdAt":"2023-07-25T07:58:39.536Z","updatedAt":"2024-11-14T08:16:03.033Z","publishedAt":"2023-10-03T08:31:21.477Z","locale":"en","__contentType":"api::page.page","navigationItemId":614,"__templateName":"Generic"},"items":[],"description":"Drive your own document-oriented database. 
Let us managed the engine"},{"id":781,"title":"Serverless SQL Database","menuAttached":false,"order":4,"path":"/ManagedServices/Data/Serverless_SQL","type":"INTERNAL","uiRouterKey":"serverless-sql-database-2","slug":"managed-services-data-serverless-sql","external":false,"related":{"id":823,"title":"Serverless Sql Database","path":"/serverless-sql-database/","scheduledAt":null,"createdAt":"2023-05-11T22:46:48.805Z","updatedAt":"2024-11-06T14:51:53.874Z","publishedAt":"2023-05-11T22:47:00.320Z","locale":"en","__contentType":"api::page.page","navigationItemId":781,"__templateName":"Generic"},"items":[],"description":"Go serverless with fully managed database"},{"id":780,"title":"Messaging and Queuing","menuAttached":false,"order":5,"path":"/ManagedServices/Data/m\u0026q","type":"INTERNAL","uiRouterKey":"messaging-and-queuing-1","slug":"managed-services-data-m-and-q","external":false,"related":{"id":642,"title":"Messaging and Queuing","path":"/messaging-and-queuing/","scheduledAt":null,"createdAt":"2023-02-09T16:38:42.456Z","updatedAt":"2024-05-21T14:34:56.011Z","publishedAt":"2023-02-09T16:46:35.902Z","locale":"en","__contentType":"api::page.page","navigationItemId":780,"__templateName":"Generic"},"items":[],"description":"Send messages and events without having to manage your message broker"},{"id":822,"title":"Distributed Data Lab","menuAttached":false,"order":6,"path":"/ManagedServices/Data/DataLab","type":"INTERNAL","uiRouterKey":"distributed-data-lab","slug":"managed-services-data-data-lab","external":false,"related":{"id":949,"title":"Distributed Data Lab ","path":"/distributed-data-lab/","scheduledAt":null,"createdAt":"2023-09-21T11:57:12.802Z","updatedAt":"2024-10-30T15:28:03.991Z","publishedAt":"2024-09-27T15:10:48.257Z","locale":"en","__contentType":"api::page.page","navigationItemId":822,"__templateName":"Generic"},"items":[],"description":"Speed up data processing over very large volumes of data with an Apache Spark™ managed 
solution."}],"description":""},{"id":619,"title":"Business Applications","menuAttached":false,"order":2,"path":"/ManagedServices/ManagedServices","type":"WRAPPER","uiRouterKey":"business-applications","slug":"managed-services-managed-services","external":false,"items":[{"id":620,"title":"Web Hosting","menuAttached":false,"order":1,"path":"/ManagedServices/ManagedServices/hosting","type":"INTERNAL","uiRouterKey":"web-hosting-4","slug":"managed-services-managed-services-hosting","external":false,"related":{"id":47,"title":"Web hosting","path":"/web-hosting/","scheduledAt":null,"createdAt":"2022-04-21T11:51:48.689Z","updatedAt":"2024-11-20T15:59:55.910Z","publishedAt":"2022-04-28T13:34:58.879Z","locale":"en","__contentType":"api::page.page","navigationItemId":620,"__templateName":"Generic"},"items":[],"description":"Hosting for individuals, professionals, and everyone in between."},{"id":621,"title":"Web Platform","menuAttached":false,"order":2,"path":"/ManagedServices/ManagedServices/WebPlatform","type":"INTERNAL","uiRouterKey":"web-platform-2","slug":"managed-services-managed-services-web-platform","external":false,"related":{"id":576,"title":"Web Platform - powered by Clever Cloud","path":"/web-platform-powered-by-clever-cloud/","scheduledAt":null,"createdAt":"2022-12-07T14:07:50.856Z","updatedAt":"2023-11-16T15:19:36.970Z","publishedAt":"2022-12-13T08:01:42.916Z","locale":"en","__contentType":"api::page.page","navigationItemId":621,"__templateName":"Generic"},"items":[],"description":"Ship your applications only in a few clicks."},{"id":622,"title":"Transactional Email","menuAttached":false,"order":3,"path":"/ManagedServices/ManagedServices/tem","type":"INTERNAL","uiRouterKey":"transactional-email-2","slug":"managed-services-managed-services-tem","external":false,"related":{"id":776,"title":"Transactional Email 
(TEM)","path":"/transactional-email-tem/","scheduledAt":null,"createdAt":"2023-04-05T16:33:35.536Z","updatedAt":"2024-10-21T14:45:56.496Z","publishedAt":"2023-04-06T10:30:43.491Z","locale":"en","__contentType":"api::page.page","navigationItemId":622,"__templateName":"Generic"},"items":[],"description":"Instant delivery of your transactional emails"},{"id":623,"title":"Cockpit","menuAttached":false,"order":4,"path":"/ManagedServices/ManagedServices/Cockpit","type":"INTERNAL","uiRouterKey":"cockpit-2","slug":"managed-services-managed-services-cockpit","external":false,"related":{"id":814,"title":"Cockpit","path":"/cockpit/","scheduledAt":null,"createdAt":"2023-05-02T08:04:46.085Z","updatedAt":"2024-07-05T11:54:39.588Z","publishedAt":"2023-05-04T16:18:10.562Z","locale":"en","__contentType":"api::page.page","navigationItemId":623,"__templateName":"Generic"},"items":[],"description":"Monitor infrastructures in minutes with a fully managed observability solution"},{"id":784,"title":"IoT Hub","menuAttached":false,"order":5,"path":"/ManagedServices/ManagedServices/iot","type":"INTERNAL","uiRouterKey":"io-t-hub","slug":"managed-services-managed-services-iot","external":false,"related":{"id":31,"title":"Iot hub","path":"/iot-hub/","scheduledAt":null,"createdAt":"2022-04-20T04:58:03.085Z","updatedAt":"2023-11-15T15:42:53.313Z","publishedAt":"2022-04-28T17:13:21.005Z","locale":"en","__contentType":"api::page.page","navigationItemId":784,"__templateName":"Generic"},"items":[],"description":"A purpose-built bridge between connected hardware and cloud."}],"description":""},{"id":615,"title":"Security \u0026 Organization","menuAttached":false,"order":3,"path":"/ManagedServices/SecurityandAccount","type":"WRAPPER","uiRouterKey":"security-3","slug":"managed-services-securityand-account","external":false,"items":[{"id":618,"title":"Identity and Access Management 
(IAM)","menuAttached":false,"order":1,"path":"/ManagedServices/SecurityandAccount/iam","type":"INTERNAL","uiRouterKey":"identity-and-access-management-iam-1","slug":"managed-services-securityand-account-iam","external":false,"related":{"id":569,"title":"IAM","path":"/iam/","scheduledAt":null,"createdAt":"2022-12-02T16:25:06.762Z","updatedAt":"2024-08-22T09:40:22.523Z","publishedAt":"2022-12-06T15:27:30.794Z","locale":"en","__contentType":"api::page.page","navigationItemId":618,"__templateName":"Generic"},"items":[],"description":"The easiest way to safely collaborate in the cloud"},{"id":616,"title":"Secret Manager","menuAttached":false,"order":2,"path":"/ManagedServices/SecurityandAccount/secretmanager","type":"INTERNAL","uiRouterKey":"secret-manager-1","slug":"managed-services-securityand-account-secretmanager","external":false,"related":{"id":779,"title":"Secret Manager","path":"/secret-manager/","scheduledAt":null,"createdAt":"2023-04-11T11:04:18.808Z","updatedAt":"2024-08-28T09:57:43.021Z","publishedAt":"2023-04-26T07:47:45.718Z","locale":"en","__contentType":"api::page.page","navigationItemId":616,"__templateName":"Generic"},"items":[],"description":"Protect your sensitive data across your cloud infrastructure"},{"id":617,"title":"Cost Manager","menuAttached":false,"order":3,"path":"/ManagedServices/SecurityandAccount/cost-manager","type":"INTERNAL","uiRouterKey":"cost-manager-1","slug":"managed-services-securityand-account-cost-manager","external":false,"related":{"id":1186,"title":"Cost Manager","path":"/cost-manager/","scheduledAt":null,"createdAt":"2024-04-08T07:36:07.839Z","updatedAt":"2024-04-08T09:14:21.699Z","publishedAt":"2024-04-08T09:14:21.666Z","locale":"en","__contentType":"api::page.page","navigationItemId":617,"__templateName":"Generic"},"items":[],"description":"Easily track your consumption in an all-in-one tool"},{"id":830,"title":"Environmental Footprint 
Calculator","menuAttached":false,"order":4,"path":"/ManagedServices/SecurityandAccount/Footprint","type":"INTERNAL","uiRouterKey":"environmental-footprint-calculator","slug":"managed-services-securityand-account-footprint","external":false,"related":{"id":1450,"title":"Environmental Footprint Calculator","path":"/environmental-footprint-calculator/","scheduledAt":null,"createdAt":"2024-10-28T14:47:30.518Z","updatedAt":"2024-11-05T16:23:53.555Z","publishedAt":"2024-11-04T12:12:34.311Z","locale":"en","__contentType":"api::page.page","navigationItemId":830,"__templateName":"Generic"},"items":[],"description":"Accurately track your environmental impact and make informed choices"}],"description":""},{"id":624,"title":"Developer Tools","menuAttached":false,"order":4,"path":"/ManagedServices/DeveloperTools","type":"WRAPPER","uiRouterKey":"developer-tools","slug":"managed-services-developer-tools","external":false,"items":[{"id":625,"title":"Scaleway API","menuAttached":false,"order":1,"path":"https://www.scaleway.com/en/developers/api/","type":"EXTERNAL","uiRouterKey":"scaleway-api-2","slug":{},"external":true,"description":"The Public Interface for developers"},{"id":626,"title":"CLI","menuAttached":false,"order":2,"path":"/ManagedServices/DeveloperTools/cli","type":"INTERNAL","uiRouterKey":"cli-2","slug":"managed-services-developer-tools-cli","external":false,"related":{"id":187,"title":"CLI","path":"/cli/","scheduledAt":null,"createdAt":"2022-05-03T08:37:17.214Z","updatedAt":"2024-08-22T05:35:23.543Z","publishedAt":"2022-05-03T11:43:09.246Z","locale":"en","__contentType":"api::page.page","navigationItemId":626,"__templateName":"Generic"},"items":[],"description":"Deploy and manage your infrastructure directly from the command 
line"},{"id":627,"title":"Terraform","menuAttached":false,"order":3,"path":"/ManagedServices/DeveloperTools/terraform","type":"INTERNAL","uiRouterKey":"terraform-1","slug":"managed-services-developer-tools-terraform","external":false,"related":{"id":40,"title":"Terraform","path":"/terraform/","scheduledAt":null,"createdAt":"2022-04-20T14:37:30.508Z","updatedAt":"2023-11-15T08:32:57.793Z","publishedAt":"2022-04-28T17:05:15.208Z","locale":"en","__contentType":"api::page.page","navigationItemId":627,"__templateName":"Generic"},"items":[],"description":"Securely and efficiently provision and manage Infrastructure as Code with Terraform"}],"description":""}],"description":""},{"id":597,"title":"Solutions","menuAttached":false,"order":7,"path":"/Solutions","type":"WRAPPER","uiRouterKey":"solutions-2","slug":"solutions-2","external":false,"items":[{"id":628,"title":"Industries","menuAttached":false,"order":1,"path":"/Solutions/Industries","type":"WRAPPER","uiRouterKey":"industries-1","slug":"solutions-industries","external":false,"items":[{"id":629,"title":"Artificial Intelligence","menuAttached":false,"order":1,"path":"https://www.scaleway.com/en/ai-solutions/","type":"EXTERNAL","uiRouterKey":"artificial-intelligence","slug":{},"external":true,"description":""},{"id":630,"title":"Public Sector","menuAttached":false,"order":2,"path":"/Solutions/Industries/PublicSector","type":"INTERNAL","uiRouterKey":"public-sector","slug":"solutions-industries-public-sector","external":false,"related":{"id":986,"title":"Public sector 
solutions","path":"/public-sector-solutions/","scheduledAt":null,"createdAt":"2023-10-20T14:23:52.057Z","updatedAt":"2024-09-30T17:00:38.498Z","publishedAt":"2023-11-30T14:58:23.419Z","locale":"en","__contentType":"api::page.page","navigationItemId":630,"__templateName":"Generic"},"items":[],"description":""},{"id":631,"title":"Gaming","menuAttached":false,"order":3,"path":"/Solutions/Industries/Gaming","type":"INTERNAL","uiRouterKey":"gaming-1","slug":"solutions-industries-gaming","external":false,"related":{"id":1024,"title":"Gaming Cloud Solutions","path":"/gaming-cloud-solutions/","scheduledAt":null,"createdAt":"2023-11-29T17:06:47.458Z","updatedAt":"2024-09-24T13:29:47.657Z","publishedAt":"2023-12-13T16:53:50.074Z","locale":"en","__contentType":"api::page.page","navigationItemId":631,"__templateName":"Generic"},"items":[],"description":""},{"id":633,"title":"Media and Entertainment","menuAttached":false,"order":4,"path":"/Solutions/Industries/MediaandEntertainment","type":"INTERNAL","uiRouterKey":"media-and-entertainment","slug":"solutions-industries-mediaand-entertainment","external":false,"related":{"id":1048,"title":"Media and Entertainment","path":"/media-and-entertainment/","scheduledAt":null,"createdAt":"2023-12-13T16:23:27.055Z","updatedAt":"2024-09-24T13:30:40.809Z","publishedAt":"2024-01-02T18:08:08.725Z","locale":"en","__contentType":"api::page.page","navigationItemId":633,"__templateName":"Generic"},"items":[],"description":""},{"id":632,"title":"Retail and E-commerce","menuAttached":false,"order":5,"path":"/Solutions/Industries/Retail","type":"INTERNAL","uiRouterKey":"retail-and-e-commerce-2","slug":"solutions-industries-retail","external":false,"related":{"id":1105,"title":"E-commerce retail 
Solutions","path":"/e-commerce-retail-solutions/","scheduledAt":null,"createdAt":"2024-02-28T09:44:45.583Z","updatedAt":"2024-09-24T13:12:26.843Z","publishedAt":"2024-04-02T14:56:24.762Z","locale":"en","__contentType":"api::page.page","navigationItemId":632,"__templateName":"Generic"},"items":[],"description":""},{"id":634,"title":"Startup Program","menuAttached":false,"order":6,"path":"/Solutions/Industries/Startup","type":"INTERNAL","uiRouterKey":"startup-program-1","slug":"solutions-industries-startup","external":false,"related":{"id":82,"title":"Startup program","path":"/startup-program/","scheduledAt":null,"createdAt":"2022-04-27T19:14:18.251Z","updatedAt":"2024-08-27T13:22:49.823Z","publishedAt":"2022-05-11T15:19:00.591Z","locale":"en","__contentType":"api::page.page","navigationItemId":634,"__templateName":"Generic"},"items":[],"description":""},{"id":794,"title":"Financial Services","menuAttached":false,"order":7,"path":"/Solutions/Industries/FinancialServices","type":"INTERNAL","uiRouterKey":"financial-services","slug":"solutions-industries-financial-services","external":false,"related":{"id":1381,"title":"Financial services solutions","path":"/financial-services-solutions/","scheduledAt":null,"createdAt":"2024-08-06T12:19:51.917Z","updatedAt":"2024-11-12T09:58:52.666Z","publishedAt":"2024-08-06T12:31:25.580Z","locale":"en","__contentType":"api::page.page","navigationItemId":794,"__templateName":"Generic"},"items":[],"description":""},{"id":826,"title":"Industrial","menuAttached":false,"order":8,"path":"/Solutions/Industries/Industrial","type":"INTERNAL","uiRouterKey":"industrial","slug":"solutions-industries-industrial","external":false,"related":{"id":1411,"title":"Industrial 
solutions","path":"/industrial-solutions/","scheduledAt":null,"createdAt":"2024-10-02T10:14:37.728Z","updatedAt":"2024-11-08T16:36:55.075Z","publishedAt":"2024-10-03T16:29:42.042Z","locale":"en","__contentType":"api::page.page","navigationItemId":826,"__templateName":"Generic"},"items":[],"description":""}],"description":""},{"id":635,"title":"Use Cases","menuAttached":false,"order":2,"path":"/Solutions/usecases","type":"WRAPPER","uiRouterKey":"use-cases","slug":"solutions-usecases","external":false,"items":[{"id":638,"title":"Cloud Storage Solutions","menuAttached":false,"order":1,"path":"/Solutions/usecases/cloudstorage","type":"INTERNAL","uiRouterKey":"cloud-storage-solutions","slug":"solutions-usecases-cloudstorage","external":false,"related":{"id":595,"title":"Cloud Storage Solutions","path":"/cloud-storage-solutions/","scheduledAt":null,"createdAt":"2022-12-19T13:31:12.676Z","updatedAt":"2024-10-25T13:40:34.304Z","publishedAt":"2023-01-31T10:48:28.580Z","locale":"en","__contentType":"api::page.page","navigationItemId":638,"__templateName":"Generic"},"items":[],"description":""},{"id":637,"title":"Kubernetes Solutions","menuAttached":false,"order":2,"path":"/Solutions/usecases/kub-sol","type":"INTERNAL","uiRouterKey":"kubernetes-solutions-1","slug":"solutions-usecases-kub-sol","external":false,"related":{"id":616,"title":"Kubernetes Solutions","path":"/kubernetes-solutions/","scheduledAt":null,"createdAt":"2023-01-10T16:25:48.652Z","updatedAt":"2024-11-20T16:45:40.105Z","publishedAt":"2023-03-28T07:49:24.834Z","locale":"en","__contentType":"api::page.page","navigationItemId":637,"__templateName":"Generic"},"items":[],"description":""},{"id":636,"title":"Serverless Applications","menuAttached":false,"order":3,"path":"/Solutions/usecases/ServerlessApplications","type":"INTERNAL","uiRouterKey":"serverless-applications-1","slug":"solutions-usecases-serverless-applications","external":false,"related":{"id":780,"title":"Build Scalable Applications With 
Serverless","path":"/build-scalable-applications-with-serverless/","scheduledAt":null,"createdAt":"2023-04-12T08:42:06.395Z","updatedAt":"2024-05-15T13:59:21.827Z","publishedAt":"2023-05-12T06:59:34.924Z","locale":"en","__contentType":"api::page.page","navigationItemId":636,"__templateName":"Generic"},"items":[],"description":""}],"description":""},{"id":639,"title":"Web Hosting","menuAttached":false,"order":3,"path":"/Solutions/WebHosting","type":"WRAPPER","uiRouterKey":"web-hosting-3","slug":"solutions-web-hosting","external":false,"items":[{"id":640,"title":"Managed Web Hosting","menuAttached":false,"order":1,"path":"/Solutions/WebHosting/ManagedWebHosting","type":"INTERNAL","uiRouterKey":"managed-web-hosting-1","slug":"solutions-web-hosting-managed-web-hosting","external":false,"related":{"id":827,"title":"Managed Web Hosting","path":"/managed-web-hosting/","scheduledAt":null,"createdAt":"2023-05-15T09:39:39.531Z","updatedAt":"2024-08-28T06:42:02.109Z","publishedAt":"2023-05-15T12:31:13.810Z","locale":"en","__contentType":"api::page.page","navigationItemId":640,"__templateName":"Generic"},"items":[],"description":""},{"id":641,"title":"Dedicated Web Hosting","menuAttached":false,"order":2,"path":"/Solutions/WebHosting/DedicatedWebHosting","type":"INTERNAL","uiRouterKey":"dedicated-web-hosting","slug":"solutions-web-hosting-dedicated-web-hosting","external":false,"related":{"id":798,"title":"Dedicated Web 
Hosting","path":"/dedicated-web-hosting/","scheduledAt":null,"createdAt":"2023-04-25T09:15:11.185Z","updatedAt":"2024-08-28T06:37:46.212Z","publishedAt":"2023-05-29T08:11:44.369Z","locale":"en","__contentType":"api::page.page","navigationItemId":641,"__templateName":"Generic"},"items":[],"description":""}],"description":""}],"description":""},{"id":744,"title":"Resources","menuAttached":false,"order":8,"path":"/Resources","type":"WRAPPER","uiRouterKey":"resources-2","slug":"resources-3","external":false,"items":[{"id":746,"title":"Ecosystem","menuAttached":false,"order":1,"path":"/Resources/Ecosystem","type":"WRAPPER","uiRouterKey":"ecosystem","slug":"resources-ecosystem","external":false,"items":[{"id":751,"title":"All products","menuAttached":false,"order":1,"path":"/Resources/Ecosystem/All_products","type":"INTERNAL","uiRouterKey":"all-products-2","slug":"resources-ecosystem-all-products","external":false,"related":{"id":223,"title":"All Products","path":"/all-products/","scheduledAt":null,"createdAt":"2022-05-09T13:56:36.517Z","updatedAt":"2024-10-28T10:43:19.295Z","publishedAt":"2022-05-09T14:37:46.378Z","locale":"en","__contentType":"api::page.page","navigationItemId":751,"__templateName":"Generic"},"items":[],"description":""},{"id":828,"title":"Product updates","menuAttached":false,"order":2,"path":"/Resources/Ecosystem/Productupdates","type":"INTERNAL","uiRouterKey":"product-updates","slug":"resources-ecosystem-productupdates","external":false,"related":{"id":1451,"title":"Product 
updates","path":"/product-updates/","scheduledAt":null,"createdAt":"2024-10-28T16:25:15.626Z","updatedAt":"2024-10-30T16:22:06.602Z","publishedAt":"2024-10-30T16:21:39.156Z","locale":"en","__contentType":"api::page.page","navigationItemId":828,"__templateName":"Generic"},"items":[],"description":""},{"id":750,"title":"Betas","menuAttached":false,"order":3,"path":"/Resources/Ecosystem/betas","type":"INTERNAL","uiRouterKey":"betas","slug":"resources-ecosystem-betas","external":false,"related":{"id":90,"title":"Betas","path":"/betas/","scheduledAt":null,"createdAt":"2022-04-28T14:06:08.789Z","updatedAt":"2024-11-05T16:26:58.483Z","publishedAt":"2022-04-28T14:39:18.717Z","locale":"en","__contentType":"api::page.page","navigationItemId":750,"__templateName":"Generic"},"items":[],"description":""},{"id":747,"title":"Changelog","menuAttached":false,"order":4,"path":"https://www.scaleway.com/en/docs/changelog/","type":"EXTERNAL","uiRouterKey":"changelog-2","slug":{},"external":true,"description":""},{"id":758,"title":"Blog","menuAttached":false,"order":5,"path":"https://www.scaleway.com/en/blog/","type":"EXTERNAL","uiRouterKey":"blog-2","slug":{},"external":true,"description":""}],"description":""},{"id":745,"title":"Community","menuAttached":false,"order":2,"path":"/Resources/Community","type":"WRAPPER","uiRouterKey":"community","slug":"resources-community","external":false,"items":[{"id":748,"title":"Slack Community","menuAttached":false,"order":1,"path":"https://slack.scaleway.com/","type":"EXTERNAL","uiRouterKey":"slack-community-2","slug":{},"external":true,"description":""},{"id":749,"title":"Feature Requests","menuAttached":false,"order":2,"path":"https://feature-request.scaleway.com/","type":"EXTERNAL","uiRouterKey":"feature-requests-2","slug":{},"external":true,"description":""},{"id":757,"title":"Scaleway 
Learning","menuAttached":false,"order":3,"path":"/Resources/Community/Scaleway_Learning","type":"INTERNAL","uiRouterKey":"scaleway-learning-2","slug":"resources-community-scaleway-learning","external":false,"related":{"id":597,"title":"Scaleway Learning","path":"/scaleway-learning/","scheduledAt":null,"createdAt":"2022-12-20T08:57:37.886Z","updatedAt":"2024-08-22T15:58:41.554Z","publishedAt":"2023-01-02T21:14:10.049Z","locale":"en","__contentType":"api::page.page","navigationItemId":757,"__templateName":"Generic"},"items":[],"description":""}],"description":""},{"id":752,"title":"Company","menuAttached":false,"order":3,"path":"/Resources/Company","type":"WRAPPER","uiRouterKey":"company-1","slug":"resources-company","external":false,"items":[{"id":756,"title":"Events","menuAttached":false,"order":1,"path":"/Resources/Company/Events","type":"INTERNAL","uiRouterKey":"events-1","slug":"resources-company-events","external":false,"related":{"id":699,"title":"Events","path":"/events/","scheduledAt":null,"createdAt":"2023-03-13T09:14:30.830Z","updatedAt":"2024-11-21T14:08:26.020Z","publishedAt":"2023-03-13T09:14:41.552Z","locale":"en","__contentType":"api::page.page","navigationItemId":756,"__templateName":"Generic"},"items":[],"description":""},{"id":796,"title":"Marketplace","menuAttached":false,"order":2,"path":"https://www.scaleway.com/en/marketplace/","type":"EXTERNAL","uiRouterKey":"marketplace","slug":{},"external":true,"description":""},{"id":755,"title":"Careers","menuAttached":false,"order":3,"path":"/Resources/Company/Careers","type":"INTERNAL","uiRouterKey":"careers-1","slug":"resources-company-careers","external":false,"related":{"id":766,"title":"Careers","path":"/careers/","scheduledAt":null,"createdAt":"2023-03-31T14:17:38.589Z","updatedAt":"2024-07-16T10:08:23.648Z","publishedAt":"2024-02-12T15:39:28.684Z","locale":"en","__contentType":"api::page.page","navigationItemId":755,"__templateName":"Generic"},"items":[],"description":""},{"id":753,"title":"About 
us","menuAttached":false,"order":4,"path":"/Resources/Company/Aboutus","type":"INTERNAL","uiRouterKey":"about-us-1","slug":"resources-company-aboutus","external":false,"related":{"id":195,"title":"About us","path":"/about-us/","scheduledAt":null,"createdAt":"2022-05-03T13:05:13.546Z","updatedAt":"2023-12-14T09:00:58.075Z","publishedAt":"2022-05-11T12:26:40.217Z","locale":"en","__contentType":"api::page.page","navigationItemId":753,"__templateName":"Generic"},"items":[],"description":""},{"id":788,"title":"Labs","menuAttached":false,"order":5,"path":"https://labs.scaleway.com/","type":"EXTERNAL","uiRouterKey":"labs-4","slug":{},"external":true,"description":""},{"id":754,"title":"Customer Testimonials","menuAttached":false,"order":6,"path":"/Resources/Company/customer-testimonials","type":"INTERNAL","uiRouterKey":"customer-testimonials","slug":"resources-company-customer-testimonials","external":false,"related":{"id":294,"title":"Customer testimonials","path":"/customer-testimonials/","scheduledAt":null,"createdAt":"2022-05-19T15:33:42.418Z","updatedAt":"2024-07-08T12:41:04.663Z","publishedAt":"2022-05-19T15:37:23.202Z","locale":"en","__contentType":"api::page.page","navigationItemId":754,"__templateName":"Generic"},"items":[],"description":""}],"description":""}],"description":""},{"id":598,"title":"Pricing","menuAttached":false,"order":9,"path":"/pricing","type":"INTERNAL","uiRouterKey":"pricing-2","slug":"pricing-1","external":false,"related":{"id":1236,"title":"Pricing","path":"/pricing/","scheduledAt":null,"createdAt":"2024-05-14T07:33:54.370Z","updatedAt":"2024-09-30T10:00:47.281Z","publishedAt":"2024-05-14T13:19:03.795Z","locale":"en","__contentType":"api::page.page","navigationItemId":598,"__templateName":"Generic"},"items":[],"description":""}],"topBarNavigationItems":[{"id":425,"title":"Docs","menuAttached":false,"order":1,"path":"https://www.scaleway.com/en/docs/","type":"EXTERNAL","uiRouterKey":"docs","slug":{},"external":true},{"id":427,"title":"Contact"
,"menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/contact/","type":"EXTERNAL","uiRouterKey":"contact-2","slug":{},"external":true,"description":""}],"MOTD":{"id":7803,"label":"NEW: Dedicated GPU power with Dedibox GPU!","url":null,"page":{"data":{"id":1454,"attributes":{"title":"GPU","path":"/dedibox/gpu/","scheduledAt":null,"createdAt":"2024-10-31T10:01:24.876Z","updatedAt":"2024-11-22T10:20:26.906Z","publishedAt":"2024-11-07T07:38:37.573Z","locale":"en"}}}},"ctaList":{"dediboxCTAList":[{"id":6611,"label":"Log in","url":"https://console.online.net/en/login","page":{"data":null}},{"id":6612,"label":"Sign up","url":"https://console.online.net/en/user/subscribe","page":{"data":null}}],"defaultCTAList":[{"id":6610,"label":"Log in","url":"https://console.scaleway.com/login","page":{"data":null}},{"id":6609,"label":"Sign up","url":"https://console.scaleway.com/register","page":{"data":null}}]}},"footer":[{"id":276,"title":"Products","menuAttached":false,"order":1,"path":"/products","type":"WRAPPER","uiRouterKey":"products","slug":"products-2","external":false,"items":[{"id":283,"title":"All Products","menuAttached":false,"order":1,"path":"/products/AllProducts","type":"INTERNAL","uiRouterKey":"all-products","slug":"products-all-products","external":false,"related":{"id":223,"title":"All 
Products","path":"/all-products/","scheduledAt":null,"createdAt":"2022-05-09T13:56:36.517Z","updatedAt":"2024-10-28T10:43:19.295Z","publishedAt":"2022-05-09T14:37:46.378Z","locale":"en","__contentType":"api::page.page","navigationItemId":283,"__templateName":"Generic"},"items":[],"description":""},{"id":759,"title":"Betas","menuAttached":false,"order":2,"path":"/products/betas","type":"INTERNAL","uiRouterKey":"betas-1","slug":"products-betas","external":false,"related":{"id":90,"title":"Betas","path":"/betas/","scheduledAt":null,"createdAt":"2022-04-28T14:06:08.789Z","updatedAt":"2024-11-05T16:26:58.483Z","publishedAt":"2022-04-28T14:39:18.717Z","locale":"en","__contentType":"api::page.page","navigationItemId":759,"__templateName":"Generic"},"items":[],"description":""},{"id":281,"title":"Bare Metal","menuAttached":false,"order":3,"path":"/products/BareMetal","type":"INTERNAL","uiRouterKey":"bare-metal-2","slug":"products-bare-metal","external":false,"related":{"id":961,"title":"Bare Metal","path":"/bare-metal/","scheduledAt":null,"createdAt":"2023-09-27T07:45:06.975Z","updatedAt":"2024-04-02T15:19:04.661Z","publishedAt":"2023-10-17T12:08:02.344Z","locale":"en","__contentType":"api::page.page","navigationItemId":281,"__templateName":"Generic"},"items":[],"description":""},{"id":284,"title":"Dedibox","menuAttached":false,"order":4,"path":"/products/Dedibox","type":"INTERNAL","uiRouterKey":"dedibox-4","slug":"products-dedibox","external":false,"related":{"id":29,"title":"Dedibox","path":"/dedibox/","scheduledAt":null,"createdAt":"2022-04-19T15:29:02.488Z","updatedAt":"2024-11-22T10:20:26.181Z","publishedAt":"2022-04-28T17:05:07.122Z","locale":"en","__contentType":"api::page.page","navigationItemId":284,"__templateName":"Generic"},"items":[],"description":""},{"id":282,"title":"Elastic 
Metal","menuAttached":false,"order":5,"path":"/products/ElasticMetal","type":"INTERNAL","uiRouterKey":"elastic-metal-4","slug":"products-elastic-metal","external":false,"related":{"id":87,"title":"Elastic Metal","path":"/elastic-metal/","scheduledAt":null,"createdAt":"2022-04-28T12:45:28.696Z","updatedAt":"2024-11-08T15:01:56.485Z","publishedAt":"2022-04-28T13:22:46.501Z","locale":"en","__contentType":"api::page.page","navigationItemId":282,"__templateName":"Generic"},"items":[],"description":""},{"id":285,"title":"Compute Instances","menuAttached":false,"order":6,"path":"/products/Compute","type":"INTERNAL","uiRouterKey":"compute-instances","slug":"products-compute","external":false,"related":{"id":655,"title":"Virtual Instances","path":"/virtual-instances/","scheduledAt":null,"createdAt":"2023-02-20T10:48:52.279Z","updatedAt":"2024-08-28T07:01:50.413Z","publishedAt":"2023-02-28T08:32:03.960Z","locale":"en","__contentType":"api::page.page","navigationItemId":285,"__templateName":"Generic"},"items":[],"description":""},{"id":286,"title":"GPU","menuAttached":false,"order":7,"path":"/products/GPu","type":"INTERNAL","uiRouterKey":"gpu-6","slug":"products-g-pu","external":false,"related":{"id":1025,"title":"GPU 
Instances","path":"/gpu-instances/","scheduledAt":null,"createdAt":"2023-11-30T13:15:51.769Z","updatedAt":"2024-11-19T16:38:15.121Z","publishedAt":"2023-12-12T12:52:20.083Z","locale":"en","__contentType":"api::page.page","navigationItemId":286,"__templateName":"Generic"},"items":[],"description":""},{"id":287,"title":"Containers","menuAttached":false,"order":8,"path":"/products/Containers","type":"INTERNAL","uiRouterKey":"containers-6","slug":"products-containers","external":false,"related":{"id":465,"title":"Containers","path":"/containers/","scheduledAt":null,"createdAt":"2022-07-29T15:09:20.535Z","updatedAt":"2024-08-28T07:05:23.005Z","publishedAt":"2023-02-27T13:53:48.270Z","locale":"en","__contentType":"api::page.page","navigationItemId":287,"__templateName":"Generic"},"items":[],"description":""},{"id":288,"title":"Object Storage","menuAttached":false,"order":9,"path":"/products/ObjectStorage","type":"INTERNAL","uiRouterKey":"object-storage-4","slug":"products-object-storage","external":false,"related":{"id":652,"title":"Object Storage","path":"/object-storage/","scheduledAt":null,"createdAt":"2023-02-16T09:44:56.414Z","updatedAt":"2024-10-25T13:10:50.377Z","publishedAt":"2023-03-07T18:05:15.061Z","locale":"en","__contentType":"api::page.page","navigationItemId":288,"__templateName":"Generic"},"items":[],"description":""},{"id":289,"title":"Block Storage","menuAttached":false,"order":10,"path":"/products/BlockStorage","type":"INTERNAL","uiRouterKey":"block-storage-4","slug":"products-block-storage","external":false,"related":{"id":141,"title":"Block 
Storage","path":"/block-storage/","scheduledAt":null,"createdAt":"2022-05-02T08:20:39.280Z","updatedAt":"2024-10-30T16:13:44.480Z","publishedAt":"2022-05-02T08:28:12.783Z","locale":"en","__contentType":"api::page.page","navigationItemId":289,"__templateName":"Generic"},"items":[],"description":""}],"description":""},{"id":275,"title":"Resources","menuAttached":false,"order":2,"path":"/resources","type":"WRAPPER","uiRouterKey":"resources","slug":"resources-3","external":false,"items":[{"id":290,"title":"Documentation","menuAttached":false,"order":1,"path":"https://www.scaleway.com/en/docs/","type":"EXTERNAL","uiRouterKey":"documentation","slug":{},"external":true,"description":""},{"id":292,"title":"Changelog","menuAttached":false,"order":2,"path":"https://www.scaleway.com/en/docs/changelog/","type":"EXTERNAL","uiRouterKey":"changelog","slug":{},"external":true,"description":""},{"id":291,"title":"Blog","menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/blog/","type":"EXTERNAL","uiRouterKey":"blog","slug":{},"external":true,"description":""},{"id":293,"title":"Feature Requests","menuAttached":false,"order":4,"path":"https://feature-request.scaleway.com/","type":"EXTERNAL","uiRouterKey":"feature-requests","slug":{},"external":true,"description":""},{"id":321,"title":"Slack Community","menuAttached":false,"order":5,"path":"https://slack.scaleway.com/","type":"EXTERNAL","uiRouterKey":"slack-community-2","slug":{},"external":true,"description":""}],"description":""},{"id":280,"title":"Contact","menuAttached":false,"order":3,"path":"/Contact","type":"WRAPPER","uiRouterKey":"contact-2","slug":"contact-4","external":false,"items":[{"id":294,"title":"Create a ticket","menuAttached":false,"order":1,"path":"https://console.scaleway.com/support/create/","type":"EXTERNAL","uiRouterKey":"create-a-ticket","slug":{},"external":true,"description":""},{"id":296,"title":"Report 
Abuse","menuAttached":false,"order":2,"path":"https://console.scaleway.com/support/abuses/create/","type":"EXTERNAL","uiRouterKey":"report-abuse","slug":{},"external":true,"description":""},{"id":295,"title":"Status","menuAttached":false,"order":3,"path":"https://status.scaleway.com/","type":"EXTERNAL","uiRouterKey":"status","slug":{},"external":true,"description":""},{"id":298,"title":"Dedibox Console online.net","menuAttached":false,"order":4,"path":"https://console.online.net/fr/login","type":"EXTERNAL","uiRouterKey":"dedibox-console-online-net","slug":{},"external":true,"description":""},{"id":407,"title":"Support plans","menuAttached":false,"order":5,"path":"/Contact/Support","type":"INTERNAL","uiRouterKey":"support-plans","slug":"contact-support","external":false,"related":{"id":493,"title":"Assistance","path":"/assistance/","scheduledAt":null,"createdAt":"2022-09-26T15:14:28.440Z","updatedAt":"2024-08-28T07:19:37.841Z","publishedAt":"2022-10-03T12:20:34.441Z","locale":"en","__contentType":"api::page.page","navigationItemId":407,"__templateName":"Generic"},"items":[],"description":""},{"id":409,"title":"Brand resources","menuAttached":false,"order":6,"path":"https://ultraviolet.scaleway.com/6dd9b5c45/p/62b4e2-ultraviolet","type":"EXTERNAL","uiRouterKey":"brand-resources","slug":{},"external":true,"description":""}],"description":""},{"id":436,"title":"Company","menuAttached":false,"order":4,"path":"/scw","type":"WRAPPER","uiRouterKey":"company","slug":"scw","external":false,"items":[{"id":440,"title":"About us","menuAttached":false,"order":1,"path":"/scw/About-us","type":"INTERNAL","uiRouterKey":"about-us","slug":"scw-about-us","external":false,"related":{"id":195,"title":"About 
us","path":"/about-us/","scheduledAt":null,"createdAt":"2022-05-03T13:05:13.546Z","updatedAt":"2023-12-14T09:00:58.075Z","publishedAt":"2022-05-11T12:26:40.217Z","locale":"en","__contentType":"api::page.page","navigationItemId":440,"__templateName":"Generic"},"items":[],"description":""},{"id":441,"title":"Events","menuAttached":false,"order":2,"path":"/scw/events","type":"INTERNAL","uiRouterKey":"events","slug":"scw-events","external":false,"related":{"id":699,"title":"Events","path":"/events/","scheduledAt":null,"createdAt":"2023-03-13T09:14:30.830Z","updatedAt":"2024-11-21T14:08:26.020Z","publishedAt":"2023-03-13T09:14:41.552Z","locale":"en","__contentType":"api::page.page","navigationItemId":441,"__templateName":"Generic"},"items":[],"description":""},{"id":798,"title":"Marketplace","menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/marketplace/","type":"EXTERNAL","uiRouterKey":"marketplace-2","slug":{},"external":true,"description":""},{"id":439,"title":"Environment ","menuAttached":false,"order":4,"path":"/scw/environment","type":"INTERNAL","uiRouterKey":"environment","slug":"scw-environment","external":false,"related":{"id":59,"title":"Environmental leadership ","path":"/environmental-leadership/","scheduledAt":null,"createdAt":"2022-04-26T08:30:15.289Z","updatedAt":"2024-11-09T10:51:38.014Z","publishedAt":"2022-04-28T17:12:24.574Z","locale":"en","__contentType":"api::page.page","navigationItemId":439,"__templateName":"Generic"},"items":[],"description":""},{"id":790,"title":"Social Responsibility","menuAttached":false,"order":5,"path":"/scw/SocialResponsibility","type":"INTERNAL","uiRouterKey":"social-responsibility","slug":"scw-social-responsibility","external":false,"related":{"id":184,"title":"Social 
responsibility","path":"/social-responsibility/","scheduledAt":null,"createdAt":"2022-05-03T07:48:38.038Z","updatedAt":"2024-08-28T07:08:11.382Z","publishedAt":"2022-05-03T13:08:48.890Z","locale":"en","__contentType":"api::page.page","navigationItemId":790,"__templateName":"Generic"},"items":[],"description":""},{"id":438,"title":"Security","menuAttached":false,"order":6,"path":"/scw/security","type":"INTERNAL","uiRouterKey":"security-4","slug":"scw-security","external":false,"related":{"id":190,"title":"Security and resilience","path":"/security-and-resilience/","scheduledAt":null,"createdAt":"2022-05-03T10:22:40.696Z","updatedAt":"2024-08-28T08:56:56.744Z","publishedAt":"2022-05-11T12:39:01.810Z","locale":"en","__contentType":"api::page.page","navigationItemId":438,"__templateName":"Generic"},"items":[],"description":""},{"id":782,"title":"Shared Responsibility Model","menuAttached":false,"order":7,"path":"/scw/Model","type":"INTERNAL","uiRouterKey":"shared-responsibility-model","slug":"scw-model","external":false,"related":{"id":1180,"title":"Shared Responsibility 
Model","path":"/shared-responsibility-model/","scheduledAt":null,"createdAt":"2024-04-04T15:54:36.614Z","updatedAt":"2024-11-18T13:28:57.006Z","publishedAt":"2024-04-04T15:56:39.573Z","locale":"en","__contentType":"api::page.page","navigationItemId":782,"__templateName":"Generic"},"items":[],"description":""},{"id":442,"title":"News","menuAttached":false,"order":8,"path":"/scw/news","type":"INTERNAL","uiRouterKey":"news","slug":"scw-news","external":false,"related":{"id":263,"title":"News","path":"/news/","scheduledAt":null,"createdAt":"2022-05-19T10:28:45.212Z","updatedAt":"2022-05-31T07:47:17.728Z","publishedAt":"2022-05-19T10:29:13.394Z","locale":"en","__contentType":"api::page.page","navigationItemId":442,"__templateName":"Generic"},"items":[],"description":""},{"id":443,"title":"Careers","menuAttached":false,"order":9,"path":"/scw/career/","type":"INTERNAL","uiRouterKey":"careers","slug":"scw-career","external":false,"related":{"id":766,"title":"Careers","path":"/careers/","scheduledAt":null,"createdAt":"2023-03-31T14:17:38.589Z","updatedAt":"2024-07-16T10:08:23.648Z","publishedAt":"2024-02-12T15:39:28.684Z","locale":"en","__contentType":"api::page.page","navigationItemId":443,"__templateName":"Generic"},"items":[],"description":""},{"id":445,"title":"Scaleway Learning","menuAttached":false,"order":10,"path":"/scw/learning","type":"INTERNAL","uiRouterKey":"scaleway-learning","slug":"scw-learning","external":false,"related":{"id":597,"title":"Scaleway Learning","path":"/scaleway-learning/","scheduledAt":null,"createdAt":"2022-12-20T08:57:37.886Z","updatedAt":"2024-08-22T15:58:41.554Z","publishedAt":"2023-01-02T21:14:10.049Z","locale":"en","__contentType":"api::page.page","navigationItemId":445,"__templateName":"Generic"},"items":[],"description":""},{"id":444,"title":"Client Success 
Stories","menuAttached":false,"order":11,"path":"/scw/clientstor/","type":"INTERNAL","uiRouterKey":"client-success-stories","slug":"scw-clientstor","external":false,"related":{"id":294,"title":"Customer testimonials","path":"/customer-testimonials/","scheduledAt":null,"createdAt":"2022-05-19T15:33:42.418Z","updatedAt":"2024-07-08T12:41:04.663Z","publishedAt":"2022-05-19T15:37:23.202Z","locale":"en","__contentType":"api::page.page","navigationItemId":444,"__templateName":"Generic"},"items":[],"description":""},{"id":437,"title":"Labs","menuAttached":false,"order":12,"path":"https://labs.scaleway.com/en/","type":"EXTERNAL","uiRouterKey":"labs","slug":{},"external":true,"description":""}],"description":""}],"pageType":"post"},"__N_SSG":true},"page":"/blog/[slug]","query":{"slug":"how-everdian-delivers-life-saving-real-time-critical-insights-via-ai"},"buildId":"85BYxc5vA-nbO8Fs_1Ijf","isFallback":false,"gsp":true,"locale":"en","locales":["default","en","fr"],"defaultLocale":"default","scriptLoader":[]}</script></body></html>

Pages: 1 2 3 4 5 6 7 8 9 10