<!-- CINXE.COM — archive/scraper artifact; stray text before the doctype is invalid HTML and would render as page content, so it is preserved here as a comment -->
<!DOCTYPE html><html lang="en"><head><meta charSet="utf-8" data-next-head=""/><meta name="viewport" content="width=device-width" data-next-head=""/><title data-next-head="">Page 3 | Scaleway Blog - All posts</title><link rel="canonical" href="https://www.scaleway.com/en/blog/3/" data-next-head=""/><meta name="description" content="Scaleway’s blog helps developers and startups to build, deploy and scale applications." data-next-head=""/><link href="/favicon/blog/favicon.svg" type="image/svg+xml" rel="icon" data-next-head=""/><link href="/favicon/blog/favicon.ico" rel="icon" data-next-head=""/><link href="/favicon/blog/apple-touch-icon-180x180.png" rel="apple-touch-icon" sizes="180x180" data-next-head=""/><link href="/favicon/blog/apple-touch-icon-180x180.png" type="image/png" rel="shortcut icon" sizes="180x180" data-next-head=""/><meta name="robots" content="noindex" data-next-head=""/><link rel="preload" href="/_next/static/media/a34f9d1faa5f3315-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><link rel="preload" href="/_next/static/media/2d141e1a38819612-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><link rel="preload" href="/_next/static/css/167c96f3591d2921.css" as="style"/><link rel="stylesheet" href="/_next/static/css/167c96f3591d2921.css" data-n-g=""/><link rel="preload" href="/_next/static/css/0540dd5abe2c353c.css" as="style"/><link rel="stylesheet" href="/_next/static/css/0540dd5abe2c353c.css" data-n-p=""/><link rel="preload" href="/_next/static/css/92ffb8ebc71df939.css" as="style"/><link rel="stylesheet" href="/_next/static/css/92ffb8ebc71df939.css" data-n-p=""/><link rel="preload" href="/_next/static/css/8c86baaf62d4e650.css" as="style"/><link rel="stylesheet" href="/_next/static/css/8c86baaf62d4e650.css" data-n-p=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/_next/static/chunks/polyfills-42372ed130431b0a.js"></script><script 
src="/_next/static/chunks/webpack-6e462b92e6c8d3a0.js" defer=""></script><script src="/_next/static/chunks/framework-53ea874194e1abc4.js" defer=""></script><script src="/_next/static/chunks/main-89a27af27eefdb26.js" defer=""></script><script src="/_next/static/chunks/pages/_app-df5edb74c54ac48f.js" defer=""></script><script src="/_next/static/chunks/725-4cb47eb93e1704d7.js" defer=""></script><script src="/_next/static/chunks/192-12b8575b735ac4f4.js" defer=""></script><script src="/_next/static/chunks/943-f28b13fb2cb3c8c1.js" defer=""></script><script src="/_next/static/chunks/341-5be8fc4bc325b722.js" defer=""></script><script src="/_next/static/chunks/655-9ac0451386b6be56.js" defer=""></script><script src="/_next/static/chunks/564-9502a1580dab5ad9.js" defer=""></script><script src="/_next/static/chunks/pages/blog/%5Bslug%5D-2f4890f58654917f.js" defer=""></script><script src="/_next/static/4xZKwUKlhtIRe3nXE5xXw/_buildManifest.js" defer=""></script><script src="/_next/static/4xZKwUKlhtIRe3nXE5xXw/_ssgManifest.js" defer=""></script></head><body><div id="__next"><style data-emotion="css-global 0"></style><div class="__variable_375d66 __variable_f77ac8 container"><div class="blog"><header class="HeaderBlog_headerContainer__n3f6s full-width"><div class="container"><div class="HeaderBlog_header__CTV5V"><div class="HeaderBlog_logo__kbnMY"><a href="/en/blog/"><img alt="Scaleway Blog" loading="lazy" width="240" height="40" decoding="async" data-nimg="1" style="color:transparent" srcSet="/_next/static/media/logo-blog.49246fc4.svg 1x, /_next/static/media/logo-blog.49246fc4.svg 2x" src="/_next/static/media/logo-blog.49246fc4.svg"/></a><a href="#main" class="SkipLink_link__wUma3">Skip to main content</a><a href="#footer" class="SkipLink_link__wUma3">Skip to footer section</a><button class="HeaderBlog_menuButton__PP1O7" type="button"><style data-emotion="css 3sqif5">.css-3sqif5{vertical-align:middle;fill:currentColor;height:1em;width:1em;min-width:1em;min-height:1em;}.css-3sqif5 
.fillStroke{stroke:currentColor;fill:none;}</style><svg viewBox="0 0 20 20" class="css-3sqif5 elxvigq0"><path fill-rule="evenodd" d="M2 4.75A.75.75 0 0 1 2.75 4h14.5a.75.75 0 0 1 0 1.5H2.75A.75.75 0 0 1 2 4.75M2 10a.75.75 0 0 1 .75-.75h14.5a.75.75 0 0 1 0 1.5H2.75A.75.75 0 0 1 2 10m0 5.25a.75.75 0 0 1 .75-.75h14.5a.75.75 0 0 1 0 1.5H2.75a.75.75 0 0 1-.75-.75" clip-rule="evenodd"></path></svg></button></div><nav class="HeaderBlog_topNav__cNrI_ font-body-small-regular"><ul class="HeaderBlog_links__1jfH4"><li><a href="/en/blog/incidents/">Incidents</a></li><li><a href="https://www.scaleway.com/en/docs/" class="cta-inline cta-size-big">Docs</a></li><li><a href="https://www.scaleway.com/en/contact/" class="cta-inline cta-size-big">Contact</a></li></ul><ul class="HeaderBlog_language__IixQV"><li><span class="sr-only">English</span><span>en</span></li><li><a href="/fr/blog/3/"><span class="sr-only">Français</span><span aria-hidden="true">fr</span></a></li></ul></nav><nav class="HeaderBlog_bottomNav__wIZob"><a class="cta-primary cta-size-small" href="/en/">Discover Scaleway</a><div class="HeaderBlog_socials__eZU_7"><a href="https://x.com/Scaleway/"><style data-emotion="css x3mert">.css-x3mert{vertical-align:middle;fill:currentColor;height:1.25rem;width:1.25rem;min-width:1.25rem;min-height:1.25rem;}.css-x3mert .fillStroke{stroke:currentColor;fill:none;}</style><svg viewBox="0 0 20 20" class="css-x3mert elxvigq0"><path d="M15.203 1.875h2.757l-6.023 6.883 7.085 9.367h-5.547l-4.345-5.68-4.972 5.68H1.4l6.442-7.363-6.797-8.887h5.688l3.928 5.193zm-.967 14.6h1.527L5.903 3.438H4.264z"></path></svg><span class="sr-only">X</span></a><a href="https://slack.scaleway.com/"><style data-emotion="css x3mert">.css-x3mert{vertical-align:middle;fill:currentColor;height:1.25rem;width:1.25rem;min-width:1.25rem;min-height:1.25rem;}.css-x3mert .fillStroke{stroke:currentColor;fill:none;}</style><svg viewBox="0 0 20 20" class="css-x3mert elxvigq0"><path fill-rule="evenodd" d="M6.056 3.419a1.75 1.75 
0 0 0 1.75 1.751H9.39a.167.167 0 0 0 .167-.166V3.419a1.75 1.75 0 1 0-3.501 0m3.5 4.392a1.75 1.75 0 0 0-1.75-1.751H3.417a1.75 1.75 0 0 0-1.75 1.751 1.75 1.75 0 0 0 1.75 1.752h4.39a1.75 1.75 0 0 0 1.75-1.752m-6.123 6.142a1.75 1.75 0 0 0 1.75-1.752v-1.585a.167.167 0 0 0-.167-.166H3.433a1.75 1.75 0 0 0-1.75 1.751 1.75 1.75 0 0 0 1.75 1.752m4.376-3.503a1.75 1.75 0 0 0-1.75 1.751v4.38a1.75 1.75 0 1 0 3.5 0V12.2a1.75 1.75 0 0 0-1.75-1.751m7.01-2.639a1.75 1.75 0 1 1 3.501 0 1.75 1.75 0 0 1-1.75 1.752h-1.584a.167.167 0 0 1-.167-.167zm-.875 0a1.75 1.75 0 1 1-3.5 0V3.42a1.75 1.75 0 1 1 3.5 0zm0 8.77a1.75 1.75 0 0 0-1.75-1.752H10.61a.167.167 0 0 0-.167.167v1.585a1.75 1.75 0 1 0 3.501 0m-3.5-4.38a1.75 1.75 0 0 0 1.75 1.752h4.39a1.75 1.75 0 0 0 1.75-1.752 1.75 1.75 0 0 0-1.75-1.751h-4.39a1.75 1.75 0 0 0-1.75 1.751" clip-rule="evenodd"></path></svg><span class="sr-only">Slack</span></a><a href="/en/blog/rss.xml"><style data-emotion="css x3mert">.css-x3mert{vertical-align:middle;fill:currentColor;height:1.25rem;width:1.25rem;min-width:1.25rem;min-height:1.25rem;}.css-x3mert .fillStroke{stroke:currentColor;fill:none;}</style><svg viewBox="0 0 20 20" class="css-x3mert elxvigq0"><path d="M3.75 3a.75.75 0 0 0-.75.75v.5c0 .414.336.75.75.75H4c6.075 0 11 4.925 11 11v.25c0 .414.336.75.75.75h.5a.75.75 0 0 0 .75-.75V16C17 8.82 11.18 3 4 3z"></path><path d="M3 8.75A.75.75 0 0 1 3.75 8H4a8 8 0 0 1 8 8v.25a.75.75 0 0 1-.75.75h-.5a.75.75 0 0 1-.75-.75V16a6 6 0 0 0-6-6h-.25A.75.75 0 0 1 3 9.25zM7 15a2 2 0 1 1-4 0 2 2 0 0 1 4 0"></path></svg><span class="sr-only">RSS</span></a></div></nav></div></div></header><main id="main" class="BlogHomepage_home__vfe6_"><h1 class="font-heading-header-title">Scaleway Blog</h1><nav class="TopBar_navBar__jEc9M"><a class="TopBar_link__c_MXa TopBar_isActive__bqGIp" href="/en/blog/"><style data-emotion="css 3sqif5">.css-3sqif5{vertical-align:middle;fill:currentColor;height:1em;width:1em;min-width:1em;min-height:1em;}.css-3sqif5 
.fillStroke{stroke:currentColor;fill:none;}</style><svg viewBox="0 0 20 20" class="css-3sqif5 elxvigq0"><path fill-rule="evenodd" d="M2 4.727A2.727 2.727 0 0 1 4.727 2h1.978a2.727 2.727 0 0 1 2.727 2.727v1.978a2.727 2.727 0 0 1-2.727 2.727H4.727A2.727 2.727 0 0 1 2 6.705zM4.727 3.5C4.05 3.5 3.5 4.05 3.5 4.727v1.978c0 .677.55 1.227 1.227 1.227h1.978c.677 0 1.227-.55 1.227-1.227V4.727c0-.678-.55-1.227-1.227-1.227zm5.841 1.227A2.727 2.727 0 0 1 13.296 2h1.977A2.727 2.727 0 0 1 18 4.727v1.978a2.727 2.727 0 0 1-2.727 2.727h-1.977a2.727 2.727 0 0 1-2.728-2.727zM13.296 3.5c-.678 0-1.228.55-1.228 1.227v1.978c0 .677.55 1.227 1.228 1.227h1.977c.678 0 1.227-.55 1.227-1.227V4.727c0-.678-.55-1.227-1.227-1.227zM2 13.296a2.727 2.727 0 0 1 2.727-2.728h1.978a2.727 2.727 0 0 1 2.727 2.728v1.977A2.727 2.727 0 0 1 6.705 18H4.727A2.727 2.727 0 0 1 2 15.273zm2.727-1.228c-.678 0-1.227.55-1.227 1.228v1.977c0 .678.55 1.227 1.227 1.227h1.978c.677 0 1.227-.55 1.227-1.227v-1.977c0-.678-.55-1.228-1.227-1.228zm5.841 1.228a2.727 2.727 0 0 1 2.728-2.728h1.977A2.727 2.727 0 0 1 18 13.296v1.977A2.727 2.727 0 0 1 15.273 18h-1.977a2.727 2.727 0 0 1-2.728-2.727zm2.728-1.228c-.678 0-1.228.55-1.228 1.228v1.977c0 .678.55 1.227 1.228 1.227h1.977c.678 0 1.227-.55 1.227-1.227v-1.977c0-.678-.55-1.228-1.227-1.228z" clip-rule="evenodd"></path></svg>all</a><a class="TopBar_link__c_MXa" href="/en/blog/build/">build</a><a class="TopBar_link__c_MXa" href="/en/blog/deploy/">deploy</a><a class="TopBar_link__c_MXa" href="/en/blog/scale/">scale</a></nav><section><h2 class="font-heading-highlighted BlogHomepage_heading__DyvxL">Latest articles</h2><div class="PostsGrid_posts___KcIk"><article class="ArticleCard_articleCard__91jgG"><div class="blogImage ArticleCard_img__SYe4N"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" 
srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp"/></div><div class="ArticleCard_contentContainer__WjTiq"><div class="ArticleCard_content__SQ9Aj"><h2 class="font-heading-title blogArticleTitle ArticleCard_heading__G9xUc"><a class="breakout-link" href="/en/blog/how-everdian-delivers-life-saving-real-time-critical-insights-via-ai/">How Everdian delivers “life-saving” real-time critical insights, via AI - interview with Cedric Milinaire</a></h2><div class="blogCategory ArticleCard_category__lf8S7"><a href="/en/blog/scale/">Scale</a></div><div aria-label="Tags list. Click to choose as filter." 
class="Tags_tags__UDbwl ArticleCard_tags__u6zr9"><span class="Tag_tag__JS3kY">AI</span><span class="Tag_tag__JS3kY">Testimonial</span></div></div><div class="ArticleCard_footer__LyyE1"><address class="blogAuthor"><a href="/en/blog/author/jean-baptiste-fourmont/">Jean-Baptiste Fourmont</a></address><div><time dateTime="2024-07-22">22/07/24</time><span class="blogDot" aria-hidden="true">•</span><span>4 min read</span></div></div></div></article><article class="ArticleCard_articleCard__91jgG"><div class="blogImage ArticleCard_img__SYe4N"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Cover_4054f8d0da.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Cover_4054f8d0da.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Cover_4054f8d0da.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Cover_4054f8d0da.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Cover_4054f8d0da.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Cover_4054f8d0da.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Cover_4054f8d0da.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Cover_4054f8d0da.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Cover_4054f8d0da.webp"/></div><div class="ArticleCard_contentContainer__WjTiq"><div class="ArticleCard_content__SQ9Aj"><h2 class="font-heading-title blogArticleTitle ArticleCard_heading__G9xUc"><a class="breakout-link" href="/en/blog/how-we-rebranded-scaleway-in-three-months-navigating-uncharted-territories-part-1/">How we rebranded 
Scaleway in three months, navigating uncharted territories ✨ Part 1</a></h2><div class="blogCategory ArticleCard_category__lf8S7"><a href="/en/blog/deploy/">Deploy</a></div><div aria-label="Tags list. Click to choose as filter." class="Tags_tags__UDbwl ArticleCard_tags__u6zr9"><span class="Tag_tag__JS3kY">Design</span></div></div><div class="ArticleCard_footer__LyyE1"><address class="blogAuthor"><a href="/en/blog/author/jess-anelli/">Jess Anelli</a></address><div><time dateTime="2024-07-09">09/07/24</time><span class="blogDot" aria-hidden="true">•</span><span>5 min read</span></div></div></div></article><article class="ArticleCard_articleCard__91jgG"><div class="blogImage ArticleCard_img__SYe4N"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Managed_Inference_visual_1fdaca592e.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Managed_Inference_visual_1fdaca592e.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Managed_Inference_visual_1fdaca592e.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Managed_Inference_visual_1fdaca592e.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Managed_Inference_visual_1fdaca592e.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Managed_Inference_visual_1fdaca592e.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Managed_Inference_visual_1fdaca592e.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Managed_Inference_visual_1fdaca592e.webp 3840w" 
src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Managed_Inference_visual_1fdaca592e.webp"/></div><div class="ArticleCard_contentContainer__WjTiq"><div class="ArticleCard_content__SQ9Aj"><h2 class="font-heading-title blogArticleTitle ArticleCard_heading__G9xUc"><a class="breakout-link" href="/en/blog/retrieval-augmented-generation-building-a-rag-pipeline-with-scaleways-managed-inference/">Retrieval-Augmented Generation: Building a RAG Pipeline with Scaleway’s Managed Inference</a></h2><div class="blogCategory ArticleCard_category__lf8S7"><a href="/en/blog/build/">Build</a></div><div aria-label="Tags list. Click to choose as filter." class="Tags_tags__UDbwl ArticleCard_tags__u6zr9"><span class="Tag_tag__JS3kY">AI</span><span class="Tag_tag__JS3kY">RAG</span></div></div><div class="ArticleCard_footer__LyyE1"><address class="blogAuthor"><a href="/en/blog/author/sebastian-tatut/">Sebastian Tatut</a></address><div><time dateTime="2024-07-04">04/07/24</time><span class="blogDot" aria-hidden="true">•</span><span>5 min read</span></div></div></div></article><article class="ArticleCard_articleCard__91jgG"><div class="blogImage ArticleCard_img__SYe4N"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Load_Balancers_Card_8ddffabfc0.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Load_Balancers_Card_8ddffabfc0.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Load_Balancers_Card_8ddffabfc0.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Load_Balancers_Card_8ddffabfc0.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Load_Balancers_Card_8ddffabfc0.webp 1200w, 
https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Load_Balancers_Card_8ddffabfc0.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Load_Balancers_Card_8ddffabfc0.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Load_Balancers_Card_8ddffabfc0.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Load_Balancers_Card_8ddffabfc0.webp"/></div><div class="ArticleCard_contentContainer__WjTiq"><div class="ArticleCard_content__SQ9Aj"><h2 class="font-heading-title blogArticleTitle ArticleCard_heading__G9xUc"><a class="breakout-link" href="/en/blog/scaleway-private-lb-for-golemai-security-assets/">Scaleway Private LB for Golem.ai Security Assets</a></h2><div class="blogCategory ArticleCard_category__lf8S7"><a href="/en/blog/deploy/">Deploy</a></div><div aria-label="Tags list. Click to choose as filter." class="Tags_tags__UDbwl ArticleCard_tags__u6zr9"><span class="Tag_tag__JS3kY">Load Balancer</span><span class="Tag_tag__JS3kY">Managed Services</span><span class="Tag_tag__JS3kY">How to</span></div></div><div class="ArticleCard_footer__LyyE1"><address class="blogAuthor"><a href="/en/blog/author/kevin-baude/">Kevin Baude</a></address><div><time dateTime="2024-06-17">17/06/24</time><span class="blogDot" aria-hidden="true">•</span><span>5 min read</span></div></div></div></article><article class="ArticleCard_articleCard__91jgG"><div class="blogImage ArticleCard_img__SYe4N"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Retail_Ecommerce_Illustration_1920_X1080_432585c587.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Retail_Ecommerce_Illustration_1920_X1080_432585c587.webp 750w, 
https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Retail_Ecommerce_Illustration_1920_X1080_432585c587.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Retail_Ecommerce_Illustration_1920_X1080_432585c587.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Retail_Ecommerce_Illustration_1920_X1080_432585c587.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Retail_Ecommerce_Illustration_1920_X1080_432585c587.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Retail_Ecommerce_Illustration_1920_X1080_432585c587.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Retail_Ecommerce_Illustration_1920_X1080_432585c587.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Retail_Ecommerce_Illustration_1920_X1080_432585c587.webp"/></div><div class="ArticleCard_contentContainer__WjTiq"><div class="ArticleCard_content__SQ9Aj"><h2 class="font-heading-title blogArticleTitle ArticleCard_heading__G9xUc"><a class="breakout-link" href="/en/blog/the-cloud-and-retail-in-2024-and-beyond/">The Cloud and Retail in 2024 and beyond</a></h2><div class="blogCategory ArticleCard_category__lf8S7"><a href="/en/blog/build/">Build</a></div><div aria-label="Tags list. Click to choose as filter." 
class="Tags_tags__UDbwl ArticleCard_tags__u6zr9"><span class="Tag_tag__JS3kY">Retail</span></div></div><div class="ArticleCard_footer__LyyE1"><address class="blogAuthor"><a href="/en/blog/author/james-martin/">James Martin</a></address><div><time dateTime="2024-06-14">14/06/24</time><span class="blogDot" aria-hidden="true">•</span><span>4 min read</span></div></div></div></article><article class="ArticleCard_articleCard__91jgG"><div class="blogImage ArticleCard_img__SYe4N"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Content_Quantum_as_a_service_Illustration_Content_bd5d98be46.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Content_Quantum_as_a_service_Illustration_Content_bd5d98be46.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Content_Quantum_as_a_service_Illustration_Content_bd5d98be46.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Content_Quantum_as_a_service_Illustration_Content_bd5d98be46.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Content_Quantum_as_a_service_Illustration_Content_bd5d98be46.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Content_Quantum_as_a_service_Illustration_Content_bd5d98be46.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Content_Quantum_as_a_service_Illustration_Content_bd5d98be46.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Content_Quantum_as_a_service_Illustration_Content_bd5d98be46.webp 3840w" 
src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Content_Quantum_as_a_service_Illustration_Content_bd5d98be46.webp"/></div><div class="ArticleCard_contentContainer__WjTiq"><div class="ArticleCard_content__SQ9Aj"><h2 class="font-heading-title blogArticleTitle ArticleCard_heading__G9xUc"><a class="breakout-link" href="/en/blog/quantum-computing-in-2024-the-state-of-play/">Quantum computing in 2024: The State of Play</a></h2><div class="blogCategory ArticleCard_category__lf8S7"><a href="/en/blog/build/">Build</a></div><div aria-label="Tags list. Click to choose as filter." class="Tags_tags__UDbwl ArticleCard_tags__u6zr9"><span class="Tag_tag__JS3kY">Quantum</span><span class="Tag_tag__JS3kY">R&amp;D</span></div></div><div class="ArticleCard_footer__LyyE1"><address class="blogAuthor"><a href="/en/blog/author/valentin-macheret/">Valentin Macheret</a></address><div><time dateTime="2024-06-04">04/06/24</time><span class="blogDot" aria-hidden="true">•</span><span>6 min read</span></div></div></div></article><article class="ArticleCard_articleCard__91jgG"><div class="blogImage ArticleCard_img__SYe4N"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 1080w, 
https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp"/></div><div class="ArticleCard_contentContainer__WjTiq"><div class="ArticleCard_content__SQ9Aj"><h2 class="font-heading-title blogArticleTitle ArticleCard_heading__G9xUc"><a class="breakout-link" href="/en/blog/your-rag-powered-ai-app-in-50-lines-of-code/">Your RAG-powered AI app in 50 lines of code!</a></h2><div class="blogCategory ArticleCard_category__lf8S7"><a href="/en/blog/build/">Build</a></div><div aria-label="Tags list. Click to choose as filter." 
class="Tags_tags__UDbwl ArticleCard_tags__u6zr9"><span class="Tag_tag__JS3kY">AI</span><span class="Tag_tag__JS3kY">H100</span><span class="Tag_tag__JS3kY">L4</span><span class="Tag_tag__JS3kY">RAG</span><span class="Tag_tag__JS3kY">open source</span></div></div><div class="ArticleCard_footer__LyyE1"><address class="blogAuthor"><a href="/en/blog/author/diego-coy/">Diego Coy</a></address><div><time dateTime="2024-04-09">09/04/24</time><span class="blogDot" aria-hidden="true">•</span><span>14 min read</span></div></div></div></article><article class="ArticleCard_articleCard__91jgG"><div class="blogImage ArticleCard_img__SYe4N"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Media_Entertainement_Generic_Hero_7705874711.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Media_Entertainement_Generic_Hero_7705874711.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Media_Entertainement_Generic_Hero_7705874711.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Media_Entertainement_Generic_Hero_7705874711.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Media_Entertainement_Generic_Hero_7705874711.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Media_Entertainement_Generic_Hero_7705874711.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Media_Entertainement_Generic_Hero_7705874711.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Media_Entertainement_Generic_Hero_7705874711.webp 3840w" 
src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Media_Entertainement_Generic_Hero_7705874711.webp"/></div><div class="ArticleCard_contentContainer__WjTiq"><div class="ArticleCard_content__SQ9Aj"><h2 class="font-heading-title blogArticleTitle ArticleCard_heading__G9xUc"><a class="breakout-link" href="/en/blog/how-to-optimize-your-cloud-infrastructure-for-video-streaming-and-encoding/">How to optimize your cloud infrastructure for video streaming and encoding - Webinar report</a></h2><div class="blogCategory ArticleCard_category__lf8S7"><a href="/en/blog/scale/">Scale</a></div><div aria-label="Tags list. Click to choose as filter." class="Tags_tags__UDbwl ArticleCard_tags__u6zr9"><span class="Tag_tag__JS3kY">Video</span><span class="Tag_tag__JS3kY">Media &amp; Entertainment</span><span class="Tag_tag__JS3kY">Testimonial</span></div></div><div class="ArticleCard_footer__LyyE1"><address class="blogAuthor"><a href="/en/blog/author/james-martin/">James Martin</a></address><div><time dateTime="2024-03-28">28/03/24</time><span class="blogDot" aria-hidden="true">•</span><span>4 min read</span></div></div></div></article><article class="ArticleCard_articleCard__91jgG"><div class="blogImage ArticleCard_img__SYe4N"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Sustainability_Illustration_1920_X1080_d4842ba1e7.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Sustainability_Illustration_1920_X1080_d4842ba1e7.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Sustainability_Illustration_1920_X1080_d4842ba1e7.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Sustainability_Illustration_1920_X1080_d4842ba1e7.webp 1080w, 
https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Sustainability_Illustration_1920_X1080_d4842ba1e7.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Sustainability_Illustration_1920_X1080_d4842ba1e7.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Sustainability_Illustration_1920_X1080_d4842ba1e7.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Sustainability_Illustration_1920_X1080_d4842ba1e7.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Sustainability_Illustration_1920_X1080_d4842ba1e7.webp"/></div><div class="ArticleCard_contentContainer__WjTiq"><div class="ArticleCard_content__SQ9Aj"><h2 class="font-heading-title blogArticleTitle ArticleCard_heading__G9xUc"><a class="breakout-link" href="/en/blog/hosterra-greenit-testimonial/">How to use 38% less energy when using Bare Metal... thanks to Hosterra!</a></h2><div class="blogCategory ArticleCard_category__lf8S7"><a href="/en/blog/deploy/">Deploy</a></div><div aria-label="Tags list. Click to choose as filter." 
class="Tags_tags__UDbwl ArticleCard_tags__u6zr9"><span class="Tag_tag__JS3kY">Green IT</span><span class="Tag_tag__JS3kY">Bare Metal</span><span class="Tag_tag__JS3kY">Testimonial</span></div></div><div class="ArticleCard_footer__LyyE1"><address class="blogAuthor"><a href="/en/blog/author/james-martin/">James Martin</a></address><div><time dateTime="2024-03-12">12/03/24</time><span class="blogDot" aria-hidden="true">•</span><span>5 min read</span></div></div></div></article><article class="ArticleCard_articleCard__91jgG"><div class="blogImage ArticleCard_img__SYe4N"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp 3840w" 
src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp"/></div><div class="ArticleCard_contentContainer__WjTiq"><div class="ArticleCard_content__SQ9Aj"><h2 class="font-heading-title blogArticleTitle ArticleCard_heading__G9xUc"><a class="breakout-link" href="/en/blog/ollama-from-zero-to-running-an-llm-in-less-than-2-minutes/">Ollama: from zero to running an LLM in less than 2 minutes!</a></h2><div class="blogCategory ArticleCard_category__lf8S7"><a href="/en/blog/build/">Build</a></div><div aria-label="Tags list. Click to choose as filter." class="Tags_tags__UDbwl ArticleCard_tags__u6zr9"><span class="Tag_tag__JS3kY">AI</span><span class="Tag_tag__JS3kY">H100</span><span class="Tag_tag__JS3kY">open source</span></div></div><div class="ArticleCard_footer__LyyE1"><address class="blogAuthor"><a href="/en/blog/author/diego-coy/">Diego Coy</a></address><div><time dateTime="2024-03-08">08/03/24</time><span class="blogDot" aria-hidden="true">•</span><span>6 min read</span></div></div></div></article><article class="ArticleCard_articleCard__91jgG"><div class="blogImage ArticleCard_img__SYe4N"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 1080w, 
https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp"/></div><div class="ArticleCard_contentContainer__WjTiq"><div class="ArticleCard_content__SQ9Aj"><h2 class="font-heading-title blogArticleTitle ArticleCard_heading__G9xUc"><a class="breakout-link" href="/en/blog/get-started-ai-cost-emissions-mindmatch/">How to get started in AI without excessive cost, or emissions! - MindMatch guest post</a></h2><div class="blogCategory ArticleCard_category__lf8S7"><a href="/en/blog/build/">Build</a></div><div aria-label="Tags list. Click to choose as filter." 
class="Tags_tags__UDbwl ArticleCard_tags__u6zr9"><span class="Tag_tag__JS3kY">AI</span><span class="Tag_tag__JS3kY">Startups</span><span class="Tag_tag__JS3kY">Sustainability</span></div></div><div class="ArticleCard_footer__LyyE1"><address class="blogAuthor"><a href="/en/blog/author/zofia-smolen/">Zofia Smoleń</a></address><div><time dateTime="2024-02-26">26/02/24</time><span class="blogDot" aria-hidden="true">•</span><span>7 min read</span></div></div></div></article><article class="ArticleCard_articleCard__91jgG"><div class="blogImage ArticleCard_img__SYe4N"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Nabu_Card_827fe79a9e.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Nabu_Card_827fe79a9e.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Nabu_Card_827fe79a9e.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Nabu_Card_827fe79a9e.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Nabu_Card_827fe79a9e.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Nabu_Card_827fe79a9e.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Nabu_Card_827fe79a9e.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Nabu_Card_827fe79a9e.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Nabu_Card_827fe79a9e.webp"/></div><div class="ArticleCard_contentContainer__WjTiq"><div class="ArticleCard_content__SQ9Aj"><h2 class="font-heading-title blogArticleTitle ArticleCard_heading__G9xUc"><a class="breakout-link" 
href="/en/blog/infrastructures-for-llms-in-the-cloud/">Infrastructures for LLMs in the cloud</a></h2><div class="blogCategory ArticleCard_category__lf8S7"><a href="/en/blog/build/">Build</a></div><div aria-label="Tags list. Click to choose as filter." class="Tags_tags__UDbwl ArticleCard_tags__u6zr9"><span class="Tag_tag__JS3kY">AI</span></div></div><div class="ArticleCard_footer__LyyE1"><address class="blogAuthor"><a href="/en/blog/author/fabien-da-silva/">Fabien da Silva</a></address><div><time dateTime="2024-02-21">21/02/24</time><span class="blogDot" aria-hidden="true">•</span><span>6 min read</span></div></div></div></article></div></section><nav class="Pagination_pagination__HCEiZ" role="navigation" aria-label="Pagination"><ul><li aria-hidden="false"><a href="/en/blog/2/"><style data-emotion="css x3mert">.css-x3mert{vertical-align:middle;fill:currentColor;height:1.25rem;width:1.25rem;min-width:1.25rem;min-height:1.25rem;}.css-x3mert .fillStroke{stroke:currentColor;fill:none;}</style><svg viewBox="0 0 20 20" class="css-x3mert elxvigq0"><path d="M9.322 3.22a.75.75 0 0 1 0 1.06L4.561 9.042H17.25a.75.75 0 0 1 0 1.5H4.56l4.762 4.761a.75.75 0 0 1-1.06 1.06l-6.042-6.04a.75.75 0 0 1 0-1.06L8.26 3.22a.75.75 0 0 1 1.061 0" clip-rule="evenodd"></path></svg><span class="sr-only">Back to Previous</span></a></li><li><a href="/en/blog/"><span class="sr-only">Go to page </span>1</a></li><li><a href="/en/blog/2/"><span class="sr-only">Go to page </span>2</a></li><li><a class="Pagination_active__pphGX" aria-current="true" href="/en/blog/3/"><span class="sr-only">Current page: </span>3</a></li><li><a href="/en/blog/4/"><span class="sr-only">Go to page </span>4</a></li><li><a href="/en/blog/5/"><span class="sr-only">Go to page </span>5</a></li><li aria-hidden="false"><a href="/en/blog/4/"><style data-emotion="css x3mert">.css-x3mert{vertical-align:middle;fill:currentColor;height:1.25rem;width:1.25rem;min-width:1.25rem;min-height:1.25rem;}.css-x3mert 
.fillStroke{stroke:currentColor;fill:none;}</style><svg viewBox="0 0 20 20" class="css-x3mert elxvigq0"><path d="M10.678 3.22a.75.75 0 0 1 1.06 0l6.042 6.04a.75.75 0 0 1 0 1.061l-6.041 6.042a.75.75 0 0 1-1.061-1.061l4.761-4.761H2.75a.75.75 0 0 1 0-1.5h12.69L10.677 4.28a.75.75 0 0 1 0-1.06" clip-rule="evenodd"></path></svg><span class="sr-only">Forward to Next</span></a></li></ul></nav><section class="ExtraPosts_container__0fO7Q"><h2 class="font-heading-highlighted ExtraPosts_title__hqJSu">Most popular articles</h2><div class="ExtraPosts_articles__4oTri"><article class="RecommendedArticleCard_articleCard__L95dV"><div class="blogImage RecommendedArticleCard_img__lFn5u"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/slack_imgs_eb15652f7e.jpg 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/slack_imgs_eb15652f7e.jpg 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/slack_imgs_eb15652f7e.jpg 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/slack_imgs_eb15652f7e.jpg 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/slack_imgs_eb15652f7e.jpg 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/slack_imgs_eb15652f7e.jpg 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/slack_imgs_eb15652f7e.jpg 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/slack_imgs_eb15652f7e.jpg 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/slack_imgs_eb15652f7e.jpg"/></div><div class="RecommendedArticleCard_contentContainer__83Lgz"><h2 class="font-heading-title blogArticleTitle 
RecommendedArticleCard_heading___OIAO"><a class="breakout-link" href="/en/blog/big-efficient-open-the-ai-future-we-saw-coming/">Big, Efficient, Open: The AI Future We Saw Coming</a></h2><div class="RecommendedArticleCard_excerpt__Gsphk" role="doc-subtitle"><div class="RichText_scwRichtextStyle__xoOiq"><p class="font-body-regular">Last week&#x27;s AI Action Summit highlighted key principles shaping the future of AI: Big, Efficient, and Open. Read the full article for an inside look at the event and insights about it.</p></div></div><div class="RecommendedArticleCard_footer__avFIY"><div class="blogCategory"><a href="/en/blog/build/">Build</a></div><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><address class="blogAuthor"><a href="/en/blog/author/frederic-bardolle/">Frédéric Bardolle</a></address><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><div><time dateTime="2025-02-19">19/02/25</time><span class="blogDot" aria-hidden="true">•</span><span>4 min read</span></div></div><div aria-label="Tags list. Click to choose as filter." 
class="Tags_tags__UDbwl"><span class="Tag_tag__JS3kY">ai-PULSE</span><span class="Tag_tag__JS3kY">AI Action Summit</span></div></div></article></div><div class="ExtraPosts_articles__4oTri"><article class="RecommendedArticleCard_articleCard__L95dV"><div class="blogImage RecommendedArticleCard_img__lFn5u"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Documentation_Dev_API_Illustration_Blog_9121e48399.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Documentation_Dev_API_Illustration_Blog_9121e48399.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Documentation_Dev_API_Illustration_Blog_9121e48399.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Documentation_Dev_API_Illustration_Blog_9121e48399.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Documentation_Dev_API_Illustration_Blog_9121e48399.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Documentation_Dev_API_Illustration_Blog_9121e48399.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Documentation_Dev_API_Illustration_Blog_9121e48399.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Documentation_Dev_API_Illustration_Blog_9121e48399.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Documentation_Dev_API_Illustration_Blog_9121e48399.webp"/></div><div class="RecommendedArticleCard_contentContainer__83Lgz"><h2 class="font-heading-title blogArticleTitle RecommendedArticleCard_heading___OIAO"><a class="breakout-link" 
href="/en/blog/what-is-a-document-database-a-comprehensive-guide/">What Is a Document Database? A Comprehensive Guide</a></h2><div class="RecommendedArticleCard_excerpt__Gsphk" role="doc-subtitle"><div class="RichText_scwRichtextStyle__xoOiq"><p class="font-body-regular">Discover the power of document databases, their benefits and use cases, in managing complex, semi-structured, and unstructured data.</p></div></div><div class="RecommendedArticleCard_footer__avFIY"><div class="blogCategory"><a href="/en/blog/build/">Build</a></div><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><address class="blogAuthor"><a href="/en/blog/author/nevine-ismael-et-walter-timmermans/">Névine Ismael et Walter Timmermans</a></address><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><div><time dateTime="2025-02-12">12/02/25</time><span class="blogDot" aria-hidden="true">•</span><span>3 min read</span></div></div><div aria-label="Tags list. Click to choose as filter." 
class="Tags_tags__UDbwl"><span class="Tag_tag__JS3kY">Managed MongoDB</span><span class="Tag_tag__JS3kY">Document</span><span class="Tag_tag__JS3kY">Database</span></div></div></article></div><div class="ExtraPosts_articles__4oTri"><article class="RecommendedArticleCard_articleCard__L95dV"><div class="blogImage RecommendedArticleCard_img__lFn5u"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696.png 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696.png 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696.png 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696.png 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696.png 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696.png 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696.png 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696.png 3840w" 
src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696.png"/></div><div class="RecommendedArticleCard_contentContainer__83Lgz"><h2 class="font-heading-title blogArticleTitle RecommendedArticleCard_heading___OIAO"><a class="breakout-link" href="/en/blog/overcoming-the-challenges-of-cloud-environmental-impact-measurement/">Overcoming the challenges of Cloud environmental impact measurement</a></h2><div class="RecommendedArticleCard_excerpt__Gsphk" role="doc-subtitle"><div class="RichText_scwRichtextStyle__xoOiq"><p class="font-body-regular">Scaleway&#x27;s Environmental Footprint Calculator measures carbon emissions and water usage, empowering users to optimize infrastructures and embrace responsible digital practices.</p></div></div><div class="RecommendedArticleCard_footer__avFIY"><div class="blogCategory"><a href="/en/blog/build/">Build</a></div><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><address class="blogAuthor"><a href="/en/blog/author/elise-auvray/">Elise Auvray</a></address><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><div><time dateTime="2024-12-23">23/12/24</time><span class="blogDot" aria-hidden="true">•</span><span>4 min read</span></div></div><div aria-label="Tags list. Click to choose as filter." 
class="Tags_tags__UDbwl"><span class="Tag_tag__JS3kY">Sustainability </span><span class="Tag_tag__JS3kY">Green IT</span><span class="Tag_tag__JS3kY">Environmental Footprint Calculator </span></div></div></article></div></section></main><footer id="footer" class="Footer_footer__dXXGl full-width"><div class="container"><div class="Footer_categories__GKzcP"><div><div class="Footer_title__SsUPi">Products</div><ul><li><a class="cta-inline cta-size-big" href="/en/all-products/">All Products</a></li><li><a class="cta-inline cta-size-big" href="/en/betas/">Betas</a></li><li><a class="cta-inline cta-size-big" href="/en/bare-metal/">Bare Metal</a></li><li><a class="cta-inline cta-size-big" href="/en/dedibox/">Dedibox</a></li><li><a class="cta-inline cta-size-big" href="/en/elastic-metal/">Elastic Metal</a></li><li><a class="cta-inline cta-size-big" href="/en/virtual-instances/">Compute Instances</a></li><li><a class="cta-inline cta-size-big" href="/en/gpu-instances/">GPU</a></li><li><a class="cta-inline cta-size-big" href="/en/containers/">Containers</a></li><li><a class="cta-inline cta-size-big" href="/en/object-storage/">Object Storage</a></li><li><a class="cta-inline cta-size-big" href="/en/block-storage/">Block Storage</a></li></ul></div><div><div class="Footer_title__SsUPi">Resources</div><ul><li><a href="https://www.scaleway.com/en/docs/" class="cta-inline cta-size-big">Documentation</a></li><li><a href="https://www.scaleway.com/en/docs/changelog/" class="cta-inline cta-size-big">Changelog</a></li><li><a class="cta-inline cta-size-big" href="https://www.scaleway.com/en/blog/">Blog</a></li><li><a href="https://feature-request.scaleway.com/" class="cta-inline cta-size-big">Feature Requests</a></li><li><a href="https://slack.scaleway.com/" class="cta-inline cta-size-big">Slack Community</a></li></ul></div><div><div class="Footer_title__SsUPi">Contact</div><ul><li><a href="https://console.scaleway.com/support/create/" class="cta-inline cta-size-big">Create a 
ticket</a></li><li><a href="https://console.scaleway.com/support/abuses/create/" class="cta-inline cta-size-big">Report Abuse</a></li><li><a href="https://status.scaleway.com/" class="cta-inline cta-size-big">Status</a></li><li><a href="https://console.online.net/fr/login" class="cta-inline cta-size-big">Dedibox Console online.net</a></li><li><a class="cta-inline cta-size-big" href="/en/assistance/">Support plans</a></li><li><a href="https://ultraviolet.scaleway.com/6dd9b5c45/p/62b4e2-ultraviolet" class="cta-inline cta-size-big">Brand resources</a></li></ul></div><div><div class="Footer_title__SsUPi">Company</div><ul><li><a class="cta-inline cta-size-big" href="/en/about-us/">About us</a></li><li><a class="cta-inline cta-size-big" href="/en/events/">Events</a></li><li><a href="https://www.scaleway.com/en/marketplace/" class="cta-inline cta-size-big">Marketplace</a></li><li><a class="cta-inline cta-size-big" href="/en/environmental-leadership/">Environment </a></li><li><a class="cta-inline cta-size-big" href="/en/social-responsibility/">Social Responsibility</a></li><li><a class="cta-inline cta-size-big" href="/en/security-and-resilience/">Security</a></li><li><a class="cta-inline cta-size-big" href="/en/shared-responsibility-model/">Shared Responsibility Model</a></li><li><a class="cta-inline cta-size-big" href="/en/news/">News</a></li><li><a class="cta-inline cta-size-big" href="/en/careers/">Careers</a></li><li><a class="cta-inline cta-size-big" href="/en/scaleway-learning/">Scaleway Learning</a></li><li><a class="cta-inline cta-size-big" href="/en/customer-testimonials/">Client Success Stories</a></li><li><style data-emotion="css je8g23">.css-je8g23{pointer-events:none;}</style><style data-emotion="css 
s7jpo2">.css-s7jpo2{background-color:transparent;border:none;padding:0;color:#34a8ff;-webkit-text-decoration:underline;text-decoration:underline;text-decoration-thickness:1px;text-underline-offset:2px;text-decoration-color:transparent;-webkit-transition:text-decoration-color 250ms ease-out;transition:text-decoration-color 250ms ease-out;gap:0.5rem;position:relative;cursor:pointer;width:-webkit-fit-content;width:-moz-fit-content;width:fit-content;font-size:1rem;font-family:Inter,Asap,sans-serif;font-weight:500;letter-spacing:0;line-height:1.5rem;paragraph-spacing:0;text-case:none;}.css-s7jpo2 .e1afnb7a2{-webkit-transition:-webkit-transform 250ms ease-out;transition:transform 250ms ease-out;}.css-s7jpo2 >*{pointer-events:none;}.css-s7jpo2:hover,.css-s7jpo2:focus{outline:none;-webkit-text-decoration:underline;text-decoration:underline;text-decoration-thickness:1px;color:#6fc2ff;text-decoration-color:#6fc2ff;}.css-s7jpo2:hover .e1afnb7a2,.css-s7jpo2:focus .e1afnb7a2{-webkit-transform:translate(-0.25rem, 0);-moz-transform:translate(-0.25rem, 0);-ms-transform:translate(-0.25rem, 0);transform:translate(-0.25rem, 0);}.css-s7jpo2[data-variant='inline']{-webkit-text-decoration:underline;text-decoration:underline;text-decoration-thickness:1px;}.css-s7jpo2:hover::after,.css-s7jpo2:focus::after{background-color:#34a8ff;}.css-s7jpo2:active{text-decoration-thickness:2px;}</style><a href="https://labs.scaleway.com/en/" target="_blank" rel="noopener noreferrer" class="css-s7jpo2 e1afnb7a0" variant="bodyStrong" data-variant="standalone">Labs<style data-emotion="css zpkqdi">.css-zpkqdi{display:-webkit-inline-box;display:-webkit-inline-flex;display:-ms-inline-flexbox;display:inline-flex;padding-bottom:0.25rem;}</style><span class="css-zpkqdi e1afnb7a1"><style data-emotion="css 1f2k2gl">.css-1f2k2gl{margin-left:0.5rem;}</style><style data-emotion="css 
1jx3y9">.css-1jx3y9{vertical-align:middle;fill:currentColor;height:14px;width:14px;min-width:14px;min-height:14px;margin-left:0.5rem;}.css-1jx3y9 .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="e1afnb7a2 css-1jx3y9 euz0z380"><path d="M6.22 8.72a.75.75 0 0 0 1.06 1.06l5.22-5.22v1.69a.75.75 0 0 0 1.5 0v-3.5a.75.75 0 0 0-.75-.75h-3.5a.75.75 0 0 0 0 1.5h1.69z"></path><path d="M3.5 6.75c0-.69.56-1.25 1.25-1.25H7A.75.75 0 0 0 7 4H4.75A2.75 2.75 0 0 0 2 6.75v4.5A2.75 2.75 0 0 0 4.75 14h4.5A2.75 2.75 0 0 0 12 11.25V9a.75.75 0 0 0-1.5 0v2.25c0 .69-.56 1.25-1.25 1.25h-4.5c-.69 0-1.25-.56-1.25-1.25z"></path></svg></span></a></li></ul></div></div><div class="Footer_socialsContainer__FuhFv"><a href="/en/"><img alt="Scaleway" loading="lazy" width="166" height="32" decoding="async" data-nimg="1" style="color:transparent" srcSet="/_next/static/media/logo.7e2996cb.svg 1x, /_next/static/media/logo.7e2996cb.svg 2x" src="/_next/static/media/logo.7e2996cb.svg"/></a><div><p>Follow us</p><a class="Footer_socialLink__9UK2B" href="https://x.com/Scaleway/"><style data-emotion="css x3mert">.css-x3mert{vertical-align:middle;fill:currentColor;height:1.25rem;width:1.25rem;min-width:1.25rem;min-height:1.25rem;}.css-x3mert .fillStroke{stroke:currentColor;fill:none;}</style><svg viewBox="0 0 20 20" class="css-x3mert elxvigq0"><path d="M15.203 1.875h2.757l-6.023 6.883 7.085 9.367h-5.547l-4.345-5.68-4.972 5.68H1.4l6.442-7.363-6.797-8.887h5.688l3.928 5.193zm-.967 14.6h1.527L5.903 3.438H4.264z"></path></svg><span class="sr-only">x</span></a><a class="Footer_socialLink__9UK2B" href="https://slack.scaleway.com/"><style data-emotion="css x3mert">.css-x3mert{vertical-align:middle;fill:currentColor;height:1.25rem;width:1.25rem;min-width:1.25rem;min-height:1.25rem;}.css-x3mert .fillStroke{stroke:currentColor;fill:none;}</style><svg viewBox="0 0 20 20" class="css-x3mert elxvigq0"><path fill-rule="evenodd" d="M6.056 3.419a1.75 1.75 0 0 0 
1.75 1.751H9.39a.167.167 0 0 0 .167-.166V3.419a1.75 1.75 0 1 0-3.501 0m3.5 4.392a1.75 1.75 0 0 0-1.75-1.751H3.417a1.75 1.75 0 0 0-1.75 1.751 1.75 1.75 0 0 0 1.75 1.752h4.39a1.75 1.75 0 0 0 1.75-1.752m-6.123 6.142a1.75 1.75 0 0 0 1.75-1.752v-1.585a.167.167 0 0 0-.167-.166H3.433a1.75 1.75 0 0 0-1.75 1.751 1.75 1.75 0 0 0 1.75 1.752m4.376-3.503a1.75 1.75 0 0 0-1.75 1.751v4.38a1.75 1.75 0 1 0 3.5 0V12.2a1.75 1.75 0 0 0-1.75-1.751m7.01-2.639a1.75 1.75 0 1 1 3.501 0 1.75 1.75 0 0 1-1.75 1.752h-1.584a.167.167 0 0 1-.167-.167zm-.875 0a1.75 1.75 0 1 1-3.5 0V3.42a1.75 1.75 0 1 1 3.5 0zm0 8.77a1.75 1.75 0 0 0-1.75-1.752H10.61a.167.167 0 0 0-.167.167v1.585a1.75 1.75 0 1 0 3.501 0m-3.5-4.38a1.75 1.75 0 0 0 1.75 1.752h4.39a1.75 1.75 0 0 0 1.75-1.752 1.75 1.75 0 0 0-1.75-1.751h-4.39a1.75 1.75 0 0 0-1.75 1.751" clip-rule="evenodd"></path></svg><span class="sr-only">slack</span></a><a class="Footer_socialLink__9UK2B" href="https://www.instagram.com/scaleway/"><style data-emotion="css x3mert">.css-x3mert{vertical-align:middle;fill:currentColor;height:1.25rem;width:1.25rem;min-width:1.25rem;min-height:1.25rem;}.css-x3mert .fillStroke{stroke:currentColor;fill:none;}</style><svg viewBox="0 0 20 20" class="css-x3mert elxvigq0"><path fill-rule="evenodd" d="M1.667 9.719c0-2.848 0-4.272.563-5.356A5 5 0 0 1 4.362 2.23c1.084-.563 2.507-.563 5.355-.563h.566c2.848 0 4.272 0 5.355.563a5 5 0 0 1 2.132 2.133c.563 1.084.563 2.508.563 5.356v.566c0 2.848 0 4.272-.562 5.356a5 5 0 0 1-2.133 2.133c-1.083.563-2.507.563-5.355.563h-.566c-2.848 0-4.271 0-5.355-.563a5 5 0 0 1-2.132-2.133c-.563-1.084-.563-2.508-.563-5.356zm3.67.284a4.668 4.668 0 1 0 9.336 0 4.668 4.668 0 0 0-9.336 0m7.697 0a3.03 3.03 0 1 1-6.06 0 3.03 3.03 0 1 1 6.06 0m2.912-4.854a1.09 1.09 0 1 1-2.18 0 1.09 1.09 0 0 1 2.18 0" clip-rule="evenodd"></path></svg><span class="sr-only">instagram</span></a><a class="Footer_socialLink__9UK2B" href="https://www.linkedin.com/company/scaleway/"><style data-emotion="css 
x3mert">.css-x3mert{vertical-align:middle;fill:currentColor;height:1.25rem;width:1.25rem;min-width:1.25rem;min-height:1.25rem;}.css-x3mert .fillStroke{stroke:currentColor;fill:none;}</style><svg viewBox="0 0 20 20" class="css-x3mert elxvigq0"><path fill-rule="evenodd" d="M18.332 18.166a.167.167 0 0 1-.167.167h-3.09a.167.167 0 0 1-.167-.167V12.5c0-1.599-.608-2.492-1.874-2.492-1.377 0-2.096.93-2.096 2.492v5.666a.167.167 0 0 1-.167.167H7.804a.167.167 0 0 1-.166-.167V7.39c0-.092.074-.167.166-.167h2.967c.092 0 .167.075.167.167v.67c0 .174.275.26.39.131a3.88 3.88 0 0 1 2.96-1.307c2.357 0 4.044 1.439 4.044 4.415zM3.7 5.767a2.043 2.043 0 0 1-2.035-2.05c0-1.132.91-2.05 2.035-2.05s2.034.918 2.034 2.05-.91 2.05-2.034 2.05m-1.704 12.4c0 .091.074.166.166.166H5.27a.167.167 0 0 0 .167-.167V7.39a.167.167 0 0 0-.167-.167H2.163a.167.167 0 0 0-.166.167z" clip-rule="evenodd"></path></svg><span class="sr-only">linkedIn</span></a></div></div><ul class="Footer_sublinks__Mjpw0"><li><a href="/en/contracts/">Contracts</a></li><li><a href="/en/legal-notice/">Legal Notice</a></li><li><a href="/en/privacy-policy/">Privacy Policy</a></li><li><a href="/en/cookie/">Cookie</a></li><li><a href="https://security.scaleway.com">Security Measures</a></li><li><a hrefLang="fr" href="/fr/accessibility/">Accessibility statement</a></li></ul><span class="Footer_brand__qv1gM">© 1999-<!-- -->2025<!-- --> - Scaleway SAS</span></div></footer></div><div id="portal"></div></div></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{"localizations":{"data":[{"id":1,"attributes":{"path":"/blog/3","locale":"fr"}}]},"_nextI18Next":{"initialI18nStore":{"en":{"common":{"open":"Open","close":"Close","backTo":"Back to {{page}}","seeMore":"See more","skip":"Skip {{to}}","toLogin":"to login","toMain":"to main content","toFooter":"to footer section","results":"Number of results: {{resultsLength}}","yourEmail":"Your Email","submit":"Submit","header":{"motdTitle":"Top highlight! 
"},"footer":{"followUs":"Follow us","subLinks":{"contracts":{"href":"/en/contracts/","title":"Contracts"},"legalNotice":{"href":"/en/legal-notice/","title":"Legal Notice"},"privacyPolicy":{"href":"/en/privacy-policy/","title":"Privacy Policy"},"cookie":{"href":"/en/cookie/","title":"Cookie"},"securityMeasures":{"href":"https://security.scaleway.com","title":"Security Measures"},"accessibility":{"href":"/fr/accessibility/","title":"Accessibility statement","hrefLang":"fr"}}},"breadcrumb":{"homepageLink":{"home":{"href":"/","title":"Home"}}},"cookies":{"acceptAll":"Accept all","rejectAll":"Reject all","save":"Save settings","panelManagementTitle":"Manage cookies settings","panelConsent":{"title":"Cookie time!","description":"We use cookies in order to improve our website and to offer you a better experience. You can also consult our ","linkLabel":"Cookie policy","link":"/en/privacy-policy/","settings":"Manage your preferences"},"categories":{"functional":{"title":"Functional","subtitle":"Always active","description":"These cookies are required for the website to function properly and to allow you to use its services and features. Without these cookies, we would be unable to provide certain requested services or features."},"analytics":{"title":"Analytics","description":"These cookies are used to monitor the performance of our site and to enhance your browsing experience."},"marketing":{"title":"Marketing","description":"These cookies are used to understand user behavior in order to provide you with a more relevant browsing experience or personalize the content on our site."}}}},"blog":{"tagsAriaLabel":"Tags list. 
Click to choose as filter.","timeToRead":"{{min}} min read","recommendedArticles":"Recommended articles","pagination":{"next":"Forward to Next","previous":"Back to Previous","goToPage":"Go to page ","currentPage":"Current page: "},"copyButton":{"copied":"Copied!","defaultValue":"Copy","code":"Copy code"},"home":{"title":"Scaleway Blog - All posts","description":"Scaleway’s blog helps developers and startups to build, deploy and scale applications.","heading":"Scaleway Blog","articleOfMonth":"Must read","latestArticles":"Latest articles","popularArticles":"Most popular articles"},"categoryPage":{"build":{"title":"Build Projects with Scaleway","description":"Learn how to easily build and develop projects using Scaleway products."},"deploy":{"title":"Deploy Applications with Scaleway","description":"Discover how to deploy your applications smoothly with Scaleway."},"scale":{"title":"Scale Your Applications with Scaleway","description":"Find out how to efficiently scale your applications on Scaleway."},"incidents":{"title":"Incident Reports","description":"All the latest updates on Scaleway Cloud ecosystem incidents, and how they were resolved."}},"authorPage":{"title_one":"A {{author}}'s post","title_other":"All {{author}}'s posts","description_one":"Discover a blog post written by {{author}}.","description_other":"Discover all the blog posts written by {{author}}."}}}},"initialLocale":"en","ns":["common","blog"],"userConfig":{"i18n":{"locales":["default","en","fr"],"defaultLocale":"default","localeDetection":false},"default":{"i18n":{"locales":["default","en","fr"],"defaultLocale":"default","localeDetection":false}}}},"posts":[{"title":"How Everdian delivers “life-saving” real-time critical insights, via AI","path":"how-everdian-delivers-life-saving-real-time-critical-insights-via-ai/","description":"_Why does the new generation of European AI startups increasingly turn to Scaleway? It’s not just to access the European cloud’s most powerful GPU cluster. 
As Cedric Milinaire, Director General \u0026 CTO of France’s [Everdian](https://everdian.com/) explains, it’s also to accelerate growth, as Scaleway’s simplicity means new team members can be onboarded in just a few weeks. Find out more below!_\n\nEverdian is an AI startup specialized in real-time decision making. Its main differentiator is that it **uses multiple proprietary AI models capable of analyzing large streams of data in real time**, to alert strategic decision makers about key ongoing events. Users can build custom dashboards to visualize results and generate their own alerts.\n\nBased on algorithms that could broadly be classified as NLP (Natural Language Processing), its activity covers:\n\n-\tReal-time security alerts (for example, if there’s a fire alert in a building, the AI helps to get the news out there, and show relevant videos to facilitate fast localization of the fire)\n-\tMulti-source monitoring, to detect fast-spreading topics, and for fact checking\n-\tIn the financial sector, predicting market shares and stock evaluations, as well as sentiment detection with regards to big announcements\n-\tMany other use cases, such as finding a client’s stolen assets on second hand reseller platforms.\n\n\nEverdian uses all types of data, including text, images and videos. For training, the team annotates real world data, then adds synthetic data to improve it. **Today, the metadata is often more important than the data itself. So Everdian needs to tweak the datasets to optimize its effects**. This can lead to significant improvements in the fields of privacy and energy efficiency.\n\n\n## How it works\n\nAI startups are everywhere right now, as are hype-fueled funding rounds. But Everdian’s objective is to make a difference in the real world.\n\n**“When you handle use cases with human lives at stake, ten seconds is really important,”** says Milinaire. 
“For example, we’re used by search and rescue teams to alert them about the occurrence of fire incidents. We provide context with live video feeds and various information posted online. Without us, the only information they may have is that the fire’s in the building. **We can tell them - based on data posted online - it’s on the 5th floor and not the 6th. And that saves lives.”**\n\nTo perform such a feat, Everdian collects data streams into large graphs and analyzes the multiple data points; the level of filtering depends on the services and use cases.\n\nFor instance, image analysis services provide more accurate reports than public opinions (often blurry and contradicting). Then feedback correlation and source comparison will provide a clear idea of any situation and enable Everdian to share the most relevant information. \n\nThe startup’s proprietary clustering algorithm and AI models analyze image and video similarity, in order to only keep relevant ones. Naturally, the larger the dataset, the harder it is to filter through the noise. \n\nIndeed, the most frequent challenge is understanding the different data points. **When Everdian detects critical events, it only wants images of that event, not of people giving their opinion about it**. And it needs to select the one best video - not several - that gives the clearest idea of what’s happening. In short, to be able to share only the most relevant and critical information first.\n\n\n## Why they chose Scaleway\n\nEverdian’s number one need is GPUs, “because we analyze millions of texts and images”, says Milinaire, “**so we need access to a whole cluster of GPUs in order to optimize our models, syncing them to the hardware. So [Scaleway’s H100s](https://www.scaleway.com/en/h100-pcie-try-it-now/) are really useful for us**.”\n\nThey also need highly efficient storage; this is important when handling large amounts of data. 
For this, Everdian uses Elasticsearch, as it allows for archiving that lets clients “dig through data”, as Milinaire puts it. Everdian uses snapshots on Scaleway Block Storage here.\n\nSo the startup’s main pain points were:\n\n1.\tCost, as GPUs are expensive\n2.\tAvailability: H100 only available as spot instances at first\n3.\tVariety: Everdian’s work requires a wide variety of GPUs _(we’re working on it!)_\n\n\nWhen searching for a cloud provider, Scaleway’s offering and tools largely matched Everdian’s requirements. The main drawback was the security part, as Scaleway was less advanced than other CSPs at that time. Security is a key factor for Everdian, as all new customers demand comprehensive documentation and guarantees on this front.\n\n**In the end, the tradeoff was positive, as Everdian’s choice meant they could access advanced cloud features and considerable quantities of GPUs**. Individual NVIDIA H100s, as well as entire clusters, are required to analyze millions of texts and images. After that, models are optimized, in sync with the hardware capabilities of each machine.\n\n\n## Building the architecture\n\nTo provide a solution able to auto-scale, auto-heal and auto-upgrade, the decision was made to **containerize everything and always build on Kubernetes** (via Scaleway’s [Kapsule](https://www.scaleway.com/en/kubernetes-kapsule/) product). Then, due to the complexity of data sources, services and customers it has to manage, Everdian opted for a microservices-focused approach. \n\nTheir main feature request was for dedicated control planes (in general availability since Autumn 2023) to enable higher levels of resilience and controls. 
Then, they built everything around those Kubernetes clusters: backups, data and videos, all hosted on Object Storage.\n\nEverdian’s tech teams have notably praised the simplicity and efficiency of Scaleway Kapsule, especially compared with larger CSPs’ equivalent products.\n\nThey were also reassured by [VPC](https://www.scaleway.com/en/vpc/), where the ability to communicate between different zones, thanks to Scaleway’s Multi-AZ offering, where data is redundant across several availability zones, was perceived as a great advantage. Everdian started in the PAR 1 data center region, then extended to PAR 2 to access those lovely new GPUs, whilst accessing a better level of resilience and reliability.\n\nOne missing feature is still the VPN, that Everdian completed themselves for their internal tooling. Their feedback has been noted and Scaleway’s team is working on it.\n\nMilinaire’s current wishlist now includes managed Elasticsearch: a wish Scaleway heard, and so is now looking for others’ points of view in its product discovery approach.\n\n\n## Preparing the next stages of growth by faster onboarding\n\nEverdian found that Scaleway was the ideal cloud provider to ramp up their teams’ technical expertise quickly. “**On Scaleway’s platform, our tech teams were operational in a matter of weeks; much faster than with hyperscaler cloud providers**,” says Milinaire, who adds:\n\n**“We hired a DevSecOps. I didn’t explain anything about Scaleway to him. I just said ‘this is in the Scaleway console, figure it out. You can do it!’ Not long afterwards, he was creating VPCs everywhere!”**\n\nAnother example was remote employees, who require quick and autonomous onboarding to use other services in a matter of days, without any mentoring or further explanations.\n\nCompared with hyperscalers, this accessibility helps Everdian’s teams be more productive and enables the company to welcome new tech staff more quickly, thereby boosting their impact. 
With other providers, a non-knowledgeable team member would take weeks to onboard, after reading documentation before being able to start using their first cloud products. \n\nEverdian also cites the **proximity of Scaleway’s support staff as a key differentiator: “my feedback is always taken into consideration”**, says Milinaire.\n\nThis will be critical for Everdian’s next stages of growth, given its ambitious roadmap. Such as reworking the organization and project leveraging new features, along with the always improving IAM and network capabilities of Scaleway. \n\nAnother area of improvement will be the AI model optimization - as Everdian grows, their consumption of compute power grows exponentially - needing detailed attention of their AI scientists and technology teams. \n","createdAt":"2024-07-22T12:58:29.370Z","updatedAt":"2024-08-08T12:47:00.644Z","publishedAt":"2024-07-22T13:09:56.966Z","locale":"en","tags":"AI\nTestimonial","popular":false,"articleOfTheMonth":false,"category":"Scale","timeToRead":4,"excerpt":"AI startup Everdian chose Scaleway not just for its powerful GPU clusters, but also because Scaleway’s simplicity means new team members can be onboarded in just a few weeks. 
More inside!","author":"Jean-Baptiste Fourmont","h1":"How Everdian delivers “life-saving” real-time critical insights, via AI - interview with Cedric Milinaire","createdOn":"2024-07-22","image":{"data":{"id":3241,"attributes":{"name":"Natural-Language-Processing-AI-Illustration-Blog.webp","alternativeText":null,"caption":null,"width":1216,"height":752,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp","hash":"large_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","mime":"image/webp","name":"large_Natural-Language-Processing-AI-Illustration-Blog.webp","path":null,"size":"284.79","width":1000,"height":618},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp","hash":"small_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","mime":"image/webp","name":"small_Natural-Language-Processing-AI-Illustration-Blog.webp","path":null,"size":"108.87","width":500,"height":309},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp","hash":"medium_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","mime":"image/webp","name":"medium_Natural-Language-Processing-AI-Illustration-Blog.webp","path":null,"size":"194.75","width":750,"height":464},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87.webp","hash":"thumbnail_Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","mime":"image/webp","name":"thumbnail_Natural-Language-Processing-AI-Illustration-Blog.webp","path":null,"size":"38.57","width":245,"height":152}},"hash":"Natural_Language_Processing_AI_Illustration_Blog_1f9c21fa87","ext":".webp","mime":"image/webp","size":366.43,"url":"https://www-uploads.scaleway.com/Natural_Language_Processing_AI_Illustration_B
log_1f9c21fa87.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2024-02-15T13:43:21.487Z","updatedAt":"2024-10-14T14:18:30.813Z"}}}},{"title":"How we rebranded Scaleway in three months, navigating uncharted territories - Part 1","path":"how-we-rebranded-scaleway-in-three-months-navigating-uncharted-territories-part-1/","description":"## Part 1: From “cloud of choice” to “cloud empowered” ⚡️\n \n2023 marks the year when Xavier Niel, the Iliad Group, and Scaleway **announced a considerable** [investment in AI](https://www.maddyness.com/2023/11/17/xavier-niel-reunit-un-casting-5-etoiles-pour-defendre-une-ia-open-source-a-la-francaise/). This major technological shift was set to irreversibly change cloud consumption habits in the market.\n\n**It was time to rethink our brand to align with this new ambition**. \n\nThe timing was also perfect with the growing momentum around AI, and the major shift this new technology was bringing to the sector. This momentum would culminate at [ai-PULSE](https://www.ai-pulse.eu/), which was set to gather the domain's leaders in Paris that November.\n\nIt was really challenging; but we did it! And we are excited to share this journey with you today. Brace yourself!\n\n\n\n## Setting the stage for brand scaling ⛰️ \n\n\n### The mission brief\nAfter the initial excitement of the announcement, one thing became clear: **our existing brand seemed disconnected from our new goals and the expansion of our audience**. \nThe lone developer behind their code screen was no longer our sole focus. 
We now had to captivate larger enterprises across many new sectors, or cutting-edge startups focused on AI, with high expectations for sophistication and professionalism.\n\n**Today, our offering revolutionizes cloud architectures, bringing innovation and performance to an unprecedented level, including AI-driven solutions**.\n\n![This colorful brand has served us well for three wonderful years](https://www-uploads.scaleway.com/1_oldbrand_95e713db3d.webp)\n\n\nWe knew that to embody this **true technological turning point**, it was necessary to initiate **a graphical shift**.\n\nHowever, the main challenge was that our rebranding initiative wasn't just about changing our main assets and calling it a day. It involved **updating a huge entire system and visual language that serves as the foundation across thousands of assets** for all our Scalers. This required careful planning for long-term updates, ensuring seamless operational continuity amidst our rapid company growth.\n\nAnd as a concrete example, consider our extensive catalog of products, each accompanied by its own illustration. **We needed to update and continuously create these numerous product illustrations in a consistent style**. This ongoing task, among many others, was essential to maintain a cohesive visual identity that resonated with our evolving brand story. \n\n**While rethinking all our existing assets, we also needed to ensure that our new brand could scale smoothly with our rapidly growing product offer**.\n\n\n![50 product images to update if we were to change our identity... and new ones to create every month](https://www-uploads.scaleway.com/2_oldproducts_962c507bba.webp)\n\n\n### Changing our identity while preserving our DNA\n\nBeyond our transformation, **it was essential to preserve our brand DNA**. 
We wanted to remain recognizable, clearly delineate the visual evolution, and retain what appealed to our users:\n\n- **Our logo would remain unchanged** because it is well recognized by our users and any change seemed risky\n- **Purple**, a rare color among European cloud providers, had to be retained to set us apart\n- **We still wanted to keep an important dose of color**, to avoid becoming monochrome\n- **2D illustrations, although less prominent**, would be maintained, but with less central importance\n- **We reduced figurative illustrations**, such as those featuring people, to signify the shift to a less playful, more mature image\n- **Our mascot, Skylar**, would not disappear completely, but would be less used in our external communication\n- **We would retain the ‘Inter’ font**, chosen over a year ago due to its readability and clarity on our interfaces and website.\n\n\nLast but not least… We began this project in June 2023, and had only three months to change everything. **Everything had to be ready by early November to launch our new brand at ai-PULSE**, the flagship event on artificial intelligence in Europe that we are organizing. We were in the starting blocks!\n\n![What a perfect, ideal, and BEAUTIFUL schedule, isn't it? Well... Part 2 of this article will surprise you with what real planning ended up looking like!](https://www-uploads.scaleway.com/3_planning_80404e7a5d.webp)\n\n\n\n## Choosing the best allies for the best results 🦸 \n\n### Graphic exploration without limitations\n\nTo assist us in this mission, we decided to work with exceptional talents: [Anne Thai](https://www.linkedin.com/in/annethai/), a talented artistic director, and Romain Briaux (founder of [Hervé Studio](https://www.herve.paris/)). Two experts with advanced 3D skills and extensive experience in branding and visual storytelling. **With their expertise and our vision, we had everything we needed to create a strong new image for Scaleway**. 
\n\nWe sought something entirely new, free from creative constraints, for the initial spark. \n\nThe first benchmarks were promising, featuring strong visual storytelling but with a subtle and elegant expression around the theme of space:\n![](https://www-uploads.scaleway.com/4_benchmarks_d67b364267.gif)\n\n\nThe 3D approach introduced a sophisticated aesthetic to our brand perception. It perfectly fulfilled our desire for a distinctive identity while aligning with industry trends, and the initial creative directions truly captivated us.\n\n![](https://www-uploads.scaleway.com/5_explorations_37e2e91895.webp)\n\n\n## The beginnings of a new graphical universe 🪐\n\nAlthough we liked all the approaches, some were too abstract and minimalist, which created too radical a break from the existing brand.\n\n**We gathered feedback from our best user advocates**, the product managers, and product marketing managers. Their involvement at this stage was important to truly embrace the change, and spread it both internally and externally. \n\nIt took a few adjustments to achieve a rendering that marked a true departure from the old style without deviating too much from the original visuals. We aimed to avoid causing confusion among our audience.\n\nTo find the right balance, we chose an approach **inspired by the shapes of our current products, but with a very different graphical treatment**.\n\nJust as the sun illuminates the planets, our 3D product illustrations are lit by their environment. When you rotate them 360 degrees, the lighting changes, mimicking the sun's movement around the Earth.\n\n![](https://www-uploads.scaleway.com/6_iterations_fce7c46763.webp)\n\n\n**Each of the 50 visuals required special attention** to perfectly align with the new brand aesthetic. 
Our goal was to change the perception of our offering with these new, more understated, less colorful, and less cluttered renderings.\n\n\n\n![Each product visual required several days of work](https://www-uploads.scaleway.com/7_newproducts_a4fd716acc.webp)\n\nIn parallel with these product visuals, which hold a central place in our brand, we collaborated with Anne and Romain **to create a generic and central Key Visual that represents the essence of our offering**. Our objective was to convey the idea of a complete, modular, interconnected ecosystem that reflects the power of our infrastructures. \n\nCreatively, this challenge was not easy to meet. We explored various visual renderings, playing with abstraction, movement, light effects, as well as notions of space and floating, while integrating our product visuals.\n\n![](https://www-uploads.scaleway.com/8_keyvisualexplo_2f0ff29400.webp)\n\nWe ultimately opted for the concept of interconnected platforms with an isometric perspective. The metallic texture of the background allowed for the reflection of off-screen lights, which we also incorporated into our product visuals, ensuring perfect coherence with our new graphic approach.\nThis visual was modular: by changing the displayed products, **it perfectly illustrated the idea of a customizable cloud infrastructure**, tailored to the specific needs of our clients.\n\n![A modular and impactful Key Visual](https://www-uploads.scaleway.com/9_keyvisual_423893e413.webp)\n\nThis visual served as the basis for the introductory video presenting our new offering. Created by Hervé Studio, it announced our new positioning and unveiled our new identity. 
**This element represented the culmination of all our creative work, encapsulated in a powerful and engaging narrative**.\n\nThe result is stunning - a big shoutout to Hervé Studio for their amazing work!\n\n\u003ciframe width=\"800\" height=\"480\" src=\"https://www.youtube.com/embed/TnDpsRUiBYU?si=MONGkw0Be4zvE-mE\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" allowfullscreen\u003e\u003c/iframe\u003e\n\n**We were more than satisfied with the outcome!**\n\nNow that our new visual identity was defined, it was time to integrate this new brand internally and update the numerous assets and layouts that would support it. \n\nStay tuned for part 2 to discover the rest of this adventure!\n![](https://www-uploads.scaleway.com/11_finish_d304164ed9.gif)\n","createdAt":"2024-07-08T13:02:55.526Z","updatedAt":"2024-07-08T14:42:33.562Z","publishedAt":"2024-07-08T14:11:31.464Z","locale":"en","tags":"Design","popular":false,"articleOfTheMonth":false,"category":"Deploy","timeToRead":5,"excerpt":"The onset of major cloud industry changes, including the arrival of AI, drove our Design team to rebrand Scaleway in just three months. How? 
Find out here!","author":"Jess Anelli","h1":"How we rebranded Scaleway in three months, navigating uncharted territories ✨ Part 1","createdOn":"2024-07-09","image":{"data":{"id":3823,"attributes":{"name":"Cover.webp","alternativeText":null,"caption":null,"width":1920,"height":1110,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Cover_4054f8d0da.webp","hash":"large_Cover_4054f8d0da","mime":"image/webp","name":"large_Cover.webp","path":null,"size":"280.56","width":1000,"height":578,"sizeInBytes":489115},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Cover_4054f8d0da.webp","hash":"small_Cover_4054f8d0da","mime":"image/webp","name":"small_Cover.webp","path":null,"size":"65.11","width":500,"height":289,"sizeInBytes":125485},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Cover_4054f8d0da.webp","hash":"medium_Cover_4054f8d0da","mime":"image/webp","name":"medium_Cover.webp","path":null,"size":"148.34","width":750,"height":434,"sizeInBytes":271063},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Cover_4054f8d0da.webp","hash":"thumbnail_Cover_4054f8d0da","mime":"image/webp","name":"thumbnail_Cover.webp","path":null,"size":"21.15","width":245,"height":142,"sizeInBytes":39461}},"hash":"Cover_4054f8d0da","ext":".webp","mime":"image/webp","size":302.04,"url":"https://www-uploads.scaleway.com/Cover_4054f8d0da.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2024-07-08T14:05:38.368Z","updatedAt":"2024-07-08T14:05:38.368Z"}}}},{"title":"Retrieval-Augmented Generation: Building a RAG Pipeline with Scaleway’s Managed Inference","path":"retrieval-augmented-generation-building-a-rag-pipeline-with-scaleways-managed-inference/","description":"Retrieval Augmented Generation (RAG) is one of the most sought-after solutions when using AI, and for good reason. 
It addresses some of the main limitations of Large Language Models (LLMs) such as a static knowledge base, inexact information, and hallucinations.\n\nWhile there is a plethora of online material discussing RAG systems, most of them use high-level components that mask the building blocks composing a RAG pipeline. In this article, we’ll use a more grassroots approach to analyze the structure of such systems and build one using Scaleway’s elements, notably one of the latest entries in our portfolio: Managed Inference.\n\n## The Anatomy of a RAG System \nLet’s start by describing a typical use case. You want to build an assistant that can answer questions and provide precise information using your company’s data. You can do this by providing users with a chat application that leverages a foundation model to answer queries. Today, you can choose from a multitude of foundation models and quickly set up such a system. The problem is that none of these models were trained using your data, and even if they were, by the time you put your system into production, the data will already be stale.\n\nThis leaves you with two choices: either you create your own foundation model, or you take an existing one and fine-tune it using your company’s data. RAG provides a third way, that allows you to retrieve your own data based on user queries and use the retrieved information to pass an enriched context to a foundation model. 
The model then uses that context to answer the original query.\n\n![](https://www-uploads.scaleway.com/RAG_1_4f5c1b6204.webp)\n\n## Key Components of a RAG System\nWe now have enough information to identify the main components of our solution:\n- **Data Source:** This can be a data lake, internal documents in the form of PDFs, images, sounds, or even web pages.\n- **Embeddings Model:** A specialized type of model that generates vector representations of the input data.\n- **Vector Database:** A specialized type of database that stores vectors and the associated data, providing mechanisms to compare these vectors based on similarity.\n- **Foundation Model:** This can be your typical Large Language Model.\n\nHowever, we are still missing some components. We need to ingest the raw data from our Data Source, like parse PDFs, scrape web pages, and so on. We need a Scraper/Parser component to achieve that.\nThen, the raw data needs to be preprocessed before we can pass it to the Embeddings Model. We need to normalize and tokenize it properly before passing it as input to the embeddings model. The same goes for user queries; they must be normalized and tokenized using the same preprocessor. Thus, we have identified our missing components:\n- **Scraper/Parser:** We’ll use BeautifulSoup as our scraper and PyPDF2 as our PDF parser to generate the raw data.\n- **Preprocessor:** We’ll use Hugging Face’s AutoTokenizer from the Transformers library and spaCy to tokenize our raw data.\n\n\n## Structure of the RAG Pipeline\nNow that we have all our puzzle pieces in place, a pattern emerges in the structure of our RAG pipeline. We can clearly identify two sub-systems:\n\n![](https://www-uploads.scaleway.com/RAG_2_9813803b3a.webp)\n\n\n1. **Ingest Sub-System:** Responsible for pulling information from the Data Source and passing that raw data to the Preprocessor, which transforms that data into tokens that can then be used by the Embeddings Model to generate vectors. 
The vectors and their associated raw data are then stored in the Vector Database.\n2. **Query/Retrieval Sub-System:** Handles the user query the same way as the Ingest sub-system handles the raw data: it gets normalized and tokenized, then passed to the Embeddings Model to generate its vector representation. The query vector is then used to perform a similarity search using the Vector Database and retrieve the data that is closest to the user query. That data is used to generate an enriched context that is then passed together with the user query to the Foundation Model, which then generates the response.\n\n## Building the Ingest Sub-System\nWith this information, we can design the Ingest sub-system, which includes:\n- **Data Sources**\n- **Scraper/Parser:** Extracts raw data.\n- **Preprocessor:** Normalizes and tokenizes data.\n- **Embeddings Model:** Generates vectors.\n- **Vector Database:** Stores vectors and associated data.\n\nFortunately, Scaleway offers most of these components as managed services, simplifying the implementation process.\nScaleway’s newly developed [Managed Inference](https://www.scaleway.com/en/docs/ai-data/managed-inference/) service, now in public beta, can be used to quickly and securely deploy an easy-to-use LLM endpoint based on a select list of open-source models. It can be used to deploy a scalable, ready-to-use [Sentence-t5-xxl embedding model](https://www.scaleway.com/en/docs/ai-data/managed-inference/reference-content/sentence-t5-xxl/) in less than 5 minutes. Check the [Quickstart](https://www.scaleway.com/en/docs/ai-data/managed-inference/quickstart/) guide to learn how to create an embeddings endpoint. At the end of the Quickstart, you’ll end up with an endpoint in the form: https://\u003cendpoint-url\u003e/v1/embeddings. 
All of Scaleway’s Managed Inference endpoints follow OpenAI’s API spec, so if you already have a system using that spec, you can use Managed Inference as a drop-in replacement.\n\nThe same goes for the Vector Database. Scaleway provides a [PostgreSQL Managed Database](https://www.scaleway.com/en/docs/managed-databases/postgresql-and-mysql/) with a plethora of [available extensions](https://www.scaleway.com/en/docs/faq/databases-for-postgresql-and-mysql/#which-postgresql-extensions-are-available), one of which is the *pgvector* extension that enables vector support for PostgreSQL. Make sure to check the [Quickstart guide](https://www.scaleway.com/en/docs/managed-databases/postgresql-and-mysql/quickstart/) to deploy a resilient production-ready vector database in just a few clicks.\n\nThis leaves us with the Scraper/Parser and the Preprocessor. You can find sample implementations for these two components in the dedicated [Github repository](https://github.com/sebtatut/scw-rag-managed-inference/tree/main) in the form of two services using a REST API.\n\nOnce Scaleway’s managed components and our sample implementations are in place, all we have to do is assemble them to obtain our Ingest pipeline. \n![](https://www-uploads.scaleway.com/RAG_3_b913b0c241.webp)\n\n\nA. The Scraper/Parser pulls data from the external Data Sources. In this example, we’ll scrape information from Scaleway’s Github documentation and parse data from PDFs uploaded on Amazon S3-compatible Scaleway’s Object Storage.\nB. The raw data is sent to the Preprocessor, which normalizes it and tokenizes it appropriately for the Embeddings Model provided via Scaleway’s Managed Inference.\nC. The preprocessed data is sent to the Embeddings Model via a POST request using the endpoint generated once the service is started.\nD. The Embeddings Model returns the generated vectors to the Preprocessor.\nE. 
The Preprocessor stores the embeddings together with the associated data in the PostgreSQL database.\n\nThanks to Scaleway’s managed services, we have an Ingest pipeline up and running in no time.\n\n\n## Building the Query/Retrieval Sub-System\nThis sub-system reuses some of the components of the Ingest sub-system. The Preprocessor, Managed PostgreSQL Database, and the Embeddings Model provided via the Managed Inference service are all reused. We still need a Foundation Model to which we can pass an enriched context as well as the chat interface that sends the user’s queries to it and receives the responses.\n\nOnce again, Scaleway’s Managed Inference comes to the rescue. You can use the same Quickstart guide as before, only this time we’ll use a [Llama-3-8b-instruct](https://www.scaleway.com/en/docs/ai-data/managed-inference/reference-content/llama-3-8b-instruct/) as our Foundation Model. This is a perfect fit for our assistant. \n\nA basic chat application is provided in the same Github repository as before. \n\nOnce we hook everything together, we have our Query/Retrieval sub-system:\n\n![](https://www-uploads.scaleway.com/RAG_4_45f8c134d1.webp)\n\n1. The user sends a query via the Chat Web application.\n2. The Chat Web application forwards the raw query to the Preprocessor, which, as in the Ingest sub-system case, normalizes and tokenizes the query.\n3. The preprocessed user query is sent to the Embeddings Model as a POST request using the Managed Inference endpoint.\n4. The Embeddings Model returns the vector embeddings to the Preprocessor.\n5. The Preprocessor then uses these embeddings to perform a vector similarity search using the Managed PostgreSQL pgvector extension and retrieves documents related to the user query.\n6. 
The Preprocessor uses these documents to create an augmented prompt by creating an enriched context that is then passed together with the user query to the Foundation Model as a POST request to the endpoint provided by Managed Inference.\n7. The Foundation Model answers the user query based on the enriched context and returns the response to the Preprocessor.\n8. The Preprocessor formats the response and returns it to the Chat Web application, which displays the answer to the user.\n\nThis is a basic example that illustrates the building blocks found throughout any RAG pipeline. By leveraging Scaleway’s managed services, you can quickly deploy an effective RAG system, allowing you to focus on fine-tuning and expanding your pipeline to meet specific requirements.\n\n\n## Conclusion\nBuilding a RAG pipeline with managed solutions offered by Scaleway streamlines the process of implementing such systems. By leveraging components like Managed Inference for the embeddings and foundation models and a managed database like PostgreSQL with the pgvector extension, deployment becomes faster and more scalable, allowing businesses to focus more on fine-tuning their systems to meet specific needs.\n\nHowever, there is more to a RAG system beyond the basics covered in this article. Different chunking strategies, such as different sentence tokenizers or splitters or adjacent sequence clustering, can significantly improve data processing and retrieval accuracy. Additionally, optimizing vector database retrieval methods using the pgvector extension can further enhance the system performance. For instance, using `ivfflat` index creation can greatly speed up similarity searches. 
Further fine-tuning by using the `lists` and `probes` parameters can also help in balancing between speed and accuracy.\n\nIn conclusion, while Scaleway’s managed solutions greatly simplify the setup and deployment of a RAG pipeline, as with any system, one has to strike a balance between speed and accuracy by exploring the different aspects of such solutions. \n\n\n\n_Thanks to Diego Coy for his extra research for this article!_\n","createdAt":"2024-07-04T12:21:21.993Z","updatedAt":"2024-10-28T16:21:30.845Z","publishedAt":"2024-07-04T13:51:02.813Z","locale":"en","tags":"AI\nRAG","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":5,"excerpt":"Retrieval Augmented Generation (RAG) is one of the most sought-after solutions when using AI, and for good reason. Find out how to use it in this post!","author":"Sebastian Tatut","h1":"Retrieval-Augmented Generation: Building a RAG Pipeline with Scaleway’s Managed Inference","createdOn":"2024-07-04","image":{"data":{"id":3784,"attributes":{"name":"Managed Inference visual.webp","alternativeText":null,"caption":null,"width":2174,"height":1344,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Managed_Inference_visual_1fdaca592e.webp","hash":"large_Managed_Inference_visual_1fdaca592e","mime":"image/webp","name":"large_Managed Inference visual.webp","path":null,"size":"582.55","width":1000,"height":618,"sizeInBytes":994878},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Managed_Inference_visual_1fdaca592e.webp","hash":"small_Managed_Inference_visual_1fdaca592e","mime":"image/webp","name":"small_Managed Inference visual.webp","path":null,"size":"154.02","width":500,"height":309,"sizeInBytes":266055},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Managed_Inference_visual_1fdaca592e.webp","hash":"medium_Managed_Inference_visual_1fdaca592e","mime":"image/webp","name":"medium_Managed Inference 
visual.webp","path":null,"size":"341.84","width":750,"height":464,"sizeInBytes":574554},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Managed_Inference_visual_1fdaca592e.webp","hash":"thumbnail_Managed_Inference_visual_1fdaca592e","mime":"image/webp","name":"thumbnail_Managed Inference visual.webp","path":null,"size":"39.91","width":245,"height":151,"sizeInBytes":68288}},"hash":"Managed_Inference_visual_1fdaca592e","ext":".webp","mime":"image/webp","size":798.01,"url":"https://www-uploads.scaleway.com/Managed_Inference_visual_1fdaca592e.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2024-06-25T09:06:32.536Z","updatedAt":"2024-06-25T09:06:32.536Z"}}}},{"title":"Scaleway Private LB for Golem.ai Security Assets","path":"scaleway-private-lb-for-golemai-security-assets/","description":"At [Golem.ai](http://golem.ai/), our day-to-day decisions and choices justify the trust our customers place in us.\n\nSecurity is at the heart of each and every one of them.\n\nThat's why we distinguish between public and private traffic at the very heart of our applications and our Kubernetes Hosting Platform operated by [Scaleway](https://www.scaleway.com/en/), our French Cloud provider.\n\nThus, Public traffic, which by definition enables our customers to consume our applications, is strictly controlled by our powerful Web Application Firewall (WAF) and AntiDDoS, while private traffic is dedicated to the administration of our applications by our Teams, and is only accessible via a ZeroTrust VPN Tunnel connected to the Private LoadBalancer.\n\nThis gives us a 360° view of network activity upstream and downstream of the platform.\n\nIn terms of watertightness between our different STAGING, PREPRODUCTION, PRODUCTION and MONITORING environments, each has its own virtual private network ([VLAN Layer 2 within a VPC](https://www.scaleway.com/en/vpc/)).\n\nThis prevents private flows from 
being opened up to the outside world, and isolates flows between environments if necessary.\n\n### **Endpoint : Public / Private Flow**\n![](https://www-uploads.scaleway.com/Golem_LB_1_491e00bd39.webp)\n\n### **Operation**\n\nThe operations described below correspond to the needs of [Golem.ai](http://golem.ai/).\n**Adapt to your needs !**\n\nTo use a Private LoadBalancer between our application and the [Golem.ai](http://golem.ai/) Private Network, we'll need to perform the following steps in order.\n\n## 1 . Create a new Private LB with WebUI , CLI or Terraform\n\nClick **Load Balancers** in the Network section of the [Scaleway console](https://console.scaleway.com/) side menu. If you have not already created a Load Balancer, the product creation page is displayed. Otherwise, your list of existing Load Balancers displays. Then, choose \"Private Load Balancer\". [More info here](https://www.scaleway.com/en/docs/network/load-balancer/quickstart/#how-to-create-a-load-balancer), and below...\n\n```js\nInformations\n\n1. LB Name\n\n Fill in the name of the loadbalancer, respecting the nomenclature.\n private-lb-private-“env”-“domain without extension”\n eg . private-lb-prod-test for test.golem.ai\n\n2. LB Zone\n\n Select zone where application is localised (PARIS 1 by default)\n\n3. LB Model\n\n Select LB-S model.\n 200 Mbps of bandwidth is more than enough.\n\n4. LB Type\n\n Select Private Load Balancer\n \n5. Finalise\n\t\n Click on \"Create Load Balancer\"\n ```\n \n## 2 . 
Linking your application to PrivateLB\n![](https://www-uploads.scaleway.com/Golem_LB_3_853d4b9980.webp)\n\nTo use Private LB, we no longer need to use Ingress.\n\nTraffic is sent directly to the application's service in a private flow !\n\nScaleway Private LB ⇒ SVC K8s ⇒ Application\n\nIn our Service K8s object, we use the following annotations:\n\n```js\nservice.beta.kubernetes.io/scw-loadbalancer-externally-managed : true \nservice.beta.kubernetes.io/scw-loadbalancer-id : zone/lb_id\nservice.beta.kubernetes.io/scw-loadbalancer-zone : zone\n```\n\nPlease just note what CCM will or won’t manage in this case:\n • Won’t create/delete the LB.\n • Ignores the global configurations (such as size, private mode, IPs).\n • Won’t detach private networks attached to the LB.\n • won’t manage extra frontends and backends not starting with the service id.\n • Will refuse to manage a LB with a name starting with the cluster id.\n \n E.g.:\n ```js\n ---\napiVersion: v1\nkind: Service\nmetadata:\n name: test-lb-private\n namespace: ops-tools\n annotations:\n service.beta.kubernetes.io/scw-loadbalancer-externally-managed: \"true\"\n service.beta.kubernetes.io/scw-loadbalancer-id: \"fr-par-1/0d1b714f-2767-4937-a9e5-4399cfd45338\"\n service.beta.kubernetes.io/scw-loadbalancer-zone: fr-par-1\n labels:\n app.kubernetes.io/name: test-lb-private\n app.kubernetes.io/instance: test-lb-private\nspec:\n ports:\n - name: https\n port: 443 \u003c= IMPORTANT: 443 port for use of HTTPS certificate in Private LB\n protocol: TCP\n targetPort: 80\n - name: test\n port: 81\n protocol: TCP\n targetPort: 81\n selector:\n app.kubernetes.io/name: test-lb-private\n app.kubernetes.io/instance: test-lb-private\n type: LoadBalancer \u003c= IMPORTANT: LoadBalancer type for link\n```\n\nApply.\n\n\n## 3. 
Adjust LB settings\n\n![](https://www-uploads.scaleway.com/Golem_LB_4_12f94f0b40.webp)\n\nChange the name of the frontend using port 443 to the domain you want to use it on.\nthe “[golem.ai](http://golem.ai/)” extension is mandatory.\n\nIn the following, we'll show you how to use a different domain extension to access the application from a different environment.\n![](https://www-uploads.scaleway.com/Golem_LB_5_dcc21e7a3f.webp)\n\n![](https://www-uploads.scaleway.com/Golem_LB_6_31a1d1d712.webp)\n\nIf we are accessing the application from a different environment(s) than the one it was created in, we must add the private networks:\n![](https://www-uploads.scaleway.com/Golem_LB_7_9db1d2f301.webp)\n\n\n## 4 . Adding a DNS entry\n\nFor HTTPS access to the application from its environment or from other environments, we need to add an A entry.\n\nPrivate @IP for [test.golem.ai](http://test.golem.ai) : 172.16.0.53 (See the loadbalancer's private network @IP)\n![](https://www-uploads.scaleway.com/Golem_LB_8_9c90481326.webp)\n\n\n## 5 . 
Create Let's Encrypt certificate and associate it with frontend\n\nScaleway's Private Loadbalancer doesn’t support automatic management of let's encrypt certificates by CertManager.\n\nSo we’ve to manage their entire lifecycle and deploy them ourselves.\n\nTo avoid this, we've set up a mechanism using the APIs of Scaleway, CloudFlare and Certbot, as described below.\n\n**Requirements :**\n\n⇒ Available on the [Golem.ai](http://golem.ai/) Github Repository : https://github.com/golem-ai/privatelb-scaleway\n\nTo use this script, you'll need [certbot](https://certbot.eff.org/instructions) and the [certbot-dns-cloudflare](https://certbot-dns-cloudflare.readthedocs.io/en/stable/) plugin installed on the runtime environment.\n\nTwo files : \n\n- certmanager-privatelbscw.sh for LB certificate management\n- cloudflare.ini for Certbot ACME management on CloudFlare DNS Provider\n\nImportant fields are here :\n\nOn certmanager-privatelbscw.sh\n```js\n##Certbot##\nlets_server=https://acme-v02.api.letsencrypt.org/directory\nauth_email=XXXXXXXXX\ncloudflare_config=cloudflare.ini\n##Scaleway API Token##\nscw_token=XXXXXXXXX\n##URI : Application name without domain extension##\nhostname=(\"test\")\n##Domain extension : golem.ai , toto.ai , tata.ai , titi.ai##\ndefault_envs=(\"golem.ai\")\n##Multi Private Network : To access resources from an environment other than the one from which the SVC K8s was created##\nmultipns=(\"no\")\n##LBs##\n#test:0d1b714f-2767-4937-a9e5-4399cfd45338\nprivate_lbs=(\"0d1b714f-2767-4937-a9e5-4399cfd45338\")\nzones=(\"fr-par-1\")\n```\n\nlets_server : Fill in the Let's encrypt environment with which you wish to generate the certificate ([production](https://community.letsencrypt.org/t/acme-v2-production-environment-wildcards/55578) or [staging](https://letsencrypt.org/docs/staging-environment/)).\n\nauth_email : Fill in the email address we wish to use for certificate generation \n\nscw_token : Secret token for Scaleway API authentication\n\ndomains : Fill 
in the domain we created earlier (test)\n\nmultipns : Would we like to access our applications from a non-production environments ? (no / yes)\n\nprivate_lbs : Our Private LB ID\n\nZone : Area from which we have created our Private LB\n\nOn cloudflare.ini\n```js\n# Cloudflare API token used by Certbot\ndns_cloudflare_api_token = XXXXXXXXX\n```\ndns_cloudflare_api_token : Fill with Cloudflare API token\n\n**Execution** :\n\nThe information entered in the example will be used to access the test application via the url [test.golem.ai](http://test.golem.ai/) from the production environment.\n\nOnce completed, save and run the script!\n\n```js\n./certmanager-privatelbscw.sh \nxxxxxxxx.golem.ai\nissuer=C = US, O = Let's Encrypt, CN = R3\nAug 20 13:34:06 2024 GMT\n----\nxxxxxxxx.golem.ai\nissuer=C = US, O = Let's Encrypt, CN = R3\nAug 25 10:56:11 2024 GMT\n----\nxxxxxxxx.golem.ai\nissuer=C = US, O = Let's Encrypt, CN = R3\nAug 25 10:47:04 2024 GMT\n----\nxxxxxxxx.golem.ai\nissuer=C = US, O = Let's Encrypt, CN = R3\nAug 27 11:42:58 2024 GMT\n----\nxxxxxxxx.golem.ai\nissuer=C = US, O = Let's Encrypt, CN = R3\nAug 27 11:48:00 2024 GMT\n----\nCould not read certificate from \u003cstdin\u003e\n40D7FB7F107F0000:error:1608010C:STORE routines:ossl_store_handle_load_result:unsupported:../crypto/store/store_result.c:151:\nUnable to load certificate\ntest.golem.ai\n\n\n----\nSaving debug log to /var/log/letsencrypt/letsencrypt.log\nRequesting a certificate for test.golem.ai\n\nSuccessfully received certificate.\nCertificate is saved at: /etc/letsencrypt/live/test.golem.ai/fullchain.pem\nKey is saved at: /etc/letsencrypt/live/test.golem.ai/privkey.pem\nThis certificate expires on 2024-08-28.\nThese files will be updated when the certificate renews.\nCertbot has set up a scheduled task to automatically renew this certificate in the background.\n\n- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\nIf you like Certbot, please consider supporting our work 
by:\n * Donating to ISRG / Let's Encrypt: https://letsencrypt.org/donate\n * Donating to EFF: https://eff.org/donate-le\n- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 4887 100 1178 100 3709 1593 5016 --:--:-- --:--:-- --:--:-- 6612\n % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 4442 0 4442 0 0 10888 0 --:--:-- --:--:-- --:--:-- 10887\nb33097cf-4e49-456b-990b-1bc8a64caef3\nfd7c47b7-9214-4945-9bae-67f18bfe0070\n443\n{\"id\":\"fd7c47b7-9214-4945-9bae-67f18bfe0070\", \"name\":\"test.golem.ai\", \"inbound_port\":443, \"backend\":{\"id\":\"b33097cf-4e49-456b-990b-1bc8a64caef3\", \"name\":\"915d146e-fef6-4873-92f9-58cbea07ac4e_tcp_30715\", \"forward_protocol\":\"tcp\", \"forward_port\":30715, \"forward_port_algorithm\":\"roundrobin\", \"sticky_sessions\":\"none\", \"sticky_sessions_cookie_name\":\"\", \"health_check\":{\"port\":30715, \"check_delay\":5000, \"check_timeout\":5000, \"check_max_retries\":5, \"check_send_proxy\":false, \"transient_check_delay\":null, \"tcp_config\":{}}, \"pool\":[\"172.16.0.54\", \"172.16.0.7\", \"172.16.0.25\", \"172.16.0.9\", \"172.16.0.21\", \"172.16.0.18\", \"172.16.0.15\", \"172.16.0.31\", \"172.16.0.14\", \"172.16.0.17\", \"172.16.0.22\", \"172.16.0.27\"], \"lb\":{\"id\":\"0d1b714f-2767-4937-a9e5-4399cfd45338\", \"name\":\"private-lb-prod-test\", \"description\":\"\", \"status\":\"ready\", \"instances\":[], \"organization_id\":\"a7ec9296-de32-44d3-9f95-611cd8ee8e20\", \"project_id\":\"a7ec9296-de32-44d3-9f95-611cd8ee8e20\", \"ip\":[], \"tags\":[], \"frontend_count\":2, \"backend_count\":2, \"type\":\"lb-s\", \"subscriber\":null, \"ssl_compatibility_level\":\"ssl_compatibility_level_intermediate\", \"created_at\":\"2024-05-30T10:43:08.836695Z\", \"updated_at\":\"2024-05-30T12:57:03.854798Z\", \"private_network_count\":1, 
\"route_count\":0, \"region\":\"fr-par\", \"zone\":\"fr-par-1\"}, \"send_proxy_v2\":false, \"timeout_server\":10000, \"timeout_connect\":600000, \"timeout_tunnel\":600000, \"on_marked_down_action\":\"on_marked_down_action_none\", \"proxy_protocol\":\"proxy_protocol_none\", \"created_at\":\"2024-05-30T11:39:17.384840Z\", \"updated_at\":\"2024-05-30T11:39:17.384840Z\", \"failover_host\":null, \"ssl_bridging\":false, \"ignore_ssl_server_verify\":null, \"redispatch_attempt_count\":0, \"max_retries\":3, \"max_connections\":null, \"timeout_queue\":null}, \"lb\":{\"id\":\"0d1b714f-2767-4937-a9e5-4399cfd45338\", \"name\":\"private-lb-prod-test\", \"description\":\"\", \"status\":\"ready\", \"instances\":[{\"id\":\"9bf4b0ab-8fcd-405a-9c9c-9e246e03b057\", \"status\":\"pending\", \"ip_address\":\"\", \"created_at\":\"2024-05-30T10:23:15.724826Z\", \"updated_at\":\"2024-05-30T12:59:23.088049484Z\", \"region\":\"fr-par\", \"zone\":\"fr-par-1\"}], \"organization_id\":\"a7ec9296-de32-44d3-9f95-611cd8ee8e20\", \"project_id\":\"a7ec9296-de32-44d3-9f95-611cd8ee8e20\", \"ip\":[], \"tags\":[], \"frontend_count\":2, \"backend_count\":2, \"type\":\"lb-s\", \"subscriber\":null, \"ssl_compatibility_level\":\"ssl_compatibility_level_intermediate\", \"created_at\":\"2024-05-30T10:43:08.836695Z\", \"updated_at\":\"2024-05-30T12:57:03.854798Z\", \"private_network_count\":1, \"route_count\":0, \"region\":\"fr-par\", \"zone\":\"fr-par-1\"}, \"timeout_client\":600000, \"certificate\":{\"id\":\"aba65e0d-7391-4685-b8c3-f86b88c2ade5\", \"type\":\"custom\", \"status\":\"ready\", \"common_name\":\"test.golem.ai\", \"subject_alternative_name\":[], \"fingerprint\":\"\", \"not_valid_before\":\"2024-05-30T11:59:18Z\", \"not_valid_after\":\"2024-08-28T11:59:17Z\", \"lb\":{\"id\":\"0d1b714f-2767-4937-a9e5-4399cfd45338\", \"name\":\"private-lb-prod-test\", \"description\":\"\", \"status\":\"ready\", \"instances\":[], \"organization_id\":\"a7ec9296-de32-44d3-9f95-611cd8ee8e20\", 
\"project_id\":\"a7ec9296-de32-44d3-9f95-611cd8ee8e20\", \"ip\":[], \"tags\":[], \"frontend_count\":2, \"backend_count\":2, \"type\":\"lb-s\", \"subscriber\":null, \"ssl_compatibility_level\":\"ssl_compatibility_level_intermediate\", \"created_at\":\"2024-05-30T10:43:08.836695Z\", \"updated_at\":\"2024-05-30T12:57:03.854798Z\", \"private_network_count\":1, \"route_count\":0, \"region\":\"fr-par\", \"zone\":\"fr-par-1\"}, \"name\":\"test.golem.ai-2024-05-30\", \"created_at\":\"2024-05-30T12:59:21.483814Z\", \"updated_at\":\"2024-05-30T12:59:23.049179683Z\", \"status_details\":null}, \"certificate_ids\":[\"aba65e0d-7391-4685-b8c3-f86b88c2ade5\"], \"created_at\":\"2024-05-30T11:39:17.735956Z\", \"updated_at\":\"2024-05-30T12:59:23.045618828Z\", \"enable_http3\":true}\n```\n\n![](https://www-uploads.scaleway.com/Golem_LB_9_1379db2d4e.webp)\n\nThe certificate has been created and is associated with the frontend on which we want to access the application on port 443.\n\nRenewal only within 30 days of expiry.\n\n**Access to the application :**\n\n⇒ [https://test.golem.ai/](https://test.golem.ai/)\n\nOnce again, [test.golem.ai](http://test.golem.ai/) is only accessible from our ZeroTrust [Golem.ai](http://golem.ai/) Private Network.\nOutside this network, the application doesn’t exist (172.16.0.53 / non-routable IP address).\n\n```js\nnslookup test.golem.ai\nServer:\t\t127.0.0.53\nAddress:\t127.0.0.53#53\n\nNon-authoritative answer:\nName:\ttest.golem.ai\nAddress: 172.16.0.53\n```\n","createdAt":"2024-06-17T09:29:35.887Z","updatedAt":"2024-08-27T09:35:56.080Z","publishedAt":"2024-06-18T07:16:13.305Z","locale":"en","tags":"Load Balancer\nManaged Services\nHow to","popular":false,"articleOfTheMonth":false,"category":"Deploy","timeToRead":5,"excerpt":"How can Scaleway Load Balancers best be deployed to manage security assets? 
Kevin Baude, Senior Platform Engineer at Golem.ai, explains in this exclusive blogpost!","author":"Kevin Baude","h1":"Scaleway Private LB for Golem.ai Security Assets","createdOn":"2024-06-17","image":{"data":{"id":2950,"attributes":{"name":"Load-Balancers-Card.webp","alternativeText":null,"caption":null,"width":1216,"height":752,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Load_Balancers_Card_8ddffabfc0.webp","hash":"large_Load_Balancers_Card_8ddffabfc0","mime":"image/webp","name":"large_Load-Balancers-Card.webp","path":null,"size":885.66,"width":1000,"height":618},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Load_Balancers_Card_8ddffabfc0.webp","hash":"small_Load_Balancers_Card_8ddffabfc0","mime":"image/webp","name":"small_Load-Balancers-Card.webp","path":null,"size":221.65,"width":500,"height":309},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Load_Balancers_Card_8ddffabfc0.webp","hash":"medium_Load_Balancers_Card_8ddffabfc0","mime":"image/webp","name":"medium_Load-Balancers-Card.webp","path":null,"size":505.98,"width":750,"height":464},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Load_Balancers_Card_8ddffabfc0.webp","hash":"thumbnail_Load_Balancers_Card_8ddffabfc0","mime":"image/webp","name":"thumbnail_Load-Balancers-Card.webp","path":null,"size":"32.60","width":245,"height":152}},"hash":"Load_Balancers_Card_8ddffabfc0","ext":".webp","mime":"image/webp","size":279.08,"url":"https://www-uploads.scaleway.com/Load_Balancers_Card_8ddffabfc0.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2023-11-15T06:04:15.452Z","updatedAt":"2023-11-15T06:04:15.452Z"}}}},{"title":"The Cloud and Retail in 2024 and beyond","path":"the-cloud-and-retail-in-2024-and-beyond/","description":"\"Digitally augmented and profitable physical outlets represent the foundations of the future of retail\", 
according to Gartner® Research (\"Industry Positioning: Map Your Retail Technology Value Statements to Customer and Associate Expectations\", Gartner, July 5, 2023). \n\nIn other words, despite the democratization of ecommerce in recent years, physical stores remain the most profitable type of retail; where consumers depend most. Yet, according to Gartner, \"many retail product leaders fail to capture the full value of effective positioning\" (same source).\n\n\n## Retail players' cloud selection criteria\n\nWhy are modern retailers increasingly turning to the cloud today? Because the cloud \"facilitates access to operational and inventory data,\" according to [Comarch](https://www.comarch.com/trade-and-services/ict/news/cloud-computing-in-retail-benefits-market-insights-and-future-predictions/), \"which increases the level of data security, improves the user experience, optimizes inventory as well as disaster management, thereby increasing profitability.\"\n\nAmong the main benefits of the cloud for retail, Comarch cites **inventory optimization**, especially predictive and real-time; **enhanced security** thanks to techniques such as encryption; **improved customer experience**, thanks in particular to behavioral analysis or offer personalization; the ability to **manage prices dynamically**; **omnichannel experience management** (blending e-commerce with the physical store experience); and last but not least, **scalability**. Indeed, cloud models can automatically adjust available resources according to fluctuating demand levels (cf. 
\"the Black Friday effect\", below).\n\n\n## Retailers' cloud product needs\n\nWhat are the main qualities that retailers are looking for when it comes to cloud products?\n\n**Scalability and elasticity**: the \"Black Friday effect\", when an ecommerce site suddenly moves to a high level of traffic, is often evoked to illustrate how tools such as auto-scaling, [load balancers](https://www.scaleway.com/en/load-balancer/) and especially [Kubernetes](https://www.scaleway.com/en/kubernetes-kapsule/) can help in such cases. By rapidly, and often automatically, allocating additional resources to its cloud infrastructure, managed services enable ecommerce platforms to manage fluctuating traffic, dynamically increasing or reducing resources.\n\n**Data security and compliance**: Security is a key criterion for the entire retail industry, since every transaction contains sensitive consumer data. Protecting customer data is therefore paramount. This involves encryption, access controls and regular security audits. \n\n**Network security and DDoS protection**: In addition to customer data, infrastructure security is also paramount. This includes network security measures such as firewalls, intrusion detection/prevention systems and securing API endpoints. DDoS mitigation strategies are essential to protect against large-scale attacks.\n\n**High availability and disaster recovery**: E-commerce sites need to be up and running without interruption. This requires redundancy, backup systems and strategies for rapid recovery from potential failures.\n\n**Performance optimization**: It's important to balance resource utilization to ensure fast loading times and smooth operation. 
This includes database optimization, use of content delivery networks (CDNs) for faster load times, and efficient caching strategies.\n\n**Microservices and containerization**: Managing a complex architecture of small, independent services involves orchestrating containers, discovering services and ensuring efficient, secure inter-service communication.\n\n**Data storage and management**: Distributors need to manage large volumes of data. They therefore need solutions such as [object storage](https://www.scaleway.com/en/object-storage/), databases (SQL and NoSQL) and data warehousing. This aspect also includes real-time data processing and big data analysis capabilities.\n\n**API management and integration**: It's important to be able to manage numerous APIs for different services, ensuring that they are well documented, version-controlled and securely exposed. This also involves the integration of various third-party services and internal systems.\n\n\n## AI: The future of retail?\n\nArtificial intelligence is a topic that retailers are naturally taking a close look at right now. Indeed, according to Gartner, \"by 2027, 90% of leading retail players will execute at least one generative AI deployment that will transform their business\" (source: \"Top Retail CIO Industry Trends for Increasing Customer Centricity in 2024\", Gartner, January 2024). 
More than half of them plan to dedicate investment, and teams, according to the same report : \n\n![Gartner Retail and AI graph](https://www-uploads.scaleway.com/Gartner_Retail_and_AI_graph_da50f141da.webp)\n\nAmong the many examples of AI augmenting the retail experience, let's cite three major companies:\n- **Carrefour** is currently experimenting with three cases based on ChatGPT: an advisor to accompany customers through the purchasing cycle on carrefour.fr; AI to generate product information sheets for the Carrefour range, both in-store and on the website; and finally, AI to optimize employee experience, to provide the right support to product purchasing teams, for example. Behind these deployments is access to GPT-4, OpenAI's latest model, via Microsoft Azure.\n- **IKEA** trained the Stable Diffusion image-generation model with its catalogs from the 70s and 80s. The result was a new range of particularly innovative and creative products. These prototypes were created under the aegis of the Swedish brand's innovation laboratory, SPACE10.\n- **Estée Lauder** relies on PaLM2, Google's LLM provided through Google Cloud, to monitor consumer behavior and purchasing habits through social networks, e-commerce platforms, call centers, among other data sources.\n\n\nIs that enough to revolutionize the sector? Let's not be too hasty! \n\nAI, through the cloud, however, remains the area that could well transform retail in the future. Especially if it helps solve the sector's number one dilemma: maximizing the profitability of physical stores by leveraging digital. Duly noted!\n","createdAt":"2024-06-17T08:55:25.686Z","updatedAt":"2024-07-04T16:17:45.011Z","publishedAt":"2024-06-17T08:56:34.046Z","locale":"en","tags":"Retail","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":4,"excerpt":"What do retailers need from the cloud? And how does AI fit into their plans? 
Let's find out...","author":"James Martin","h1":"The Cloud and Retail in 2024 and beyond","createdOn":"2024-06-14","image":{"data":{"id":3758,"attributes":{"name":"RetailEcommerce-Illustration-1920X1080.webp","alternativeText":null,"caption":null,"width":2560,"height":1440,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Retail_Ecommerce_Illustration_1920_X1080_432585c587.webp","hash":"large_Retail_Ecommerce_Illustration_1920_X1080_432585c587","mime":"image/webp","name":"large_RetailEcommerce-Illustration-1920X1080.webp","path":null,"size":"409.65","width":1000,"height":563,"sizeInBytes":760880},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Retail_Ecommerce_Illustration_1920_X1080_432585c587.webp","hash":"small_Retail_Ecommerce_Illustration_1920_X1080_432585c587","mime":"image/webp","name":"small_RetailEcommerce-Illustration-1920X1080.webp","path":null,"size":"119.42","width":500,"height":281,"sizeInBytes":217247},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Retail_Ecommerce_Illustration_1920_X1080_432585c587.webp","hash":"medium_Retail_Ecommerce_Illustration_1920_X1080_432585c587","mime":"image/webp","name":"medium_RetailEcommerce-Illustration-1920X1080.webp","path":null,"size":"238.48","width":750,"height":422,"sizeInBytes":441753},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Retail_Ecommerce_Illustration_1920_X1080_432585c587.webp","hash":"thumbnail_Retail_Ecommerce_Illustration_1920_X1080_432585c587","mime":"image/webp","name":"thumbnail_RetailEcommerce-Illustration-1920X1080.webp","path":null,"size":"37.73","width":245,"height":138,"sizeInBytes":61551}},"hash":"Retail_Ecommerce_Illustration_1920_X1080_432585c587","ext":".webp","mime":"image/webp","size":523.3,"url":"https://www-uploads.scaleway.com/Retail_Ecommerce_Illustration_1920_X1080_432585c587.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata
":null,"createdAt":"2024-06-14T16:34:34.247Z","updatedAt":"2024-11-27T10:17:35.130Z"}}}},{"title":"Quantum computing in 2024: The State of Play","path":"quantum-computing-in-2024-the-state-of-play/","description":"_Quantum computing is one of tech’s hottest buzzwords right now. But what is it, exactly? Why should we care? And where does the cloud fit in? Have no fear, our in-house expert is here… over to Valentin Macheret, R\u0026D Engineer for Scaleway Labs!_\n\n\n## Why does quantum computing exist?\n\nQuantum computing arose from the intersection of two fields: the need for better knowledge about information processing at the quantum level, and the growing need for computing power to solve complex problems such as materials simulation and optimization.\n\nQuantum computing is already vast and will continue to develop over the coming years. Here, we are concerned only with the computational aspect. \n\nOne key property of quantum mechanics, superposition, allows a quantum system, such as a photon or an atom, to be in 2 states at the same time. This quantum information-carrying system is called a *quantum bit, or qubit*, and is the basis of all calculations. \n\nThis peculiarity in the qubit enables quantum computers to have tenfold parallelism (doing several things at the same time), making it possible to explore more solutions simultaneously than with binary, bit-based computers. \n\nImagine having to get out of a maze. In a classical program, you'd sequentially choose left or right at each intersection and keep track of the path taken until you found the exit. *In the quantum version of this program, you can explore both left and right at the same time*. The time needed to find the solution is exponentially reduced. That's what quantum computing is all about! 
To find solutions to problems that would be too costly to calculate in the classical way.\n\n![Quantum maze](https://www-uploads.scaleway.com/Quantum_maze_3130160a2c.webp)\n\n\nDo such computers really exist? And can they [break the algorithm that secures our current communications](https://arxiv.org/abs/quant-ph/9508027), RSA-2048 encryption?\n\n\n## State of play and current limits\n\nYes, quantum computers do exist. But breaking RSA-2048 encryption isn't just around the corner. Let me explain. \n\nAll the players building computers (Quandela, IBM, IonQ, Pasqal, DWave...) are focusing on their own technology. Indeed, quantum hardware can be based on many different “quantum support”: photons, superconducting materials, trapped ions, neutral atoms, annealing... Each of these approaches has its own advantages and challenges. \n\nLet's not forget that nothing is simple in this field, and many approaches remain to be explored. Today, it would be pretentious to say which technology will come out on top.\n\n\n### Quantum computer assessment \n\nTo qualify the maturity of a quantum computer, we would be tempted to look at available qubits count. But that's only part of the story. In 2000, David Di Vincenzo, an engineer at IBM, proposed a set of 5 criteria for assessing the maturity of a quantum computer.\n\nHere are Di Vincenzo's criteria, and the reality on the ground in 2024:\n![Di Vincenzo Quantum criteria](https://www-uploads.scaleway.com/Di_Vincenzo_Quantum_criteria_8f64ae8274.webp)\n\n\nToday's major challenges concern the fidelity (i.e. quality) of qubits and the operations applied to them.\n\nTo quantify the power of a quantum computer, several metrics arose. These include [Quantum Volume](https://en.wikipedia.org/wiki/Quantum_volume) (IBM, 2020), which takes into account the number of qubits and their fidelity, or the [number of operations per second](https://arxiv.org/pdf/2110.14108) (IBM, 2021). 
The reality, however, is that few players are taking the time to measure and publish these metrics, as the priority remains to evolve and enhance the hardware.\n\n\n### Errors are everywhere\n\nOne of the biggest challenges facing all quantum computer manufacturers is to reduce the qubits error rate during operations. Errors can be caused by unwanted decoherence, where the state of a qubit can be altered by interaction with the environment (temperature, vibration, acoustic waves, electric fields, etc.), or by simple loss of the quantum support (a photon “lost” in a fiber, for example). \n\n![Errors everywhere](https://www-uploads.scaleway.com/Errors_everywhere_e3336f8c20.webp)\n\nThis highlights a big paradox in quantum computing: *keep isolating qubits from their environment as much as possible... while at the same time keep seeking to control them to perform operations on them.*\n\nTwo (non-exclusive) solutions stand out: 1) improving hardware to better isolate quantum information from disturbances, and 2) quantum error correction (QEC). Alice \u0026 Bob, a French quantum player, takes [a mixed approach](https://alice-bob.com/blog/more-quantum-computing-with-fewer-qubits-meet-our-new-error-correction-code/): creating superconducting qubits with natural resistance to bit-flip, a common type of error. \n\nThe key idea behind QEC is simple: use more qubits for information redundancy, and to apply correction operations in the event of errors. \n\nThat's why it's important to ask when a new quantum computer is announced: how good is qubit fidelity? How many will I need to use for error correction? \n\nToday, QEC is written manually into the quantum algorithms. It's a tedious task that requires dedicated engineers. 
QEC can be so cumbersome that it considerably reduces the number of “useful” qubits (information carriers), making it impossible to run certain algorithms.\n\nThere are emerging paradigms with incorporated QEC such as [measure-based computation](https://en.wikipedia.org/wiki/One-way_quantum_computer) (MBQC) or [fusion-based computation](https://www.nature.com/articles/s41467-023-36493-1) (FBQC). But these approaches are still theoretical and, to date, no quantum computer has been able to implement them successfully. \n\n\n### So, when will quantum computing be ready?\n\nGetting back to the RSA-2048 example again, according to [this article](https://quantum-journal.org/papers/q-2021-04-15-433/) published in 2021, at least 20 million physical qubits (including correction) would be needed to factor a number large enough to break the protocol. In 2024, there are at best a few hundred qubits on some computers.\n\n\n[An article from 2022](https://arxiv.org/pdf/2212.12372) estimates that about 372 near-perfect qubits (99.9% fidelity) would be enough, albeit with a much slower hybrid approach... Prime number factorization estimations are commonplace, so we must be cautious in the absence of consensus.\n\nIt will take at least another 5-7 years before we reach fault-tolerant quantum computing (FTQC). This transitional period before logical qubits (i.e. robust to errors) can be used is known as the Noisy Intermediate Scale Quantum (NISQ) era.\n\n\n## The place of quantum emulation in the ecosystem\n\nThe key idea behind quantum emulation is simple: use binary power to mimic the behavior of interactions in a real quantum computer (superposition, entanglement, decoherence...). 
There are emulators that specifically simulate a particular type of hardware (such as Quandela's exQalibur for photonics, or Pasqal's Pulser for neutral atoms).\n\nAlmost all emulators offer an error-free mode (ie: logical mode), as well as a mode with simulated quantum errors mentioned above (ie: physical mode).\n\nQuantum emulation is therefore positioned as an alternative way to explore quantum computing and prototype algorithms without having to worry about hardware constraints, the availability of a quantum computer or computational errors (see figure 1).\n\n![Emulation into the quantum ecosystem](https://www-uploads.scaleway.com/Emulation_into_the_quantum_ecosystem_873e801355.webp)\n\n\nHowever, emulation has one major constraint: memory consumption, which doubles with each additional qubit. Storing the state vector of an N-qubit quantum system means storing 2^N x 8 bytes in memory. That's 8MB for 20 qubits, 8GB for 30 qubits... By 2024, emulators available in the cloud offer up to 40 qubits, and this requires an entire supercomputer of GPUs.\n\n\n\n## A first step into Quantum Computing\n\nDeveloping quantum algorithms in 2024 is still an arduous task. It still means directly manipulating quantum logic gates, far from [Boolean algebra](https://en.wikipedia.org/wiki/Boolean_algebra). Thus, some of these gates are very specific to a particular type of hardware.\n\nSDKs such as Cirq, myQLM, CUDA-Q, Qiskit or Perceval exist to design, build and run quantum algorithms on real or simulated quantum computers.\n\nHere's a trivial example, using Qiskit, of a quantum circuit producing [a GHZ state](https://en.wikipedia.org/wiki/Greenberger%E2%80%93Horne%E2%80%93Zeilinger_state) (3+ superposed and entangled qubits). 
A GHZ state is a common pattern, whether in QEC, quantum telecommunication ([QKD](https://en.wikipedia.org/wiki/Quantum_key_distribution)) or in algorithms such as [Grover's](https://en.wikipedia.org/wiki/Grover%27s_algorithm).\n\n```python\nfrom qiskit import QuantumCircuit\nfrom qiskit_aer import AerSimulator\n\n# The simulation object to emulate a quantum processor\nbackend = AerSimulator()\n\n# Define a quantum circuit that produces a 3-qubit GHZ state.\nqc = QuantumCircuit(3)\nqc.h(0) # Put the 1st qubit in superposed state\nqc.cx(0, 1) # Entangle the 1st qubit with the 2nd\nqc.cx(0, 2) # Entangle the 1st qubit with the 3rd… so the 2nd with the 3rd\nqc.measure_all() # Measure all the axis of the qubit\n\n# Execute the circuit on the simulator and retrieve the result\nresult = backend.run(qc, shots=1000).result() # Means that the circuit will be run 1000 times\n\nprint(result.get_counts())\n```\n\n\nIn this example, only 3 quantum gates (1 Hadamard gate, 2 Control-X gates) are applied. Compared with a binary processor, this would be equivalent to having applied as many logic gates (AND, OR, XOR...), so very limited. Although this is constantly changing, developing complete algorithms requires positioning thousands / millions of these gates. In 2024, quantum exploration is limited to a few dozen, sometimes hundreds of quantum gates. This is mainly due to the hardware limitations (error rate or huge memory consumption) and the lack of advanced high level tools.\n\nSome SDKs go a step further and already offer libraries of very specific quantum algorithms. 
Well-known algorithms are eigensolver variations or graph optimizations (here's an example of [QUBO with Perceval](https://perceval.quandela.net/docs/notebooks/QUBO.html)).\n\n\n## Quantum at Scaleway\n\nBefore quantum computing can become seamlessly integrated into our technological landscape, there are still 2 steps to be taken:\nIncrease the reliability of quantum computers, with fewer calculation errors.\nIncrease their accessibility, reduce downtime and cut costs.\n\nScaleway's [Quantum as a Service](https://labs.scaleway.com/en/qaas/) (QaaS) addresses these issues by offering a suite of quantum emulators powered by a variety of hardware, from CPUs to GPU clusters.\n\nHere's a list of emulators proposed by Scaleway's QaaS offering:\n\n[Qsim](https://github.com/quantumlib/qsim): Developed by QuantumLib, it excels in state vector representation for maximum performance.\n\n[Aer](https://github.com/Qiskit/qiskit-aer): maintained by Qiskit, it is ideal for complex representations such as density matrices and tensor networks. It also benefits from multi-GPU support.\n\nexQalibur: Developed by [Quandela](https://www.quandela.com/), this proprietary \u0026 optimized multi-GPU emulator is dedicated to linear-optical quantum computing (LOQC). It simulates single photons stored in modes (small optical fibers) and optical operations (beam splitters and phase shifters).\n\n\nThese emulated quantum processors (QPUs) can be accessed via Perceval, developed by Quandela, and Qiskit, maintained by IBM. Let's take our example with Qiskit to use Scaleway's QaaS service:\n\n```python\nfrom qiskit import QuantumCircuit\nfrom qiskit_scaleway import ScalewayProvider\n\nprovider = ScalewayProvider(\n project_id=\"\u003cyour-scaleway-project-id\u003e\",\n secret_key=\"\u003cyour-scaleway-secret-key\u003e\",\n)\n\n# Retrieve a backend object with Aer emulation on an H100 GPU to run algorithms\n# Need to run over a Qsim emulator? 
Just change to “qsim_simulation_h100”\nbackend = provider.get_backend(\"aer_simulation_h100\")\n\n# Define a quantum circuit that produces a 3-qubit GHZ state.\nqc = QuantumCircuit(3)\n...\nqc.measure_all()\n\n# Create and send a job to the target session\nresult = backend.run(qc, shots=1000).result()\n...\n```\n\nA QPU session emulated by Aer will immediately be created and will be kept for several minutes for the next runs.\n\nTo quantify this cloud offer, [benchmarks were carried out](https://labs.scaleway.com/en/qaas/#benchmark) to compare the [Quantum Volume](https://medium.com/qiskit/what-is-quantum-volume-anyway-a4dff801c36f) (calculating the largest possible square circuit) of the different platforms. Here is the benchmark result for Aer on CPU and GPU setups.\n\n![Quantum benchmark](https://www-uploads.scaleway.com/Quantum_benchmark_397714faff.webp)\n\n\nWe can see that the available platforms can easily exceed 30 logical qubits, reaching up to 36 qubits* (37 if we switch to single precision). 
Enough to break RSA-128, based [on this algorithm](https://arxiv.org/pdf/2212.12372)!\n\nIn conclusion, Scaleway’s Quantum as a Service (QaaS) makes it possible to:\nExplore quantum programming at lower cost: Scaleway offers an affordable service for researchers and developers wishing to learn about quantum programming without worrying about hardware errors or investing in expensive infrastructure.\n\nAvoid waiting for access to commercially available quantum computers: with waiting times of up to several weeks to access real quantum computers, quantum emulation saves precious time, particularly during testing and development phases.\n\nUnlock the limitations of a local computer: Scaleway's quantum emulation platforms enable the execution of quantum algorithms on a much larger scale than is possible on conventional computers.\n\nWith the emergence of quantum computing, which is radically transforming the way complex problems are formulated and solved, learning, democratizing and simplifying access to quantum algorithms are crucial issues. Scaleway is committed to easing this transition, by making quantum innovation accessible to a wider audience and supporting the technological breakthroughs that will push quantum computing a step further.\n\n_*This number may change depending on the QaaS service platforms._\n\n\n\u003cbr\u003e\u003cbr\u003e\n\n_Curious to find out more? 
Here’s some further reading:_\n\n- _[Quantum Computing for the Very Curious](https://quantum.country/qcvc) (2019)_\n- _[Understanding Quantum Computing](https://www.oezratty.net/wordpress/2023/understanding-quantum-technologies-2023/) by Olivier Ezratty (2023)_\n- _[The Beginning of Quantum Computing](https://www.youtube.com/watch?v=bVO5wdnicD4) by Pascale Senellart, in French (2021)_\n- _[Scaleway’s official QaaS page](https://labs.scaleway.com/en/qaas/) and [console integration](https://console.scaleway.com/qaas)_\n- _[Qiskit webpage](https://www.ibm.com/quantum/qiskit) and [Qiskit Scaleway package](https://github.com/scaleway/qiskit-scaleway)_\n- _[Perceval webpage](https://perceval.quandela.net/) and [Perceval Scaleway integration](https://perceval.quandela.net/docs/providers.html#scaleway)_\n","createdAt":"2024-06-04T06:41:37.998Z","updatedAt":"2024-06-18T07:51:30.400Z","publishedAt":"2024-06-05T07:11:27.438Z","locale":"en","tags":"Quantum\nR\u0026D","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":6,"excerpt":"What is quantum computing, exactly? Why should we care? And where does the cloud fit in? 
Have no fear, our in-house expert is here…","author":"Valentin Macheret","h1":"Quantum computing in 2024: The State of Play","createdOn":"2024-06-04","image":{"data":{"id":3596,"attributes":{"name":"Content_Quantum-as-a-service-Illustration-Content.webp","alternativeText":null,"caption":null,"width":950,"height":550,"formats":{"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Content_Quantum_as_a_service_Illustration_Content_bd5d98be46.webp","hash":"small_Content_Quantum_as_a_service_Illustration_Content_bd5d98be46","mime":"image/webp","name":"small_Content_Quantum-as-a-service-Illustration-Content.webp","path":null,"size":"84.77","width":500,"height":289,"sizeInBytes":143559},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Content_Quantum_as_a_service_Illustration_Content_bd5d98be46.webp","hash":"medium_Content_Quantum_as_a_service_Illustration_Content_bd5d98be46","mime":"image/webp","name":"medium_Content_Quantum-as-a-service-Illustration-Content.webp","path":null,"size":"185.61","width":750,"height":434,"sizeInBytes":305080},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Content_Quantum_as_a_service_Illustration_Content_bd5d98be46.webp","hash":"thumbnail_Content_Quantum_as_a_service_Illustration_Content_bd5d98be46","mime":"image/webp","name":"thumbnail_Content_Quantum-as-a-service-Illustration-Content.webp","path":null,"size":"23.68","width":245,"height":142,"sizeInBytes":41662}},"hash":"Content_Quantum_as_a_service_Illustration_Content_bd5d98be46","ext":".webp","mime":"image/webp","size":78.03,"url":"https://www-uploads.scaleway.com/Content_Quantum_as_a_service_Illustration_Content_bd5d98be46.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2024-06-04T06:46:56.696Z","updatedAt":"2024-06-04T06:46:56.696Z"}}}},{"title":"Your RAG-powered AI app in 50 lines of 
code!","path":"your-rag-powered-ai-app-in-50-lines-of-code/","description":"## Introduction\n\nThis article continues the journey we embarked on a few weeks back with our last _practical AI_ blog post: “[Ollama: from zero to running an LLM in less than 2 minutes!](https://www.scaleway.com/en/blog/ollama-from-zero-to-running-an-llm-in-less-than-2-minutes/)” where we leveraged Ollama to procure and serve an LLM in a virtual machine equipped with a GPU, Scaleway's [H100 PCIe GPU Instance](https://www.scaleway.com/en/h100-pcie-try-it-now/). After going through that article you may have been inspired to integrate AI capabilities into your own applications (Did you? Let me know via the [Scaleway Community](https://scaleway-community.slack.com)!) and you may have realized that even though thousands of possibilities opened up for you, there may still be some scenarios missing in the picture, such as the ability to make an LLM interact with your data. This is where RAG, the focus of this article, comes in.\n\nThe term RAG stands for Retrieval-augmented generation, which is a technique that **_augments_** the usefulness of an LLM by enabling it to **_generate_** responses based on an extended set of information you provide. This “extended set of information” may come in the form of basically any type of structured (your typical database or a spreadsheet) or unstructured data (text documents, or even media files) and needs to be further processed and stored in a specific way such that the model can easily find patterns within it, in order to **_retrieve_** the right information. If such information cannot be found, instead of confidently providing a _hallucinated_ answer, the LLM can be instructed to simply say “_Hey, good question! 
I don't know ¯\\\\_(ツ)_/¯_” or another response you consider appropriate for your use case.\n\nThe work we did when using Ollama to run an LLM laid the foundations we need for this new blog post where we will use that same hands-on approach to harness the power of AI, and thus we will focus only on the really important concepts and leave the more complex ones for later. This also means we will continue to use Python, and I'll assume you have an Instance running your preferred LLM with Ollama.\n\n\n## Hands-on with RAG\n\nThe importance of RAG lies in its ability to improve an LLM's accuracy and reliability. LLMs by themselves rely entirely on the knowledge gained through their training phase to generate output, which can sometimes result in inaccurate or outdated responses. RAG addresses this issue by incorporating external sources of information into the response generation pipeline with the added benefit of not needing to update or “fine-tune” the original model — a process that might require large amounts of compute power —, making it a simpler and more efficient approach.\n\nWe will build a simple app that will use an LLM (Llama2:70b) to go through Scaleway's public documentation repository and try to find the answer to an input question provided by the user. The base example has 50 lines of code, and we will see how we can improve its functionality by adding a few more here and there.\n\nWe will use [LlamaIndex](https://docs.llamaindex.ai/en/stable/) “a simple, flexible data framework for connecting custom data sources to large language models” — as they describe it — as our main tool to achieve our goal. We will also make use of an 'embedding model' that will transform documents — or chunks of data — into a numerical representation (vectors would be the fancy/proper term) based on their attributes. 
And finally, a 'Vector Database' that will store the numerical representations of our documents, for easier consumption by the whole pipeline.\n\n\n### Architectural Overview\n\nThe system looks something like this:\n\n\u003ca href=\"https://www-uploads.scaleway.com/llamaindex_rag_ollama_qdrant_c3d2205b78.webp\" target=\"_blank\" rel=\"noopener\" style=\"text-decoration: none\"\u003e\n![A diagram showcasing the system's architecture: files are loaded into a 'vector database', this database is queried with the user prompt, and context documents that match are returned to the LLM, which will in turn generate a response to be sent back to the user.](https://www-uploads.scaleway.com/llamaindex_rag_ollama_qdrant_c3d2205b78.webp)\n\u003c/a\u003e\n\n### Setup\n\nAll the commands and code are meant to be run inside your GPU Instance. Feel free to [check the documentation](https://www.scaleway.com/en/docs/compute/gpu/how-to/create-manage-gpu-instance/#how-to-connect-to-a-gpu-instance) if you need a refresher.\n\nYou can use your preferred text editor, in my case I still like Visual Studio Code and its [Remote Development](https://code.visualstudio.com/docs/remote/remote-overview) feature lets me connect to my instance by logging in via SSH. It automatically installs a server on my Instance that allows me to edit and run code that lives in the remote Instance just the same way as I'd do it on my local environment. But if you know how to exit Vim, by all means, feel free to use it.\n\n\n#### The environment\n\nIt's always a good idea to set up a virtual environment for your project, and I like to go simple, so I default to virtualenv:\n\n```bash\n\nmkdir rag-example\n\ncd rag-example\n\napt update\n\napt install python3.10-venv -y\n\npython3 -m venv .venv\n\nsource .venv/bin/activate\n\n```\n\n\n#### Running the Vector Database\n\nThere are many “Vector Databases'' to choose from nowadays. 
[Qdrant is an open source](https://github.com/qdrant/qdrant) one that's written in Rust, has many official client libraries, and can be easily run via docker:\n\n```bash\n\ndocker run -d -p 6333:6333 --name qdrant qdrant/qdrant \n\n```\n\nAnd if for some reason you decide to use a different Vector Database, LlamaIndex makes it easy for you to migrate with a few tweaks.\n\n\n#### Dependencies\n\nWe'll need to install the LlamaIndex package, our open source workhorse:\n\n```bash\npip install llama-index\n```\n\nAnd while we're at it, why not install all the other dependencies?\n\n```bash\npip install llama-index-llms-ollama llama-index-embeddings-huggingface llama-index-vector-stores-qdrant qdrant-client\n```\n\n\u003e `llama-index-llms-ollama` is the LlamaIndex wrapper that allows us to use a model served by Ollama\n\n\u003e `llama-index-embeddings-huggingface` is the LlamaIndex wrapper for HuggingFace embedding models (more on those later on)\n\n\u003e `llama-index-vector-stores-qdrant` is the LlamaIndex 'Vector Store' integration for Qdrant\n\n\u003e `qdrant-client` is the official Python Qdrant library\n\n\n#### Getting the “data source”\n\nAs mentioned before, this example will use the Scaleway Documentation as its data source. Scaleway docs are maintained by a dedicated team of professional technical writers, but they're also a collaborative effort that the community can contribute to. That's why it is available as an open source [repository on GitHub](https://github.com/scaleway/docs-content). For this example, we will only clone the main branch with a depth of 1\n\n```bash\ngit clone https://github.com/scaleway/docs-content.git --depth 1\n```\n\nIf you explore the repo, you'll find several directories and files linked to the deployment process, which are not important to us. The content we're after lives inside the files with the **mdx** extension. 
These MDX files use the [Markdown syntax](https://spec.commonmark.org/0.31.2/) and have a Frontmatter header including associated metadata (title, description, categories, tags, publishing date, etc).\n\n\n### The code\n\n\n#### Imports, constants, and settings\n\nDon't focus too much on the _imports_, we're simply bringing the packages we installed before along with a couple more included in the standard library.\n\nAfter the imports we set 3 constants: The local directory where we want to store the IDs and hashes associated with the documents we will feed to our vector database, the location of our documents (the Scaleway documentation), and the name of the collection we want to use for this app in our vector database — think of a database name.\n\n```python\nimport sys\nfrom pathlib import Path\nfrom llama_index.core import Settings, StorageContext, VectorStoreIndex, SimpleDirectoryReader\nfrom llama_index.embeddings.huggingface import HuggingFaceEmbedding\nfrom llama_index.llms.ollama import Ollama\nfrom llama_index.vector_stores.qdrant import QdrantVectorStore\nimport qdrant_client\n\nSTORAGE_DIR= \"./storage\"\nDOCS_DIR = \"./docs-content\"\nCOLLECTION_NAME = \"scw_docs\"\n\nllm = Ollama(model = \"llama2:70b\") # Using a local LLM served by Ollama\nembed_model = HuggingFaceEmbedding(model_name = \"sentence-transformers/multi-qa-distilbert-dot-v1\", embed_batch_size = 768, device = \"cuda\") # Assigning an embedding model from HuggingFace\n\nSettings.llm = llm\nSettings.embed_model = embed_model\n```\n\nThe next few lines define the 2 models we will use, the LLM, and the embeddings model. Finally, Lammaindex's `Settings.llm` and `Settings.embed_model` will set those values globally within this app's context.\n\n\n##### Embeddings Model\n\nWe've been mentioning embeddings and vector databases for a while now, and it's time to spend a few lines making sure we have a basic understanding of their relationship. 
As mentioned before, an 'embedding model' is capable of taking in input data, such as text, a document, or an image, and projecting it into a vector (an array of numbers) that represents the _entity's_ meaning or features. When converted into a numerical representation (a vector), a machine can establish relationships between entities by calculating their positions and proximity within the vectorial space. The way an entity is represented in a vectorial space will depend on the embedding model being used. There are embedding models specifically trained to find text, answer questions, and look for images based on text input (and vice-versa). On top of that, you have to consider the languages these models have been trained on, the amount of data they were fed with, etc. A good place to start learning more is the [Sentence Transformers framework documentation](https://www.sbert.net/docs/pretrained_models.html).\n\nHere I picked `multi-qa-distilbert-dot-v1` because it's been trained on Q\u0026A tasks from various sources and it showed good results when compared with other embedding models.\n\n\n### Setting up the Vector Store\n\nCalling `qdrant_client.QdrantClient()` without any arguments will use the default connection values which will point to _localhost_ on port 6333. By the way, you can visit _\u003cyour instance's public domain\u003e:6333/dashboard_ to check out your [Qdrant's Web UI](https://qdrant.tech/documentation/web-ui/).\n\nThen we have the “vector store”. A vector store is a storage system that holds the embedding vectors of nodes (document chunks), and the nodes themselves. 
These stores are used in machine learning and AI applications to efficiently store and retrieve high-dimensional vectors, which are often used to represent complex data like text, images, and more.\n\n```python\nclient = qdrant_client.QdrantClient()\nvector_store = QdrantVectorStore(client = client, collection_name = COLLECTION_NAME)\n```\n\n\n\n### Loading Documents\n\nOnce the vector store and storage context are created, we can now move to the next stage: loading the files and converting them into documents. “_Wait, \u003cspan style=\"text-decoration:underline;\"\u003efiles\u003c/span\u003e are not \u003cspan style=\"text-decoration:underline;\"\u003edocuments\u003c/span\u003e?_” you may be wondering, and no, in this context, “A Document is a generic container around any data source [...] By default, a Document stores text along with some other attributes”. The main attributes are the metadata and relationships dictionaries, which contain additional information for a document (by default the file path, name, size, creation date, and last modified date), and their relationship with other documents and _Nodes_, respectively. A Node is a chunk of a Document.\n\nThe `get_documents` function receives a path string — in this case, the path to the Scaleway documentation directory —, and defines a list of directories we know we want to exclude from our 'document loading' process, like the `.git` folder because it's not relevant and the `index.mdx` because its contents don't actually add any useful information.\n\nThe `SimpleDirectoryReader` class takes in the path to the Scaleway documentation directory, a list of extensions we want it to look for (remember to add the `.` before the extension. It will save you hours of debugging time :/ ), whether or not we want it to recursively look for subdirectories (we do!), and the list of things we want to exclude. 
The `load_data` method will return the documents, which will include the text found in each file, along with some metadata.\n\n```python\ndef get_documents(dir_path):\n ignore_these = ['.git/**', '.github/**', '.husky/**', 'assets/**', 'bin/**', 'blocks/**', 'changelog/**', 'components/**', 'docs/**', 'menu/**', 'styles/**', 'contribute.mdx', 'index.mdx']\n return SimpleDirectoryReader(\n input_dir = dir_path,\n required_exts = [\".mdx\"],\n recursive = True,\n exclude = ignore_these\n ).load_data()\n```\n \n\nIn the code below, the `if` statement checks if this is the first time this script has been executed by checking if the storage dir exists in the filesystem. If this is the first time running, (that's the `else` branch), then the `get_documents` function is called and a storage context is created.\n\nLlamaIndex uses `StorageContext` to, well… store things. In this case to the `vector_store`, which is our Qdrant vector database.\n\n`vector_index` creates a new vector store index from the documents previously generated, splits them up in chunks, and loads them into the vector database.\n\nFinally, on the `else` branch, we persist to disk the document IDs and hashes that point to the vector database elements, and that's what happens on the last line when `vector_index.storage_context.persist` is called.\n\nOn the `if` branch we load the `StorageContext` from the file system by passing the path in the `persist_dir` argument, then create a vector index the same way as previously mentioned, except, instead of creating it `from_documents`, it is created `from_vector_store` because the data already exists in the vector database.\n\n```python\nif Path(STORAGE_DIR).exists():\n storage_context = StorageContext.from_defaults(persist_dir = STORAGE_DIR)\n vector_index = VectorStoreIndex.from_vector_store(\n vector_store = vector_store, storage_context = storage_context, show_progress = True\n )\nelse:\n docs = get_documents(DOCS_DIR)\n storage_context = 
StorageContext.from_defaults(vector_store = vector_store)\n vector_index = VectorStoreIndex.from_documents(\n documents= docs, storage_context = storage_context, show_progress = True\n )\n vector_index.storage_context.persist(STORAGE_DIR)\n```\n\n\n\n### Creating a Query Engine\n\nAt this point, a reference to the LLM was passed to LlamaIndex, the document embeddings were created and stored on the vector database, and all is left to do is to query the `vector_index`:\n\n```python\nif __name__ == \"__main__\":\n if len(sys.argv) \u003e 1:\n questin_string = sys.argv[1]\n query_engine = vector_index.as_query_engine()\n response = query_engine.query(str(questin_string))\n print(response)\n else:\n print(\"You forgot to pass in your question :-) simply put it within quotes after invoking this script: python3 main.py \\\"what is an instance?\\\"\")\n```\n\n\nFirst, we check if the script is being loaded as the main program, then we check the script arguments to make sure there's a query after the script call — we want to be able to call the script and pass a query along directly, such as `python3 main.py “what is an Instance?”`.\n\nThe `vector_index.as_query_engine()` creates a basic Query Engine instance that is then executed with the `query` method by passing the query string.\n\n\n### The result\n\nWhen you run your script for the first time with a query such as “how do I create a serverless job?” \n\n\n```bash\n python3 demo.py \"how do I create a serverless job?\"\n```\n\nYou will get an answer similar to this:\n\n```text\nYou can create a serverless job using the Scaleway console, Terraform, API, or CLI.\n\nUsing the Scaleway console, you can easily create a job definition and track your job runs. 
You can also monitor your jobs using Scaleway Cockpit.\n\nAlternatively, you can use Terraform to integrate serverless jobs into your infrastructure as code via the Terraform provider and resources.\n\nThe Scaleway HTTP API allows you to manage your serverless resources via HTTP calls, which can be useful when integrating jobs management into your automated tasks or continuous integration.\n\nYou can also use the Scaleway CLI, a simple command-line interface that allows you to create, update, delete, and list your serverless jobs. For example, you can use the CLI to deploy a job with the following command: `scw jobs definition create name=testjob cpu-limit=70 memory-limit=128 image-uri=docker.io/alpine:latest command=ls`.\n\nFinally, Scaleway SDKs are available for Go, JS, and Python, allowing you to manage your resources directly using your favorite languages.\n```\n\nThis is great! The LLM by itself wasn't trained on the latest release of the Scaleway documentation. But it doesn’t have to be! 
It can go through the document nodes retrieved by the 'Query Engine' from the vector database and use them as the context to not only return a single document's text, but to generate an appropriate response based on the set of available documents and nodes.\n\nAs promised, this example can deliver great results with just 50 lines of code, here's the complete code:\n\n```python\nimport sys\nfrom pathlib import Path\nfrom llama_index.core import Settings, StorageContext, VectorStoreIndex, SimpleDirectoryReader\nfrom llama_index.embeddings.huggingface import HuggingFaceEmbedding\nfrom llama_index.llms.ollama import Ollama\nfrom llama_index.vector_stores.qdrant import QdrantVectorStore\nimport qdrant_client\n\nSTORAGE_DIR= \"./storage\"\nDOCS_DIR = \"./docs-content\"\nCOLLECTION_NAME = \"scw_docs\"\n\nllm = Ollama(model = \"llama2:70b\")\nembed_model = HuggingFaceEmbedding(model_name = \"sentence-transformers/multi-qa-distilbert-dot-v1\", embed_batch_size = 768, device = \"cuda\")\n# If you're using a system with lower VRAM than the 80GB of the H100 PCIe Instance, such as the L4 GPU Instance, you can use the smaller models you'll find below. 
They are not as powerful as their larger pairs, but they'll get the job done\n# llm = Ollama(model = \"llama2:7b\")\n# embed_model = HuggingFaceEmbedding(model_name = \"sentence-transformers/multi-qa-MiniLM-L6-dot-v1\", embed_batch_size = 384, device = \"cuda\")\n\nSettings.llm = llm\nSettings.embed_model = embed_model\nclient = qdrant_client.QdrantClient()\nvector_store = QdrantVectorStore(client = client, collection_name = COLLECTION_NAME)\n\ndef get_documents(dir_path):\n ignore_these = ['.git/**', '.github/**', '.husky/**', 'assets/**', 'bin/**', 'blocks/**', 'changelog/**', 'components/**', 'docs/**', 'menu/**', 'styles/**', 'contribute.mdx', 'index.mdx']\n return SimpleDirectoryReader(\n input_dir = dir_path,\n required_exts = [\".mdx\"],\n recursive = True,\n exclude = ignore_these\n ).load_data()\n\nif Path(STORAGE_DIR).exists():\n storage_context = StorageContext.from_defaults(persist_dir = STORAGE_DIR)\n vector_index = VectorStoreIndex.from_vector_store(\n vector_store = vector_store, storage_context = storage_context, show_progress = True\n )\nelse:\n docs = get_documents(DOCS_DIR)\n storage_context = StorageContext.from_defaults(vector_store = vector_store)\n vector_index = VectorStoreIndex.from_documents(\n documents= docs, storage_context = storage_context, show_progress = True\n )\n vector_index.storage_context.persist(STORAGE_DIR)\n\nif __name__ == \"__main__\":\n if len(sys.argv) \u003e 1:\n questin_string = sys.argv[1]\n query_engine = vector_index.as_query_engine()\n response = query_engine.query(str(questin_string))\n print(response)\n else:\n print(\"You forgot to pass in your question :-) simply put it within quotes after invoking this script: python3 main.py \\\"what is an instance?\\\"\")\n```\n\n\n\n## Next steps\n\nThis app can serve as the foundation for bigger things. In this case, we are using a simple approach that uses many of the LlamaIndex default settings, but you could say there are endless possibilities for what you can achieve. 
You can try out different LLM and embedding models, feed it different kinds of data, try out different vector databases, create different vector stores for different types of data, and then process each using a different model. Let's say you want to create a chatbot (did I mention besides a Query Engine, [LlamaIndex also supports a Chat Engine](https://docs.llamaindex.ai/en/stable/understanding/putting_it_all_together/chatbots/building_a_chatbot/)?) that can help onboard new developers to your company. You'd want them to be able to quickly find the answers they need, but as is sometimes the case, information is spread around many sources, like Confluence (who doesn't just love Confluence's search?) or Notion pages for guidelines and \"How-to\" guides, but also Google Docs for meeting notes, spreadsheets for reports, and your repository's README and CONTRIBUTING files for detailed practical information on specific projects. All of these different sources can be loaded thanks to the many different integrations available on [Llama Hub](https://llamahub.ai/), the go-to place for data loaders and tools that can make it easier for your app to go even further. \n\n\n### Custom Metadata\n\nOne such addition that can take our example app one step forward is to make the document-loading process include an additional step: customizing the document's metadata. As mentioned before, by default, the `SimpleDirectoryReader` will take the following file attributes as metadata: file_path, file_name, file_size, creation_date, and last_modified_date. Some of these are not entirely helpful in our case, but there's something quite useful we can get out of the file path. As it turns out, the Scaleway documentation website's build process keeps the relative file paths as they are, only prepending the base path `https://www.scaleway.com/en/docs/` and removing the `.mdx` extension. Knowing this we can create new metadata that includes the public URL of the document. 
To do so we need to create a new function that we will pass as the value of `SimpleDirectoryReader`'s `file_metadata` argument. This function will in turn receive the file path string and needs to return a dictionary of metadata key-value pairs.\n\nHere's the `get_custom_metadata` function\n\n```python\ndef get_custom_metadata(file_path_str):\n current_dir = Path.cwd()\n absolute_file_path = Path(file_path_str)\n relative_dir_path = absolute_file_path.relative_to(current_dir).parts[1:-1]\n file_name = absolute_file_path.stem\n file_path = \"/\".join(relative_dir_path + (file_name,))\n return {\n 'file_name': file_name,\n 'file_path': file_path,\n 'public_url': \"https://www.scaleway.com/en/docs/\" + file_path,\n }\n```\n\n\nAnd here's how we pass it as a new argument to `SimpleDirectoryReader`:\n\n```python\ndef get_documents(dir_path):\n # ...\n return SimpleDirectoryReader(\n # ...\n file_metadata=get_custom_metadata,\n ).load_data()\n```\n\n\nWhat do we get after this? Well, not much. But this is only the first step towards something useful, instructing the LLM to generate a response following our guidelines.\n\n\n### Custom Prompt\n\nUnder the hood, LlamaIndex passes many [default prompts](https://github.com/run-llama/llama_index/blob/main/llama-index-core/llama_index/core/prompts/default_prompts.py) to the LLM to provide it with the required instructions for different steps of the generation process based on several different factors. However, we have the ability to set our custom prompts. One such prompt we can set is the `text_qa_template` the Query Engine can receive. This prompt allows us to define several instructions, as you can see below:\n\n```python\n#...\nfrom llama_index.core import PromptTemplate\n #...\n qa_prompt_str = (\n \"You're a helpful technical expert who provides answers based on the Scaleway Documentation.\\n\"\n \"Assume every question you receive is related to Scaleway. 
If you can't find the data to answer a question, or the question is out of the scope of Scaleway, say `I don't know.`, suggest visiting the documentation website and don't provide any further information.\\n\"\n \"Context information is below.\\n\"\n \"---------------------\\n\"\n \"{context_str}\\n\"\n \"---------------------\\n\"\n \"\\nInstructions:\\n\"\n \"- Based on the above Context information and no prior knowledge, provide a concise answer to the user Query below.\\n\"\n \"- Prioritize documents with the shallowest 'file_path' depth. If you can't find data to answer a question within the Scaleway Documentation, say I don't know.\\n\"\n \"- Always finish your answer with a separate paragraph linking to the most relevant document using the value of its 'metadata' 'public_url'.\\n\"\n \"Query: {query_str}\\n\"\n \"Answer: \"\n )\n query_engine = vector_index.as_query_engine(text_qa_template = PromptTemplate(qa_prompt_str))\n response = query_engine.query(str(questin_string))\n print(response)\n #...\n```\n\n\nWe're using this prompt to instruct the LLM to set the scope of the generated answer to the Scaleway platform using the provided context documents, disregarding any prior knowledge, and asking it to provide the public URL of the document it thinks is the most relevant.\n\nYou will notice two variables there, `context_str` and `query_str`, both of which are _automagically_ passed by LlamaIndex when `qa_prompt_str` is passed to `PromptTemplate` (a new class we need to import). `query_str` is the query provided by the user, and `context_str` is the information retrieved from the vector database, which was previously queried using the user's `query_str`. 
The LLM takes the instructions provided in the prompt, the context information, and the user query to finally synthesize a response.\n\nWhen invoking the script with the question “is there a cli?”, the generated response we get back looks like this:\n\n```text\nYes, you can use the Scaleway CLI to manage your resources. The Scaleway CLI allows you to interact with the Scaleway platform using commands instead of the graphical user interface. You can install the Scaleway CLI on your local machine and use it to create, modify, and delete resources.\n\nTo get started with the Scaleway CLI, you can refer to the Scaleway CLI documentation. This document provides an overview of the available commands, flags, and options, as well as examples of how to use them. Additionally, you can use the command-line help system to get information about a specific command or flag.\n\nFor more information on using the Scaleway CLI, please refer to the following document:\n\nhttps://developer.scaleway.com/en/compute/instances/api-cli/\n\n```\n\nAnd the query we used for the initial version, “how do I create a serverless job?” would look like this:\n\n```text\nSure! Here's how you can create a serverless job:\n\n1. Log in to your Scaleway account and navigate to the Serverless section in the side menu.\n\n2. Click on \"Jobs\" and then click on \"Create job\".\n\n3. Select the Scaleway Container Registry as the source of the container image.\n\n4. Choose the desired container registry namespace, select a container image and tag, and enter a name for the job.\n\n5. Select the region where you want to create the job and choose the resources that you want to allocate to the job.\n\n6. Define a cron schedule to specify when you want the job to run, and add any environment variables or startup commands as needed.\n\n7. Set a maximum duration for the job and verify the estimated cost.\n\n8. 
Click \"Create a job definition\" to finish creating the job.\n\nYou can find more detailed instructions and information on creating serverless jobs in the Scaleway documentation here: \u003chttps://www.scaleway.com/en/docs/serverless/jobs/how-to/create-job-from-scaleway-registry\u003e.\n\n```\n\nBoth responses accurately provide a concise answer to the questions and provide a link to the relevant documentation page for the user to learn more about the topic. \n\n\n### Further improvements\n\nAs discussed before, this example serves as a base to start building on top of, and many improvements can be made. In our case, the **mdx** files include frontmatter that contains relevant information that can be leveraged, such as its title, a description of the file contents, categories it applies to, and tags it can be grouped in. Additionally, the fact that **mdx** files not only use Markdown syntax, but also can include markup elements, or components, such as `\u003cMacro id=\"requirements\" /\u003e` or `\u003cNavigation title=\"See also\"\u003e`, can confuse the embedding model. In this case, parsing the documents with a proper [Reader from Llama hub](https://llamahub.ai/?tab=readers), or [creating your own](https://docs.llamaindex.ai/en/stable/module_guides/loading/simpledirectoryreader/#extending-to-other-file-types), can improve the overall performance and accuracy of your app.\n\nOther improvements can include the automation of the process of pulling the latest version of the documentation and the associated vector database update, using improved storage methods, experimenting with other databases, tweaking the model's parameters, definitely trying out different [Response Modes](https://docs.llamaindex.ai/en/stable/module_guides/deploying/query_engine/response_modes/?h=response+mode), and protecting our Instance so that only people allowed to access these resources can consume them. 
\n\n\n## Conclusion\n\nIn conclusion, RAG is a powerful technique that can improve the accuracy and reliability of generative AI models. By using external sources of information, RAG enables developers to create sophisticated AI systems that are more accurate and extendable. In this article, we went through the very basics on how to get started with RAG by leveraging LlamaIndex, Qdrant, Ollama, and sentence-transformers embedding models. We covered various aspects of RAG, including setting up the environment, loading documents, running a vector database, creating a vector store, and creating a Query Engine.\n\nWe then considered the many possibilities that lie beyond this base setup and improved its functionality by prompting the model to generate responses that include the answer's public documentation page URL. By following these steps, you can create your own RAG system that can be used for various applications that leverage your data with the power of open source tools, LLMs, and Scaleway's AI solutions.\n","createdAt":"2024-04-09T23:46:37.321Z","updatedAt":"2024-04-17T10:22:23.490Z","publishedAt":"2024-04-11T13:31:04.708Z","locale":"en","tags":"AI\nH100\nL4\nRAG\nopen source","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":14,"excerpt":"RAG improves LLM's accuracy and reliability by incorporating external sources into the response generation pipeline. 
This makes using an LLM a more reliable and powerful tool for your AI applications\n","author":"Diego Coy","h1":"Your RAG-powered AI app in 50 lines of code!","createdOn":"2024-04-09","image":{"data":{"id":3363,"attributes":{"name":"AI-usecases-Generative-AI-Illustration-1920X1080.webp","alternativeText":null,"caption":null,"width":1920,"height":1080,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp","hash":"large_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59","mime":"image/webp","name":"large_AI-usecases-Generative-AI-Illustration-1920X1080.webp","path":null,"size":"383.44","width":1000,"height":563},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp","hash":"small_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59","mime":"image/webp","name":"small_AI-usecases-Generative-AI-Illustration-1920X1080.webp","path":null,"size":"116.01","width":500,"height":281},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp","hash":"medium_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59","mime":"image/webp","name":"medium_AI-usecases-Generative-AI-Illustration-1920X1080.webp","path":null,"size":"236.45","width":750,"height":422},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp","hash":"thumbnail_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59","mime":"image/webp","name":"thumbnail_AI-usecases-Generative-AI-Illustration-1920X1080.webp","path":null,"size":"35.75","width":245,"height":138}},"hash":"AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59","ext":".webp","mime":"image/webp","size":1166.95,"url":"https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustrati
on_1920_X1080_b7e6835d59.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2024-03-13T15:51:30.699Z","updatedAt":"2024-04-09T23:40:40.615Z"}}}},{"title":"How to optimize your cloud infrastructure for video streaming and encoding","path":"how-to-optimize-your-cloud-infrastructure-for-video-streaming-and-encoding/","description":"Today, the cloud is an essential part of video processing. Naturally, when you consider Netflix and YouTube alone accounted for [over a quarter](https://www.statista.com/chart/15692/distribution-of-global-downstream-traffic/) of global web traffic in 2022. Furthermore, that and related activities are set to grow: IT spending in the global media and entertainment sector rose by 34% between 2019 and 2024, [according to Gartner](https://www.gartner.com/en/documents/4967131). \n\nWhat do these trends imply for companies in the sector today, particularly for video companies? We brought three of them together for a dedicated webinar late March 2024. Here are the highlights!\n\n\"I've been in the video sector for 18-20 years, notably on the VLC, FFmpeg and x264 projects,\" said Jean-Baptiste Kempf, president of VideoLAN and inventor of the pioneering VLC open source player, by way of introduction. \"Today, all video found on the cloud and on websites is managed by open source projects, whether it’s on Facebook, YouTube or Twitch. This is one of the few areas of the cloud where there is virtually no non-open source competition, and all proprietary solutions are based on these solutions. But it's true that today you need significant resources for transcoding - so a lot of computing power - and adapted infrastructures that are capable of scaling according to your needs at certain times. 
So the importance of cloud providers is paramount.\"\n \nOur three speakers then presented their solutions.\n\n \n**Emmanuel Fréard**, Co-Founder and CTO of [blastream](https://blastream.com), presented his cloud-based livestreaming service, which enables events to be produced and broadcast directly over the web. \"It’s a sort of mini-[OBS](https://obsproject.com/) in the cloud, controlled directly from the web, similarly to what our American competitors, such as StreamYard, offer,\" explained Fréard. **Blastream has the particularity of being white-label, which makes it suitable for uses such as webinars, but also live shows, as well as special activations like the broadcast of a remote-controlled car race, with Orange**. To achieve this, blastream relies on MCU technologies, with ingestion and broadcasting notably via WebRTC. On the cloud side, blastream depends on the reliability and availability of Scaleway's CPU resources - \"we have an average of 25 seconds of instance availability, which is very comfortable for our customers and end users\", asserted Fréard - as well as the sovereign aspect, as video data must remain RGPD-compliant at all times.\n \n\n**Geoffrey Signorato**, Lead Developer at [Streamfizz](https://www.streamfizz.com), then presented this video streaming platform created in 2012, by parent company Webcastor. Streamfizz features a responsive, customizable player and alternative to GAFAM solutions, with a strong focus on data confidentiality. Indeed, **Streamfizz's cloud configuration \"ensures that videos are not sent to services outside France\", explains Signorato; \"this is why Streamfizz has developed its entire encoding system in-house\"**. This system consists of uploading video files to object storage, before passing them on to a pool of 3070 and RENDER-S GPUs in a private network for encoding and subtitling, before they go back to object storage. 
Streamfizz also relies on Scaleway's Kubernetes Kapsule to manage scale-up and scale-down of GPU Instances.\n \n\n**François Caron**, CEO of [EMPREINTE.COM](https://www.empreinte.com), then explained how this \"pioneer in online video\" began in 1989, producing CD-ROMs for Wanadoo (France Télécom), before moving on to webTV, streaming, display and webinars. To do this, **Empreinte relies on Scaleway Kapsule, Load Balancer, Object Storage, Container Registry and MySQL, enabling it to meet substantial load requirements, such as 70,000 simultaneous users for one customer**, or 3,000 multilingual live broadcasts per year for another. Caron was particularly pleased that his sector had created 40,000 jobs to date - \"we have talent in this country, and applications that are used worldwide\" - while regretting that said players \"don't talk to each other enough.\" \n\n\n![Video webinar speakers](https://www-uploads.scaleway.com/Video_webinar_speakers_955ef8f3b4.webp)\n\n\n## Why sovereignty comes first with video\n\n**Sovereignty** being the common ground between these three players, the discussion part of the webinar began on this topic. \"It's a determining factor\", insisted Caron, particularly when managing the video content of clients’ intranets, as is the case with Empreinte. \"Some servers are in the US, which can reveal confidential information. CIOs don't want to look at this sort of thing... except when we talk to them about cybersecurity. Controlling cloud perimeters as tightly as possible is decisive for security.\"\n\nIt's also a key sales argument, according to **Signorato. \"It's a real selling point to be able to tell our customers that we know exactly where their video will go when they upload it**. We know that it stays in France, who encodes it and where, where it's uploaded... 
Sovereignty is not something clients think about by themselves, but when we tell them about it, it tends to reassure them.\"\n\nIn addition to reassuring customers, **sovereignty is \"mandatory for some customers\", according to Fréard. \"Some CIOs impose it. And that's fine with us!” **he said. “We prefer to work with people we can talk to, which isn't always the case for non-European companies. It's a sales pitch, but also a technical one, and one that has real added value for us as a startup.\"\n\n\n## Availability and flexibility: the keys to success\n\nFréard then returned to his need for **guaranteed CPU Instances available within 25 seconds. \"We don't want to have a whole pool of servers available at all times and only use them 10% of the time. We prefer to call on resources at a moment's notice\"**, he said. “This makes economic and environmental sense. And the faster these resources are available, the better it is for the end customer.”\n\nThis need for flexibility is covered for Streamfizz by Kubernetes autoscaling, \"which allows us to define a minimum of replicates that will run on Instances\", explained Signorato. \"Whenever Kubernetes receives a CPU or RAM load, the autoscaler will automatically switch on Instances,\" he added. **\"This elasticity allows us both to manage peak loads, and to limit resources to the minimum\" when Streamfizz doesn't need resources**. “There's a whole configuration to be done in advance, but once properly set up, we have a certain robustness on production.\"\n\n\"We're trying to do better with less\", summarized Empreinte's CEO. **Last year, the company’s sales grew by 40%, while its cloud bill fell by 50%, \"simply because we managed [resources] better\", explained Caron**. \"Application optimization, intelligent dispatch management, multi-operator operations, transparent disaster recovery, scalability management, switching to object-oriented activity... 
all help in making considerable savings, and in having increasingly reliable applications.\"\n\nThese aspects of FinOps are concerns of many customers, confirmed Fabien Ganderatz, Solution Architect at Scaleway: \"we make a point of allowing customers to consume as they wish, when they wish, and as quickly as possible, in terms of resource activation.\"\n\n\n## GPUs, CPUs, and cost optimization\n\nAs Signorato has [already explained on our blog](https://www.scaleway.com/fr/blog/webcastor-migration-interview/), the switch from CPUs to GPUs for video encoding has brought Streamfizz considerable advantages in terms of time and cost. \"To encode video with CPUs, you need to quickly access a lot of computing power,\" he said during the webinar. \"In our V2, we decided to go with GPUs. They take a little longer to start up, but they handle video tasks much faster. Thanks to that, plus relying on Kubernetes for scale-up/in, **we achieved four times faster encoding for four times cheaper [with 3070 GPUs], at almost the same quality as with CPUs.**\" Streamfizz also uses RENDER-S GPUs to do transcription and create subtitles automatically. \n\nLast but not least, the company is currently testing NVIDIA's new L4 GPUs, and \"the results are rather encouraging; we're getting the same quality, almost the same encoding time, but at a lower cost, since the GPUs are cheaper, and above all billed by the minute,” said Signorato. This last point is very important, because with GPUs billed by the hour, you could end up paying an hour for just ten minutes' use.\n\nBut in the end, between CPUs and GPUs, which one wins when it comes to video? \n\n\"In terms of encoding finesse, GPUs don't outperform CPUs,\" said **Caron. \"GPUs go faster, but do some things less well for that reason. 
And when you want to process the remaining 20%, it consumes 80% more energy [than a CPU].\"** Empreinte therefore relies mostly on CPU Instances, except when customers have specific needs.\n\nAs for Fréard and blastream, \"for real-time encoding, we've found that with GPUs, we can increase encoding power. Whereas with CPUs we can go up to [a resolution of] 1080p and 30 fps [frames per second], with GPUs we can reach 4K [ultra-high resolution]. Except that **streaming in 4K on the web today doesn't make much sense, at least not live, especially as there are significant environmental impacts**. But here's the thing: in specific cases, GPUs help us - those who stream with OBS can test this themselves - but we stick with CPUs in most cases.\"\n\nIn conclusion, these three apparently similar French players have three very different practices when it comes to GPUs and CPUs. As Caron pointed out, \"this is a good illustration of the market!\" What's more, \"it's constantly evolving,\" added Signorato.\n\nIn any case, the recent integration of ARM CPU nodes into Scaleway's Kubernetes offering will open up new perspectives for video orchestration, Ganderatz pointed out. A development made all the more interesting by the fact that ARM will open up new possibilities in the field of video, he added.\n\n\n## Future video trends\n\nWhen it comes to the future of video on the cloud, \"we're not sure of anything yet,\" said Signorato, while acknowledging that ARM remains an interesting new field to explore.\n\n\"On the encoding side, there are certainly things coming,\" said Fréard. \"HEVC [High Efficiency Video Coding] is interesting; and the predictive aspect will certainly emerge. I've seen that Opus has included predictive on its audio codec, and so maybe other codecs will do the same.\"\n\n**\"The big challenge for video is to be green,\" Caron insisted**. 
\"Since we've lowered our cloud consumption by 50%, we’d like this trend to take off internationally.\"\n\nFréard concluded in this vein, highlighting French expertise in the video field - as evidenced by Jean-Baptiste Kempf. He also stressed the importance of movements such as [Greening of Streaming](https://www.greeningofstreaming.org/), and that of reducing the environmental impact of streaming globally. One to watch moving forwards!\n","createdAt":"2024-03-28T10:54:15.018Z","updatedAt":"2024-09-30T11:26:02.708Z","publishedAt":"2024-03-28T11:05:30.733Z","locale":"en","tags":"Video\nMedia \u0026 Entertainment\nTestimonial","popular":false,"articleOfTheMonth":false,"category":"Scale","timeToRead":4,"excerpt":"What are the best ways to optimize your cloud infrastructure for video streaming and encoding? French experts answered in our webinar late March. Here's what they said!","author":"James Martin","h1":"How to optimize your cloud infrastructure for video streaming and encoding - Webinar 
report","createdOn":"2024-03-28","image":{"data":{"id":3215,"attributes":{"name":"Media-Entertainment-GenericHero.webp","alternativeText":null,"caption":null,"width":1424,"height":880,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Media_Entertainement_Generic_Hero_7705874711.webp","hash":"large_Media_Entertainement_Generic_Hero_7705874711","mime":"image/webp","name":"large_Media-Entertainement-GenericHero.webp","path":null,"size":"325.59","width":1000,"height":618},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Media_Entertainement_Generic_Hero_7705874711.webp","hash":"small_Media_Entertainement_Generic_Hero_7705874711","mime":"image/webp","name":"small_Media-Entertainement-GenericHero.webp","path":null,"size":"104.66","width":500,"height":309},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Media_Entertainement_Generic_Hero_7705874711.webp","hash":"medium_Media_Entertainement_Generic_Hero_7705874711","mime":"image/webp","name":"medium_Media-Entertainement-GenericHero.webp","path":null,"size":"200.87","width":750,"height":463},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Media_Entertainement_Generic_Hero_7705874711.webp","hash":"thumbnail_Media_Entertainement_Generic_Hero_7705874711","mime":"image/webp","name":"thumbnail_Media-Entertainement-GenericHero.webp","path":null,"size":"39.80","width":245,"height":151}},"hash":"Media_Entertainement_Generic_Hero_7705874711","ext":".webp","mime":"image/webp","size":109.3,"url":"https://www-uploads.scaleway.com/Media_Entertainement_Generic_Hero_7705874711.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2024-02-01T16:19:21.297Z","updatedAt":"2024-02-01T16:19:30.035Z"}}}},{"title":"hosterra-greenit-testimonial","path":"hosterra-greenit-testimonial/","description":"_Pierre Lannoy is the founder of [Hosterra](https://hosterra.eu/), a Lille-based 
company specializing in Green IT. He offers energy-optimized Elastic Metal servers, enabling his customers - mainly web agencies - to reduce their carbon footprint, and that of their clients. This involves detailed reporting, and reliance on DC5, Europe's most sustainable datacenter. An exemplary Scaleway customer to watch! Read on to find out why…_\n\n\n## Scaleway Blog: Is digital responsibility becoming increasingly important today?\n\n**Pierre Lannoy**: Green IT is increasingly becoming a must-have. For private sector RFPs (requests for proposals), the trend is undeniable. In the last RFP I won, Hosterra's eco-responsibility and sustainability, including in the social sense, were key criteria: 25% of the score in this case. And I’m seeing this increasingly often.\n\nI have a real advantage in these RFPs by having most of my infrastructures with Scaleway. The infrastructures I sell to my customers are [Elastic Metal](https://www.scaleway.com/en/elastic-metal/); machines which really allow us to measure the power consumption of each process. And since we can measure it, we can have an impact on it (by optimizing it). \n\nAt Scaleway, it already helps me enormously to have your [PUE and WUE](https://www.scaleway.com/en/environmental-leadership/) data. But also, the fact that you make your servers last [up to ten years, versus three or four elsewhere] helps me a lot too. So I have the operational and efficiency aspect, which I share with my customers; I don’t hide from them the fact that I work with you. Finally, Scaleway doesn’t engage in greenwashing; you don’t congratulate yourself on things that aren’t true. \n\n\n## Why did you create Hosterra?\n\nI used to work for a very large retail company, which switched 96% of its IT from its own data centers to Google. I was in charge of those highly valuable contracts. I saw that I could trust neither the eco-responsibility aspect, nor the privacy aspect. 
On this second point, they claim that by signing contracts with their Irish and therefore European entities, this resolves a number of points related to GDPR. This is completely false. An Irish subsidiary is still a subsidiary: the parent company has a say in everything, because of US legislation.\n\nAs I couldn't change things internally by myself, I decided to set up my own company to host services (web, mail... self-service PaaS). I wanted to show that it was possible to do this in France, while respecting ethical and sustainable principles, to set up a [technological] company that could be functional.\n\n**Hosterra was created in September 2022. On January 23, 2023, I had my first customer**.\n\nToday, our customers include :\n- 85% of my sales: Web agencies like Kodama, as well as freelance web designers and the like, who choose us either because their customers have eco-responsible needs, or because our services are affordable\n- 10%: direct public sector\n- 5%: individuals who have a blog/need emails, families who want mailboxes with their family names...\n\nWhatever the type of customer, they join us based on their convictions, because Hosterra's values speak to them.\n\n\n## How does it work, technically speaking?\n\nI need a high level of observability, and machine measurement. This is only possible with Elastic Metal servers, to which I've added virtualization layers myself. As I'm able to measure very precisely, I've carried out a lot of tests, which have resulted in a lot of errors... but enough successes to improve the operation.\n\nFor example, I've found that for a PHP website, to get the same performance, you can either have a stack with Apache as web server and NGINX as proxy, or do everything with [LiteSpeed Enterprise](https://www.litespeedtech.com/products/litespeed-web-server/features). \n\n**LiteSpeed Enterprise requires 46% fewer CPU cycles than Apache**. 
And yet, the CPU cycle is the most costly in terms of energy.\n\nIT energy costs, globally speaking, come from:\n- Disks (very difficult to optimize, as 500 GB = 500 GB)\n- CPUs\n- Memory\n\n**Since memory consumes much less energy than the CPU, I try to use less CPU and more memory**. We use memory for caching. A very positive point for Scaleway is that the CPU/memory ratios of your Elastic Metal servers are unusually high. You don't see that anywhere else. That's how I make the services I deliver more efficient.\n\nWherever I can cache, I cache. **Every time there's an operation required, I'll check what's in the cache, and issue what's in it, rather than relaunching the calculation with the processor**. This relieves the CPU of some of its normal activity.\n\nFurthermore, you have caching at every level: between the operating system and the service - between the OS and Apache, for example - or between the PHP and the file system... the possibilities are numerous. All you have to do is understand it; develop the tools to do it; and then measure from the outset whether what you're doing improves power consumption or not.\n\n**Very few publishers try to make their applications more energy-efficient**. LiteSpeed Enterprise does this very well; its editor has already understood that certain things can be cached, rather than recalculated each time.\n\n\n## Shouldn't developers be incentivized on energy optimization?\n\nDevelopers often think in terms of Moore's Law: they don’t care about optimization. They know that in 18 months' time, their application will perform like a Formula 1 car, because they know that there will be more computing power in the future. So why bother? **The main challenge my manager gives me is to deliver in record time. The whole software industry is like that. 
Nobody tries to do more with less!**\n\n\n## What are the results of the optimizations you've implemented?\n\nIn very specific cases of web servers running Apache and MariaDB, **I use 38% less electricity**... thanks to a series of measures:\n- LiteSpeed instead of Apache\n- Using my virtualization system \n- My cache optimizations...\n\nAnd a whole host of other things. I can't reveal all! But everyone knows that using more memory saves CPU... How I implement all this requires a few family secrets :)\n\n\n## How exactly do you measure?\n\nI measure the server's real consumption, because **with Elastic Metal, I have access to real-time data from the motherboard, memory, processors, BUS and disks**. This is thanks to third-party software, such as [IPMI](https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface) (Intelligent Platform Management Interface), which searches the motherboard registers. \n\nIPMI is still standard on hardware servers. All I have to do is write lines of code to fetch these values, and upload them to an InfluxDB database, where I can analyze them. So I can measure them in real time and in great detail. **It's standard on machines, it's just that people don't take the time to do (these analyses). This data has been around for decades, but nobody uses it**. But as long as there are no instances/virtualization, you have access to this information. All you need is access to the physical server/hardware, at chassis level (on instances, this wouldn't be possible, as the VM doesn't have access to the hardware).\n\nI also measure the degree of heating per server, and can even measure the volume of air delivered, thanks to the fan speed data!\n\n\n## How do you share this information with your customers?\n\n1. [On my website](https://hosterra.eu/store/web-hosting), prior to purchase, I commit to maximum energy and water consumption. 
So, for example, for our VPS-1 offer, I guarantee the service won't consume more than 7kWh of electricity, or 5cl of water per year.\n\n![Hosterra_customer_monitoring_dashboard.webp](https://www-uploads.scaleway.com/Hosterra_customer_monitoring_dashboard_a83c6240d9.webp)\n2. In clients’ dashboards, department by department, they can track electricity and water consumption (above), day by day, as well as emissions. \n\nHow did I get there?\n\nFirst, to determine your data centers’ carbon intensity, I gleaned figures from various Scaleway docs (reports, datacenter factsheets, etc). Using the most pessimistic synthesis possible, I arrived at 8 g/kWh (DC5) and 9 g/kWh (DC2); these are the only two datacenters I'm using at the moment. I then corrected with their average PUE, published on your site, to arrive at 9.8 g/kWh and 12.9 g/kWh (approximately).\nI then supplemented this information with the information you submitted to the [Green Web Foundation](https://www.thegreenwebfoundation.org/green-web-check/?url=www.scaleway.com), which confirms that your energy is 100% renewable. \n**So I have the carbon cost of the two datacenters I use. I multiply that by the consumption of each customer's servers... and that gives their carbon impact**.\n\nFinally, **electricity consumption** is based on the IPMI data from each machine; and **water consumption** is extrapolated from the WUE of each datacenter.\n\nIf we also had this rate of change in carbon intensity (throughout the day), that would be great! But then... what we're able to do with the figures we've got isn't bad.\n\nEventually, I also want to be able to show my customers the energy costs of each website they've created for each of their customers. But I don't know how to make it practical yet!\n\n\n## Are Hosterra's values the main vector for attracting customers?\n\nYes, you have to educate people on these subjects. So I organize meetups, conferences, meetings, RFPs, and make all this visible to customers, e.g. 
in their dashboards...\n**It's really important to make people understand that there's no other approach than facts and figures. You can't improve what you can't measure**.\n\nI have direct competitors who have their own data centers, but they're not part of this virtuous circle, this knock-on effect. That's where web agencies are really important, because more and more customers are asking these kinds of questions. There's a real groundswell of interest in eco-design right now. Even with WordPress! All agencies are getting on board.\n\nOn the other hand, some web agencies tell me that they feel like a drop in the bucket compared to the negative impact of our small websites. I tell them we've got to start somewhere! **If everyone contributes their drop of water, in the end we'll have an ocean**. And you can't wait for others to do it before you do it yourself, otherwise we'll never get out of there.\n\n\n## What are Hosterra's next steps?\n\n- Expand the range of services: I do web hosting, mail hosting and storage. In the next few months, I'll be moving on to hosting for WordPress, which is highly optimized for electricity.\n- To grow the business, I'm planning to switch to larger Elastic Metal (Titanium type), because of my mass of customers. If you manage to put a lot of customers on a very large machine, the PUE of your machine will be much better, because pooling is better for efficiency (that's the very principle of the public cloud).\n\n","createdAt":"2024-03-12T16:25:23.627Z","updatedAt":"2025-01-19T20:21:06.938Z","publishedAt":"2024-03-28T10:21:32.159Z","locale":"en","tags":"Green IT\nBare Metal\nTestimonial","popular":false,"articleOfTheMonth":false,"category":"Deploy","timeToRead":5,"excerpt":"Hosterra's Pierre Lannoy obtains 38% energy savings from Scaleway Elastic Metal servers, which he then lets out to his clients, for website, email and self-service PaaS. 
He explains how!","author":"James Martin","h1":"How to use 38% less energy when using Bare Metal... thanks to Hosterra!","createdOn":"2024-03-12","image":{"data":{"id":3355,"attributes":{"name":"Sustainability-Illustration-1920X1080.webp","alternativeText":null,"caption":null,"width":1920,"height":1080,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Sustainability_Illustration_1920_X1080_d4842ba1e7.webp","hash":"large_Sustainability_Illustration_1920_X1080_d4842ba1e7","mime":"image/webp","name":"large_Sustainability-Illustration-1920X1080.webp","path":null,"size":"451.15","width":1000,"height":563},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Sustainability_Illustration_1920_X1080_d4842ba1e7.webp","hash":"small_Sustainability_Illustration_1920_X1080_d4842ba1e7","mime":"image/webp","name":"small_Sustainability-Illustration-1920X1080.webp","path":null,"size":"110.77","width":500,"height":281},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Sustainability_Illustration_1920_X1080_d4842ba1e7.webp","hash":"medium_Sustainability_Illustration_1920_X1080_d4842ba1e7","mime":"image/webp","name":"medium_Sustainability-Illustration-1920X1080.webp","path":null,"size":"240.54","width":750,"height":422},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Sustainability_Illustration_1920_X1080_d4842ba1e7.webp","hash":"thumbnail_Sustainability_Illustration_1920_X1080_d4842ba1e7","mime":"image/webp","name":"thumbnail_Sustainability-Illustration-1920X1080.webp","path":null,"size":"35.34","width":245,"height":138}},"hash":"Sustainability_Illustration_1920_X1080_d4842ba1e7","ext":".webp","mime":"image/webp","size":315.23,"url":"https://www-uploads.scaleway.com/Sustainability_Illustration_1920_X1080_d4842ba1e7.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2024-03-12T17:01:11.473Z","updatedAt":"2024-04-05T13:11:
16.534Z"}}}},{"title":"Ollama: from zero to running an LLM in less than 2 minutes!","path":"ollama-from-zero-to-running-an-llm-in-less-than-2-minutes/","description":"\nThe Artificial Intelligence (AI) field has been fueled by open source initiatives from the very beginning, from data sets used in model training, frameworks, libraries and tooling, to the models themselves. These initiatives have been mainly focused on empowering researchers and a subset of experts to facilitate their investigations and further contributions. Fortunately for the rest of us – technologists without deep AI knowledge – there has been a wave of open source initiatives aimed at allowing us to leverage the new opportunities AI brings along.\n\nData sourcing, model training, math thinking, and its associated coding are done by a group of dedicated folks who then release models, such as Mixtral or Stable Diffusion. Then another group of people build wrappers around them to make the experience of using them a matter of basic configuration, and in some cases nowadays, just executing a command, allowing us to focus on leveraging the models and simply build on top of them. That’s the power of open source!\n\nOne such tool that has caught the internet’s attention lately is [Ollama](https://ollama.com/), a cross-platform tool that can be installed on a wide variety of hardware, including Scaleway’s [H100 PCIe GPU Instances](https://www.scaleway.com/en/h100-pcie-try-it-now/).\n\n\n## A model\n\nBefore diving into Ollama and how to use it, it is important to spend a few moments getting a basic understanding of what a machine learning (ML) model is. This is by no means intended to be an extensive explanation of AI concepts, but instead, a quick guide that will let you sort your way out to experience the power of AI firsthand.\n\nA _model_ is a representation of the patterns an algorithm has learned from analyzing data it was fed during its training phase. 
The goal of a Machine Learning model is to make predictions or decisions based on new, unseen data.\n\nA model is generally trained by feeding it labeled data or unlabeled – depending on the type of model – and then adjusting the model's parameters to minimize the error between the expected and actual outputs.\n\nBy the end of its training phase, a model will be distributed as either a set of multiple files including the patterns it learned, configuration files, or a single file containing everything it needs. The number of files will vary depending on the frameworks and tools used to train it, and most tools today can adapt to the different ways a model is distributed.\n\nThe size of a machine learning model refers to the number of parameters that make up the model, and in turn, its file size: from a couple of megabytes to tens of gigabytes. A larger model size typically means more complex patterns can be learned from the training data. However, larger models also require more computational resources which can negatively affect their practicality.\n\nSome of the most popular models today have been trained on huge amounts of data, with Llama2 reaching 70 Billion parameters (Also known as [Llama2 70B](https://huggingface.co/meta-llama/Llama-2-70b)), however, the model’s size doesn’t always correlate with its accuracy. 
Some other models that have been trained with fewer parameters claim they can outperform Llama 2 70B, such as [Mixtral 8x7B](https://mistral.ai/news/mixtral-of-experts/), in certain benchmarks.\n\n\n### Choosing the right tool for the job\n\nDeciding to use a model that is smaller in size – instead of a larger one that will potentially require larger sums of hardware resources – when the task at hand can be easily performed by it can be the most efficient optimization you can achieve without having to tweak anything else.\n\nDepending on your needs, using the 7B version of Llama 2 instead of the 70B one can cover your use case and provide faster results. In other cases, you may realize that using a model that has been trained to do a smaller set of specific tasks instead of the more generic ones can be the best call. Making the right choice will require some time trying out different alternatives, but this can yield improved inference times and hardware resource optimization.\n\nChoosing the right tool also can be seen from the hardware angle: should I use a regular x86-64 CPU, an ARM CPU, a gaming GPU, or a Tensor Core GPU…? And this is a conversation worth having in a separate blog post. For this scenario, we’ll stick with Scaleway’s H100 PCIe GPU Instances as they run the fastest hardware of its kind.\n\n\n## Ollama: up and running in less than 2 minutes\n\nFinally, we get to talk about Ollama, an open source tool that will hide away all the technical details and complexity of finding and downloading the right LLM, setting it up, and then deploying it. 
Ollama was originally developed with the idea of enabling people to run LLMs locally on their own computers, but that doesn’t mean you can’t use it on an H100 PCIe GPU Instance; in fact, its vast amount of resources will supercharge your experience.\n\nAfter [creating your H100 PCIe GPU Instance](https://www.scaleway.com/en/docs/compute/gpu/how-to/create-manage-gpu-instance/), getting Ollama up and running is just a matter of running the installation command:\n\n```bash\ncurl -fsSL https://ollama.com/install.sh | sh\n```\n\nNote: It’s always a good idea to take a moment to review installation scripts before execution. Although convenient, running scripts directly from the internet without understanding their content can pose significant security risks.\n\nOnce installed, you can run any of the supported models available [in their model library](https://ollama.com/library), for instance, [Mixtral](https://ollama.com/library/mixtral) from [Mistral AI](https://mistral.ai/) – a model licensed under Apache 2.0, that is on-par and sometimes outperforms GPT3.5 – by using the run command:\n\n```bash\nollama run mixtral\n```\n\nOllama will begin the download process, which will take just a few seconds – thanks to the 10Gb/s networking capabilities of Scaleway’s H100 PCIe GPU Instances –, and once done, you will be able to interact with the model through your terminal. 
You can start a conversation with the model, as you would with ChatGPT, or any other AI chatbot; the difference here is that your conversation is kept locally within your H100 PCIe GPU Instance, and only you have access to the prompts you submit, and the answers you receive.\n\nThe [Ollama model library](https://ollama.com/library) showcases a variety of models you can try out on your own helping you decide what’s the best tool for the job, be it a compact model, such as [TinyLlama](https://ollama.com/library/tinyllama) or a big one, like [Llama2](https://ollama.com/library/llama2); there are multimodal models, like [LLaVA](https://ollama.com/library/llava), which include a vision encoder that enables both visual and language understanding. There are also models made for specific use cases, such as [Code Llama](https://ollama.com/library/codellama), an LLM that can help in the software development process, or [Samantha Mistral](https://ollama.com/library/samantha-mistral), a model trained in philosophy, psychology, and personal relationships.\n\nBut as you may be thinking, interacting with a model from a terminal through an SSH connection is a good way to experiment, but doesn’t allow you to bring any value to your users. 
Luckily, Ollama’s features don’t stop there.\n\n\n### Serving a Model\n\nBesides its simplicity, the reason we decided to highlight this tool for a first hands-on approach toward AI is its ability to expose the model as an API that you can interact with through HTTP requests.\n\nBy default, Ollama’s API server won’t accept requests from devices over the internet, however, you can change this behavior by updating Ollama’s system service settings, [as described in their documentation](https://github.com/Ollama/Ollama/blob/main/docs/faq.md#setting-environment-variables-on-linux).\n\nOnce the Ollama service restarts, you can begin making HTTP calls to your server:\n\n```shell\ncurl http://your-instance.instances.scw.cloud:11434/api/chat -d '{\n \"model\": \"mixtral\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are system that acts as API server. Answer with the following JSON: 'common names' (array of strings, a maximum of 3), 'family' (string), 'genus' (string), 'specific epithet' (string), 'distribution' (array of strings), 'origin' (array of strings), 'known uses' (a JSON object with a field named 'description' where you specify its uses by industry, and the following boolean fields: medicinal, edible).\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Lactuca sativa\"\n }\n ],\n \"stream\": false\n}'\n```\n\n* The “model” attribute lets you specify which model you want to chat with, ensuring flexibility in different use cases where more than one model is required to obtain the desired response.\n* The “messages” attribute allows you to specify messages by role. In this case, the message with the system role lets you define how the model should interact with the user messages. 
The message with the user role is the user prompt fed to the model.\n* The “stream”: false attribute will make the server reply with a single JSON, instead of a stream of objects split by token strings.\n\nThe API’s response to the previous request would look like this:\n\n```json\n{\n \"model\": \"mixtral\",\n \"created_at\": \"2023-12-31T14:35:23.089402623Z\",\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \" {\\n\\\"common\\\\_names\\\": [\\\"garden lettuce\\\", \\\"lettuce\\\", \\\"cultivated lettuce\\\"],\\n\\\"family\\\": \\\"Asteraceae\\\",\\n\\\"genus\\\": \\\"Lactuca\\\",\\n\\\"specific\\\\_epithet\\\": \\\"sativa\\\",\\n\\\"distribution\\\": [\\\"Native to the Mediterranean region, now widely cultivated worldwide\\\"],\\n\\\"origin\\\": [\\\"Originally domesticated in ancient Egypt over 4500 years ago\\\"],\\n\\\"known\\\\_uses\\\": {\\n\\\"description\\\": \\\"Lactuca sativa is primarily used as a leaf vegetable in salads and sandwiches. It is also used in soups, wraps, and other culinary applications. The leaves can be eaten raw or cooked.\\\",\\n\\\"medicinal\\\": true,\\n\\\"edible\\\": true\\n}\\n}\"\n },\n \"done\": true,\n // ... Skipped for simplicity\n}\n\n```\n\nHaving an API accessible over HTTP will give you the ability to empower your products and services by taking advantage of the model(s) of your choosing, and the guidance provided by your “system prompts”.\n\n\n### Integrating with your applications\n\nBeing able to interact with the model through an HTTP endpoint gives you the flexibility to call it from basically any device, platform, and programming language, and if you’re already using Python or JavaScript, there are official [Ollama libraries](https://ollama.com/blog/python-javascript-libraries) you can use to abstract some complexity away. 
Here’s the default example for the Python library:\n\n```python\nfrom ollama import Client\n\nOLLAMA_API_URL = \"http://your-instance.instances.scw.cloud:11434\"\nollama_client = Client(host=OLLAMA_API_URL)\n\nresponse = ollama_client.chat(model='llama2', messages=[\n{\n 'role': 'user',\n 'content': 'Why is the sky blue?',\n},\n])\n\nprint(response['message']['content'])\n```\n\nAssuming you already have deployed your services using Instances (Virtual Machines), bare metal, Elastic Metal, or a Serverless solution, making them talk to your model is only a matter of pointing them in the right direction, either by using regular HTTP calls using your preferred client, or one of the official libraries. For more information, check out Ollama’s [GitHub repository](https://github.com/Ollama/Ollama).\n\n\n## In conclusion\n\nEven though Ollama’s current tagline is “Get up and running with large language models, locally”, as you can see, it can be tweaked to serve its API over the internet and integrate with your existing software solutions in just a few minutes. 
Even if you decide to use a different approach when going to production, It is a great resource that can help you get familiar with the process of running and communicating with a large set of LLMs.\n\nNote: Even though there’s community interest in a built-in authentication method, currently Ollama does not prevent unauthorized access to the API, which means you should take measures to protect it using your preferred method ([Nginx Proxy Manager](https://nginxproxymanager.com/), or following and adapting [this guide](https://www.scaleway.com/en/docs/tutorials/nginx-reverse-proxy/) for instance) so it only accepts requests from your application server, for instance.\n\nThe open source tooling ecosystem around AI has skyrocketed during the last few years, and will continue to evolve, making it even easier for us developers to leverage AI in our applications without necessarily having to understand what’s happening under the hood: you can be a successful web developer without even understanding what the [V8 engine](https://v8.dev/) is, the same way you don’t need to understand how your car’s engine works before being able to drive.\n\nThis blog post guided you through one of the simplest approaches towards helping developers, and technologists in general, understand that “AI is doable” and it doesn’t take a team of AI researchers and years of studying to harness its power!\n","createdAt":"2024-03-08T10:29:24.381Z","updatedAt":"2024-04-19T18:43:35.722Z","publishedAt":"2024-03-08T12:51:24.879Z","locale":"en","tags":"AI\nH100\nopen source","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":6,"excerpt":"Tooling around AI has made it possible for us to use its powers without having to understand what’s happening under the hood, just like we don’t have to know how a car engine works before driving it.","author":"Diego Coy","h1":"Ollama: from zero to running an LLM in less than 2 
minutes!","createdOn":"2024-03-08","image":{"data":{"id":3363,"attributes":{"name":"AI-usecases-Generative-AI-Illustration-1920X1080.webp","alternativeText":null,"caption":null,"width":1920,"height":1080,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp","hash":"large_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59","mime":"image/webp","name":"large_AI-usecases-Generative-AI-Illustration-1920X1080.webp","path":null,"size":"383.44","width":1000,"height":563},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp","hash":"small_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59","mime":"image/webp","name":"small_AI-usecases-Generative-AI-Illustration-1920X1080.webp","path":null,"size":"116.01","width":500,"height":281},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp","hash":"medium_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59","mime":"image/webp","name":"medium_AI-usecases-Generative-AI-Illustration-1920X1080.webp","path":null,"size":"236.45","width":750,"height":422},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp","hash":"thumbnail_AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59","mime":"image/webp","name":"thumbnail_AI-usecases-Generative-AI-Illustration-1920X1080.webp","path":null,"size":"35.75","width":245,"height":138}},"hash":"AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59","ext":".webp","mime":"image/webp","size":1166.95,"url":"https://www-uploads.scaleway.com/AI_usecases_Generative_AI_Illustration_1920_X1080_b7e6835d59.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2
024-03-13T15:51:30.699Z","updatedAt":"2024-04-09T23:40:40.615Z"}}}},{"title":"get-started-AI-cost-emissions-mindmatch","path":"get-started-ai-cost-emissions-mindmatch/","description":"_This a guest post by Zofia Smoleń, Founder of Polish startup [MindMatch](https://mindmatch.pl/), a member of Scaleway's [Startup Program](https://www.scaleway.com/en/startup-program/) 🚀_\n\nOne of the greatest developments of recent years was making computers speak our language. Scientists have been working on language models (which are basically models predicting next sequence of letters) for some time already, but only recently they came up with models that actually work - Large Language Models (LLMs). The biggest issue with them is that they are… Large.\n\nLLMs have billions of parameters. In order to run them, you have to own quite a lot of computer power and use a significant amount of energy. For instance, OpenAI spends $700 000 daily on ChatGPT, and their model is highly optimized. For the rest of us, this kind of spending is neither good for your wallet, nor for the climate.\n\nSo in order to limit your spending and carbon footprint, you cannot just use whatever OpenAI or even Hugging Face provides. You have to dedicate some time and thought to come up with more frugal methods of getting the job done. That is exactly what [Scaleway Startup Program member] MindMatch has been doing lately.\n\nMindMatch is providing a place where Polish patients can seek mental help from specialists. Using an open-source LLM from Hugging Face, MindMatch recognizes their patients’ precise needs based on a description of their feelings. With that knowledge, MindMatch can find the right therapy for their patients. It is a Polish-only website, but you can type in English (or any other language) and the chatbot ([here](https://mindmatch.pl/chat)) will understand you and give you its recommendation. 
In this article, we wrap their thoughts on dealing with speed and memory problems in production.\n\n\n## 1. Define your needs\n\nWhat do you need to do exactly? Do you need to reply to messages in a human-like manner? Or do you just need to classify your text? Is it only topic extraction? \n\nRead your bibliography. Check how people approached your task. Obviously, start from the latest papers, because in AI (and especially Natural Language Processing), all the work becomes obsolete and outdated very quickly. But… taking a quick look at what people did before Transformers (the state-of-the-art model architecture behind ChatGPT) can do no harm. Moreover, you may find solutions that resolve your task almost as well as any modern model would (if your task is comparatively easy) and are simpler, faster and lighter.\n\nYou could start by simply looking at articles on Towards data science, but we also encourage you to browse through Google Scholar. A lot of work in data science is documented only in research papers so it actually makes sense to read them (as opposed to papers in social science).\n\nWhy does this matter? You don’t need a costly ChatGPT-like solution just to tell you whether your patient is talking about depression or anxiety. Defining your needs and scouring the internet in search of all solutions applied so far might give you a better view on your options, and help select those that make sense in terms of performance and model size.\n\n\n## 2. Set up your directory so that you can easily switch between different models and architectures\n\nThis is probably the most obvious step for all developers, but make sure that you store all the models, classes and functions (and obviously constants - for example labels that you want to classify) in a way that allows you to quickly iterate, without needing to dig deep into code. This will make it easier for you, but also for all non-technical people that will want to understand and work on the model. 
\n\nWhat worked well for MindMatch was even storing all the dictionaries in an external database that was modifiable via Content Management Systems. One of those dictionaries was a list of classes used by the model. This way non-technical people were able to test the model. Obviously, to reduce the database costs, MindMatch had to make sure that they only pull those classes when necessary.\n\nAlso, the right documentation will make it easier for you to use MLOps tools such as Mlflow. Even if it is just a prototype yet, it is better for you to prepare for the bright future of your product and further iterations.\n\nThere is a lot of information and guidance about how to set the directory so that it is neat and tidy. Browse Medium and other portals until you find enough inspiration for your purpose.\n\n\n## 3. Choose the right deployment model\n\nNow you’ve defined your needs, it’s time to choose the right solution. Since you want to use LLMs, you will most likely not even think about training your own model from scratch (unless you are a multi-billion company or a unicorn startup with high aspirations). So your options are limited to pre-trained models.\n\nFor the pre-trained models, there are basically two options. You can either call them through an API and get results generated on an external computer instance (what OpenAI offers), or you can install the model on your computer and run it there as well (that is what Hugging Face offers, for example).\n\nThe first option is usually more expensive, but that makes sense - you are using the computer power of another company, and it should come with a price. This way, you don’t have to worry about scalability. Usually, proprietary models like OpenAI’s work like that, so on top of that you also pay a fee for just using the model. But some companies producing open source models, like Mistral, also provide APIs. \n\nThe second option (installing the model on your computer) comes only with open source models. 
So you don’t pay for the model itself, but you have to run it on your computer. This option is often chosen by companies who don’t want to be dependent on proprietary models and prefer to have more control over their solution. It comes with a cost: that of storage and computing power. It is pretty rare for organizations to own physical instances with memory sufficient for running LLM models, so most companies (like MindMatch) choose to use cloud services for that purpose.\n\nThe choice between proprietary and open-source models depends on various factors, including the specific needs of the project, budget constraints, desired level of control and customization, and the importance of transparency and community support. In many cases it also depends on the level of domain knowledge within the organization. Proprietary models are usually easier to deploy.\n\n\n## 4. Fit the model to your purpose\n\nThe simpler the better. You should look for models that exactly match your needs. Assuming that you defined your needs already and did your research on Google Scholar, you should already know what solutions you are looking for. What now, then? Chances are, there are already at least a dozen of models that can solve your problem.\n\nWe strongly advise you to have a look at Hugging Face’s “Models” section. Choose the model type; and then, starting from the most popular (it usually makes the most sense), try those models on your data. Pay particular attention to the accuracy and size of the model. The smaller the model is, the cheaper it is. As for accuracy, remember that your data is different from what the model was trained on. So if you want to use your solution for medical applications, you might want to try models that were trained on medical data.\n\nAlso, remember that the pre-trained models are just language models. They don’t have any specialist knowledge. In fact, they rarely see any domain-specific words in training data. 
So don’t expect the model to talk easily about Euphyllophytes plants without any additional fine-tuning, Retrieval Augmented Generation (RAG) or at least prompt engineering. Any of those augmentations come with higher computing power cost.\n\nSo you need to be smart about what exactly you make your model do. For example, when MindMatch tried to use zero-shot classification to recognize ADHD (a phrase rarely seen in training datasets), they decided to make it recognize Hyperactivity instead. Hyperactivity being a more frequent keyword that could easily act as a proxy for ADHD, allowed MindMatch to improve accuracy without deteriorating speed.\n\n\n## 5. Run it on the right machine\n\nGPU or CPU? Many would assume that the answer lies simply between the speed and the price, as GPUs are generally more expensive and faster. That is usually true, but not always. Here are a few things to consider.\n\n\n### Model Size, Complexity and Parallelisation\n\nLarge and complex models, like GPT-4, benefit significantly from the processing power of GPUs, especially for tasks like training or running multiple instances simultaneously. GPUs have many more computing cores than CPUs, making them adept at parallel processing. This is particularly useful for the matrix and vector computations common in deep learning.\nBut in order to start up GPU processing data must be transferred from RAM to GPU memory (GRAM), which can be costly. If the data is large and amenable to parallel processing, this overhead is offset by faster processing on the GPU.\n\nGPUs may not perform as well with tasks that require sequential processing, such as those involving Recurrent Neural Networks (RNNs) or Long Short-Term Memory (LSTM) networks (this applies to some implementations of Natural Language Processing). 
The sequential computation in LSTM layers, for instance, doesn't align well with the GPU's parallel processing capabilities, leading to underutilization (10% - 20% GPU load).\n\nDespite their limitations in sequential computation, GPUs can be highly effective during the backpropagation phase of LSTM, where derivative computations can be parallelized, leading to higher GPU utilization (around 80%). \n\n\n### Inference vs. Training\n\nFor training large models, GPUs are almost essential due to their speed and efficiency (not in all cases, as mentioned above). However, for inference (especially with smaller models or less frequent requests), [CPUs can be sufficient and more cost-effective](https://www.scaleway.com/en/blog/why-cpus-also-make-sense-for-ai-inference/). If you are using a pre-trained model (you most probably are), you only care about inference, so don’t assume that GPU will be better - compare it with CPUs.\n\n\n### Scalability, Budget and Resources\n\nIf you need to scale up your operations (e.g., serving a large number of requests simultaneously), GPUs offer better scalability options compared to CPUs. \nGPUs are more expensive and consume more power. If budget and resources are limited, starting with CPUs and then scaling up to GPUs as needed can be a practical approach.\n\n\n## 6. Optimize it even further (for readers with technical backgrounds)\n\nAre all of the above obvious to you? Here are other techniques (that often require you to dig a little deeper) that allow for optimized runtime and memory.\n\n\n### Quantization\n\nQuantization is a technique used to optimize Large Language Models (LLMs) by reducing the precision of the model’s weights and activations. Typically, LLMs use 32 or 16 bits for each parameter, consuming significant memory. Quantization aims to represent these values with fewer bits, often as low as eight bits, without greatly sacrificing performance.\n\nThe process involves two key steps: rounding and clipping. 
Rounding adjusts the values to fit into the lower bit format, while clipping manages the range of values to prevent extremes. This reduction in precision and range enables the model to operate in a more compact format, saving memory space.\n\nBy quantizing a model, several benefits arise:\n- Reduced Memory Footprint: The model occupies less space, allowing larger models to fit into the same hardware\n- Enhanced Transfer Efficiency: It speeds up the model, especially in scenarios where bandwidth limits performance.\n\nHowever, quantizing LLMs comes with challenges:\n- Quantizing weights is straightforward as they are fixed post-training. But quantizing activations (input of transformer blocks) is more complex due to their varying range and outliers\n- In many GPUs, quantized weights (INT8) need to be converted back to higher precision (like FP16) for calculations, affecting efficiency\n- Managing the dynamic range of activations is crucial, as they often contain outliers. Techniques like selective precision (using higher precision for certain activations) or borrowing the dynamic range from weights are used.\n\n\n### Pruning\n\nPruning involves identifying and removing parameters in a model that are either negligible or redundant. One common method of pruning is sparsity, where values close to zero are set to zero, leading to a more condensed matrix representation that only includes non-zero values and their indices. This approach reduces the overall space occupied by the matrix compared to a fully populated, dense matrix.\n\nPruning can be categorized into two types:\n\n- Structured Pruning: This method reduces the model's size by eliminating entire structural elements like neurons, channels, or layers. Structured pruning effectively decreases the model size while preserving the general structure of the Large Language Model (LLM). 
It is more scalable and manageable for larger models compared to unstructured pruning\n- Unstructured Pruning: In this approach, individual weights or neurons are targeted independently, often by setting a threshold and zeroing out parameters that fall below it. It results in a sparser, irregular model structure that may require specialized handling. Unstructured pruning typically needs further fine-tuning or retraining to restore model accuracy. In large models with billions of parameters, this can become a complex and time-consuming process. To address this, techniques such as iterative fine-tuning, combining parameter-efficient tuning with pruning, and the implementation of SparseGPT are employed.\n\nSparseGPT, specifically, adopts a one-shot pruning strategy that bypasses the need for retraining. It approaches pruning as a sparse regression task, using an approximate solver that seeks a sufficiently good solution rather than an exact one. This approach significantly enhances the efficiency of SparseGPT.\n\nIn practice, SparseGPT has been successful in achieving high levels of unstructured sparsity in large GPT models, such as OPT-175B and BLOOM-176B. It can attain over 60% sparsity - a higher rate than what is typically achieved with structured pruning - with only a minimal increase in perplexity, which measures the model's predictive accuracy.\n\n\n### Distillation\n\nDistillation is a method of transferring knowledge from a larger model (teacher) to a smaller one (student). This is done by training the student model to mimic the teacher’s behavior, focusing on matching either the final layer outputs (logits) or intermediate layer activations. An example of this is DistilBERT, which retains most of BERT's capabilities but at a reduced size and increased speed. Distillation is especially useful when training data is scarce.\nHowever, be careful if you want to distill a model! 
Many state-of-the-art LLMs have restrictive licenses that prohibit using their outputs to train other LLMs. It is usually ok though, to use open-source models to train other LLMs.\n\n\n### Model serving techniques\n\nModel serving techniques aim to maximize the use of memory bandwidth during model execution. Key strategies include:\n- In-flight Batching: Processing multiple requests simultaneously, continuously replacing finished sequences with new requests to optimize GPU utilization.\n- Speculative Inference: Generating multiple future tokens based on a draft model, and then verifying or rejecting these predictions in parallel. This approach allows for faster text generation compared to the traditional token-by-token method.\n\n\n## Conclusion\n\nThere are many ways to optimize model performance, leading not only to lower costs but also to less waste and lower carbon footprint. Start from a high-level definition of your needs, test different solutions and then dig into details, reducing the cost even further. MindMatch still is testing different options of reaching satisfying accuracy with lower computational costs - it is a never ending process.\n","createdAt":"2024-02-26T14:20:53.327Z","updatedAt":"2024-02-26T14:25:12.462Z","publishedAt":"2024-02-26T14:25:12.395Z","locale":"en","tags":"AI\nStartups\nSustainability","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":7,"excerpt":"How can startups take their first steps with Large Language Models (LLMs)? Leveraging AI needn't cost the earth, explains MindMatch's Zofia Smoleń","author":"Zofia Smoleń","h1":"How to get started in AI without excessive cost, or emissions! 
- MindMatch guest post","createdOn":"2024-02-26","image":{"data":{"id":3240,"attributes":{"name":"Automatic-Speech-Recognition-AI-Illustration-Blog.webp","alternativeText":null,"caption":null,"width":1216,"height":752,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp","hash":"large_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451","mime":"image/webp","name":"large_Automatic-Speech-Recognition-AI-Illustration-Blog.webp","path":null,"size":75.83,"width":1000,"height":618},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp","hash":"small_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451","mime":"image/webp","name":"small_Automatic-Speech-Recognition-AI-Illustration-Blog.webp","path":null,"size":28.21,"width":500,"height":309},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp","hash":"medium_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451","mime":"image/webp","name":"medium_Automatic-Speech-Recognition-AI-Illustration-Blog.webp","path":null,"size":51,"width":750,"height":464},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp","hash":"thumbnail_Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451","mime":"image/webp","name":"thumbnail_Automatic-Speech-Recognition-AI-Illustration-Blog.jpg","path":null,"size":8.66,"width":245,"height":152}},"hash":"Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451","ext":".webp","mime":"image/webp","size":528.03,"url":"https://www-uploads.scaleway.com/Automatic_Speech_Recognition_AI_Illustration_Blog_e8870a4451.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"
2024-02-15T13:43:21.303Z","updatedAt":"2024-02-26T14:23:17.313Z"}}}},{"title":"infrastructures-for-llms-in-the-cloud","path":"infrastructures-for-llms-in-the-cloud/","description":"Open source makes LLMs (large language models) available to everyone. There are plenty of options available, especially for inference. You’ve probably heard of [Hugging Face’s inference library](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client), but there’s also [OpenLLM](https://github.com/bentoml/OpenLLM), [vLLM](https://docs.vllm.ai/en/latest/), and many others. \n\nThe main challenge, especially if you’re a company like Mistral AI building new LLMs, is that the architecture of your LLM has to be supported by all these solutions. They need to be able to talk to Hugging Face, to NVIDIA, to OpenLLM and so on.\n\nThe second challenge is the cost, especially that of the infrastructures you’ll need to scale your LLM deployment. For that, you have different solutions: \n\n1. Choosing the right GPUs (your LLM has to fit with them)\n2. Choosing the right techniques:\n- Quantization, which involves reducing the number of bytes used by the variables, so you can fit larger models into smaller memory constraints. That’s a give and take between the two, as that can have impacts on the accuracy of your model and its performance results\n- Fine-tuning methods, like parameter-efficient fine-tuning ([PEFT](https://github.com/huggingface/peft)). With PEFT methods, you can significantly decrease computational and memory cost by only fine-tuning a small number of (extra) model parameters instead of all the model's parameters. 
And you can combine PEFT methods with quantization too.\n\n\nThen you have to decide whether you host it yourself; you use a PaaS solution; or ready-to-use API endpoints, like what OpenAI does.\n\n\n## Choosing the right GPU\n\n![NVIDIA H100 - L4 - L40S](https://www-uploads.scaleway.com/NVIDIA_H100_L4_L40_S_b997aec7f9.webp)\n\nThe above is Scaleway’s offering, but similar offerings are currently being installed with most major cloud providers. \n\n- **H100 PCIe 5** is the flagship, NVIDIA’s most powerful GPU. It has interesting features like the Transformer Engine, a library for accelerating Transformer models on NVIDIA GPUs, including using 8-bit floating point (FP8) precision on Hopper and Ada Lovelace GPUs, to provide better performance with lower memory utilization in both training and inference. It speeds up training of Transformer models, meaning you can put twice the amount of variables in memory, in 8 bits instead of 16. Furthermore, NVIDIA’s Library helps make these changes simpler; plus a large amount of memory and memory bandwidth are key, as the faster you can load your memory, the faster your GPU will be\n- **L4 PCIe 4** can be seen as the modern successor to the NVIDIA T4, intended for inference, but perfectly capable of training smaller LLM models. Like H100, it can manage new data formats like FP8. It has less memory bandwidth than H100, but that may create some bottlenecks for certain use cases, like handling large batches of images for training computer vision models. In these cases, you may not see a significant performance boost compared with previous Ampere architecture for example. And unlike H100, this one has video and 3D rendering capabilities, so if you want to generate a synthetic dataset for computer vision with Blender, you can use this GPU\n- **L40S PCIe 4** is what NVIDIA considers as the new A100. It has twice the amount of memory as the L4, but with a larger memory bandwidth, and stronger compute performance too. 
For generative AI, according to NVIDIA, when you optimize your code with FP8 and so on, DGX with 8x A100 with 40 Gb NVlink can perform as well as 8 L40S PCIe 4 without NVLink, so that’s a powerful and interesting GPU.\n\n\n## Using GPU Instances tip 1: Docker images\n\n![NGC Catalog](https://www-uploads.scaleway.com/NGC_Catalog_6a93ebe2f5.webp)\n\nWhen using GPUs, use Docker images, and start with those offered by NVIDIA, which are free. This way, the code is portable, so it can run on your laptop, on a workstation, on a GPU Instance (whatever the cloud provider, so without lock-in), or on a powerful cluster (either with SLURM as the orchestrator if you’re in the HPC/AI world, or Kubernetes if you’re more in the AI/MLOps world).\n\nNVIDIA updates these images regularly, so you can benefit from performance improvements and bug/security fixes. A100 performance is significantly better now than it was at launch, and the same will apply to H100, L4 and so on. Also, there are a lot of time-saving features, which will allow you to make POCs more quickly, like framework and tools like NeMo, Riva and so on, which are available through the NGC catalog (above). \n\nThis also opens up the possibility to use an AI Enterprise license on supported hardware configurations, which is something typically only seen in cloud provider offers), which will give you support in case you meet bugs or performance issues, and even offers help from NVIDIA data scientists, to help you debug your code, and to get the best performance out of all of these softwares. And of course, you can choose your favorite platform, from PyTorch, TensorFlow, Jupyter Lab and so on.\n\n\n### Using Scaleway GPU Instances\n\nIn Scaleway’s GPU OS 12, we’ve already pre-installed Docker, so you can use it right out of the box. I’m often asked why there’s no CUDA or Anaconda preinstalled. The reason is these softwares should be executed inside the containers, because not all users have the same requirements. 
They may not be using the same versions of CUDA, cuDNN or Pytorch, for example, so it really depends on the user requirements. And it’s easier to use a container built by NVIDIA than installing and maintaining a Python AI environment. Furthermore, doing so makes it easier to reproduce results within your trainings or experiments.\n\nSo basically, you do this:\n\n```js\n## Connect to a GPU instance like H100-1-80G\n\nssh root@\u003creplace_with_instance_public_ip\u003e\n\n## Pull the Nvidia Pytorch docker image (or other image, with the software versions you need)\n\ndocker pull nvcr.io/nvidia/pytorch:24.01-py3\n[...]\n\n## Launch the Pytorch container\n\ndocker run --rm -it --runtime=nvidia \\\n-p 8888:8888 \\\n-p 6006:6006 \\\n-v /root/my-data/:/workspace \\\n-v /scratch/:/workspace/scratch \\\nnvcr.io/nvidia/pytorch:24.01-py3\n\n## You can work with Jupyter Lab, Pytorch etc…\n```\n\nIt’s much easier than trying to install your environment locally.\n\n\n\n## Using GPU Instances tip 2: MIG\n\n![MIG](https://www-uploads.scaleway.com/MIG_0309459e5f.webp)\n\nOne unique feature of the H100 is [MIG, or multi-instance GPU](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/index.html), which allows you to split your GPU into up to seven pieces. This is really useful when you want to optimize your workload. If you have workloads that don’t fully saturate GPUs, this is a nice way to have multiple workloads and maximize GPU utilization. It works with standalone VMs, and works really easily in Kubernetes. You request one GPU reference corresponding to the split you want to use for one GPU resource. \n\nIn Kubernetes, it’s is as easy as replacing in your deployment file the classic resource limits\n**nvidia.com/gpu: '1'**. 
by the desired MIG partition name, for **example, nvidia.com/mig-3g.40gb: 1**\n\n[Here’s the link](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/index.html) if you want to look into that.\n\n\n\n## Using GPU Instances tip 3: NVIDIA Transformer Engine \u0026 FP8\n\n![FP8](https://www-uploads.scaleway.com/FP_8_5f52cec619.webp)\n\nAll the latest generation of GPUs (available in the latest Nvidia GPU architecture, namely Hopper and Ada Lovelace) use the NVIDIA Transformer Engine, a library for accelerating Transformer models on NVIDIA GPUs, including using 8-bit floating point (FP8) precision on Hopper and Ada GPUs, to provide better performance with lower memory utilization in both training and inference.\n\nAs for their use of the FP8 data format, there are actually two kinds of FP8, which offer a tradeoff between the precision and the dynamic range of the numbers you can manipulate (cf. diagram). When training neural networks, both of these types may be utilized. Typically forward activations and weights require more precision, so the E4M3 datatype is best used during forward pass. In the backward pass, however, gradients flowing through the network typically are less susceptible to the loss of precision, but require higher dynamic range. Therefore they are best stored using E5M2 data format. This can even be managed automatically with the 'HYBRID' format ([more information here](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html#transformer_engine.common.recipe.Format)). \n\nThe Transformer Engine is not just for Transformers. As it can also optimize Linear operations, it can benefit other model architectures, like computer vision (cf. 
the [MNIST example](https://github.com/NVIDIA/TransformerEngine/tree/main/examples/pytorch/mnist)). So basically, you install
NVIDIA even has a [GitHub project](https://github.com/NVIDIA/GenerativeAIExamples/blob/main/examples/README.md) to allow you to build your first AI chatbot with RAG in just five minutes. \n\n\n## What you need to train a foundational LLM\n\nFirstly, a lot of money! LLaMA’s [white paper](https://arxiv.org/abs/2302.13971) says it took 21 days to train LLaMa using 2048 A100 80GB GPUs. We can't possibly speculate on what that costs, but [someone else has here](https://www.reddit.com/r/LocalLLaMA/comments/15ggfjl/cost_of_training_llama_2_by_meta/?rdt=58095) (hint: it's a lot!)\n\nYou’ll also need a team of experts… but not necessarily hundreds! Mistral AI’s Mixture beat GPT3.5 (according to [Mistral AI’s benchmark](https://mistral.ai/news/mixtral-of-experts/)) with a team of less than 20 people. \n\nLots of data will also be required: you may have to scrape the internet for that, or rely on a partnership to help you. Then the data will need to be prepared, i.e. cleaned and deduplicated.\n\nFinally, you’ll need lots of compute power! If we look at this NVIDIA graphic:\n\n![Time to train by LLM size](https://www-uploads.scaleway.com/Time_to_train_by_LLM_size_32baec3420.webp)\n\n…we see there’s a big leap between A100 and H100 (from one month to one week’s training time for the biggest models).\n\n\n## How to handle lots of data\n\nOur Superpod customers use Spark for the data preparation, which uses CPUs (in the range of 10 000 vCPUs), and around 100 TB of block storage, before the dataset is stored in Object Storage. 
Scaleway is currently working on a Spark managed cluster offer, by the way: watch this space!\n\nNVIDIA also provides tools like [NeMo data Curator](https://www.nvidia.com/en-us/ai-data-science/products/nemo/get-started/) (through NGC/Nvidia AI Enterprise, so we’re talking about containers), which has functions like data download and text extraction, text re-formatting and cleaning, quality filtering, document-level deduplication, multilingual downstream-task decontamination and more.\n\nEven with these tools, data preparation can take a long time, but it has to be done before you start the training.\n\n\n## How to start training\n\nTo start training, you’ll need more than one GPU, so the building blocks will be **NVIDIA DGX H100**, which are ready-to-use computers with a set maximal server configuration, so you’ve got the best of the best:\n\n- **8x NVIDIA H100 80GB GPUs With 640 Gigabytes of Total GPU Memory**\n- 18x NVIDIA® NVLink® connections per GPU\n- 900 gigabytes per second of bidirectional GPU-to-GPU bandwidth, thanks to NVLink\n- **4x NVIDIA NVSwitches™**\n- 7.2 terabytes per second of bidirectional GPU-to-GPU bandwidth\n- 1.5X more than previous generation\n- **10x NVIDIA ConnectX®-7 400 Gigabits-Per-Second Network Interface**\n- 1 terabyte per second of peak bidirectional network bandwidth\n- Dual Intel Xeon Platinum 8480C processors, 112 cores total, and 2 TB System Memory\n- 30 Terabytes NVMe SSD - High speed storage for maximum performance.\n\n\nTo build a Superpod, you take that server, then put 32 of them together, no more, no less. That's what NVIDIA calls a Scaleable Unit. If you scale up four scalable units, you have 128 nodes, and that’s the SuperPOD H100 system. 
Each of the four units is 1 ExaFLOPS of FP8 format for a total of up to 4 ExaFLOPS in FP8, and the cluster is orchestrated by NVIDIA Base Command Manager, so NVIDIA software, with a SLURM orchestrator, which can launch jobs across multiple computers to do the training.\n\nSo at Scaleway, we’ve got two [supercomputers](https://www.scaleway.com/en/ai-supercomputers/):\n\n**Jeroboam**, the smaller version of the cluster, which was intended to learn to write code that’s multi-GPU and multi-nodes:\n- **2 NVIDIA DGX H100 nodes (16 Nvidia H100 GPU)** \t\n- Up to 63,2 PFLOPS (FP8 Tensor Core)\n- 8 Nvidia H100 80GB SXM GPUs with NVlink up to 900 GB/s per node\n- Dual CPU Intel Xeon Platinum 8480C (112 cores total at 2GHz)\n- 2TB of RAM \n- 2x 1.92TB NVMe for OS\n- 30,72 TB NVMe for Scratch Storage\n\n- Throughput (for 2 DGX) : Up to 40 GB/s Read and 30 GB/s Write\n- Nvidia Infiniband GPU interconnect network up to 400 Gb/s (at cluster level)\n- 60TB of DDN high-performance, low latency storage.\n\n\n**Nabuchodonosor**, the ‘real thing’ for training, which is also built for people who’ll want to train LLMs with videos, not just text, thanks to the large amount of high-performance storage…\n- **127 NVIDIA DGX H100 nodes (1016 Nvidia H100 GPU)** \n- Up to 4 EFLOPS (FP8 Tensor Core)\n- 8 Nvidia H100 80GB SXM GPUs with NVlink up to 900 GB/s per node\n- Dual CPU Intel Xeon Platinum 8480C (112 cores total at 2GHz)\n- 2TB of RAM \n- 2x 1.92TB NVMe for OS\n- 30,72 TB NVMe for Scratch Storage\n\n- Nvidia Infiniband GPU interconnect network up to 400 Gb/s (at cluster level)\n- 1,8PB of DDN high-performance, low latency storage \n- Throughput (for 127 DGX) : Up to 2,7 TB/s Read and 1,95 TB/s Write\n\n\n## Training LLMs\n\n![Data parallelism](https://www-uploads.scaleway.com/Data_parallelism_d738f8be28.webp)\n\nThe challenge of training LLMs on Nabuchodonosor is that it’s an HPC user experience, which means SLURM jobs, not Kubernetes. 
It’s still containers, though, which you build on top of NVIDIA NGC container images (Pytorch, Tensorflow, Jax…). That’s why when you write your code with these NGC images, even with a single small GPU, your code will be able to scale more easily. One best practice is if you have, say, 100 nodes, don’t launch your jobs on all of them. Keep a few spare in case one or two GPUs fail (it happens!) That way, if you have any issues, you can relaunch your jobs by replacing the faulty nodes.\n\nYou’ll need to write your code in special ways, to maximize performance by using data parallelism and model parallelism (computing across multiple GPUs at the same time); you can use resources like [Deepspeed](https://www.deepspeed.ai/training/) for this.\n\nThen there’s the End-to-End framework [Nvidia NeMo](https://github.com/NVIDIA/NeMo), which will also help you build, finetune and deploy generative AI models.\n\n\n## Superpod challenges\n\n![Superpods in DC5](https://www-uploads.scaleway.com/Superpods_in_DC_5_c0492a8517.webp)\n\nScaleway’s supercomputers were built in just three to seven months, so it was quite a logistical challenge to make sure all the parts were received in time, and connected the right way… with more than 5000 cables! \n\nProviding power is also quite a challenge: the Nabuchodonosor Superpod system’s power usage is 1.2 MW, which means we can only put two DGX units in each rack, so it’s not a great usage of data center surface space. Then there’s the cost of electricity, which is five times more in France than in the USA, for example. But as French electricity’s carbon intensity is very low, it generates around seven times less emissions than in Germany, for example. Furthermore, as all of Scaleway’s AI machines are hosted in DC5, which has no air conditioning and therefore uses 30-40% less energy than standard data centers, we can say this is one of the world’s most sustainable AI installations. 
[More on AI and sustainability here](https://www.scaleway.com/en/blog/how-sustainable-is-ai/).\n\n\n## What’s next?\n\n![NVIDIA Grace Hopper Superchip](https://www-uploads.scaleway.com/NVIDIA_Grace_Hopper_Superchip_781c5ab894.webp)\n\nScaleway will launch this year the [NVIDIA GH200 Grace Hopper Superchip](https://resources.nvidia.com/en-us-grace-cpu/nvidia-grace-hopper-2), which combines Grace ARM CPUs with Hopper GPUs in the same device, which are linked at 900 GB/s. You can connect 256 of these devices together, which is much larger than what you can connect in the DGX configuration described above (the 8 GPUs connected at 900 GB/s with NVlink in a single DGX H100 server node). And if you need more you can even connect several mesh of 256 GH200 via Infiniband at 400Gb/s. So it’s really for use cases where the memory is the bottleneck, so it’s really for HPC, and for inference of LLMs. When they’re all put together, it’s like a giant GPU, designed for the most demanding use cases, like healthcare and life sciences, for example. \n","createdAt":"2024-02-21T14:45:08.671Z","updatedAt":"2024-02-22T13:48:55.171Z","publishedAt":"2024-02-22T13:48:55.160Z","locale":"en","tags":"AI","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":6,"excerpt":"What do you need to know before getting started with state-of-the-art AI hardware like NVIDIA's H100 PCIe 5, or even Scaleway's Jeroboam or Nabuchodonosor supercomputers? 
Look no further...","author":"Fabien da Silva","h1":"Infrastructures for LLMs in the cloud","createdOn":"2024-02-21","image":{"data":{"id":2960,"attributes":{"name":"Nabu-SuperPod-Card.webp","alternativeText":null,"caption":null,"width":1216,"height":752,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Nabu_Card_827fe79a9e.webp","hash":"large_Nabu_Card_827fe79a9e","mime":"image/webp","name":"large_Nabu-Card.png","path":null,"size":1061.39,"width":1000,"height":618},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Nabu_Card_827fe79a9e.webp","hash":"small_Nabu_Card_827fe79a9e","mime":"image/webp","name":"small_Nabu-Card.png","path":null,"size":267.23,"width":500,"height":309},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Nabu_Card_827fe79a9e.webp","hash":"medium_Nabu_Card_827fe79a9e","mime":"image/webp","name":"medium_Nabu-Card.png","path":null,"size":603.54,"width":750,"height":464},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Nabu_Card_827fe79a9e.webp","hash":"thumbnail_Nabu_Card_827fe79a9e","mime":"image/webp","name":"thumbnail_Nabu-Card.webp","path":null,"size":"41.24","width":245,"height":152}},"hash":"Nabu_Card_827fe79a9e","ext":".webp","mime":"image/webp","size":287.74,"url":"https://www-uploads.scaleway.com/Nabu_Card_827fe79a9e.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2023-11-15T06:04:36.097Z","updatedAt":"2023-11-15T10:18:01.608Z"}}}}],"popularPosts":[{"title":"Big, Efficient, Open: The AI Future We Saw Coming","path":"big-efficient-open-the-ai-future-we-saw-coming/","description":"\nLast November, at ai-PULSE, we told you where AI was heading: big, efficient, and open. Fast forward to last week’s AI Action Summit, and we saw it all play out in real time.\n \n## Big: Scaling Up for the AI Era\n\nAI needs infrastructure. 
That’s not news to us at Scaleway, but it’s now a message backed by serious commitments. French President Emmanuel Macron announced a €109 billion investment to reinforce Europe’s AI capabilities.\n\nWe’re doing our part. The iliad Group announced a massive [€3 billion investment](https://www.iliad.fr/en/actualites/article/the-iliad-group-is-investing-3-billion-in-ai) to build out the future of AI infrastructure. Our goal is to make sure Europe has the compute power to compete at the highest level. Because you need to reach a critical size to succeed in AI.\n\n![AI Action Summit Business Day (France Digitale – David AROUS)](https://www-uploads.scaleway.com/ai_action_summit_at_station_f_720_10bb3aaa3c.jpg)\n\nScaleway was the first European cloud provider to invest in [GPU clusters](https://www.scaleway.com/en/custom-built-clusters/) back in 2023. That early move paid off: we still have one of the biggest pools in Europe and some of the best models today were trained on our infrastructure. But being first isn’t always easy. Staying ahead means constant innovation, and we’re working hard to ensure our compute, network, and storage remain fast, reliable, and built for the future.\n\nWe’re not stopping here. Our 2025 roadmap is ambitious: we want to make access to large-scale compute easier than ever. AI’s future depends on [solid infrastructure](https://www.scaleway.com/en/cluster-on-demand/), and we’re building it.\n\n## Efficient: Doing More with Less\n\nEven before the AI Action Summit, DeepSeek stole the show by proving that AI can be smarter, not just bigger. And at the summit itself, Kyutai showed why efficiency matters. Their [Hibiki model](https://kyutai.org/2025/02/10/hibiki.html) is a voice-to-voice AI translation system that can run directly on a smartphone. It is a perfect example of what happens when a team of smart people focuses on efficiency.\n\nIndeed, raw power isn’t everything. 
If we were in Asimov’s Foundation series, Europe wouldn’t be the Empire flexing its might. We’d be the Foundation, outthinking the competition. In complex systems, smart always beats big.\n\nAnd let’s be real: Europe isn’t ignoring the climate crisis. Growth needs to be smart. France has one of the cleanest energy mixes in the world, thanks to nuclear power. That means AI models trained on French infrastructure have a lower carbon footprint than most alternatives.\n\n![Scaleway Environmental Footprint Calculator](https://www-uploads.scaleway.com/environmental_footprint_calculator_scaleway_console_1_d4dc232f39.png)\n\nAt Scaleway, we don’t just talk about efficiency. We track it. That’s why we launched the first version of our [Environmental Footprint Calculator](https://www.scaleway.com/en/environmental-footprint-calculator/). In 2025, we’ll keep expanding it, giving users clearer insights into the impact of the technology they use. Knowing your footprint matters. It helps you make smarter choices.\n\n## Open: AI in an Uncertain World\n\nThe AI Action Summit also made one thing clear: we live in unpredictable times. Yesterday’s allies might not be tomorrow’s. That’s why openness matters more than ever.\n\nOpen source is our best safeguard against uncertainty. It lets you control your future. And it also keeps innovation accessible to all. Because open source is about giving back. India’s role as co-chair of the summit reminds us that AI shouldn’t just serve a select few. It should benefit everyone, everywhere.\n\nFor Scaleway, open source is the best way to keep our clients in control, not locked in. We believe in giving choices and alternatives. That’s why, when we built our Generative APIs, we set ourselves a strict rule: make it compatible with OpenAI’s API. The result? 
Anyone can switch to the [open-weight models](https://www.scaleway.com/en/generative-apis/) we provide by changing a single line of code.\n\n![Scaleway Model-as-a-Service](https://www-uploads.scaleway.com/model_as_a_service_scaleway_console_1_217c3b481b.png)\n\nOpen source isn’t going anywhere. Thanks to the battle for talent, state-of-the-art open-weight models will keep emerging in 2025. The best contributors don’t want to build behind closed doors. They want to share their work with the world. They want to be on the right side of history, as [Sam Altman](https://www.vice.com/en/article/openai-ceo-sam-altman-says-theyve-been-on-the-wrong-side-of-history/) would say.\n\nAt Scaleway, we’re all in. We’ll keep deploying the latest and greatest open-weight models, ensuring that you stay in control.\n\n## 2025: The AI Playbook Remains the Same \n\nBig. Efficient. Open. If the AI Action Summit confirmed anything, it’s that these three principles will continue to define AI in 2025.\n\nWant to see what’s next? Mark your calendar for November 18th, because [ai-PULSE](https://www.ai-pulse.eu) will be back. And we’re just getting started.\n","createdAt":"2025-02-19T13:21:36.061Z","updatedAt":"2025-02-21T08:56:29.450Z","publishedAt":"2025-02-19T16:12:41.378Z","locale":"en","tags":"ai-PULSE\nAI Action Summit\n","popular":true,"articleOfTheMonth":true,"category":"Build","timeToRead":4,"excerpt":"Last week's AI Action Summit highlighted key principles shaping the future of AI: Big, Efficient, and Open. 
Read the full article for an inside look at the event and insights about it.","author":"Frédéric Bardolle","h1":"Big, Efficient, Open: The AI Future We Saw Coming","createdOn":"2025-02-19","image":{"data":{"id":4355,"attributes":{"name":"slack-imgs.jpg","alternativeText":null,"caption":null,"width":720,"height":480,"formats":{"small":{"ext":".jpg","url":"https://www-uploads.scaleway.com/small_slack_imgs_eb15652f7e.jpg","hash":"small_slack_imgs_eb15652f7e","mime":"image/jpeg","name":"small_slack-imgs.jpg","path":null,"size":46.06,"width":500,"height":333,"sizeInBytes":46063},"thumbnail":{"ext":".jpg","url":"https://www-uploads.scaleway.com/thumbnail_slack_imgs_eb15652f7e.jpg","hash":"thumbnail_slack_imgs_eb15652f7e","mime":"image/jpeg","name":"thumbnail_slack-imgs.jpg","path":null,"size":12.15,"width":234,"height":156,"sizeInBytes":12151}},"hash":"slack_imgs_eb15652f7e","ext":".jpg","mime":"image/jpeg","size":82.37,"url":"https://www-uploads.scaleway.com/slack_imgs_eb15652f7e.jpg","previewUrl":null,"provider":"aws-s3","provider_metadata":null,"createdAt":"2025-02-20T15:28:22.024Z","updatedAt":"2025-02-20T15:28:22.024Z"}}}},{"title":"What Is a Document Database? A Comprehensive Guide","path":"what-is-a-document-database-a-comprehensive-guide/","description":"The ever-changing landscape of data management has given rise to a new era of database technology. 
Document databases in particular were designed to better handle the vast amounts of semi-structured and unstructured data generated by modern products and applications and to keep pace with the growing volume and variety of data that demands flexible, scalable, and fast processing.\n\nIn this article, we’ll dive into what document databases are, how they work, and why they’ve become a preferred solution for managing complex data, offering flexibility that traditional databases struggle to provide.\n\n## A brief history of databases: from relational to document databases\n\nRelational databases, the backbone of data storage since the 1970s, were designed for structured data. Built on a fixed schema, they efficiently organize data into rows and columns, enabling easy querying and analysis. However, as the types of data businesses generate have evolved—think images, videos, and IoT data—so too have the demands placed on databases. \n\nThe rise of NoSQL databases in the early 2000s offered a solution to these new demands by providing flexible, schema-less architectures capable of storing vast amounts of unstructured data. Among these NoSQL options, document databases have emerged as a versatile tool that can adapt to the complexity and scale of modern data workloads.\n\n### Back to basics: what is a document database?\n\nA document database is a type of NoSQL database that stores data in document-like structures, most commonly using JSON or BSON formats. Each document represents a record, and within these documents, data is organized as key-value pairs, with the ability to nest arrays and objects. \n\nThe beauty of document databases lies in their flexibility. Unlike relational databases, which require data to fit into predefined schemas, document databases allow data to be stored in its original, often messy form. This makes them ideal for managing unstructured or semi-structured data **without requiring significant reformatting or processing**.\n\n## Structured vs. 
unstructured data\n\nData generally falls into two categories: structured and unstructured.\n\n- **Structured data** fits neatly into predefined formats, such as spreadsheets or relational databases\n- **Unstructured data**, on the other hand, doesn’t conform to a specific model. Examples include images, audio files, videos, and social media posts—types of data that relational databases struggle to handle efficiently.\n\nDocument databases bridge this gap by offering a flexible schema. This means you can store various types of data—whether structured, semi-structured, or unstructured—in their natural form, without having to alter or standardize them.\n\n## How document databases work\n\nIn a document database, each document is self-contained, meaning the structure of one document can differ significantly from the next. These databases are highly adaptable because they allow changes to be made on the fly, without the need for complex schema migrations.\n\n- **Flexible schema**: new fields can be added to documents at any time, providing agility in development and reducing the operational burden.\n- **Variety of formats**: multiple data formats can be stored **within the same collection**, enabling you to manage diverse data types together.\n\nThis architecture makes document databases** highly scalable** and** easy to maintain**, especially in applications where data is continuously changing or evolving.\n\n## Benefits of document databases\n\n**Reduced operational overhead**\nTraditional relational databases often require extensive data transformation to fit structured schemas, which can be labor-intensive and time-consuming. 
Document databases eliminate much of this overhead by allowing data to be stored in its original format, cutting down on reformatting efforts and freeing up resources to focus on more valuable tasks.\n\n**Improved agility**\nDocument databases’ flexible schema design enables rapid iterations and updates without the need for complex schema alterations. This allows teams to ship new features faster and adjust data models as business needs evolve, fostering greater agility in product development.\n\n**Performance**\nWhen it comes to performance, document databases have a distinct advantage in handling hierarchical data. By storing all relevant information within a single document, these databases are able to retrieve and manipulate data with greater efficiency. In contrast, relational databases often require the use of joins to gather related data, resulting in increased read and write latency. This fundamental difference in architecture enables document databases to deliver faster and more responsive performance, making them an attractive choice for applications that rely on complex, hierarchical data structures.\n\n## Use cases: when to choose a document database\n\nDocument databases are best suited in scenarios where data is **unpredictable, unstructured**, or **subject to rapid change**. Here are some common use cases where document databases are the better option:\n\n**Internet of Things (IoT)**\nIoT devices produce a continuous stream of data, often in different formats. Document databases can store this data as-is, enabling real-time processing and analysis without the need for data standardization.\n\n\n**Content Management Systems**\nData is often semi-structured and constantly evolving. By storing content, metadata, and related information in a single document, CMS platforms can efficiently manage and retrieve complex data sets, such as articles, blogs, and user profiles. 
This flexible data model enables developers to adapt to changing content requirements, while also providing fast and scalable performance, making it an ideal choice for large-scale content management applications.\n\n**E-commerce product catalogs**\nDocument databases allow for efficient storage and retrieval of complex product information, including descriptions, pricing, inventory, and customer reviews. By storing all product data in a single document, e-commerce platforms can quickly retrieve and update product information, reducing latency and improving the overall shopping experience. Additionally, document databases can handle large volumes of product data, making them an ideal choice for large-scale e-commerce applications with extensive product catalogs.\n\n**Mobile and web applications**\nThey often require flexible data models to accommodate changing user behavior, new features, and evolving business requirements. Document databases are well-suited for these applications, allowing developers to store and manage complex, semi-structured data in a flexible and adaptable way. By using a document database, developers can quickly iterate and refine their data models, adding new fields, documents, or collections as needed, without the need for costly and time-consuming schema changes, making it an ideal choice for agile development teams and fast-paced application development environments.\n\n## Relational vs. non-relational: how to choose the right database for your needs\n\nWhen evaluating database options, the primary consideration is often the specific needs of the application or use case. For applications where data consistency and integrity are paramount, and complex querying and reporting are essential, relational databases are the preferred choice. 
Their robust support for transactions, constraints, and joins ensures that data remains accurate and reliable, making them well-suited for applications that require strict data governance and compliance.\n\nOn the other hand, document databases are the ideal choice for applications that require flexibility, speed, and the ability to handle unstructured or semi-structured data. Their flexible schema and high-performance data retrieval capabilities make them perfect for building lightning-fast applications that require rapid data ingestion and processing. Additionally, document databases can efficiently handle large volumes of unstructured data, such as text, images, and videos, making them a popular choice for big data and real-time analytics applications.\n\n**Document databases for the modern data landscape**\n\nDocument databases offer a powerful, flexible solution for managing today’s data complexities. By allowing you to store unstructured and semi-structured data in its natural form, they eliminate the need for time-consuming data reformatting, reduce operational overhead, and increase agility in development.\n\nIf your organization is grappling with the challenges of handling growing data volumes, adopting a document database could be the key to unlocking more efficient data management and faster innovation. \n\nLearn more about our [Managed MongoDB®](https://www.scaleway.com/en/managed-mongodb/) database to discover how it can help streamline your data processes, cut costs, and accelerate your business growth.","createdAt":"2025-02-12T09:23:43.712Z","updatedAt":"2025-02-19T16:21:01.797Z","publishedAt":"2025-02-12T09:32:11.606Z","locale":"en","tags":"Managed MongoDB\nDocument\nDatabase","popular":true,"articleOfTheMonth":false,"category":"Build","timeToRead":3,"excerpt":"Discover the power of document databases, their benefits and use cases, in managing complex, semi-structured, and unstructured data. 
","author":"Névine Ismael et Walter Timmermans","h1":"What Is a Document Database? A Comprehensive Guide","createdOn":"2025-02-12","image":{"data":{"id":1517,"attributes":{"name":"Documentation-DevAPI-Illustration-Blog.webp","alternativeText":null,"caption":null,"width":1216,"height":752,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Documentation_Dev_API_Illustration_Blog_9121e48399.webp","hash":"large_Documentation_Dev_API_Illustration_Blog_9121e48399","mime":"image/webp","name":"large_Documentation-DevAPI-Illustration-Blog.webp","path":null,"size":"314.98","width":1000,"height":618},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Documentation_Dev_API_Illustration_Blog_9121e48399.webp","hash":"small_Documentation_Dev_API_Illustration_Blog_9121e48399","mime":"image/webp","name":"small_Documentation-DevAPI-Illustration-Blog.webp","path":null,"size":"111.41","width":500,"height":309},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Documentation_Dev_API_Illustration_Blog_9121e48399.webp","hash":"medium_Documentation_Dev_API_Illustration_Blog_9121e48399","mime":"image/webp","name":"medium_Documentation-DevAPI-Illustration-Blog.webp","path":null,"size":"208.66","width":750,"height":464},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Documentation_Dev_API_Illustration_Blog_9121e48399.webp","hash":"thumbnail_Documentation_Dev_API_Illustration_Blog_9121e48399","mime":"image/webp","name":"thumbnail_Documentation-DevAPI-Illustration-Blog.webp","path":null,"size":"37.40","width":245,"height":152}},"hash":"Documentation_Dev_API_Illustration_Blog_9121e48399","ext":".webp","mime":"image/webp","size":424.2,"url":"https://www-uploads.scaleway.com/Documentation_Dev_API_Illustration_Blog_9121e48399.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2023-01-17T12:35:20.509Z","updatedAt":"2023-01-17T12:35:20
.509Z"}}}},{"title":"Overcoming the challenges of Cloud environmental impact measurement","path":"overcoming-the-challenges-of-cloud-environmental-impact-measurement/","description":"At Scaleway, sustainability is a core concern. This why we decided to create a unique [environmental footprint calculator](https://www.scaleway.com/en/environmental-footprint-calculator/), designed to provide our users with a clear and comprehensive view of the environmental impact of our products. Unlike other cloud calculators, which only measure carbon for now, this tool measures not only carbon emissions (scopes 1, 2 and 3) - including hardware-specific energy consumption - but also water consumption. \n\nBased on as a robust a methodology as the PCR of French national ecological agency [ADEME](https://www.ademe.fr/en/frontpage/), the calculator provides transparent, reliable estimates, both at the time of purchase and in detailed and in detailed monthly reports. The aim is simple: to enable Scaleway clients to make informed decisions and optimize their infrastructures, in order to reduce their environmental impact.\n\nA detailed and comprehensive methodology is essential to guarantee the reliability of environmental impact data and to make it usable in infrastructure choices. By providing an accurate, multi-criteria view, we enable companies to understand the full ecological footprint of their operations.\n\nThis data, transparent and aligned with recognized standards, serves as a solid basis for comparing different options, identifying levers for improvement, and making informed decisions. In this way, a rigorous methodology does more than simply measure impact: it becomes a genuine strategic tool for effectively reducing one's footprint while supporting a more responsible digital transition.\n\nYet this journey has not been without its pitfalls. 
Accurately measuring environmental footprint, particularly scope 3 (which encompasses the indirect emissions of suppliers and purchased products), is a complex challenge.\n\n\n## Scope 3 data collection: a major challenge\n\nScope 3 represents the CO2 emissions of the suppliers and equipment we use. This category is often the most difficult to assess, as it depends on the quality and availability of data supplied by equipment manufacturers and other partners. Nonetheless, it is indispensable. \n\nAs a cloud provider, scope 3 represents over [80% of the carbon footprint of our services](https://www.itu.int/pub/D-STR-DIGITAL.04-2024). To ignore this category would be to miss out on a substantial part of our overall footprint, which makes our commitment to its measurement all the more crucial for comprehensive and relevant environmental impact monitoring.\n\nAt Scaleway, we have a wide variety of hardware equipment from multiple suppliers, some of which has **a lifespan in excess of 10 years**. We also practice intensive recycling, reusing components to integrate them into other servers, which extends the lifespan of our hardware. \n\nHowever, this approach adds a further layer of complexity to the precise calculation of the environmental impact of our servers, due to the traceability and (lack of) heterogeneity of the equipment used. **Collecting accurate impact data for these machines, particularly the older ones, proved a major challenge**. Indeed, some of our equipment suppliers did not have sufficiently detailed impact data, or such data was not always available for older equipment.\n\nAfter encountering a number of obstacles on this front, we decided to capitalize on the excellent work carried out by French association [Boavizta](https://boavizta.org/en), which has developed impact models based on emission factors and the average life cycle analysis (LCA) of hundreds of types of hardware, including servers. 
This enabled us to guarantee more robust estimates, although some uncertainty remains.\n\nIt is important to point out that our methodology is still evolving, and we are ready to integrate data from our suppliers as soon as they become available, in order to refine our calculations and guarantee even greater accuracy.\n\nAnother major challenge concerned our infrastructure servers used for in-house tools. The lack of comprehensive, automatable data on these servers, such as their specifications (CPU, RAM, etc.), made it difficult to assess their environmental impact linked to manufacturing. This time, the difficulty was linked to our internal organization. To overcome this problem, we implemented a progressive data collection method, automating as far as possible the retrieval of hardware specifications. At the same time, we applied generic impact coefficients based on standard specifications for servers for which data was not available.\n\n\n## Data center construction: the uncertainty of emission factors\n\nThe **environmental impact of data center construction is a particularly sensitive issue**. In the absence of specific data on the construction of each data center, we had to rely on estimates based on generic emission factors provided by ADEME. However, the level of uncertainty surrounding these estimates remains relatively high.\n\nWe have chosen to precisely document this uncertainty in our methodology, while incorporating the best available estimates. The use of emission factors per m² provided by ADEME enables us to calculate a carbon footprint in relation to the size of our infrastructures, although we continue to work on improving the accuracy of the data as new information becomes available.\n\n\n## Non-IT impact: complex methodological issues\n\n**Non-IT impacts** (offices, travel, etc.) represent a more indirect part of our environmental footprint, but are nonetheless crucial to measure. 
We had to define how and how often to update these data: should we base ourselves on annual averages, monthly data or choose another approach?\n\nWe opted for an annual update of non-IT data, based on personnel stats in our [Impact Report](https://www-uploads.scaleway.com/Impact_Report2024_A4_EN_e63efcae20.pdf), in order to offer our users a consistent and comparable overview. This decision enabled us to erase seasonality concerns, and to maintain a good balance between accuracy and practicality in the ongoing monitoring of our global footprint.\n\n\n## A constantly evolving process\n\nMeasuring environmental impact accurately and comprehensively is a complex challenge, particularly when it comes to scope 3 emissions. However, thanks to our commitment to transparency and the continuous improvement of our methods, we are able to provide our users with reliable information to help them reduce their carbon footprint. Our collaboration with experts and our participation in working groups such as ADEME's PCR group reinforce our ability to innovate in this crucial area.\n\nWe are pursuing our optimization mission, and will continue to listen to feedback from our users as we refine our tools and calculation methods.\n\nFind out more about the [environmental footprint calculator here](https://www.scaleway.com/en/environmental-footprint-calculator/); and check out our latest presentation on this topic, from Green IO Paris, [here](https://drive.google.com/file/d/1x45gJGdzx6epPqZTC8ZM1YAFGikWAd2k/view?usp=sharing)!","createdAt":"2024-12-23T15:39:16.546Z","updatedAt":"2025-02-06T15:02:41.307Z","publishedAt":"2024-12-23T16:44:16.522Z","locale":"en","tags":"Sustainability \nGreen IT\nEnvironmental Footprint Calculator ","popular":true,"articleOfTheMonth":true,"category":"Build","timeToRead":4,"excerpt":"Scaleway's Environmental Footprint Calculator measures carbon emissions and water usage, empowering users to optimize infrastructures and embrace responsible digital 
practices.","author":"Elise Auvray","h1":"Overcoming the challenges of Cloud environmental impact measurement","createdOn":"2024-12-23","image":{"data":{"id":4324,"attributes":{"name":"Content_Environmental-Footprint-Calculator-Illustration-Content.png","alternativeText":null,"caption":null,"width":950,"height":550,"formats":{"small":{"ext":".png","url":"https://www-uploads.scaleway.com/small_Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696.png","hash":"small_Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696","mime":"image/png","name":"small_Content_Environmental-Footprint-Calculator-Illustration-Content.png","path":null,"size":157.36,"width":500,"height":289,"sizeInBytes":157359},"medium":{"ext":".png","url":"https://www-uploads.scaleway.com/medium_Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696.png","hash":"medium_Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696","mime":"image/png","name":"medium_Content_Environmental-Footprint-Calculator-Illustration-Content.png","path":null,"size":341.85,"width":750,"height":434,"sizeInBytes":341853},"thumbnail":{"ext":".png","url":"https://www-uploads.scaleway.com/thumbnail_Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696.png","hash":"thumbnail_Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696","mime":"image/png","name":"thumbnail_Content_Environmental-Footprint-Calculator-Illustration-Content.png","path":null,"size":43.06,"width":245,"height":142,"sizeInBytes":43059}},"hash":"Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696","ext":".png","mime":"image/png","size":103.1,"url":"https://www-uploads.scaleway.com/Content_Environmental_Footprint_Calculator_Illustration_Content_9a4b81c696.png","previewUrl":null,"provider":"aws-s3","provider_metadata":null,"createdAt":"2025-02-06T15:02:29.273Z","updatedAt":"2025-02-17T09:39:12.928Z"}}}}],"header":{"mainNavi
gationItems":[{"id":542,"title":"Dedibox and Bare Metal","menuAttached":false,"order":1,"path":"/DediboxBareMetal","type":"WRAPPER","uiRouterKey":"dedibox-and-bare-metal-1","slug":"dedibox-bare-metal","external":false,"items":[{"id":543,"title":"Dedibox - dedicated servers","menuAttached":false,"order":1,"path":"/DediboxBareMetal/Dedibox","type":"INTERNAL","uiRouterKey":"dedibox-dedicated-servers","slug":"dedibox-bare-metal-dedibox","external":false,"related":{"id":29,"title":"Dedibox","path":"/dedibox/","scheduledAt":null,"createdAt":"2022-04-19T15:29:02.488Z","updatedAt":"2024-12-02T21:42:14.962Z","publishedAt":"2022-04-28T17:05:07.122Z","locale":"en","__contentType":"api::page.page","navigationItemId":543,"__templateName":"Generic"},"items":[{"id":544,"title":"Start","menuAttached":false,"order":1,"path":"/DediboxBareMetal/Dedibox/Start","type":"INTERNAL","uiRouterKey":"start-2","slug":"dedibox-bare-metal-dedibox-start","external":false,"related":{"id":53,"title":"Start","path":"/dedibox/start/","scheduledAt":null,"createdAt":"2022-04-21T16:44:17.577Z","updatedAt":"2024-12-02T21:47:26.792Z","publishedAt":"2022-04-28T17:12:40.426Z","locale":"en","__contentType":"api::page.page","navigationItemId":544,"__templateName":"Generic"},"items":[],"description":"Affordable servers with the best price-performance ratio on the market"},{"id":545,"title":"Pro","menuAttached":false,"order":2,"path":"/DediboxBareMetal/Dedibox/Pro","type":"INTERNAL","uiRouterKey":"pro-4","slug":"dedibox-bare-metal-dedibox-pro","external":false,"related":{"id":9,"title":"Pro","path":"/dedibox/pro/","scheduledAt":null,"createdAt":"2022-04-07T13:51:48.537Z","updatedAt":"2025-02-20T10:47:35.455Z","publishedAt":"2022-04-28T17:04:00.983Z","locale":"en","__contentType":"api::page.page","navigationItemId":545,"__templateName":"Generic"},"items":[],"description":"Perfect balance of processing power, memory and 
storage"},{"id":546,"title":"Core","menuAttached":false,"order":3,"path":"/DediboxBareMetal/Dedibox/Core","type":"INTERNAL","uiRouterKey":"core-1","slug":"dedibox-bare-metal-dedibox-core","external":false,"related":{"id":14,"title":"Core","path":"/dedibox/core/","scheduledAt":null,"createdAt":"2022-04-11T09:05:58.588Z","updatedAt":"2025-02-06T16:24:41.969Z","publishedAt":"2022-04-28T17:04:22.560Z","locale":"en","__contentType":"api::page.page","navigationItemId":546,"__templateName":"Generic"},"items":[],"description":"The high performance backbone of your mission-critical infrastructure"},{"id":547,"title":"Store","menuAttached":false,"order":4,"path":"/DediboxBareMetal/Dedibox/Store","type":"INTERNAL","uiRouterKey":"store-2","slug":"dedibox-bare-metal-dedibox-store","external":false,"related":{"id":5,"title":"Store","path":"/dedibox/store/","scheduledAt":null,"createdAt":"2022-04-01T15:14:47.812Z","updatedAt":"2025-02-10T17:06:12.772Z","publishedAt":"2022-04-28T17:03:51.376Z","locale":"en","__contentType":"api::page.page","navigationItemId":547,"__templateName":"Generic"},"items":[],"description":"For mission-critical data, fast storage, backup and streaming"},{"id":832,"title":"GPU","menuAttached":false,"order":5,"path":"/DediboxBareMetal/Dedibox/GPU_ddx","type":"INTERNAL","uiRouterKey":"gpu-9","slug":"dedibox-bare-metal-dedibox-gpu-ddx","external":false,"related":{"id":1454,"title":"GPU","path":"/dedibox/gpu/","scheduledAt":null,"createdAt":"2024-10-31T10:01:24.876Z","updatedAt":"2025-02-06T16:21:10.102Z","publishedAt":"2024-11-07T07:38:37.573Z","locale":"en","__contentType":"api::page.page","navigationItemId":832,"__templateName":"Generic"},"items":[],"description":"Dedicated GPU power with reliable performance and 
stability"},{"id":548,"title":"Dedirack","menuAttached":false,"order":6,"path":"/DediboxBareMetal/Dedibox/Dedirack","type":"INTERNAL","uiRouterKey":"dedirack-1","slug":"dedibox-bare-metal-dedibox-dedirack","external":false,"related":{"id":155,"title":"Dedirack","path":"/dedibox/dedirack/","scheduledAt":null,"createdAt":"2022-05-02T10:08:21.002Z","updatedAt":"2024-12-02T21:42:15.571Z","publishedAt":"2022-05-02T10:46:06.212Z","locale":"en","__contentType":"api::page.page","navigationItemId":548,"__templateName":"Generic"},"items":[],"description":"Host your Hardware in our secured French datacenters"},{"id":742,"title":"Dedibox VPS","menuAttached":false,"order":7,"path":"/DediboxBareMetal/Dedibox/VPS","type":"INTERNAL","uiRouterKey":"dedibox-vps","slug":"dedibox-bare-metal-dedibox-vps","external":false,"related":{"id":1234,"title":"Dedibox VPS","path":"/dedibox-vps/","scheduledAt":null,"createdAt":"2024-05-08T16:42:21.258Z","updatedAt":"2024-12-02T22:03:11.926Z","publishedAt":"2024-05-14T16:28:25.184Z","locale":"en","__contentType":"api::page.page","navigationItemId":742,"__templateName":"Generic"},"items":[],"description":"60 locations worldwide, starting at €4,99/month"}],"description":""},{"id":553,"title":"Elastic Metal - bare metal cloud","menuAttached":false,"order":2,"path":"/DediboxBareMetal/elasticmetal","type":"INTERNAL","uiRouterKey":"elastic-metal-bare-metal-cloud-1","slug":"dedibox-bare-metal-elasticmetal","external":false,"related":{"id":87,"title":"Elastic 
Metal","path":"/elastic-metal/","scheduledAt":null,"createdAt":"2022-04-28T12:45:28.696Z","updatedAt":"2025-01-24T13:35:03.496Z","publishedAt":"2022-04-28T13:22:46.501Z","locale":"en","__contentType":"api::page.page","navigationItemId":553,"__templateName":"Generic"},"items":[{"id":554,"title":"Aluminium","menuAttached":false,"order":1,"path":"/DediboxBareMetal/elasticmetal/Aluminium","type":"INTERNAL","uiRouterKey":"aluminium-1","slug":"dedibox-bare-metal-elasticmetal-aluminium","external":false,"related":{"id":8,"title":"Aluminium","path":"/elastic-metal/aluminium/","scheduledAt":null,"createdAt":"2022-04-06T13:13:04.829Z","updatedAt":"2025-02-14T15:26:58.704Z","publishedAt":"2022-04-28T17:04:04.448Z","locale":"en","__contentType":"api::page.page","navigationItemId":554,"__templateName":"Generic"},"items":[],"description":"Fully dedicated bare metal servers with native cloud integration, at the best price"},{"id":557,"title":"Beryllium","menuAttached":false,"order":2,"path":"/DediboxBareMetal/elasticmetal/Beryllium","type":"INTERNAL","uiRouterKey":"beryllium-1","slug":"dedibox-bare-metal-elasticmetal-beryllium","external":false,"related":{"id":15,"title":"Beryllium","path":"/elastic-metal/beryllium/","scheduledAt":null,"createdAt":"2022-04-11T10:57:25.297Z","updatedAt":"2025-02-14T15:23:03.346Z","publishedAt":"2022-04-28T17:13:35.576Z","locale":"en","__contentType":"api::page.page","navigationItemId":557,"__templateName":"Generic"},"items":[],"description":"Powerful, balanced and reliable servers for production-grade 
applications"},{"id":556,"title":"Iridium","menuAttached":false,"order":3,"path":"/DediboxBareMetal/elasticmetal/Iridium","type":"INTERNAL","uiRouterKey":"iridium-1","slug":"dedibox-bare-metal-elasticmetal-iridium","external":false,"related":{"id":810,"title":"Iridium","path":"/elastic-metal/iridium/","scheduledAt":null,"createdAt":"2023-04-27T13:53:48.244Z","updatedAt":"2025-02-14T15:28:12.476Z","publishedAt":"2023-05-29T08:52:19.666Z","locale":"en","__contentType":"api::page.page","navigationItemId":556,"__templateName":"Generic"},"items":[],"description":"Powerful dedicated server designed to handle high-workload applications"},{"id":555,"title":"Lithium","menuAttached":false,"order":4,"path":"/DediboxBareMetal/elasticmetal/Lithium","type":"INTERNAL","uiRouterKey":"lithium-1","slug":"dedibox-bare-metal-elasticmetal-lithium","external":false,"related":{"id":16,"title":"Lithium","path":"/elastic-metal/lithium/","scheduledAt":null,"createdAt":"2022-04-11T11:15:36.538Z","updatedAt":"2025-02-20T10:52:48.984Z","publishedAt":"2022-04-28T17:13:30.074Z","locale":"en","__contentType":"api::page.page","navigationItemId":555,"__templateName":"Generic"},"items":[],"description":"Designed with huge local storage to keep, back up, and protect your data"},{"id":833,"title":"Titanium","menuAttached":false,"order":5,"path":"/DediboxBareMetal/elasticmetal/Titanium","type":"INTERNAL","uiRouterKey":"titanium","slug":"dedibox-bare-metal-elasticmetal-titanium","external":false,"related":{"id":1457,"title":"Titanium","path":"/elastic-metal/titanium/","scheduledAt":null,"createdAt":"2024-10-31T15:08:59.416Z","updatedAt":"2025-02-14T15:56:07.147Z","publishedAt":"2024-11-07T06:52:37.648Z","locale":"en","__contentType":"api::page.page","navigationItemId":833,"__templateName":"Generic"},"items":[],"description":"Power and stability of dedicated GPU hardware integrated into the Scaleway 
ecosystem"}],"description":""},{"id":558,"title":"Apple","menuAttached":false,"order":3,"path":"/DediboxBareMetal/Apple","type":"INTERNAL","uiRouterKey":"apple-2","slug":"dedibox-bare-metal-apple","external":false,"related":{"id":1088,"title":"Apple Mac mini","path":"/apple-mac-mini/","scheduledAt":null,"createdAt":"2024-01-31T15:28:49.276Z","updatedAt":"2025-02-07T13:57:44.141Z","publishedAt":"2024-08-02T07:56:22.454Z","locale":"en","__contentType":"api::page.page","navigationItemId":558,"__templateName":"Generic"},"items":[{"id":561,"title":"Mac mini M1","menuAttached":false,"order":1,"path":"/DediboxBareMetal/Apple/M1","type":"INTERNAL","uiRouterKey":"mac-mini-m1-1","slug":"dedibox-bare-metal-apple-m1","external":false,"related":{"id":91,"title":"Hello m1","path":"/hello-m1/","scheduledAt":null,"createdAt":"2022-04-28T15:24:50.963Z","updatedAt":"2025-02-07T14:01:43.056Z","publishedAt":"2023-10-16T14:15:59.310Z","locale":"en","__contentType":"api::page.page","navigationItemId":561,"__templateName":"Generic"},"items":[],"description":"Enjoy the Mac mini experience with great simplicity"},{"id":560,"title":"Mac mini M2","menuAttached":false,"order":2,"path":"/DediboxBareMetal/Apple/m2","type":"INTERNAL","uiRouterKey":"mac-mini-m2-2","slug":"dedibox-bare-metal-apple-m2","external":false,"related":{"id":1086,"title":"mac mini M2","path":"/mac-mini-m2/","scheduledAt":null,"createdAt":"2024-01-31T09:30:46.938Z","updatedAt":"2025-02-07T14:02:55.309Z","publishedAt":"2024-02-05T15:21:02.196Z","locale":"en","__contentType":"api::page.page","navigationItemId":560,"__templateName":"Generic"},"items":[],"description":"Perform your daily tasks with speed and efficiency"},{"id":559,"title":"Mac mini M2 Pro","menuAttached":false,"order":3,"path":"/DediboxBareMetal/Apple/M2pro","type":"INTERNAL","uiRouterKey":"mac-mini-m2-pro-1","slug":"dedibox-bare-metal-apple-m2pro","external":false,"related":{"id":991,"title":"mac mini M2 
pro","path":"/mac-mini-m2-pro/","scheduledAt":null,"createdAt":"2023-10-25T08:56:21.435Z","updatedAt":"2025-02-07T14:03:47.499Z","publishedAt":"2023-11-16T12:11:33.094Z","locale":"en","__contentType":"api::page.page","navigationItemId":559,"__templateName":"Generic"},"items":[],"description":"Realize your most ambitious projects thanks to a new level of power"},{"id":886,"title":"Mac mini M4","menuAttached":false,"order":4,"path":"/DediboxBareMetal/Apple/M4","type":"INTERNAL","uiRouterKey":"mac-mini-m4","slug":"dedibox-bare-metal-apple-m4","external":false,"related":{"id":1606,"title":"Mac mini M4","path":"/mac-mini-m4/","scheduledAt":null,"createdAt":"2025-01-21T15:05:39.847Z","updatedAt":"2025-02-07T14:05:10.096Z","publishedAt":"2025-01-24T08:17:07.190Z","locale":"en","__contentType":"api::page.page","navigationItemId":886,"__templateName":"Generic"},"items":[],"description":"Latest Apple silicon chip for intensive use cases."}],"description":""}],"description":""},{"id":562,"title":"Compute","menuAttached":false,"order":2,"path":"/Compute","type":"WRAPPER","uiRouterKey":"compute-3","slug":"compute-4","external":false,"items":[{"id":563,"title":"Virtual Instances","menuAttached":false,"order":1,"path":"/Compute/VirtualInstances","type":"INTERNAL","uiRouterKey":"virtual-instances-1","slug":"compute-virtual-instances","external":false,"related":{"id":655,"title":"Virtual Instances","path":"/virtual-instances/","scheduledAt":null,"createdAt":"2023-02-20T10:48:52.279Z","updatedAt":"2025-02-11T13:16:39.501Z","publishedAt":"2023-02-28T08:32:03.960Z","locale":"en","__contentType":"api::page.page","navigationItemId":563,"__templateName":"Generic"},"items":[{"id":567,"title":"Production-Optimized","menuAttached":false,"order":1,"path":"/Compute/VirtualInstances/Prod","type":"INTERNAL","uiRouterKey":"production-optimized-2","slug":"compute-virtual-instances-prod","external":false,"related":{"id":657,"title":"Production-Optimized 
Instances","path":"/production-optimized-instances/","scheduledAt":null,"createdAt":"2023-02-20T15:13:14.415Z","updatedAt":"2025-02-11T14:06:00.080Z","publishedAt":"2023-02-28T08:34:34.739Z","locale":"en","__contentType":"api::page.page","navigationItemId":567,"__templateName":"Generic"},"items":[],"description":"Dedicated vCPU for the most demanding workloads (x86)"},{"id":566,"title":"Workload-Optimized","menuAttached":false,"order":2,"path":"/Compute/VirtualInstances/Workload-Optimized","type":"INTERNAL","uiRouterKey":"workload-optimized-1","slug":"compute-virtual-instances-workload-optimized","external":false,"related":{"id":802,"title":"Workload-Optimized Instances","path":"/workload-optimized-instances/","scheduledAt":null,"createdAt":"2023-04-25T12:38:13.577Z","updatedAt":"2025-02-11T14:01:28.392Z","publishedAt":"2023-05-26T13:36:52.797Z","locale":"en","__contentType":"api::page.page","navigationItemId":566,"__templateName":"Generic"},"items":[],"description":"Secure, scalable VMs, equipped for high memory and compute demands (x86)"},{"id":565,"title":"Cost-Optimized","menuAttached":false,"order":3,"path":"/Compute/VirtualInstances/Cost-Optimized","type":"INTERNAL","uiRouterKey":"cost-optimized-1","slug":"compute-virtual-instances-cost-optimized","external":false,"related":{"id":656,"title":"Cost-Optimized Instances","path":"/cost-optimized-instances/","scheduledAt":null,"createdAt":"2023-02-20T12:55:45.865Z","updatedAt":"2025-02-11T14:09:33.243Z","publishedAt":"2023-02-28T08:34:47.421Z","locale":"en","__contentType":"api::page.page","navigationItemId":565,"__templateName":"Generic"},"items":[],"description":"Highly reliable and priced affordably Instances with shared vCPUs (x86 and ARM)"},{"id":564,"title":"Learning","menuAttached":false,"order":4,"path":"/Compute/VirtualInstances/Learning","type":"INTERNAL","uiRouterKey":"learning-1","slug":"compute-virtual-instances-learning","external":false,"related":{"id":13,"title":"Stardust 
Instances","path":"/stardust-instances/","scheduledAt":null,"createdAt":"2022-04-11T09:03:33.397Z","updatedAt":"2025-01-10T14:18:17.519Z","publishedAt":"2022-04-28T17:04:10.708Z","locale":"en","__contentType":"api::page.page","navigationItemId":564,"__templateName":"Generic"},"items":[],"description":"A tiny instance to test and host your personal projects (x86)"}],"description":""},{"id":568,"title":"GPU","menuAttached":false,"order":2,"path":"/Compute/gpu","type":"INTERNAL","uiRouterKey":"gpu-8","slug":"compute-gpu","external":false,"related":{"id":1025,"title":"GPU Instances","path":"/gpu-instances/","scheduledAt":null,"createdAt":"2023-11-30T13:15:51.769Z","updatedAt":"2024-11-19T16:38:15.121Z","publishedAt":"2023-12-12T12:52:20.083Z","locale":"en","__contentType":"api::page.page","navigationItemId":568,"__templateName":"Generic"},"items":[{"id":571,"title":"L4 GPU Instance","menuAttached":false,"order":1,"path":"/Compute/gpu/L4","type":"INTERNAL","uiRouterKey":"l4-gpu-instance","slug":"compute-gpu-l4","external":false,"related":{"id":1108,"title":"L4 GPU Instance","path":"/l4-gpu-instance/","scheduledAt":null,"createdAt":"2024-02-28T16:20:43.240Z","updatedAt":"2024-11-20T14:49:27.542Z","publishedAt":"2024-03-04T13:37:45.809Z","locale":"en","__contentType":"api::page.page","navigationItemId":571,"__templateName":"Generic"},"items":[],"description":"Maximize your AI infrastructures with a versatile Instance"},{"id":572,"title":"L40S GPU Instance","menuAttached":false,"order":2,"path":"/Compute/gpu/L40s","type":"INTERNAL","uiRouterKey":"l40-s-gpu-instance","slug":"compute-gpu-l40s","external":false,"related":{"id":1221,"title":"L40S GPU Instance","path":"/l40s-gpu-instance/","scheduledAt":null,"createdAt":"2024-04-26T13:37:31.531Z","updatedAt":"2025-01-14T09:22:19.084Z","publishedAt":"2024-04-29T12:12:07.466Z","locale":"en","__contentType":"api::page.page","navigationItemId":572,"__templateName":"Generic"},"items":[],"description":"Universal Instance, faster than 
L4 and cheaper than H100 PCIe"},{"id":569,"title":"H100 PCIe GPU Instance","menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/h100-pcie-try-it-now/","type":"EXTERNAL","uiRouterKey":"h100-pc-ie-gpu-instance-4","slug":{},"external":true,"description":"Accelerate your model training with the most high-end AI chip"},{"id":570,"title":"GPU 3070 Instances","menuAttached":false,"order":4,"path":"/Compute/gpu/3070","type":"INTERNAL","uiRouterKey":"gpu-3070-instances-1","slug":"compute-gpu-3070","external":false,"related":{"id":397,"title":"GPU 3070 Instances","path":"/gpu-3070-instances/","scheduledAt":null,"createdAt":"2022-05-30T11:52:26.506Z","updatedAt":"2023-11-16T16:38:12.184Z","publishedAt":"2022-05-30T12:33:10.212Z","locale":"en","__contentType":"api::page.page","navigationItemId":570,"__templateName":"Generic"},"items":[],"description":"Dedicated NVIDIA® RTX 3070 with the best price/performance ratio"},{"id":573,"title":"Render GPU Instances","menuAttached":false,"order":5,"path":"/Compute/gpu/render","type":"INTERNAL","uiRouterKey":"render-gpu-instances","slug":"compute-gpu-render","external":false,"related":{"id":52,"title":"GPU Render Instances","path":"/gpu-render-instances/","scheduledAt":null,"createdAt":"2022-04-21T16:00:29.592Z","updatedAt":"2024-09-25T09:40:12.404Z","publishedAt":"2022-04-28T17:12:46.136Z","locale":"en","__contentType":"api::page.page","navigationItemId":573,"__templateName":"Generic"},"items":[],"description":"Dedicated Tesla P100s for all your Machine Learning \u0026 Artificial Intelligence 
needs."}],"description":""},{"id":574,"title":"Serverless","menuAttached":false,"order":3,"path":"/Compute/Serverless","type":"INTERNAL","uiRouterKey":"serverless-12","slug":"compute-serverless","external":false,"related":{"id":1582,"title":"Serverless","path":"/serverless/","scheduledAt":null,"createdAt":"2025-01-08T14:22:22.570Z","updatedAt":"2025-01-08T14:39:59.326Z","publishedAt":"2025-01-08T14:39:59.247Z","locale":"en","__contentType":"api::page.page","navigationItemId":574,"__templateName":"Generic"},"items":[{"id":576,"title":"Serverless Functions","menuAttached":false,"order":1,"path":"/Compute/Serverless/Functions","type":"INTERNAL","uiRouterKey":"serverless-functions-1","slug":"compute-serverless-functions","external":false,"related":{"id":50,"title":"Serverless Functions","path":"/serverless-functions/","scheduledAt":null,"createdAt":"2022-04-21T15:28:10.687Z","updatedAt":"2025-01-24T13:36:21.096Z","publishedAt":"2022-04-28T17:12:49.569Z","locale":"en","__contentType":"api::page.page","navigationItemId":576,"__templateName":"Generic"},"items":[],"description":"Experience an easy way to run your code on the cloud"},{"id":575,"title":"Serverless Containers","menuAttached":false,"order":2,"path":"/Compute/Serverless/Containers","type":"INTERNAL","uiRouterKey":"serverless-containers-2","slug":"compute-serverless-containers","external":false,"related":{"id":7,"title":"Serverless Containers","path":"/serverless-containers/","scheduledAt":null,"createdAt":"2022-04-04T07:02:24.178Z","updatedAt":"2025-01-24T13:37:08.972Z","publishedAt":"2022-04-28T17:03:54.693Z","locale":"en","__contentType":"api::page.page","navigationItemId":575,"__templateName":"Generic"},"items":[],"description":"Easily run containers on the cloud with a single command"},{"id":579,"title":"Serverless 
Jobs","menuAttached":false,"order":3,"path":"/Compute/Serverless/Jobs","type":"INTERNAL","uiRouterKey":"serverless-jobs-1","slug":"compute-serverless-jobs","external":false,"related":{"id":980,"title":"Serverless Jobs","path":"/serverless-jobs/","scheduledAt":null,"createdAt":"2023-10-13T16:05:31.205Z","updatedAt":"2024-08-20T12:28:03.639Z","publishedAt":"2023-12-07T15:55:35.668Z","locale":"en","__contentType":"api::page.page","navigationItemId":579,"__templateName":"Generic"},"items":[],"description":"Run batches of tasks in the cloud"}],"description":""},{"id":580,"title":"Containers","menuAttached":false,"order":4,"path":"/Compute/Containers","type":"INTERNAL","uiRouterKey":"containers-4","slug":"compute-containers","external":false,"related":{"id":465,"title":"Containers","path":"/containers/","scheduledAt":null,"createdAt":"2022-07-29T15:09:20.535Z","updatedAt":"2024-08-28T07:05:23.005Z","publishedAt":"2023-02-27T13:53:48.270Z","locale":"en","__contentType":"api::page.page","navigationItemId":580,"__templateName":"Generic"},"items":[{"id":581,"title":"Kubernetes Kapsule","menuAttached":false,"order":1,"path":"/Compute/Containers/Kapsule","type":"INTERNAL","uiRouterKey":"kubernetes-kapsule-1","slug":"compute-containers-kapsule","external":false,"related":{"id":6,"title":"Kubernetes Kapsule","path":"/kubernetes-kapsule/","scheduledAt":null,"createdAt":"2022-04-01T15:40:18.523Z","updatedAt":"2025-02-20T10:18:20.644Z","publishedAt":"2022-11-02T17:14:27.738Z","locale":"en","__contentType":"api::page.page","navigationItemId":581,"__templateName":"Generic"},"items":[],"description":"Kubernetes exclusively for Scaleway products and resources"},{"id":582,"title":"Kubernetes Kosmos","menuAttached":false,"order":2,"path":"/Compute/Containers/Kosmos","type":"INTERNAL","uiRouterKey":"kubernetes-kosmos-1","slug":"compute-containers-kosmos","external":false,"related":{"id":43,"title":"Kubernetes 
Kosmos","path":"/kubernetes-kosmos/","scheduledAt":null,"createdAt":"2022-04-20T17:18:27.347Z","updatedAt":"2024-07-12T09:35:39.810Z","publishedAt":"2022-04-28T17:13:15.597Z","locale":"en","__contentType":"api::page.page","navigationItemId":582,"__templateName":"Generic"},"items":[],"description":"Multi-cloud Kubernetes for Scaleway and external providers resources"},{"id":583,"title":"Container Registry","menuAttached":false,"order":3,"path":"/Compute/Containers/containerregisrt","type":"INTERNAL","uiRouterKey":"container-registry-1","slug":"compute-containers-containerregisrt","external":false,"related":{"id":39,"title":"Container Registry","path":"/container-registry/","scheduledAt":null,"createdAt":"2022-04-20T14:07:31.417Z","updatedAt":"2023-11-15T08:49:34.191Z","publishedAt":"2022-04-28T17:06:10.179Z","locale":"en","__contentType":"api::page.page","navigationItemId":583,"__templateName":"Generic"},"items":[],"description":"An easy-to-use Docker repository"}],"description":""}],"description":""},{"id":584,"title":"AI","menuAttached":false,"order":3,"path":"/AI","type":"WRAPPER","uiRouterKey":"ai","slug":"ai-1","external":false,"items":[{"id":585,"title":"Clusters","menuAttached":false,"order":1,"path":"/AI/Clusters","type":"WRAPPER","uiRouterKey":"clusters-1","slug":"ai-clusters","external":false,"items":[{"id":588,"title":"Custom-built Clusters","menuAttached":false,"order":1,"path":"/AI/Clusters/AIsuper","type":"INTERNAL","uiRouterKey":"custom-built-clusters","slug":"ai-clusters-a-isuper","external":false,"related":{"id":953,"title":"Custom-built Clusters","path":"/custom-built-clusters/","scheduledAt":null,"createdAt":"2023-09-22T14:14:40.961Z","updatedAt":"2024-10-29T12:48:55.663Z","publishedAt":"2023-10-04T14:49:01.987Z","locale":"en","__contentType":"api::page.page","navigationItemId":588,"__templateName":"Generic"},"items":[],"description":"Build the next Foundation Model with one of the fastest and most energy-efficient supercomputers in the 
world"},{"id":776,"title":"On Demand Cluster","menuAttached":false,"order":2,"path":"/AI/Clusters/Clusterondemand","type":"INTERNAL","uiRouterKey":"on-demand-cluster","slug":"ai-clusters-clusterondemand","external":false,"related":{"id":1266,"title":"Cluster On Demand ","path":"/cluster-on-demand/","scheduledAt":null,"createdAt":"2024-05-16T15:00:19.723Z","updatedAt":"2024-11-08T08:52:40.598Z","publishedAt":"2024-05-21T14:10:00.511Z","locale":"en","__contentType":"api::page.page","navigationItemId":776,"__templateName":"Generic"},"items":[],"description":"Rent a GPU-cluster from 32 to more than a thousand GPUs to speed up distributed training"}],"description":""},{"id":592,"title":"Model-as-a-service","menuAttached":false,"order":2,"path":"/AI/ManagedServices","type":"WRAPPER","uiRouterKey":"model-as-a-service-1","slug":"ai-managed-services","external":false,"items":[{"id":593,"title":"Managed Inference","menuAttached":false,"order":1,"path":"/AI/ManagedServices/llm","type":"INTERNAL","uiRouterKey":"managed-inference-2","slug":"ai-managed-services-llm","external":false,"related":{"id":1303,"title":"Inference","path":"/inference/","scheduledAt":null,"createdAt":"2024-06-13T13:16:26.427Z","updatedAt":"2025-02-10T10:29:33.032Z","publishedAt":"2024-06-28T12:43:39.677Z","locale":"en","__contentType":"api::page.page","navigationItemId":593,"__templateName":"Generic"},"items":[],"description":"Deploy AI models in a dedicated inference infrastructure. 
Get tailored security and predictable throughput"},{"id":824,"title":"Generative APIs","menuAttached":false,"order":2,"path":"/AI/ManagedServices/GenerativeAPIs","type":"INTERNAL","uiRouterKey":"generative-ap-is-2","slug":"ai-managed-services-generative-ap-is","external":false,"related":{"id":1418,"title":"Generative APIs","path":"/generative-apis/","scheduledAt":null,"createdAt":"2024-10-10T16:23:00.732Z","updatedAt":"2025-02-13T16:06:23.818Z","publishedAt":"2024-10-11T12:17:56.286Z","locale":"en","__contentType":"api::page.page","navigationItemId":824,"__templateName":"Generic"},"items":[],"description":"Consume AI models instantly via a simple API call. All hosted in Europe"}],"description":""},{"id":586,"title":"GPU Instances","menuAttached":false,"order":3,"path":"/AI/gpu","type":"WRAPPER","uiRouterKey":"gpu-instances","slug":"ai-gpu","external":false,"items":[{"id":589,"title":"L40S GPU Instance","menuAttached":false,"order":1,"path":"https://www.scaleway.com/en/l40s-gpu-instance/","type":"EXTERNAL","uiRouterKey":"l40-s-gpu-instance-1","slug":{},"external":true,"description":"Accelerate the next generation of AI-enabled applications with the universal L40S GPU Instance, faster than L4 and cheaper than H100 PCIe"},{"id":590,"title":"L4 GPU Instance","menuAttached":false,"order":2,"path":"https://www.scaleway.com/en/l4-gpu-instance/","type":"EXTERNAL","uiRouterKey":"l4-gpu-instance-1","slug":{},"external":true,"description":"Maximize your AI infrastructure's potential with a versatile and cost-effective GPU Instance"},{"id":587,"title":"H100 PCIe GPU Instance","menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/h100-pcie-try-it-now/","type":"EXTERNAL","uiRouterKey":"h100-pc-ie-gpu-instance-2","slug":{},"external":true,"description":"Accelerate your model training with the most high-end AI chip"},{"id":591,"title":"Render GPU 
Instance","menuAttached":false,"order":4,"path":"https://www.scaleway.com/en/gpu-render-instances/","type":"EXTERNAL","uiRouterKey":"render-gpu-instance-1","slug":{},"external":true,"description":"Dedicated Tesla P100s for all your Machine Learning \u0026 Artificial Intelligence needs"}],"description":""}],"description":""},{"id":594,"title":"Storage","menuAttached":false,"order":4,"path":"/Storage","type":"WRAPPER","uiRouterKey":"storage-3","slug":"storage-2","external":false,"items":[{"id":602,"title":"Storage","menuAttached":false,"order":1,"path":"/Storage/storage","type":"WRAPPER","uiRouterKey":"storage-4","slug":"storage-storage","external":false,"items":[{"id":604,"title":"Object Storage","menuAttached":false,"order":1,"path":"/Storage/storage/ObjectStorage","type":"INTERNAL","uiRouterKey":"object-storage-4","slug":"storage-storage-object-storage","external":false,"related":{"id":652,"title":"Object Storage","path":"/object-storage/","scheduledAt":null,"createdAt":"2023-02-16T09:44:56.414Z","updatedAt":"2024-12-02T14:09:58.690Z","publishedAt":"2023-03-07T18:05:15.061Z","locale":"en","__contentType":"api::page.page","navigationItemId":604,"__templateName":"Generic"},"items":[],"description":"Amazon S3-compatible and Multi-AZ resilient object storage service. Ensuring high availability for your data"},{"id":605,"title":"Scaleway Glacier","menuAttached":false,"order":2,"path":"/Storage/storage/glacier","type":"INTERNAL","uiRouterKey":"scaleway-glacier-1","slug":"storage-storage-glacier","external":false,"related":{"id":17,"title":"Glacier Cold storage","path":"/glacier-cold-storage/","scheduledAt":null,"createdAt":"2022-04-11T11:58:13.079Z","updatedAt":"2024-10-25T13:13:55.154Z","publishedAt":"2022-04-28T17:13:24.608Z","locale":"en","__contentType":"api::page.page","navigationItemId":605,"__templateName":"Generic"},"items":[],"description":"Cold Storage class to secure long-term object storage. 
Ideal for deep archived data."},{"id":606,"title":"Block Storage","menuAttached":false,"order":3,"path":"/Storage/storage/BlockStorage","type":"INTERNAL","uiRouterKey":"block-storage-3","slug":"storage-storage-block-storage","external":false,"related":{"id":141,"title":"Block Storage","path":"/block-storage/","scheduledAt":null,"createdAt":"2022-05-02T08:20:39.280Z","updatedAt":"2025-02-05T14:21:18.667Z","publishedAt":"2022-05-02T08:28:12.783Z","locale":"en","__contentType":"api::page.page","navigationItemId":606,"__templateName":"Generic"},"items":[],"description":"Flexible and reliable storage for demanding workloads"}],"description":""}],"description":""},{"id":595,"title":"Network","menuAttached":false,"order":5,"path":"/Network","type":"WRAPPER","uiRouterKey":"network-3","slug":"network-4","external":false,"items":[{"id":603,"title":"Network","menuAttached":false,"order":1,"path":"/Network/Network","type":"WRAPPER","uiRouterKey":"network-4","slug":"network-network","external":false,"items":[{"id":607,"title":"Virtual Private Cloud","menuAttached":false,"order":1,"path":"/Network/Network/VPC","type":"INTERNAL","uiRouterKey":"virtual-private-cloud-1","slug":"network-network-vpc","external":false,"related":{"id":885,"title":"VPC","path":"/vpc/","scheduledAt":null,"createdAt":"2023-07-11T14:38:07.412Z","updatedAt":"2025-01-03T17:06:24.192Z","publishedAt":"2023-07-11T14:38:10.387Z","locale":"en","__contentType":"api::page.page","navigationItemId":607,"__templateName":"Generic"},"items":[],"description":"Secure your cloud resources with ease on a resilient regional network"},{"id":609,"title":"Public Gateway","menuAttached":false,"order":2,"path":"/Network/Network/public","type":"INTERNAL","uiRouterKey":"public-gateway-1","slug":"network-network-public","external":false,"related":{"id":54,"title":"Public 
Gateway","path":"/public-gateway/","scheduledAt":null,"createdAt":"2022-04-22T09:34:12.578Z","updatedAt":"2024-09-11T14:24:49.432Z","publishedAt":"2022-04-28T17:13:01.025Z","locale":"en","__contentType":"api::page.page","navigationItemId":609,"__templateName":"Generic"},"items":[],"description":" A single and secure entrance to your infrastructure"},{"id":608,"title":"Load Balancer","menuAttached":false,"order":3,"path":"/Network/Network/load","type":"INTERNAL","uiRouterKey":"load-balancer-1","slug":"network-network-load","external":false,"related":{"id":45,"title":"Load Balancer","path":"/load-balancer/","scheduledAt":null,"createdAt":"2022-04-21T07:46:46.140Z","updatedAt":"2024-07-24T14:48:37.806Z","publishedAt":"2022-11-18T08:58:30.309Z","locale":"en","__contentType":"api::page.page","navigationItemId":608,"__templateName":"Generic"},"items":[],"description":"Improve the performance of your services as you grow"},{"id":610,"title":"Domains and DNS","menuAttached":false,"order":4,"path":"/Network/Network/DomainsandDNS","type":"INTERNAL","uiRouterKey":"domains-and-dns-1","slug":"network-network-domainsand-dns","external":false,"related":{"id":44,"title":"Domains and DNS","path":"/domains-and-dns/","scheduledAt":null,"createdAt":"2022-04-21T07:26:18.059Z","updatedAt":"2024-03-05T17:01:32.782Z","publishedAt":"2022-04-28T17:13:12.082Z","locale":"en","__contentType":"api::page.page","navigationItemId":610,"__templateName":"Generic"},"items":[],"description":"Buy domain names and manage DNS. 
Find your favourite extensions at a fair price"},{"id":792,"title":"IPAM (IP Address Manager)","menuAttached":false,"order":5,"path":"/Network/Network/IPAM","type":"INTERNAL","uiRouterKey":"ipam-ip-address-manager","slug":"network-network-ipam","external":false,"related":{"id":1300,"title":"IPAM","path":"/ipam/","scheduledAt":null,"createdAt":"2024-06-07T13:07:18.728Z","updatedAt":"2024-11-29T16:49:38.669Z","publishedAt":"2024-07-10T07:39:07.627Z","locale":"en","__contentType":"api::page.page","navigationItemId":792,"__templateName":"Generic"},"items":[],"description":"Centralize and simplify your Scaleway IP address management"},{"id":820,"title":"Edge Services","menuAttached":false,"order":6,"path":"/Network/Network/EdgeServices","type":"INTERNAL","uiRouterKey":"edge-services-2","slug":"network-network-edge-services","external":false,"related":{"id":1614,"title":"Edge Services","path":"/edge-services/","scheduledAt":null,"createdAt":"2025-01-31T15:54:24.871Z","updatedAt":"2025-01-31T16:01:57.242Z","publishedAt":"2025-01-31T15:54:28.318Z","locale":"en","__contentType":"api::page.page","navigationItemId":820,"__templateName":"Generic"},"items":[],"description":"Expose your HTTP services to the internet with security, reliability, and efficiency by design."},{"id":858,"title":"InterLink","menuAttached":false,"order":7,"path":"/Network/Network/InterLink","type":"INTERNAL","uiRouterKey":"inter-link","slug":"network-network-inter-link","external":false,"related":{"id":900,"title":"Scaleway InterLink","path":"/scaleway-interlink/","scheduledAt":null,"createdAt":"2023-08-03T14:39:22.643Z","updatedAt":"2025-02-06T14:54:50.915Z","publishedAt":"2023-08-04T09:53:13.589Z","locale":"en","__contentType":"api::page.page","navigationItemId":858,"__templateName":"Generic"},"items":[],"description":"Establish a hosted connection from your infrastructure to your VPC via a partner's 
network"}],"description":""}],"description":""},{"id":836,"title":"Data","menuAttached":false,"order":6,"path":"/data","type":"WRAPPER","uiRouterKey":"data-2","slug":"data-3","external":false,"items":[{"id":837,"title":"Databases","menuAttached":false,"order":1,"path":"/data/Databases","type":"WRAPPER","uiRouterKey":"databases","slug":"data-databases","external":false,"items":[{"id":838,"title":"Managed Database for PostgreSQL and MySQL","menuAttached":false,"order":1,"path":"/data/Databases/PostgreSQL_MySQL","type":"INTERNAL","uiRouterKey":"managed-database-for-postgre-sql-and-my-sql","slug":"data-databases-postgre-sql-my-sql","external":false,"related":{"id":48,"title":"Database","path":"/database/","scheduledAt":null,"createdAt":"2022-04-21T14:06:34.262Z","updatedAt":"2024-07-02T15:50:10.807Z","publishedAt":"2022-04-28T17:12:57.201Z","locale":"en","__contentType":"api::page.page","navigationItemId":838,"__templateName":"Generic"},"items":[],"description":"Start seamless database operations"},{"id":839,"title":"Serverless SQL Database","menuAttached":false,"order":2,"path":"/data/Databases/SQL_database","type":"INTERNAL","uiRouterKey":"serverless-sql-database-1","slug":"data-databases-sql-database","external":false,"related":{"id":823,"title":"Serverless Sql Database","path":"/serverless-sql-database/","scheduledAt":null,"createdAt":"2023-05-11T22:46:48.805Z","updatedAt":"2025-02-21T08:33:55.172Z","publishedAt":"2023-05-11T22:47:00.320Z","locale":"en","__contentType":"api::page.page","navigationItemId":839,"__templateName":"Generic"},"items":[],"description":"Go full serverless and take the complexity out of PostgreSQL database"},{"id":840,"title":"Managed Database for Redis®","menuAttached":false,"order":3,"path":"/data/Databases/redis","type":"INTERNAL","uiRouterKey":"managed-database-for-redis-1","slug":"data-databases-redis","external":false,"related":{"id":427,"title":"Managed Database for 
Redis™","path":"/managed-database-for-redistm/","scheduledAt":null,"createdAt":"2022-06-10T13:30:28.356Z","updatedAt":"2024-12-02T13:13:32.070Z","publishedAt":"2022-07-27T15:29:59.282Z","locale":"en","__contentType":"api::page.page","navigationItemId":840,"__templateName":"Generic"},"items":[],"description":"Fully managed Redis®* in seconds"},{"id":841,"title":"Managed MongoDB®","menuAttached":false,"order":4,"path":"/data/Databases/MongoDB","type":"INTERNAL","uiRouterKey":"managed-mongo-db-1","slug":"data-databases-mongo-db","external":false,"related":{"id":890,"title":"Managed MongoDB","path":"/managed-mongodb/","scheduledAt":null,"createdAt":"2023-07-25T07:58:39.536Z","updatedAt":"2025-02-21T08:27:45.300Z","publishedAt":"2023-10-03T08:31:21.477Z","locale":"en","__contentType":"api::page.page","navigationItemId":841,"__templateName":"Generic"},"items":[],"description":"Create a scalable, secure, and fully managed NoSQL solution"}],"description":""},{"id":843,"title":"Messaging and Queuing","menuAttached":false,"order":2,"path":"/data/mq","type":"WRAPPER","uiRouterKey":"messaging-and-queuing-1","slug":"data-mq","external":false,"items":[{"id":846,"title":"NATS","menuAttached":false,"order":1,"path":"/data/mq/NATS","type":"INTERNAL","uiRouterKey":"nats","slug":"data-mq-nats","external":false,"related":{"id":1506,"title":"NATS","path":"/nats/","scheduledAt":null,"createdAt":"2024-12-02T16:34:48.084Z","updatedAt":"2025-02-06T15:43:48.739Z","publishedAt":"2024-12-04T14:30:28.012Z","locale":"en","__contentType":"api::page.page","navigationItemId":846,"__templateName":"Generic"},"items":[],"description":"Build distributed and scalable client-server 
applications"},{"id":844,"title":"Queues","menuAttached":false,"order":2,"path":"/data/mq/Queues","type":"INTERNAL","uiRouterKey":"queues","slug":"data-mq-queues","external":false,"related":{"id":1505,"title":"Queues","path":"/queues/","scheduledAt":null,"createdAt":"2024-12-02T15:40:46.474Z","updatedAt":"2024-12-04T14:31:05.795Z","publishedAt":"2024-12-04T14:31:05.505Z","locale":"en","__contentType":"api::page.page","navigationItemId":844,"__templateName":"Generic"},"items":[],"description":"Create a queue, configure its delivery and message parameters"},{"id":845,"title":"Topics and Events","menuAttached":false,"order":3,"path":"/data/mq/TopicsEvents","type":"INTERNAL","uiRouterKey":"topics-and-events","slug":"data-mq-topics-events","external":false,"related":{"id":1509,"title":"Topics \u0026 Events","path":"/topics-and-events/","scheduledAt":null,"createdAt":"2024-12-02T17:09:09.294Z","updatedAt":"2025-02-06T15:45:39.733Z","publishedAt":"2024-12-04T14:30:14.535Z","locale":"en","__contentType":"api::page.page","navigationItemId":845,"__templateName":"Generic"},"items":[],"description":"Sent to a variety of devices and platforms through a single code interface"}],"description":""}],"description":""},{"id":596,"title":"Tools","menuAttached":false,"order":7,"path":"/ManagedServices","type":"WRAPPER","uiRouterKey":"tools","slug":"managed-services-2","external":false,"items":[{"id":619,"title":"Managed 
Services","menuAttached":false,"order":1,"path":"/ManagedServices/ManagedServices","type":"WRAPPER","uiRouterKey":"managed-services","slug":"managed-services-managed-services","external":false,"items":[{"id":623,"title":"Cockpit","menuAttached":false,"order":1,"path":"/ManagedServices/ManagedServices/Cockpit","type":"INTERNAL","uiRouterKey":"cockpit-2","slug":"managed-services-managed-services-cockpit","external":false,"related":{"id":814,"title":"Cockpit","path":"/cockpit/","scheduledAt":null,"createdAt":"2023-05-02T08:04:46.085Z","updatedAt":"2024-12-02T08:25:58.250Z","publishedAt":"2023-05-04T16:18:10.562Z","locale":"en","__contentType":"api::page.page","navigationItemId":623,"__templateName":"Generic"},"items":[],"description":"Monitor infrastructures in minutes with a fully managed observability solution"},{"id":620,"title":"Web Hosting","menuAttached":false,"order":2,"path":"/ManagedServices/ManagedServices/hosting","type":"INTERNAL","uiRouterKey":"web-hosting-4","slug":"managed-services-managed-services-hosting","external":false,"related":{"id":47,"title":"Web hosting","path":"/web-hosting/","scheduledAt":null,"createdAt":"2022-04-21T11:51:48.689Z","updatedAt":"2024-11-20T15:59:55.910Z","publishedAt":"2022-04-28T13:34:58.879Z","locale":"en","__contentType":"api::page.page","navigationItemId":620,"__templateName":"Generic"},"items":[],"description":"Hosting for individuals, professionals, and everyone in between."},{"id":621,"title":"Web Platform","menuAttached":false,"order":3,"path":"/ManagedServices/ManagedServices/WebPlatform","type":"INTERNAL","uiRouterKey":"web-platform-2","slug":"managed-services-managed-services-web-platform","external":false,"related":{"id":576,"title":"Web Platform - powered by Clever 
Cloud","path":"/web-platform-powered-by-clever-cloud/","scheduledAt":null,"createdAt":"2022-12-07T14:07:50.856Z","updatedAt":"2023-11-16T15:19:36.970Z","publishedAt":"2022-12-13T08:01:42.916Z","locale":"en","__contentType":"api::page.page","navigationItemId":621,"__templateName":"Generic"},"items":[],"description":"Ship your applications only in a few clicks."},{"id":622,"title":"Transactional Email","menuAttached":false,"order":4,"path":"/ManagedServices/ManagedServices/tem","type":"INTERNAL","uiRouterKey":"transactional-email-2","slug":"managed-services-managed-services-tem","external":false,"related":{"id":776,"title":"Transactional Email (TEM)","path":"/transactional-email-tem/","scheduledAt":null,"createdAt":"2023-04-05T16:33:35.536Z","updatedAt":"2024-10-21T14:45:56.496Z","publishedAt":"2023-04-06T10:30:43.491Z","locale":"en","__contentType":"api::page.page","navigationItemId":622,"__templateName":"Generic"},"items":[],"description":"Instant delivery of your transactional emails"},{"id":842,"title":"Distributed Data Lab","menuAttached":false,"order":5,"path":"/ManagedServices/ManagedServices/DataLab","type":"INTERNAL","uiRouterKey":"distributed-data-lab-1","slug":"managed-services-managed-services-data-lab","external":false,"related":{"id":949,"title":"Distributed Data Lab ","path":"/distributed-data-lab/","scheduledAt":null,"createdAt":"2023-09-21T11:57:12.802Z","updatedAt":"2025-01-03T13:55:54.202Z","publishedAt":"2024-09-27T15:10:48.257Z","locale":"en","__contentType":"api::page.page","navigationItemId":842,"__templateName":"Generic"},"items":[],"description":"Speed up data processing over very large volumes of data with an Apache Spark™ managed solution"},{"id":784,"title":"IoT Hub","menuAttached":false,"order":6,"path":"/ManagedServices/ManagedServices/iot","type":"INTERNAL","uiRouterKey":"io-t-hub","slug":"managed-services-managed-services-iot","external":false,"related":{"id":31,"title":"Iot 
hub","path":"/iot-hub/","scheduledAt":null,"createdAt":"2022-04-20T04:58:03.085Z","updatedAt":"2023-11-15T15:42:53.313Z","publishedAt":"2022-04-28T17:13:21.005Z","locale":"en","__contentType":"api::page.page","navigationItemId":784,"__templateName":"Generic"},"items":[],"description":"A purpose-built bridge between connected hardware and cloud."}],"description":""},{"id":615,"title":"Security \u0026 Organization","menuAttached":false,"order":2,"path":"/ManagedServices/SecurityandAccount","type":"WRAPPER","uiRouterKey":"security-3","slug":"managed-services-securityand-account","external":false,"items":[{"id":618,"title":"Identity and Access Management (IAM)","menuAttached":false,"order":1,"path":"/ManagedServices/SecurityandAccount/iam","type":"INTERNAL","uiRouterKey":"identity-and-access-management-iam-1","slug":"managed-services-securityand-account-iam","external":false,"related":{"id":569,"title":"IAM","path":"/iam/","scheduledAt":null,"createdAt":"2022-12-02T16:25:06.762Z","updatedAt":"2025-01-10T14:30:40.377Z","publishedAt":"2022-12-06T15:27:30.794Z","locale":"en","__contentType":"api::page.page","navigationItemId":618,"__templateName":"Generic"},"items":[],"description":"The easiest way to safely collaborate in the cloud"},{"id":616,"title":"Secret Manager","menuAttached":false,"order":2,"path":"/ManagedServices/SecurityandAccount/secretmanager","type":"INTERNAL","uiRouterKey":"secret-manager-1","slug":"managed-services-securityand-account-secretmanager","external":false,"related":{"id":779,"title":"Secret Manager","path":"/secret-manager/","scheduledAt":null,"createdAt":"2023-04-11T11:04:18.808Z","updatedAt":"2024-08-28T09:57:43.021Z","publishedAt":"2023-04-26T07:47:45.718Z","locale":"en","__contentType":"api::page.page","navigationItemId":616,"__templateName":"Generic"},"items":[],"description":"Protect your sensitive data across your cloud infrastructure"},{"id":617,"title":"Cost 
Manager","menuAttached":false,"order":3,"path":"/ManagedServices/SecurityandAccount/cost-manager","type":"INTERNAL","uiRouterKey":"cost-manager-1","slug":"managed-services-securityand-account-cost-manager","external":false,"related":{"id":1186,"title":"Cost Manager","path":"/cost-manager/","scheduledAt":null,"createdAt":"2024-04-08T07:36:07.839Z","updatedAt":"2024-04-08T09:14:21.699Z","publishedAt":"2024-04-08T09:14:21.666Z","locale":"en","__contentType":"api::page.page","navigationItemId":617,"__templateName":"Generic"},"items":[],"description":"Easily track your consumption in an all-in-one tool"},{"id":830,"title":"Environmental Footprint Calculator","menuAttached":false,"order":4,"path":"/ManagedServices/SecurityandAccount/Footprint","type":"INTERNAL","uiRouterKey":"environmental-footprint-calculator","slug":"managed-services-securityand-account-footprint","external":false,"related":{"id":1450,"title":"Environmental Footprint Calculator","path":"/environmental-footprint-calculator/","scheduledAt":null,"createdAt":"2024-10-28T14:47:30.518Z","updatedAt":"2025-01-27T14:26:21.239Z","publishedAt":"2024-11-04T12:12:34.311Z","locale":"en","__contentType":"api::page.page","navigationItemId":830,"__templateName":"Generic"},"items":[],"description":"Accurately track your environmental impact and make informed choices"}],"description":""},{"id":624,"title":"Developer Tools","menuAttached":false,"order":3,"path":"/ManagedServices/DeveloperTools","type":"WRAPPER","uiRouterKey":"developer-tools","slug":"managed-services-developer-tools","external":false,"items":[{"id":625,"title":"Scaleway API","menuAttached":false,"order":1,"path":"https://www.scaleway.com/en/developers/api/","type":"EXTERNAL","uiRouterKey":"scaleway-api-2","slug":{},"external":true,"description":"The Public Interface for 
developers"},{"id":626,"title":"CLI","menuAttached":false,"order":2,"path":"/ManagedServices/DeveloperTools/cli","type":"INTERNAL","uiRouterKey":"cli-2","slug":"managed-services-developer-tools-cli","external":false,"related":{"id":187,"title":"CLI","path":"/cli/","scheduledAt":null,"createdAt":"2022-05-03T08:37:17.214Z","updatedAt":"2024-08-22T05:35:23.543Z","publishedAt":"2022-05-03T11:43:09.246Z","locale":"en","__contentType":"api::page.page","navigationItemId":626,"__templateName":"Generic"},"items":[],"description":"Deploy and manage your infrastructure directly from the command line"},{"id":627,"title":"Terraform","menuAttached":false,"order":3,"path":"/ManagedServices/DeveloperTools/terraform","type":"INTERNAL","uiRouterKey":"terraform-1","slug":"managed-services-developer-tools-terraform","external":false,"related":{"id":40,"title":"Terraform","path":"/terraform/","scheduledAt":null,"createdAt":"2022-04-20T14:37:30.508Z","updatedAt":"2023-11-15T08:32:57.793Z","publishedAt":"2022-04-28T17:05:15.208Z","locale":"en","__contentType":"api::page.page","navigationItemId":627,"__templateName":"Generic"},"items":[],"description":"Securely and efficiently provision and manage Infrastructure as Code with Terraform"}],"description":""}],"description":""},{"id":597,"title":"Solutions","menuAttached":false,"order":8,"path":"/Solutions","type":"WRAPPER","uiRouterKey":"solutions-2","slug":"solutions-2","external":false,"items":[{"id":628,"title":"Industries","menuAttached":false,"order":1,"path":"/Solutions/Industries","type":"WRAPPER","uiRouterKey":"industries-1","slug":"solutions-industries","external":false,"items":[{"id":631,"title":"Gaming","menuAttached":false,"order":1,"path":"/Solutions/Industries/Gaming","type":"INTERNAL","uiRouterKey":"gaming-1","slug":"solutions-industries-gaming","external":false,"related":{"id":1024,"title":"Gaming Cloud 
Solutions","path":"/gaming-cloud-solutions/","scheduledAt":null,"createdAt":"2023-11-29T17:06:47.458Z","updatedAt":"2024-09-24T13:29:47.657Z","publishedAt":"2023-12-13T16:53:50.074Z","locale":"en","__contentType":"api::page.page","navigationItemId":631,"__templateName":"Generic"},"items":[],"description":""},{"id":630,"title":"Public Sector","menuAttached":false,"order":2,"path":"/Solutions/Industries/PublicSector","type":"INTERNAL","uiRouterKey":"public-sector","slug":"solutions-industries-public-sector","external":false,"related":{"id":986,"title":"Public sector solutions","path":"/public-sector-solutions/","scheduledAt":null,"createdAt":"2023-10-20T14:23:52.057Z","updatedAt":"2024-09-30T17:00:38.498Z","publishedAt":"2023-11-30T14:58:23.419Z","locale":"en","__contentType":"api::page.page","navigationItemId":630,"__templateName":"Generic"},"items":[],"description":""},{"id":633,"title":"Media and Entertainment","menuAttached":false,"order":3,"path":"/Solutions/Industries/MediaandEntertainment","type":"INTERNAL","uiRouterKey":"media-and-entertainment","slug":"solutions-industries-mediaand-entertainment","external":false,"related":{"id":1048,"title":"Media and Entertainment","path":"/media-and-entertainment/","scheduledAt":null,"createdAt":"2023-12-13T16:23:27.055Z","updatedAt":"2024-09-24T13:30:40.809Z","publishedAt":"2024-01-02T18:08:08.725Z","locale":"en","__contentType":"api::page.page","navigationItemId":633,"__templateName":"Generic"},"items":[],"description":""},{"id":632,"title":"Retail and E-commerce","menuAttached":false,"order":4,"path":"/Solutions/Industries/Retail","type":"INTERNAL","uiRouterKey":"retail-and-e-commerce-2","slug":"solutions-industries-retail","external":false,"related":{"id":1105,"title":"E-commerce retail 
Solutions","path":"/e-commerce-retail-solutions/","scheduledAt":null,"createdAt":"2024-02-28T09:44:45.583Z","updatedAt":"2025-02-20T16:47:32.650Z","publishedAt":"2024-04-02T14:56:24.762Z","locale":"en","__contentType":"api::page.page","navigationItemId":632,"__templateName":"Generic"},"items":[],"description":""},{"id":794,"title":"Financial Services","menuAttached":false,"order":5,"path":"/Solutions/Industries/FinancialServices","type":"INTERNAL","uiRouterKey":"financial-services","slug":"solutions-industries-financial-services","external":false,"related":{"id":1381,"title":"Financial services solutions","path":"/financial-services-solutions/","scheduledAt":null,"createdAt":"2024-08-06T12:19:51.917Z","updatedAt":"2024-11-12T09:58:52.666Z","publishedAt":"2024-08-06T12:31:25.580Z","locale":"en","__contentType":"api::page.page","navigationItemId":794,"__templateName":"Generic"},"items":[],"description":""},{"id":826,"title":"Industrial","menuAttached":false,"order":6,"path":"/Solutions/Industries/Industrial","type":"INTERNAL","uiRouterKey":"industrial","slug":"solutions-industries-industrial","external":false,"related":{"id":1411,"title":"Industrial 
solutions","path":"/industrial-solutions/","scheduledAt":null,"createdAt":"2024-10-02T10:14:37.728Z","updatedAt":"2025-01-27T09:37:50.233Z","publishedAt":"2024-10-03T16:29:42.042Z","locale":"en","__contentType":"api::page.page","navigationItemId":826,"__templateName":"Generic"},"items":[],"description":""},{"id":875,"title":"Technology","menuAttached":false,"order":7,"path":"/Solutions/Industries/Technology","type":"INTERNAL","uiRouterKey":"technology","slug":"solutions-industries-technology","external":false,"related":{"id":1572,"title":"tech-solutions","path":"/tech-solutions/","scheduledAt":null,"createdAt":"2024-12-23T10:44:13.921Z","updatedAt":"2024-12-27T13:27:25.098Z","publishedAt":"2024-12-23T10:49:09.338Z","locale":"en","__contentType":"api::page.page","navigationItemId":875,"__templateName":"Generic"},"items":[],"description":""},{"id":876,"title":"Healthcare","menuAttached":false,"order":8,"path":"/Solutions/Industries/Healthcare","type":"INTERNAL","uiRouterKey":"healthcare","slug":"solutions-industries-healthcare","external":false,"related":{"id":1579,"title":"healthcare and life sciences solutions","path":"/healthcare-and-life-sciences-solutions/","scheduledAt":null,"createdAt":"2025-01-03T15:32:23.751Z","updatedAt":"2025-01-08T15:53:36.314Z","publishedAt":"2025-01-08T14:08:25.957Z","locale":"en","__contentType":"api::page.page","navigationItemId":876,"__templateName":"Generic"},"items":[],"description":""}],"description":""},{"id":635,"title":"Use Cases","menuAttached":false,"order":2,"path":"/Solutions/usecases","type":"WRAPPER","uiRouterKey":"use-cases","slug":"solutions-usecases","external":false,"items":[{"id":868,"title":"Artificial Intelligence","menuAttached":false,"order":1,"path":"/Solutions/usecases/ai","type":"INTERNAL","uiRouterKey":"artificial-intelligence-2","slug":"solutions-usecases-ai","external":false,"related":{"id":1443,"title":"ai 
solutions","path":"/ai-solutions/","scheduledAt":null,"createdAt":"2024-10-25T08:04:00.807Z","updatedAt":"2024-11-04T10:57:00.737Z","publishedAt":"2024-10-28T08:49:11.873Z","locale":"en","__contentType":"api::page.page","navigationItemId":868,"__templateName":"Generic"},"items":[],"description":""},{"id":638,"title":"Cloud Storage Solutions","menuAttached":false,"order":2,"path":"/Solutions/usecases/cloudstorage","type":"INTERNAL","uiRouterKey":"cloud-storage-solutions","slug":"solutions-usecases-cloudstorage","external":false,"related":{"id":595,"title":"Cloud Storage Solutions","path":"/cloud-storage-solutions/","scheduledAt":null,"createdAt":"2022-12-19T13:31:12.676Z","updatedAt":"2024-10-25T13:40:34.304Z","publishedAt":"2023-01-31T10:48:28.580Z","locale":"en","__contentType":"api::page.page","navigationItemId":638,"__templateName":"Generic"},"items":[],"description":""},{"id":637,"title":"Kubernetes Solutions","menuAttached":false,"order":3,"path":"/Solutions/usecases/kub-sol","type":"INTERNAL","uiRouterKey":"kubernetes-solutions-1","slug":"solutions-usecases-kub-sol","external":false,"related":{"id":616,"title":"Kubernetes Solutions","path":"/kubernetes-solutions/","scheduledAt":null,"createdAt":"2023-01-10T16:25:48.652Z","updatedAt":"2024-11-20T16:45:40.105Z","publishedAt":"2023-03-28T07:49:24.834Z","locale":"en","__contentType":"api::page.page","navigationItemId":637,"__templateName":"Generic"},"items":[],"description":""},{"id":636,"title":"Serverless Applications","menuAttached":false,"order":4,"path":"/Solutions/usecases/ServerlessApplications","type":"INTERNAL","uiRouterKey":"serverless-applications-1","slug":"solutions-usecases-serverless-applications","external":false,"related":{"id":780,"title":"Build Scalable Applications With 
Serverless","path":"/build-scalable-applications-with-serverless/","scheduledAt":null,"createdAt":"2023-04-12T08:42:06.395Z","updatedAt":"2024-05-15T13:59:21.827Z","publishedAt":"2023-05-12T06:59:34.924Z","locale":"en","__contentType":"api::page.page","navigationItemId":636,"__templateName":"Generic"},"items":[],"description":""},{"id":869,"title":"Managed Web Hosting","menuAttached":false,"order":5,"path":"/Solutions/usecases/wenhosting","type":"INTERNAL","uiRouterKey":"managed-web-hosting-1","slug":"solutions-usecases-wenhosting","external":false,"related":{"id":827,"title":"Managed Web Hosting","path":"/managed-web-hosting/","scheduledAt":null,"createdAt":"2023-05-15T09:39:39.531Z","updatedAt":"2024-08-28T06:42:02.109Z","publishedAt":"2023-05-15T12:31:13.810Z","locale":"en","__contentType":"api::page.page","navigationItemId":869,"__templateName":"Generic"},"items":[],"description":""}],"description":""},{"id":870,"title":"For Startups","menuAttached":false,"order":3,"path":"/Solutions/Startups","type":"WRAPPER","uiRouterKey":"for-startups","slug":"solutions-startups","external":false,"items":[{"id":873,"title":"Apply for Startup Program","menuAttached":false,"order":1,"path":"/Solutions/Startups/Apply","type":"INTERNAL","uiRouterKey":"apply-for-startup-program","slug":"solutions-startups-apply","external":false,"related":{"id":82,"title":"Startup program","path":"/startup-program/","scheduledAt":null,"createdAt":"2022-04-27T19:14:18.251Z","updatedAt":"2025-02-18T10:31:31.568Z","publishedAt":"2022-05-11T15:19:00.591Z","locale":"en","__contentType":"api::page.page","navigationItemId":873,"__templateName":"Generic"},"items":[],"description":""},{"id":871,"title":"Founders Program","menuAttached":false,"order":2,"path":"/Solutions/Startups/Founders","type":"INTERNAL","uiRouterKey":"founders-program","slug":"solutions-startups-founders","external":false,"related":{"id":805,"title":"Founders 
Program","path":"/startup-program/founders-program/","scheduledAt":null,"createdAt":"2023-04-26T15:15:16.052Z","updatedAt":"2025-02-18T10:31:32.123Z","publishedAt":"2023-04-26T15:30:48.551Z","locale":"en","__contentType":"api::page.page","navigationItemId":871,"__templateName":"Generic"},"items":[],"description":""},{"id":874,"title":"Early Stage Program","menuAttached":false,"order":3,"path":"/Solutions/Startups/Early","type":"INTERNAL","uiRouterKey":"early-stage-program","slug":"solutions-startups-early","external":false,"related":{"id":806,"title":"Early Stage Program","path":"/startup-program/early-stage-program/","scheduledAt":null,"createdAt":"2023-04-26T15:38:44.183Z","updatedAt":"2025-02-18T10:31:32.162Z","publishedAt":"2023-04-26T15:41:51.729Z","locale":"en","__contentType":"api::page.page","navigationItemId":874,"__templateName":"Generic"},"items":[],"description":""},{"id":872,"title":"Growth Stage","menuAttached":false,"order":4,"path":"/Solutions/Startups/Growth","type":"INTERNAL","uiRouterKey":"growth-stage","slug":"solutions-startups-growth","external":false,"related":{"id":807,"title":"Growth Stage Program","path":"/startup-program/growth-stage-program/","scheduledAt":null,"createdAt":"2023-04-26T15:50:16.870Z","updatedAt":"2025-02-18T10:31:32.172Z","publishedAt":"2023-04-26T15:52:22.068Z","locale":"en","__contentType":"api::page.page","navigationItemId":872,"__templateName":"Generic"},"items":[],"description":""}],"description":""}],"description":""},{"id":744,"title":"Resources","menuAttached":false,"order":9,"path":"/Resources","type":"WRAPPER","uiRouterKey":"resources-2","slug":"resources-3","external":false,"items":[{"id":746,"title":"Ecosystem","menuAttached":false,"order":1,"path":"/Resources/Ecosystem","type":"WRAPPER","uiRouterKey":"ecosystem","slug":"resources-ecosystem","external":false,"items":[{"id":751,"title":"All 
products","menuAttached":false,"order":1,"path":"/Resources/Ecosystem/All_products","type":"INTERNAL","uiRouterKey":"all-products-2","slug":"resources-ecosystem-all-products","external":false,"related":{"id":223,"title":"All Products","path":"/all-products/","scheduledAt":null,"createdAt":"2022-05-09T13:56:36.517Z","updatedAt":"2025-01-27T10:23:16.899Z","publishedAt":"2022-05-09T14:37:46.378Z","locale":"en","__contentType":"api::page.page","navigationItemId":751,"__templateName":"Generic"},"items":[],"description":""},{"id":828,"title":"Product updates","menuAttached":false,"order":2,"path":"/Resources/Ecosystem/Productupdates","type":"INTERNAL","uiRouterKey":"product-updates","slug":"resources-ecosystem-productupdates","external":false,"related":{"id":1451,"title":"Product updates","path":"/product-updates/","scheduledAt":null,"createdAt":"2024-10-28T16:25:15.626Z","updatedAt":"2025-01-07T09:57:23.124Z","publishedAt":"2024-10-30T16:21:39.156Z","locale":"en","__contentType":"api::page.page","navigationItemId":828,"__templateName":"Generic"},"items":[],"description":""},{"id":750,"title":"Betas","menuAttached":false,"order":3,"path":"/Resources/Ecosystem/betas","type":"INTERNAL","uiRouterKey":"betas","slug":"resources-ecosystem-betas","external":false,"related":{"id":90,"title":"Betas","path":"/betas/","scheduledAt":null,"createdAt":"2022-04-28T14:06:08.789Z","updatedAt":"2025-02-05T15:06:36.492Z","publishedAt":"2022-04-28T14:39:18.717Z","locale":"en","__contentType":"api::page.page","navigationItemId":750,"__templateName":"Generic"},"items":[],"description":""},{"id":747,"title":"Changelog","menuAttached":false,"order":4,"path":"https://www.scaleway.com/en/docs/changelog/","type":"EXTERNAL","uiRouterKey":"changelog-2","slug":{},"external":true,"description":""},{"id":758,"title":"Blog","menuAttached":false,"order":5,"path":"https://www.scaleway.com/en/blog/","type":"EXTERNAL","uiRouterKey":"blog-2","slug":{},"external":true,"description":""}],"description":""},{"id"
:745,"title":"Community","menuAttached":false,"order":2,"path":"/Resources/Community","type":"WRAPPER","uiRouterKey":"community","slug":"resources-community","external":false,"items":[{"id":748,"title":"Slack Community","menuAttached":false,"order":1,"path":"https://slack.scaleway.com/","type":"EXTERNAL","uiRouterKey":"slack-community-2","slug":{},"external":true,"description":""},{"id":749,"title":"Feature Requests","menuAttached":false,"order":2,"path":"https://feature-request.scaleway.com/","type":"EXTERNAL","uiRouterKey":"feature-requests-2","slug":{},"external":true,"description":""},{"id":757,"title":"Scaleway Learning","menuAttached":false,"order":3,"path":"/Resources/Community/Scaleway_Learning","type":"INTERNAL","uiRouterKey":"scaleway-learning-2","slug":"resources-community-scaleway-learning","external":false,"related":{"id":597,"title":"Scaleway Learning","path":"/scaleway-learning/","scheduledAt":null,"createdAt":"2022-12-20T08:57:37.886Z","updatedAt":"2024-12-11T09:57:09.345Z","publishedAt":"2023-01-02T21:14:10.049Z","locale":"en","__contentType":"api::page.page","navigationItemId":757,"__templateName":"Generic"},"items":[],"description":""}],"description":""},{"id":752,"title":"Company","menuAttached":false,"order":3,"path":"/Resources/Company","type":"WRAPPER","uiRouterKey":"company-1","slug":"resources-company","external":false,"items":[{"id":756,"title":"Events","menuAttached":false,"order":1,"path":"/Resources/Company/Events","type":"INTERNAL","uiRouterKey":"events-1","slug":"resources-company-events","external":false,"related":{"id":699,"title":"Events","path":"/events/","scheduledAt":null,"createdAt":"2023-03-13T09:14:30.830Z","updatedAt":"2025-02-17T10:12:28.627Z","publishedAt":"2023-03-13T09:14:41.552Z","locale":"en","__contentType":"api::page.page","navigationItemId":756,"__templateName":"Generic"},"items":[],"description":""},{"id":796,"title":"Marketplace","menuAttached":false,"order":2,"path":"https://www.scaleway.com/en/marketplace/","type
":"EXTERNAL","uiRouterKey":"marketplace","slug":{},"external":true,"description":""},{"id":755,"title":"Careers","menuAttached":false,"order":3,"path":"/Resources/Company/Careers","type":"INTERNAL","uiRouterKey":"careers-1","slug":"resources-company-careers","external":false,"related":{"id":766,"title":"Careers","path":"/careers/","scheduledAt":null,"createdAt":"2023-03-31T14:17:38.589Z","updatedAt":"2024-07-16T10:08:23.648Z","publishedAt":"2024-02-12T15:39:28.684Z","locale":"en","__contentType":"api::page.page","navigationItemId":755,"__templateName":"Generic"},"items":[],"description":""},{"id":753,"title":"About us","menuAttached":false,"order":4,"path":"/Resources/Company/Aboutus","type":"INTERNAL","uiRouterKey":"about-us-1","slug":"resources-company-aboutus","external":false,"related":{"id":195,"title":"About us","path":"/about-us/","scheduledAt":null,"createdAt":"2022-05-03T13:05:13.546Z","updatedAt":"2023-12-14T09:00:58.075Z","publishedAt":"2022-05-11T12:26:40.217Z","locale":"en","__contentType":"api::page.page","navigationItemId":753,"__templateName":"Generic"},"items":[],"description":""},{"id":754,"title":"Customer Testimonials","menuAttached":false,"order":5,"path":"/Resources/Company/customer-testimonials","type":"INTERNAL","uiRouterKey":"customer-testimonials","slug":"resources-company-customer-testimonials","external":false,"related":{"id":294,"title":"Customer testimonials","path":"/customer-testimonials/","scheduledAt":null,"createdAt":"2022-05-19T15:33:42.418Z","updatedAt":"2024-07-08T12:41:04.663Z","publishedAt":"2022-05-19T15:37:23.202Z","locale":"en","__contentType":"api::page.page","navigationItemId":754,"__templateName":"Generic"},"items":[],"description":""}],"description":""},{"id":860,"title":"Partnership","menuAttached":false,"order":4,"path":"/Resources/Partnership","type":"WRAPPER","uiRouterKey":"partnership","slug":"resources-partnership","external":false,"items":[{"id":861,"title":"Partners 
Program","menuAttached":false,"order":1,"path":"/Resources/Partnership/PartnersProgram","type":"INTERNAL","uiRouterKey":"partners-program","slug":"resources-partnership-partners-program","external":false,"related":{"id":1350,"title":"Partners Program","path":"/partners-program/","scheduledAt":null,"createdAt":"2024-07-16T15:02:57.413Z","updatedAt":"2024-12-03T15:57:20.933Z","publishedAt":"2024-09-23T10:47:55.235Z","locale":"en","__contentType":"api::page.page","navigationItemId":861,"__templateName":"Generic"},"items":[],"description":""},{"id":862,"title":"Find your partner","menuAttached":false,"order":2,"path":"/Resources/Partnership/Find","type":"INTERNAL","uiRouterKey":"find-your-partner","slug":"resources-partnership-find","external":false,"related":{"id":1490,"title":"Find partner","path":"/find-partner/","scheduledAt":null,"createdAt":"2024-11-26T13:32:45.578Z","updatedAt":"2025-01-13T10:32:23.025Z","publishedAt":"2024-12-01T16:19:11.068Z","locale":"en","__contentType":"api::page.page","navigationItemId":862,"__templateName":"Generic"},"items":[],"description":""},{"id":863,"title":"Become a Partner","menuAttached":false,"order":3,"path":"/Resources/Partnership/become","type":"INTERNAL","uiRouterKey":"become-a-partner-1","slug":"resources-partnership-become","external":false,"related":{"id":1495,"title":"Partner 
Application","path":"/partner-application/","scheduledAt":null,"createdAt":"2024-11-27T13:07:23.267Z","updatedAt":"2025-02-18T16:14:09.502Z","publishedAt":"2024-11-27T13:07:24.432Z","locale":"en","__contentType":"api::page.page","navigationItemId":863,"__templateName":"Generic"},"items":[],"description":""}],"description":""}],"description":""},{"id":598,"title":"Pricing","menuAttached":false,"order":10,"path":"/pricing","type":"INTERNAL","uiRouterKey":"pricing-2","slug":"pricing-1","external":false,"related":{"id":1236,"title":"Pricing","path":"/pricing/","scheduledAt":null,"createdAt":"2024-05-14T07:33:54.370Z","updatedAt":"2025-01-24T08:42:07.875Z","publishedAt":"2024-05-14T13:19:03.795Z","locale":"en","__contentType":"api::page.page","navigationItemId":598,"__templateName":"Generic"},"items":[],"description":""}],"topBarNavigationItems":[{"id":425,"title":"Docs","menuAttached":false,"order":1,"path":"https://www.scaleway.com/en/docs/","type":"EXTERNAL","uiRouterKey":"docs","slug":{},"external":true},{"id":427,"title":"Contact","menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/contact/","type":"EXTERNAL","uiRouterKey":"contact-2","slug":{},"external":true,"description":""}],"MOTD":{"id":7803,"label":"Deepseek R1 Distilled Llama 70B is now available!","url":"https://console.scaleway.com/generative-api/models","page":{"data":null}},"ctaList":{"dediboxCTAList":[{"id":6611,"label":"Log in","url":"https://console.online.net/en/login","page":{"data":null}},{"id":6612,"label":"Sign up","url":"https://console.online.net/en/user/subscribe","page":{"data":null}}],"defaultCTAList":[{"id":6610,"label":"Log in","url":"https://console.scaleway.com/login","page":{"data":null}},{"id":6609,"label":"Sign 
up","url":"https://console.scaleway.com/register","page":{"data":null}}]}},"footer":[{"id":276,"title":"Products","menuAttached":false,"order":1,"path":"/products","type":"WRAPPER","uiRouterKey":"products","slug":"products-2","external":false,"items":[{"id":283,"title":"All Products","menuAttached":false,"order":1,"path":"/products/AllProducts","type":"INTERNAL","uiRouterKey":"all-products","slug":"products-all-products","external":false,"related":{"id":223,"title":"All Products","path":"/all-products/","scheduledAt":null,"createdAt":"2022-05-09T13:56:36.517Z","updatedAt":"2025-01-27T10:23:16.899Z","publishedAt":"2022-05-09T14:37:46.378Z","locale":"en","__contentType":"api::page.page","navigationItemId":283,"__templateName":"Generic"},"items":[],"description":""},{"id":759,"title":"Betas","menuAttached":false,"order":2,"path":"/products/betas","type":"INTERNAL","uiRouterKey":"betas-1","slug":"products-betas","external":false,"related":{"id":90,"title":"Betas","path":"/betas/","scheduledAt":null,"createdAt":"2022-04-28T14:06:08.789Z","updatedAt":"2025-02-05T15:06:36.492Z","publishedAt":"2022-04-28T14:39:18.717Z","locale":"en","__contentType":"api::page.page","navigationItemId":759,"__templateName":"Generic"},"items":[],"description":""},{"id":281,"title":"Bare Metal","menuAttached":false,"order":3,"path":"/products/BareMetal","type":"INTERNAL","uiRouterKey":"bare-metal-2","slug":"products-bare-metal","external":false,"related":{"id":961,"title":"Bare 
Metal","path":"/bare-metal/","scheduledAt":null,"createdAt":"2023-09-27T07:45:06.975Z","updatedAt":"2025-01-24T08:21:16.687Z","publishedAt":"2023-10-17T12:08:02.344Z","locale":"en","__contentType":"api::page.page","navigationItemId":281,"__templateName":"Generic"},"items":[],"description":""},{"id":284,"title":"Dedibox","menuAttached":false,"order":4,"path":"/products/Dedibox","type":"INTERNAL","uiRouterKey":"dedibox-4","slug":"products-dedibox","external":false,"related":{"id":29,"title":"Dedibox","path":"/dedibox/","scheduledAt":null,"createdAt":"2022-04-19T15:29:02.488Z","updatedAt":"2024-12-02T21:42:14.962Z","publishedAt":"2022-04-28T17:05:07.122Z","locale":"en","__contentType":"api::page.page","navigationItemId":284,"__templateName":"Generic"},"items":[],"description":""},{"id":282,"title":"Elastic Metal","menuAttached":false,"order":5,"path":"/products/ElasticMetal","type":"INTERNAL","uiRouterKey":"elastic-metal-4","slug":"products-elastic-metal","external":false,"related":{"id":87,"title":"Elastic Metal","path":"/elastic-metal/","scheduledAt":null,"createdAt":"2022-04-28T12:45:28.696Z","updatedAt":"2025-01-24T13:35:03.496Z","publishedAt":"2022-04-28T13:22:46.501Z","locale":"en","__contentType":"api::page.page","navigationItemId":282,"__templateName":"Generic"},"items":[],"description":""},{"id":285,"title":"Compute Instances","menuAttached":false,"order":6,"path":"/products/Compute","type":"INTERNAL","uiRouterKey":"compute-instances","slug":"products-compute","external":false,"related":{"id":655,"title":"Virtual 
Instances","path":"/virtual-instances/","scheduledAt":null,"createdAt":"2023-02-20T10:48:52.279Z","updatedAt":"2025-02-11T13:16:39.501Z","publishedAt":"2023-02-28T08:32:03.960Z","locale":"en","__contentType":"api::page.page","navigationItemId":285,"__templateName":"Generic"},"items":[],"description":""},{"id":286,"title":"GPU","menuAttached":false,"order":7,"path":"/products/GPu","type":"INTERNAL","uiRouterKey":"gpu-6","slug":"products-g-pu","external":false,"related":{"id":1025,"title":"GPU Instances","path":"/gpu-instances/","scheduledAt":null,"createdAt":"2023-11-30T13:15:51.769Z","updatedAt":"2024-11-19T16:38:15.121Z","publishedAt":"2023-12-12T12:52:20.083Z","locale":"en","__contentType":"api::page.page","navigationItemId":286,"__templateName":"Generic"},"items":[],"description":""},{"id":287,"title":"Containers","menuAttached":false,"order":8,"path":"/products/Containers","type":"INTERNAL","uiRouterKey":"containers-6","slug":"products-containers","external":false,"related":{"id":465,"title":"Containers","path":"/containers/","scheduledAt":null,"createdAt":"2022-07-29T15:09:20.535Z","updatedAt":"2024-08-28T07:05:23.005Z","publishedAt":"2023-02-27T13:53:48.270Z","locale":"en","__contentType":"api::page.page","navigationItemId":287,"__templateName":"Generic"},"items":[],"description":""},{"id":288,"title":"Object Storage","menuAttached":false,"order":9,"path":"/products/ObjectStorage","type":"INTERNAL","uiRouterKey":"object-storage-4","slug":"products-object-storage","external":false,"related":{"id":652,"title":"Object Storage","path":"/object-storage/","scheduledAt":null,"createdAt":"2023-02-16T09:44:56.414Z","updatedAt":"2024-12-02T14:09:58.690Z","publishedAt":"2023-03-07T18:05:15.061Z","locale":"en","__contentType":"api::page.page","navigationItemId":288,"__templateName":"Generic"},"items":[],"description":""},{"id":289,"title":"Block 
Storage","menuAttached":false,"order":10,"path":"/products/BlockStorage","type":"INTERNAL","uiRouterKey":"block-storage-4","slug":"products-block-storage","external":false,"related":{"id":141,"title":"Block Storage","path":"/block-storage/","scheduledAt":null,"createdAt":"2022-05-02T08:20:39.280Z","updatedAt":"2025-02-05T14:21:18.667Z","publishedAt":"2022-05-02T08:28:12.783Z","locale":"en","__contentType":"api::page.page","navigationItemId":289,"__templateName":"Generic"},"items":[],"description":""}],"description":""},{"id":275,"title":"Resources","menuAttached":false,"order":2,"path":"/resources","type":"WRAPPER","uiRouterKey":"resources","slug":"resources-3","external":false,"items":[{"id":290,"title":"Documentation","menuAttached":false,"order":1,"path":"https://www.scaleway.com/en/docs/","type":"EXTERNAL","uiRouterKey":"documentation","slug":{},"external":true,"description":""},{"id":292,"title":"Changelog","menuAttached":false,"order":2,"path":"https://www.scaleway.com/en/docs/changelog/","type":"EXTERNAL","uiRouterKey":"changelog","slug":{},"external":true,"description":""},{"id":291,"title":"Blog","menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/blog/","type":"EXTERNAL","uiRouterKey":"blog","slug":{},"external":true,"description":""},{"id":293,"title":"Feature Requests","menuAttached":false,"order":4,"path":"https://feature-request.scaleway.com/","type":"EXTERNAL","uiRouterKey":"feature-requests","slug":{},"external":true,"description":""},{"id":321,"title":"Slack Community","menuAttached":false,"order":5,"path":"https://slack.scaleway.com/","type":"EXTERNAL","uiRouterKey":"slack-community-2","slug":{},"external":true,"description":""}],"description":""},{"id":280,"title":"Contact","menuAttached":false,"order":3,"path":"/Contact","type":"WRAPPER","uiRouterKey":"contact-2","slug":"contact-4","external":false,"items":[{"id":294,"title":"Create a 
ticket","menuAttached":false,"order":1,"path":"https://console.scaleway.com/support/create/","type":"EXTERNAL","uiRouterKey":"create-a-ticket","slug":{},"external":true,"description":""},{"id":296,"title":"Report Abuse","menuAttached":false,"order":2,"path":"https://console.scaleway.com/support/abuses/create/","type":"EXTERNAL","uiRouterKey":"report-abuse","slug":{},"external":true,"description":""},{"id":295,"title":"Status","menuAttached":false,"order":3,"path":"https://status.scaleway.com/","type":"EXTERNAL","uiRouterKey":"status","slug":{},"external":true,"description":""},{"id":298,"title":"Dedibox Console online.net","menuAttached":false,"order":4,"path":"https://console.online.net/fr/login","type":"EXTERNAL","uiRouterKey":"dedibox-console-online-net","slug":{},"external":true,"description":""},{"id":407,"title":"Support plans","menuAttached":false,"order":5,"path":"/Contact/Support","type":"INTERNAL","uiRouterKey":"support-plans","slug":"contact-support","external":false,"related":{"id":493,"title":"Assistance","path":"/assistance/","scheduledAt":null,"createdAt":"2022-09-26T15:14:28.440Z","updatedAt":"2024-08-28T07:19:37.841Z","publishedAt":"2022-10-03T12:20:34.441Z","locale":"en","__contentType":"api::page.page","navigationItemId":407,"__templateName":"Generic"},"items":[],"description":""},{"id":409,"title":"Brand resources","menuAttached":false,"order":6,"path":"https://ultraviolet.scaleway.com/6dd9b5c45/p/62b4e2-ultraviolet","type":"EXTERNAL","uiRouterKey":"brand-resources","slug":{},"external":true,"description":""}],"description":""},{"id":436,"title":"Company","menuAttached":false,"order":4,"path":"/scw","type":"WRAPPER","uiRouterKey":"company","slug":"scw","external":false,"items":[{"id":440,"title":"About us","menuAttached":false,"order":1,"path":"/scw/About-us","type":"INTERNAL","uiRouterKey":"about-us","slug":"scw-about-us","external":false,"related":{"id":195,"title":"About 
us","path":"/about-us/","scheduledAt":null,"createdAt":"2022-05-03T13:05:13.546Z","updatedAt":"2023-12-14T09:00:58.075Z","publishedAt":"2022-05-11T12:26:40.217Z","locale":"en","__contentType":"api::page.page","navigationItemId":440,"__templateName":"Generic"},"items":[],"description":""},{"id":441,"title":"Events","menuAttached":false,"order":2,"path":"/scw/events","type":"INTERNAL","uiRouterKey":"events","slug":"scw-events","external":false,"related":{"id":699,"title":"Events","path":"/events/","scheduledAt":null,"createdAt":"2023-03-13T09:14:30.830Z","updatedAt":"2025-02-17T10:12:28.627Z","publishedAt":"2023-03-13T09:14:41.552Z","locale":"en","__contentType":"api::page.page","navigationItemId":441,"__templateName":"Generic"},"items":[],"description":""},{"id":798,"title":"Marketplace","menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/marketplace/","type":"EXTERNAL","uiRouterKey":"marketplace-2","slug":{},"external":true,"description":""},{"id":439,"title":"Environment ","menuAttached":false,"order":4,"path":"/scw/environment","type":"INTERNAL","uiRouterKey":"environment","slug":"scw-environment","external":false,"related":{"id":59,"title":"Environmental leadership ","path":"/environmental-leadership/","scheduledAt":null,"createdAt":"2022-04-26T08:30:15.289Z","updatedAt":"2025-02-04T15:14:39.010Z","publishedAt":"2022-04-28T17:12:24.574Z","locale":"en","__contentType":"api::page.page","navigationItemId":439,"__templateName":"Generic"},"items":[],"description":""},{"id":790,"title":"Social Responsibility","menuAttached":false,"order":5,"path":"/scw/SocialResponsibility","type":"INTERNAL","uiRouterKey":"social-responsibility","slug":"scw-social-responsibility","external":false,"related":{"id":184,"title":"Social 
responsibility","path":"/social-responsibility/","scheduledAt":null,"createdAt":"2022-05-03T07:48:38.038Z","updatedAt":"2024-08-28T07:08:11.382Z","publishedAt":"2022-05-03T13:08:48.890Z","locale":"en","__contentType":"api::page.page","navigationItemId":790,"__templateName":"Generic"},"items":[],"description":""},{"id":438,"title":"Security","menuAttached":false,"order":6,"path":"/scw/security","type":"INTERNAL","uiRouterKey":"security-4","slug":"scw-security","external":false,"related":{"id":190,"title":"Security and resilience","path":"/security-and-resilience/","scheduledAt":null,"createdAt":"2022-05-03T10:22:40.696Z","updatedAt":"2024-08-28T08:56:56.744Z","publishedAt":"2022-05-11T12:39:01.810Z","locale":"en","__contentType":"api::page.page","navigationItemId":438,"__templateName":"Generic"},"items":[],"description":""},{"id":782,"title":"Shared Responsibility Model","menuAttached":false,"order":7,"path":"/scw/Model","type":"INTERNAL","uiRouterKey":"shared-responsibility-model","slug":"scw-model","external":false,"related":{"id":1180,"title":"Shared Responsibility 
Model","path":"/shared-responsibility-model/","scheduledAt":null,"createdAt":"2024-04-04T15:54:36.614Z","updatedAt":"2024-11-18T13:28:57.006Z","publishedAt":"2024-04-04T15:56:39.573Z","locale":"en","__contentType":"api::page.page","navigationItemId":782,"__templateName":"Generic"},"items":[],"description":""},{"id":442,"title":"News","menuAttached":false,"order":8,"path":"/scw/news","type":"INTERNAL","uiRouterKey":"news","slug":"scw-news","external":false,"related":{"id":263,"title":"News","path":"/news/","scheduledAt":null,"createdAt":"2022-05-19T10:28:45.212Z","updatedAt":"2022-05-31T07:47:17.728Z","publishedAt":"2022-05-19T10:29:13.394Z","locale":"en","__contentType":"api::page.page","navigationItemId":442,"__templateName":"Generic"},"items":[],"description":""},{"id":443,"title":"Careers","menuAttached":false,"order":9,"path":"/scw/career/","type":"INTERNAL","uiRouterKey":"careers","slug":"scw-career","external":false,"related":{"id":766,"title":"Careers","path":"/careers/","scheduledAt":null,"createdAt":"2023-03-31T14:17:38.589Z","updatedAt":"2024-07-16T10:08:23.648Z","publishedAt":"2024-02-12T15:39:28.684Z","locale":"en","__contentType":"api::page.page","navigationItemId":443,"__templateName":"Generic"},"items":[],"description":""},{"id":445,"title":"Scaleway Learning","menuAttached":false,"order":10,"path":"/scw/learning","type":"INTERNAL","uiRouterKey":"scaleway-learning","slug":"scw-learning","external":false,"related":{"id":597,"title":"Scaleway Learning","path":"/scaleway-learning/","scheduledAt":null,"createdAt":"2022-12-20T08:57:37.886Z","updatedAt":"2024-12-11T09:57:09.345Z","publishedAt":"2023-01-02T21:14:10.049Z","locale":"en","__contentType":"api::page.page","navigationItemId":445,"__templateName":"Generic"},"items":[],"description":""},{"id":444,"title":"Client Success 
Stories","menuAttached":false,"order":11,"path":"/scw/clientstor/","type":"INTERNAL","uiRouterKey":"client-success-stories","slug":"scw-clientstor","external":false,"related":{"id":294,"title":"Customer testimonials","path":"/customer-testimonials/","scheduledAt":null,"createdAt":"2022-05-19T15:33:42.418Z","updatedAt":"2024-07-08T12:41:04.663Z","publishedAt":"2022-05-19T15:37:23.202Z","locale":"en","__contentType":"api::page.page","navigationItemId":444,"__templateName":"Generic"},"items":[],"description":""},{"id":437,"title":"Labs","menuAttached":false,"order":12,"path":"https://labs.scaleway.com/en/","type":"EXTERNAL","uiRouterKey":"labs","slug":{},"external":true,"description":""}],"description":""}],"pagination":{"page":3,"pageSize":12,"pageCount":21,"total":250},"pageType":"homepage-paginated"},"__N_SSG":true},"page":"/blog/[slug]","query":{"slug":"3"},"buildId":"4xZKwUKlhtIRe3nXE5xXw","isFallback":false,"gsp":true,"locale":"en","locales":["default","en","fr"],"defaultLocale":"default","scriptLoader":[]}</script></body></html>

Pages: 1 2 3 4 5 6 7 8 9 10