CINXE.COM

Load Balancer at Scaleway | Scaleway Blog

<!DOCTYPE html><html lang="en"><head><meta charSet="utf-8"/><meta name="viewport" content="width=device-width"/><title>Load Balancer at Scaleway<!-- --> <!-- --> | Scaleway Blog</title><meta name="description" content="Server applications failures and lack of scalability can cause real problems for a server. When server applications fail, a node is unavailable to answer requests. In that case, if there is no redundancy, the service is unavailable as long as the server is not back in service. "/><meta property="og:url" content="https://www.scaleway.com/en/blog/load-balancer-scaleway-what-is-it/"/><meta property="og:type" content="article"/><meta property="og:title" content="Load Balancer at Scaleway"/><meta property="og:description" content="Server applications failures and lack of scalability can cause real problems for a server. When server applications fail, a node is unavailable to answer requests. In that case, if there is no redundancy, the service is unavailable as long as the server is not back in service. "/><meta property="article:author" content="https://www.scaleway.com/en/blog/author/remy-leone"/><meta property="og:image" content="https://www.scaleway.com/scaleway-og.jpg"/><meta content="https://www.scaleway.com/scaleway-og.jpg" name="twitter:image"/><meta content="summary" name="twitter:card"/><meta content="@Scaleway" name="twitter:creator"/><meta content="Load Balancer at Scaleway" name="twitter:title"/><meta content="Server applications failures and lack of scalability can cause real problems for a server. When server applications fail, a node is unavailable to answer requests. In that case, if there is no redundancy, the service is unavailable as long as the server is not back in service. 
" name="twitter:description"/><link href="https://www.scaleway.com/en/blog/load-balancer-scaleway-what-is-it/" rel="canonical"/><meta name="next-head-count" content="16"/><link rel="preload" href="/_next/static/media/a34f9d1faa5f3315-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><link rel="preload" href="/_next/static/media/2d141e1a38819612-s.p.woff2" as="font" type="font/woff2" crossorigin="anonymous" data-next-font="size-adjust"/><link rel="preload" href="/_next/static/css/5a4aac22989312df.css" as="style"/><link rel="stylesheet" href="/_next/static/css/5a4aac22989312df.css" data-n-g=""/><link rel="preload" href="/_next/static/css/c609e7b393629430.css" as="style"/><link rel="stylesheet" href="/_next/static/css/c609e7b393629430.css" data-n-p=""/><link rel="preload" href="/_next/static/css/1ca77b1ad9949237.css" as="style"/><link rel="stylesheet" href="/_next/static/css/1ca77b1ad9949237.css" data-n-p=""/><link rel="preload" href="/_next/static/css/4ad6f1eee4386756.css" as="style"/><link rel="stylesheet" href="/_next/static/css/4ad6f1eee4386756.css" data-n-p=""/><noscript data-n-css=""></noscript><script defer="" nomodule="" src="/_next/static/chunks/polyfills-42372ed130431b0a.js"></script><script src="/_next/static/chunks/webpack-7ff391b0025b4fe3.js" defer=""></script><script src="/_next/static/chunks/framework-2de211cc2591dcf8.js" defer=""></script><script src="/_next/static/chunks/main-387e329be5509820.js" defer=""></script><script src="/_next/static/chunks/pages/_app-1496642abd45d16f.js" defer=""></script><script src="/_next/static/chunks/675-283f369b69dc812e.js" defer=""></script><script src="/_next/static/chunks/395-a7842d01211b5e87.js" defer=""></script><script src="/_next/static/chunks/278-6df85a5cbf7eb789.js" defer=""></script><script src="/_next/static/chunks/830-01fd7ba2b0b77b8e.js" defer=""></script><script src="/_next/static/chunks/102-27040e86297157d6.js" defer=""></script><script 
src="/_next/static/chunks/854-ca4cfc03ee0da0c6.js" defer=""></script><script src="/_next/static/chunks/367-337657c830ee0244.js" defer=""></script><script src="/_next/static/chunks/51-873605722a81cdf5.js" defer=""></script><script src="/_next/static/chunks/pages/blog/%5Bslug%5D-574c558d3f8ed907.js" defer=""></script><script src="/_next/static/RHFDf2FUsZGq9xdClkNw2/_buildManifest.js" defer=""></script><script src="/_next/static/RHFDf2FUsZGq9xdClkNw2/_ssgManifest.js" defer=""></script></head><body><div id="__next"><style data-emotion="css-global 0"></style><div class="__variable_375d66 __variable_f77ac8 container"><div class="blog"><header class="HeaderBlog_headerContainer__n3f6s full-width"><div class="container"><div class="HeaderBlog_header__CTV5V"><div class="HeaderBlog_logo__kbnMY"><a href="/en/blog/"><img alt="Scaleway Blog" loading="lazy" width="240" height="40" decoding="async" data-nimg="1" style="color:transparent" srcSet="/_next/static/media/logo-blog.49246fc4.svg 1x, /_next/static/media/logo-blog.49246fc4.svg 2x" src="/_next/static/media/logo-blog.49246fc4.svg"/></a><a href="#main" class="SkipLink_link__wUma3">Skip to main content</a><a href="#footer" class="SkipLink_link__wUma3">Skip to footer section</a><button class="HeaderBlog_menuButton__PP1O7" type="button"><style data-emotion="css 3sqif5">.css-3sqif5{vertical-align:middle;fill:currentColor;height:1em;width:1em;min-width:1em;min-height:1em;}.css-3sqif5 .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-3sqif5 e1gt4cfo0"><path fill-rule="evenodd" d="M2 4.75A.75.75 0 0 1 2.75 4h14.5a.75.75 0 0 1 0 1.5H2.75A.75.75 0 0 1 2 4.75M2 10a.75.75 0 0 1 .75-.75h14.5a.75.75 0 0 1 0 1.5H2.75A.75.75 0 0 1 2 10m0 5.25a.75.75 0 0 1 .75-.75h14.5a.75.75 0 0 1 0 1.5H2.75a.75.75 0 0 1-.75-.75" clip-rule="evenodd"></path></svg></button></div><nav class="HeaderBlog_topNav__cNrI_ font-body-small-regular"><ul class="HeaderBlog_links__1jfH4"><li><a 
href="/en/blog/incidents/">Incidents</a></li><li><a href="https://www.scaleway.com/en/docs/">Docs</a></li><li><a href="https://www.scaleway.com/en/contact/">Contact</a></li></ul><ul class="HeaderBlog_language__IixQV"><li><span class="sr-only">English</span><span>en</span></li></ul></nav><nav class="HeaderBlog_bottomNav__wIZob"><a class="cta-primary cta-size-small" href="/en/">Discover Scaleway</a><div class="HeaderBlog_socials__eZU_7"><a href="https://x.com/Scaleway/"><style data-emotion="css 1fr75dr">.css-1fr75dr{vertical-align:middle;fill:currentColor;height:20px;width:20px;min-width:20px;min-height:20px;}.css-1fr75dr .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-1fr75dr e1gt4cfo0"><path d="M15.203 1.875h2.757l-6.023 6.883 7.085 9.367h-5.547l-4.345-5.68-4.972 5.68H1.4l6.442-7.363-6.797-8.887h5.688l3.928 5.193zm-.967 14.6h1.527L5.903 3.438H4.264z"></path></svg><span class="sr-only">X</span></a><a href="https://slack.scaleway.com/"><style data-emotion="css 1fr75dr">.css-1fr75dr{vertical-align:middle;fill:currentColor;height:20px;width:20px;min-width:20px;min-height:20px;}.css-1fr75dr .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-1fr75dr e1gt4cfo0"><path fill-rule="evenodd" d="M6.056 3.419a1.75 1.75 0 0 0 1.75 1.751H9.39a.167.167 0 0 0 .167-.166V3.419a1.75 1.75 0 1 0-3.501 0m3.5 4.392a1.75 1.75 0 0 0-1.75-1.751H3.417a1.75 1.75 0 0 0-1.75 1.751 1.75 1.75 0 0 0 1.75 1.752h4.39a1.75 1.75 0 0 0 1.75-1.752m-6.123 6.142a1.75 1.75 0 0 0 1.75-1.752v-1.585a.167.167 0 0 0-.167-.166H3.433a1.75 1.75 0 0 0-1.75 1.751 1.75 1.75 0 0 0 1.75 1.752m4.376-3.503a1.75 1.75 0 0 0-1.75 1.751v4.38a1.75 1.75 0 1 0 3.5 0V12.2a1.75 1.75 0 0 0-1.75-1.751m7.01-2.639a1.75 1.75 0 1 1 3.501 0 1.75 1.75 0 0 1-1.75 1.752h-1.584a.167.167 0 0 1-.167-.167zm-.875 0a1.75 1.75 0 1 1-3.5 0V3.42a1.75 1.75 0 1 1 3.5 0zm0 8.77a1.75 1.75 0 0 
0-1.75-1.752H10.61a.167.167 0 0 0-.167.167v1.585a1.75 1.75 0 1 0 3.501 0m-3.5-4.38a1.75 1.75 0 0 0 1.75 1.752h4.39a1.75 1.75 0 0 0 1.75-1.752 1.75 1.75 0 0 0-1.75-1.751h-4.39a1.75 1.75 0 0 0-1.75 1.751" clip-rule="evenodd"></path></svg><span class="sr-only">Slack</span></a><a href="/en/blog/rss.xml"><style data-emotion="css 1fr75dr">.css-1fr75dr{vertical-align:middle;fill:currentColor;height:20px;width:20px;min-width:20px;min-height:20px;}.css-1fr75dr .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-1fr75dr e1gt4cfo0"><path d="M3.75 3a.75.75 0 0 0-.75.75v.5c0 .414.336.75.75.75H4c6.075 0 11 4.925 11 11v.25c0 .414.336.75.75.75h.5a.75.75 0 0 0 .75-.75V16C17 8.82 11.18 3 4 3z"></path><path d="M3 8.75A.75.75 0 0 1 3.75 8H4a8 8 0 0 1 8 8v.25a.75.75 0 0 1-.75.75h-.5a.75.75 0 0 1-.75-.75V16a6 6 0 0 0-6-6h-.25A.75.75 0 0 1 3 9.25zM7 15a2 2 0 1 1-4 0 2 2 0 0 1 4 0"></path></svg><span class="sr-only">RSS</span></a></div></nav></div></div></header><main class="main" id="main"><nav class="TopBar_navBar__jEc9M"><a class="TopBar_link__c_MXa" href="/en/blog/"><svg width="16" height="16" aria-hidden="true"><use xlink:href="/svg/sprite.svg#all-items"></use></svg>all</a><a class="TopBar_link__c_MXa TopBar_isActive__bqGIp" href="/en/blog/build/">build</a><a class="TopBar_link__c_MXa" href="/en/blog/deploy/">deploy</a><a class="TopBar_link__c_MXa" href="/en/blog/scale/">scale</a></nav><section class="Hero_wrapper__l0O5u"><div class="Hero_content__WhyjP"><h1 class="font-heading-secondary-title Hero_title__64Z8x">Load Balancer at Scaleway</h1><div class="Hero_footer__KFZYB"><div class="blogCategory"><a class="cta-inline cta-size-big" href="/en/blog/build/">Build</a></div><span class="blogDot Hero_dot__OjyBJ" aria-hidden="true">•</span><address class="blogAuthor"><a class="cta-inline cta-size-big" href="/en/blog/author/remy-leone/">Rémy Léone</a></address><span class="blogDot Hero_dot__OjyBJ" 
aria-hidden="true">•</span><div><time dateTime="2020-10-19">19/10/20</time><span class="blogDot" aria-hidden="true">•</span><span>3 min read</span></div></div></div><div class="Hero_imageWrapper__tMCgD"><img alt="" loading="lazy" width="512" height="320" decoding="async" data-nimg="1" style="color:transparent" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/GA_Product_Load_Balancers_Illustration_Blog_4d5ff54e24.webp 1x, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/GA_Product_Load_Balancers_Illustration_Blog_4d5ff54e24.webp 2x" src="https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/GA_Product_Load_Balancers_Illustration_Blog_4d5ff54e24.webp"/></div></section><div class="PostPage_post__sMook"><div class="TableOfContent_tableOfContent__e65l6"><nav aria-describedby="tableofcontent-main"><h2 class="font-body-small-bold">Table of contents</h2><ul><li class="TableOfContent_selected__VR351"><a href="#main">Introduction</a></li><li class=""><a href="#what-is-a-load-balancer">What Is a Load Balancer?</a></li><li class=""><a href="#why-use-a-load-balancer">Why Use a Load Balancer?</a></li><li class=""><a href="#how-does-a-load-balancer-work">How Does a Load Balancer Work?</a></li><li class=""><a href="#conclusion">Conclusion</a></li></ul></nav></div><div class="BlogRichText_blogRichText__zXeTD"><p>Server applications failures and lack of scalability can cause real problems for a server. When server applications fail, a node is unavailable to answer requests. In that case, if there is no redundancy, the service is unavailable as long as the server is not back in service. In case of lack of scalability, the load becomes larger than what the node can handle whilst keeping a good latency. In some cases, if the load is too important, the node can become so saturated that it times out. Both of those problems can happen on any website and can cause a lot of trouble. 
To solve both those issues, Load Balancer provides a significant help.</p> <h2 id="what-is-a-load-balancer">What Is a Load Balancer?</h2> <p>A load balancer is a system that is designed to spread an incoming flow of traffic across multiple other nodes. The incoming flow of traffic is coming from the Internet through a <em>frontend</em> and is spread on several machines called <em>backends</em>.</p> <p>When a user arrives with a session, it is routed at an infra level to the load balancer instances that are configured for this IP and port.<br/> The session is then transparently redirected to a backend according to the configured algorithm (roundrobin, leastconn) and the replies are then redirected to the user.</p> <p>Backends can be any IP in Scaleway, Online by Scaleway, Iliad Datacenter. In the case of private IP, they need to be available on the same region.<br/> Scaleway load balancers are also quickly up and running, usually in less than a second.</p> <h2 id="why-use-a-load-balancer">Why Use a Load Balancer?</h2> <p>Load Balancer provides two main features: horizontal scaling and high-availability.</p> <p>In the case of horizontal scaling, it means that an administrator can add or remove servers to a pool of backend servers to spread the load on more or less servers. For instance, in case of a peak of traffic, several servers running the application can be added to the pool to reduce the average load on each of them. Once the peak is over, the administrator can withdraw servers from the pool. This elasticity is particularly well suited for the cloud.</p> <p>Another feature Load Balancer brings to the table is the high-availability. A load balancer is constantly checking whether a node is functional or not. In case it is not, the traffic is routed to a valid backend server. 
By doing so, the application is still available even if some backend nodes are not up.</p> <h2 id="how-does-a-load-balancer-work">How Does a Load Balancer Work?</h2> <p>The reliability of a load balancer is ensured using two instances configured in an active/passive setup. If an active instance of the load balancer does not answer to a healthcheck (from our internal monitoring system), the passive is turned into the active load balancer to receive incoming traffic. All of this is performed transparently for the user. It typically happens when a hypervisor breaks and instances are migrated to a different hypervisor. Traffic is immediately routed to the passive instance, the active load balancer that failed is removed and a new instance is spawned to get a new passive.</p> <p>Load balancers update the networking configuration to route all the traffic to redundant load balancer instances. This feature couldn&#x27;t exist only with instances. Changes in the networking configuration are required to achieve this highly available architecture for load balancers.</p> <h3 id="tls-passthrough">TLS Passthrough</h3> <p>Many servers today use HTTPS which is built on top of TLS/SSL to encrypt traffic and ensure data integrity with electronic signature. TLS can be used directly with our load balancers using TCP mode. This technique is known as TLS passthrough. In this mode Load Balancer will forward the TLS traffic to the backend servers that will handle the TLS/SSL termination. Customers don’t need to configure anything special on the Load Balancer to use this mode. TLS passthrough also increases security as the certificates and private keys stay under full control of the customers and are never shared with Scaleway.</p> <h3 id="backend-monitoring-with-healthchecks">Backend Monitoring with Healthchecks</h3> <p>Healthchecks ensure reliability for the load-balancer because they guarantee that no traffic will be forwarded to an unhealthy backend. 
Healthchecks are performed by the load balancer on the backend servers to check whether or not they are available to receive traffic. Load-balancers withdraw backend servers that are not passing their configured healthchecks. There is a wide variety of healthchecks (LDAP, HTTP, TCP, REDIS, MYSQL…) available to cover as many types of backends as possible.</p> <h3 id="scalability-by-horizontal-scaling">Scalability by Horizontal Scaling</h3> <p>Load-balancers provide horizontal scalability to a service and can ensure that enough backend servers are ready to ensure the elasticity of the demand of a service. When the load on a service increases, additional instances can be added to the backend servers to spread the load. When they are no longer required, the instances can be turned down and withdrawn from the pool.</p> <h2 id="conclusion">Conclusion</h2> <p>Load Balancer is an essential part to build highly available applications.<br/> At Scaleway, we designed our products to be fast, reliable and easy to get started. 
You can order those load balancers on our <a href="https://console.scaleway.com/load-balancer/load-balancers">website</a>, through Ansible and get started in seconds!</p></div></div><section class="ExtraPosts_container__0fO7Q"><h2 class="font-heading-highlighted ExtraPosts_title__hqJSu">Recommended articles</h2><div class="ExtraPosts_articles__4oTri"><article class="RecommendedArticleCard_articleCard__L95dV"><div class="blogImage RecommendedArticleCard_img__lFn5u"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04.webp"/></div><div 
class="RecommendedArticleCard_contentContainer__83Lgz"><h2 class="font-heading-title blogArticleTitle RecommendedArticleCard_heading___OIAO"><a class="breakout-link" href="/en/blog/how-to-deploy-and-distribute-the-workload-on-a-multi-cloud-kubernetes-environment/">How to deploy and distribute the workload on a multi-cloud Kubernetes environment</a></h2><div class="RecommendedArticleCard_excerpt__Gsphk" role="doc-subtitle"><div class="RichText_scwRichtextStyle__xoOiq"><p class="font-body-regular">This article will guide you through the best practices to deploy and distribute the workload on a multi-cloud Kubernetes environment on Scaleway&#x27;s Kosmos.</p></div></div><div class="RecommendedArticleCard_footer__avFIY"><div class="blogCategory"><a href="/en/blog/deploy/">Deploy</a></div><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><address class="blogAuthor"><a href="/en/blog/author/emmanuelle-demompion/">Emmanuelle Demompion</a></address><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><div><time dateTime="2022-07-21">21/07/22</time><span class="blogDot" aria-hidden="true">•</span><span>31 min read</span></div></div><div aria-label="Tags list. Click to choose as filter." 
class="Tags_tags__UDbwl"><span class="Tag_tag__JS3kY">Kubernetes</span><span class="Tag_tag__JS3kY">Multi-cloud</span><span class="Tag_tag__JS3kY">Quickstart</span><span class="Tag_tag__JS3kY">Introduction</span><span class="Tag_tag__JS3kY">Discover</span></div></div></article></div><div class="ExtraPosts_articles__4oTri"><article class="RecommendedArticleCard_articleCard__L95dV"><div class="blogImage RecommendedArticleCard_img__lFn5u"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Digital_Services_Kapsule_Illustration_Blog_8f87b431c2.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Digital_Services_Kapsule_Illustration_Blog_8f87b431c2.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Digital_Services_Kapsule_Illustration_Blog_8f87b431c2.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Digital_Services_Kapsule_Illustration_Blog_8f87b431c2.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Digital_Services_Kapsule_Illustration_Blog_8f87b431c2.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Digital_Services_Kapsule_Illustration_Blog_8f87b431c2.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Digital_Services_Kapsule_Illustration_Blog_8f87b431c2.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Digital_Services_Kapsule_Illustration_Blog_8f87b431c2.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Digital_Services_Kapsule_Illustration_Blog_8f87b431c2.webp"/></div><div class="RecommendedArticleCard_contentContainer__83Lgz"><h2 
class="font-heading-title blogArticleTitle RecommendedArticleCard_heading___OIAO"><a class="breakout-link" href="/en/blog/understanding-kubernetes-autoscaling/">Understanding Kubernetes Autoscaling</a></h2><div class="RecommendedArticleCard_excerpt__Gsphk" role="doc-subtitle"><div class="RichText_scwRichtextStyle__xoOiq"><p class="font-body-regular">Kubernetes provides a series of features to ensure your clusters have the right size to handle any load. Let&#x27;s look into the different auto-scaling tools and learn the difference between them.</p></div></div><div class="RecommendedArticleCard_footer__avFIY"><div class="blogCategory"><a href="/en/blog/scale/">Scale</a></div><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><address class="blogAuthor"><a href="/en/blog/author/benedikt-rollik/">Benedikt Rollik</a></address><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><div><time dateTime="2022-08-01">01/08/22</time><span class="blogDot" aria-hidden="true">•</span><span>8 min read</span></div></div><div aria-label="Tags list. Click to choose as filter." 
class="Tags_tags__UDbwl"><span class="Tag_tag__JS3kY">Kubernetes</span><span class="Tag_tag__JS3kY">Scaling</span></div></div></article></div><div class="ExtraPosts_articles__4oTri"><article class="RecommendedArticleCard_articleCard__L95dV"><div class="blogImage RecommendedArticleCard_img__lFn5u"><img alt="" loading="lazy" decoding="async" data-nimg="fill" style="position:absolute;height:100%;width:100%;left:0;top:0;right:0;bottom:0;color:transparent" sizes="100vw" srcSet="https://scaleway.com/cdn-cgi/image/width=640/https://www-uploads.scaleway.com/Use_Cases_Illustration_Blog_f066679bf7.webp 640w, https://scaleway.com/cdn-cgi/image/width=750/https://www-uploads.scaleway.com/Use_Cases_Illustration_Blog_f066679bf7.webp 750w, https://scaleway.com/cdn-cgi/image/width=828/https://www-uploads.scaleway.com/Use_Cases_Illustration_Blog_f066679bf7.webp 828w, https://scaleway.com/cdn-cgi/image/width=1080/https://www-uploads.scaleway.com/Use_Cases_Illustration_Blog_f066679bf7.webp 1080w, https://scaleway.com/cdn-cgi/image/width=1200/https://www-uploads.scaleway.com/Use_Cases_Illustration_Blog_f066679bf7.webp 1200w, https://scaleway.com/cdn-cgi/image/width=1920/https://www-uploads.scaleway.com/Use_Cases_Illustration_Blog_f066679bf7.webp 1920w, https://scaleway.com/cdn-cgi/image/width=2048/https://www-uploads.scaleway.com/Use_Cases_Illustration_Blog_f066679bf7.webp 2048w, https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Use_Cases_Illustration_Blog_f066679bf7.webp 3840w" src="https://scaleway.com/cdn-cgi/image/width=3840/https://www-uploads.scaleway.com/Use_Cases_Illustration_Blog_f066679bf7.webp"/></div><div class="RecommendedArticleCard_contentContainer__83Lgz"><h2 class="font-heading-title blogArticleTitle RecommendedArticleCard_heading___OIAO"><a class="breakout-link" href="/en/blog/understand-pam/">Understanding PAM - Pluggable Authentication Modules</a></h2><div class="RecommendedArticleCard_excerpt__Gsphk" role="doc-subtitle"><div 
class="RichText_scwRichtextStyle__xoOiq"><p class="font-body-regular">PAM is a Linux component to authenticate users. You don&#x27;t know how it works, but at the bottom of the todo list you made years ago, it says &quot;understand how PAM works&quot;. So here you go!</p></div></div><div class="RecommendedArticleCard_footer__avFIY"><div class="blogCategory"><a href="/en/blog/build/">Build</a></div><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><address class="blogAuthor"><a href="/en/blog/author/julien-castets/">Julien Castets</a></address><span class="blogDot RecommendedArticleCard_dot__4FuRq" aria-hidden="true">•</span><div><time dateTime="2022-02-04">04/02/22</time><span class="blogDot" aria-hidden="true">•</span><span>6 min read</span></div></div><div aria-label="Tags list. Click to choose as filter." class="Tags_tags__UDbwl"><span class="Tag_tag__JS3kY">Open-source</span><span class="Tag_tag__JS3kY">Infrastructure</span><span class="Tag_tag__JS3kY">Introduction</span></div></div></article></div></section></main><footer id="footer" class="Footer_footer__dXXGl full-width"><div class="container"><div class="Footer_categories__GKzcP"><div><div class="Footer_title__SsUPi">Products</div><ul><li><a class="cta-inline cta-size-big" href="/en/all-products/">All Products</a></li><li><a class="cta-inline cta-size-big" href="/en/betas/">Betas</a></li><li><a class="cta-inline cta-size-big" href="/en/bare-metal/">Bare Metal</a></li><li><a class="cta-inline cta-size-big" href="/en/dedibox/">Dedibox</a></li><li><a class="cta-inline cta-size-big" href="/en/elastic-metal/">Elastic Metal</a></li><li><a class="cta-inline cta-size-big" href="/en/virtual-instances/">Compute Instances</a></li><li><a class="cta-inline cta-size-big" href="/en/gpu-instances/">GPU</a></li><li><a class="cta-inline cta-size-big" href="/en/containers/">Containers</a></li><li><a class="cta-inline cta-size-big" href="/en/object-storage/">Object Storage</a></li><li><a 
class="cta-inline cta-size-big" href="/en/block-storage/">Block Storage</a></li></ul></div><div><div class="Footer_title__SsUPi">Resources</div><ul><li><a href="https://www.scaleway.com/en/docs/" class="cta-inline cta-size-big">Documentation</a></li><li><a href="https://www.scaleway.com/en/docs/changelog/" class="cta-inline cta-size-big">Changelog</a></li><li><a class="cta-inline cta-size-big" href="https://www.scaleway.com/en/blog/">Blog</a></li><li><a href="https://feature-request.scaleway.com/" class="cta-inline cta-size-big">Feature Requests</a></li><li><a href="https://slack.scaleway.com/" class="cta-inline cta-size-big">Slack Community</a></li></ul></div><div><div class="Footer_title__SsUPi">Contact</div><ul><li><a href="https://console.scaleway.com/support/create/" class="cta-inline cta-size-big">Create a ticket</a></li><li><a href="https://console.scaleway.com/support/abuses/create/" class="cta-inline cta-size-big">Report Abuse</a></li><li><a href="https://status.scaleway.com/" class="cta-inline cta-size-big">Status</a></li><li><a href="https://console.online.net/fr/login" class="cta-inline cta-size-big">Dedibox Console online.net</a></li><li><a class="cta-inline cta-size-big" href="/en/assistance/">Support plans</a></li><li><a href="https://ultraviolet.scaleway.com/6dd9b5c45/p/62b4e2-ultraviolet" class="cta-inline cta-size-big">Brand resources</a></li></ul></div><div><div class="Footer_title__SsUPi">Company</div><ul><li><a class="cta-inline cta-size-big" href="/en/about-us/">About us</a></li><li><a class="cta-inline cta-size-big" href="/en/events/">Events</a></li><li><a href="https://www.scaleway.com/en/marketplace/" class="cta-inline cta-size-big">Marketplace</a></li><li><a class="cta-inline cta-size-big" href="/en/environmental-leadership/">Environment </a></li><li><a class="cta-inline cta-size-big" href="/en/social-responsibility/">Social Responsibility</a></li><li><a class="cta-inline cta-size-big" 
href="/en/security-and-resilience/">Security</a></li><li><a class="cta-inline cta-size-big" href="/en/shared-responsibility-model/">Shared Responsibility Model</a></li><li><a class="cta-inline cta-size-big" href="/en/news/">News</a></li><li><a class="cta-inline cta-size-big" href="/en/careers/">Careers</a></li><li><a class="cta-inline cta-size-big" href="/en/scaleway-learning/">Scaleway Learning</a></li><li><a class="cta-inline cta-size-big" href="/en/customer-testimonials/">Client Success Stories</a></li><li><style data-emotion="css je8g23">.css-je8g23{pointer-events:none;}</style><style data-emotion="css 1ra7yv3">.css-1ra7yv3{background-color:transparent;border:none;padding:0;color:#34a8ff;-webkit-text-decoration:underline;text-decoration:underline;text-decoration-thickness:1px;text-underline-offset:2px;text-decoration-color:transparent;display:-webkit-inline-box;display:-webkit-inline-flex;display:-ms-inline-flexbox;display:inline-flex;-webkit-flex-direction:row;-ms-flex-direction:row;flex-direction:row;-webkit-align-items:center;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-transition:text-decoration-color 250ms ease-out;transition:text-decoration-color 250ms ease-out;gap:8px;position:relative;cursor:pointer;width:-webkit-fit-content;width:-moz-fit-content;width:fit-content;font-size:16px;font-family:Inter,Asap,sans-serif;font-weight:500;letter-spacing:0;line-height:24px;paragraph-spacing:0;text-case:none;}.css-1ra7yv3 .e1afnb7a2{-webkit-transition:-webkit-transform 250ms ease-out;transition:transform 250ms ease-out;}.css-1ra7yv3 >*{pointer-events:none;}.css-1ra7yv3:hover,.css-1ra7yv3:focus{outline:none;-webkit-text-decoration:underline;text-decoration:underline;text-decoration-thickness:1px;color:#6fc2ff;text-decoration-color:#6fc2ff;}.css-1ra7yv3:hover .e1afnb7a2,.css-1ra7yv3:focus .e1afnb7a2{-webkit-transform:translate(-4px, 0);-moz-transform:translate(-4px, 0);-ms-transform:translate(-4px, 0);transform:translate(-4px, 
0);}.css-1ra7yv3[data-variant='inline']{-webkit-text-decoration:underline;text-decoration:underline;text-decoration-thickness:1px;}.css-1ra7yv3:hover::after,.css-1ra7yv3:focus::after{background-color:#34a8ff;}.css-1ra7yv3:active{text-decoration-thickness:2px;}</style><a href="https://labs.scaleway.com/en/" target="_blank" rel="noopener noreferrer" class="css-1ra7yv3 e1afnb7a0" variant="bodyStrong" data-variant="standalone">Labs<style data-emotion="css ajnoa3">.css-ajnoa3{display:-webkit-inline-box;display:-webkit-inline-flex;display:-ms-inline-flexbox;display:inline-flex;padding-bottom:4px;}</style><span class="css-ajnoa3 e1afnb7a1"><style data-emotion="css 1udvifh">.css-1udvifh{vertical-align:middle;fill:currentColor;height:14px;width:14px;min-width:14px;min-height:14px;}.css-1udvifh .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="e1afnb7a2 css-1udvifh e1gt4cfo0"><path fill-rule="evenodd" d="M4.25 6.5a.75.75 0 0 0-.75.75v8.5c0 .414.336.75.75.75h8.5a.75.75 0 0 0 .75-.75v-4a.75.75 0 0 1 1.5 0v4A2.25 2.25 0 0 1 12.75 18h-8.5A2.25 2.25 0 0 1 2 15.75v-8.5A2.25 2.25 0 0 1 4.25 5h5a.75.75 0 0 1 0 1.5z" clip-rule="evenodd"></path><path fill-rule="evenodd" d="M6.194 13.753a.75.75 0 0 0 1.06.053L16.5 5.44v2.81a.75.75 0 0 0 1.5 0v-4.5a.75.75 0 0 0-.75-.75h-4.5a.75.75 0 0 0 0 1.5h2.553l-9.056 8.194a.75.75 0 0 0-.053 1.06" clip-rule="evenodd"></path></svg></span></a></li></ul></div></div><div class="Footer_socialsContainer__FuhFv"><a href="/en/"><img alt="Scaleway" loading="lazy" width="166" height="32" decoding="async" data-nimg="1" style="color:transparent" srcSet="/_next/static/media/logo.7e2996cb.svg 1x, /_next/static/media/logo.7e2996cb.svg 2x" src="/_next/static/media/logo.7e2996cb.svg"/></a><div><p>Follow us</p><a class="Footer_socialLink__9UK2B" href="https://x.com/Scaleway/"><style data-emotion="css 
1fr75dr">.css-1fr75dr{vertical-align:middle;fill:currentColor;height:20px;width:20px;min-width:20px;min-height:20px;}.css-1fr75dr .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-1fr75dr e1gt4cfo0"><path d="M15.203 1.875h2.757l-6.023 6.883 7.085 9.367h-5.547l-4.345-5.68-4.972 5.68H1.4l6.442-7.363-6.797-8.887h5.688l3.928 5.193zm-.967 14.6h1.527L5.903 3.438H4.264z"></path></svg><span class="sr-only">x</span></a><a class="Footer_socialLink__9UK2B" href="https://slack.scaleway.com/"><style data-emotion="css 1fr75dr">.css-1fr75dr{vertical-align:middle;fill:currentColor;height:20px;width:20px;min-width:20px;min-height:20px;}.css-1fr75dr .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-1fr75dr e1gt4cfo0"><path fill-rule="evenodd" d="M6.056 3.419a1.75 1.75 0 0 0 1.75 1.751H9.39a.167.167 0 0 0 .167-.166V3.419a1.75 1.75 0 1 0-3.501 0m3.5 4.392a1.75 1.75 0 0 0-1.75-1.751H3.417a1.75 1.75 0 0 0-1.75 1.751 1.75 1.75 0 0 0 1.75 1.752h4.39a1.75 1.75 0 0 0 1.75-1.752m-6.123 6.142a1.75 1.75 0 0 0 1.75-1.752v-1.585a.167.167 0 0 0-.167-.166H3.433a1.75 1.75 0 0 0-1.75 1.751 1.75 1.75 0 0 0 1.75 1.752m4.376-3.503a1.75 1.75 0 0 0-1.75 1.751v4.38a1.75 1.75 0 1 0 3.5 0V12.2a1.75 1.75 0 0 0-1.75-1.751m7.01-2.639a1.75 1.75 0 1 1 3.501 0 1.75 1.75 0 0 1-1.75 1.752h-1.584a.167.167 0 0 1-.167-.167zm-.875 0a1.75 1.75 0 1 1-3.5 0V3.42a1.75 1.75 0 1 1 3.5 0zm0 8.77a1.75 1.75 0 0 0-1.75-1.752H10.61a.167.167 0 0 0-.167.167v1.585a1.75 1.75 0 1 0 3.501 0m-3.5-4.38a1.75 1.75 0 0 0 1.75 1.752h4.39a1.75 1.75 0 0 0 1.75-1.752 1.75 1.75 0 0 0-1.75-1.751h-4.39a1.75 1.75 0 0 0-1.75 1.751" clip-rule="evenodd"></path></svg><span class="sr-only">slack</span></a><a class="Footer_socialLink__9UK2B" href="https://www.instagram.com/scaleway/"><style data-emotion="css 
1fr75dr">.css-1fr75dr{vertical-align:middle;fill:currentColor;height:20px;width:20px;min-width:20px;min-height:20px;}.css-1fr75dr .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-1fr75dr e1gt4cfo0"><path fill-rule="evenodd" d="M1.667 9.719c0-2.848 0-4.272.563-5.356A5 5 0 0 1 4.362 2.23c1.084-.563 2.507-.563 5.355-.563h.566c2.848 0 4.272 0 5.355.563a5 5 0 0 1 2.132 2.133c.563 1.084.563 2.508.563 5.356v.566c0 2.848 0 4.272-.562 5.356a5 5 0 0 1-2.133 2.133c-1.083.563-2.507.563-5.355.563h-.566c-2.848 0-4.271 0-5.355-.563a5 5 0 0 1-2.132-2.133c-.563-1.084-.563-2.508-.563-5.356zm3.67.284a4.668 4.668 0 1 0 9.336 0 4.668 4.668 0 0 0-9.336 0m7.697 0a3.03 3.03 0 1 1-6.06 0 3.03 3.03 0 1 1 6.06 0m2.912-4.854a1.09 1.09 0 1 1-2.18 0 1.09 1.09 0 0 1 2.18 0" clip-rule="evenodd"></path></svg><span class="sr-only">instagram</span></a><a class="Footer_socialLink__9UK2B" href="https://www.linkedin.com/company/scaleway/"><style data-emotion="css 1fr75dr">.css-1fr75dr{vertical-align:middle;fill:currentColor;height:20px;width:20px;min-width:20px;min-height:20px;}.css-1fr75dr .fillStroke{stroke:currentColor;fill:none;}</style><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" class="css-1fr75dr e1gt4cfo0"><path fill-rule="evenodd" d="M18.332 18.166a.167.167 0 0 1-.167.167h-3.09a.167.167 0 0 1-.167-.167V12.5c0-1.599-.608-2.492-1.874-2.492-1.377 0-2.096.93-2.096 2.492v5.666a.167.167 0 0 1-.167.167H7.804a.167.167 0 0 1-.166-.167V7.39c0-.092.074-.167.166-.167h2.967c.092 0 .167.075.167.167v.67c0 .174.275.26.39.131a3.88 3.88 0 0 1 2.96-1.307c2.357 0 4.044 1.439 4.044 4.415zM3.7 5.767a2.043 2.043 0 0 1-2.035-2.05c0-1.132.91-2.05 2.035-2.05s2.034.918 2.034 2.05-.91 2.05-2.034 2.05m-1.704 12.4c0 .091.074.166.166.166H5.27a.167.167 0 0 0 .167-.167V7.39a.167.167 0 0 0-.167-.167H2.163a.167.167 0 0 0-.166.167z" clip-rule="evenodd"></path></svg><span class="sr-only">linkedIn</span></a></div></div><ul 
class="Footer_sublinks__Mjpw0"><li><a href="/en/contracts/">Contracts</a></li><li><a href="/en/legal-notice/">Legal Notice</a></li><li><a href="/en/privacy-policy/">Privacy Policy</a></li><li><a href="/en/cookie/">Cookie</a></li><li><a href="https://security.scaleway.com">Security Measures</a></li></ul><span class="Footer_brand__qv1gM">© 1999-<!-- -->2024<!-- --> - Scaleway SAS</span></div></footer></div><div id="portal"></div></div></div><script id="__NEXT_DATA__" type="application/json">{"props":{"pageProps":{"post":{"id":156,"attributes":{"title":"load-balancer-scaleway-what-is-it","path":"load-balancer-scaleway-what-is-it/","description":"Server applications failures and lack of scalability can cause real problems for a server. When server applications fail, a node is unavailable to answer requests. In that case, if there is no redundancy, the service is unavailable as long as the server is not back in service. In case of lack of scalability, the load becomes larger than what the node can handle whilst keeping a good latency. In some cases, if the load is too important, the node can become so saturated that it times out. Both of those problems can happen on any website and can cause a lot of trouble. To solve both those issues, Load Balancer provides a significant help.\n\n## What Is a Load Balancer?\n\nA load balancer is a system that is designed to spread an incoming flow of traffic across multiple other nodes. The incoming flow of traffic is coming from the Internet through a _frontend_ and is spread on several machines called _backends_.\n\nWhen a user arrives with a session, it is routed at an infra level to the load balancer instances that are configured for this IP and port. \nThe session is then transparently redirected to a backend according to the configured algorithm (roundrobin, leastconn) and the replies are then redirected to the user.\n\nBackends can be any IP in Scaleway, Online by Scaleway, Iliad Datacenter. 
In the case of private IP, they need to be available on the same region. \nScaleway load balancers are also quickly up and running, usually in less than a second.\n\n## Why Use a Load Balancer?\n\nLoad Balancer provides two main features: horizontal scaling and high-availability.\n\nIn the case of horizontal scaling, it means that an administrator can add or remove servers to a pool of backend servers to spread the load on more or less servers. For instance, in case of a peak of traffic, several servers running the application can be added to the pool to reduce the average load on each of them. Once the peak is over, the administator can withdraw servers from the pool. This elasticity is particulary well suited for the cloud.\n\nAnother feature Load Balancer brings to the table is the high-availability. A load balancer is constantly checking whether a node is functionnal or not. In case it is not, the traffic is routed to a valid backend server. By doing so, the application is still available even if some backend nodes are not up.\n\n## How Does a Load Balancer Work?\n\nThe reliability of a load balancer is ensured using two instances configured in an active/passive setup. If an active instance of the load balancer does not answer to an healthcheck (from our internal monitoring system), the passive is turned into the active load balancer to receive incoming traffic. All of this is performed transparenlty for the user. It typically happens when an hypervisor breaks and instances are migrated to a different hypervisor. Traffic is immediatly routed to the passive instance, the active load balancer that failed is removed and a new instance is spawned to get a new passive.\n\nLoad balancers update the networking configuration to route all the traffic to redundant load balancers instances. This features couldn't exist only with instances. 
Change in the networking configuration are required to achieve this highly available architecture for load balancers.\n\n### TLS Passthrough\n\nMany servers today use HTTPS which is built on top of TLS/SSL to encrypt traffic and ensure data integrity with electronic signature. TLS can be used directly with our load balancers using TCP mode. This technique is known as TLS passthrough. In this mode Load Balancer will forward the TLS traffic to the backend servers that will handle the TLS/SSL termination. Customers don’t need to configure anything special on the Load Balancer to use this mode. TLS passthrough also increases security as the certificates and private keys stay under full control of the customers and never shared with Scaleway.\n\n### Backend Monitoring with Healthchecks\n\nHealthchecks ensure reliability for the load-balancer because they guarantee that no traffic will be forwarded to an unhealthy backend. Healthchecks are performed by the load balancer on the backend servers to check whether or not they are available to receive traffic. Load-balancers withdraw backend servers that are not passing their configured healthchecks. There is a wide variety of healthchecks (LDAP, HTTP, TCP, REDIS, MYSQL…) available to cover as many types of backends as possible.\n\n### Scalability by Horizontal Scaling\n\nLoad-balancers provide horizontal scalability to a service and can ensure that enough backend servers are ready to ensure the elasticity of the demand of a service. When the load on a service increases, additionnal instances can be added to the backend servers to spread the load. When they are no longer required, the instances can be turned down and withdrawn from the pool.\n\n## Conclusion\n\nLoad Balancer is an essential part to build highly available applications. \nAt Scaleway, we designed our products to be fast, reliable and easy to get started. 
You can order those load balancers on our [website](https://console.scaleway.com/load-balancer/load-balancers), through Ansible and get started in seconds!","createdAt":"2023-01-18T14:15:17.236Z","updatedAt":"2023-02-08T15:51:51.140Z","publishedAt":"2023-01-18T14:19:05.832Z","locale":"en","tags":"Introduction\nLoad Balancer\n","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":3,"excerpt":"Server applications failures and lack of scalability can cause real problems for a server. When server applications fail, a node is unavailable to answer requests...","author":"Rémy Léone","h1":"Load Balancer at Scaleway","createdOn":"2020-10-19","image":{"data":{"id":1647,"attributes":{"name":"GAProduct-LoadBalancers-Illustration-Blog.webp","alternativeText":null,"caption":null,"width":1216,"height":752,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_GA_Product_Load_Balancers_Illustration_Blog_4d5ff54e24.webp","hash":"large_GA_Product_Load_Balancers_Illustration_Blog_4d5ff54e24","mime":"image/webp","name":"large_GAProduct-LoadBalancers-Illustration-Blog.webp","path":null,"size":"243.85","width":1000,"height":618},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_GA_Product_Load_Balancers_Illustration_Blog_4d5ff54e24.webp","hash":"small_GA_Product_Load_Balancers_Illustration_Blog_4d5ff54e24","mime":"image/webp","name":"small_GAProduct-LoadBalancers-Illustration-Blog.webp","path":null,"size":"92.98","width":500,"height":309},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_GA_Product_Load_Balancers_Illustration_Blog_4d5ff54e24.webp","hash":"medium_GA_Product_Load_Balancers_Illustration_Blog_4d5ff54e24","mime":"image/webp","name":"medium_GAProduct-LoadBalancers-Illustration-Blog.webp","path":null,"size":"165.81","width":750,"height":464},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_GA_Product_Load_Balancers_Illustration_Blog_4d5ff54e24.webp","hash":"thumbna
il_GA_Product_Load_Balancers_Illustration_Blog_4d5ff54e24","mime":"image/webp","name":"thumbnail_GAProduct-LoadBalancers-Illustration-Blog.webp","path":null,"size":"32.35","width":245,"height":152}},"hash":"GA_Product_Load_Balancers_Illustration_Blog_4d5ff54e24","ext":".webp","mime":"image/webp","size":316.91,"url":"https://www-uploads.scaleway.com/GA_Product_Load_Balancers_Illustration_Blog_4d5ff54e24.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2023-01-18T14:17:04.245Z","updatedAt":"2023-01-18T14:17:04.245Z"}}},"recommendedArticles":{"data":[{"id":185,"attributes":{"title":"how-to-deploy-and-distribute-the-workload-on-a-multi-cloud-kubernetes-environment","path":"how-to-deploy-and-distribute-the-workload-on-a-multi-cloud-kubernetes-environment/","description":"This article will guide you through the best practices to deploy and distribute the workload on a Kubernetes environment on [Scaleway's Kosmos](https://www.scaleway.com/fr/kubernetes-kosmos/).\n\n⚠️ **Warning reminder**\n\nThis article will balance between concept explanations and operations or commands that need to be performed by the reader.\n\nIf this icon (🔥) is present before an image, a command, or a file, you are required to perform an action.\n\nSo remember, when 🔥 is on, so are you!\n\n## Redundancy\n\n### 🔥 Labels\n\nFirst, we are going to start by listing your nodes, and more specifically their associated labels. 
The `kubectl get nodes --show-labels` command will perform this action for us.\n\n🔥 `kubectl get nodes --show-labels --no-headers | awk '{print \"NODE NAME: \"$1\",\"$6\"\\n\"}' | tr \",\" \"\\n\"`\n\n Output\n```js\nNODE NAME: scw-kosmos-kosmos-scw-09371579edf54552b0187a95\nbeta.kubernetes.io/arch=amd64\nbeta.kubernetes.io/instance-type=DEV1-M\nbeta.kubernetes.io/os=linux\nfailure-domain.beta.kubernetes.io/region=nl-ams\nfailure-domain.beta.kubernetes.io/zone=nl-ams-1\nk8s.scaleway.com/kapsule=b58ad1f6-2a4d-4c0b-8573-459fad62682f\nk8s.scaleway.com/managed=true\nk8s.scaleway.com/node=09371579-edf5-4552-b018-7a95e779b70e\nk8s.scaleway.com/pool-name=kosmos-scw\nk8s.scaleway.com/pool=313ccb19-0233-4dc9-b582-b1e687903b7a\nk8s.scaleway.com/runtime=containerd\nkubernetes.io/arch=amd64\nkubernetes.io/hostname=scw-kosmos-kosmos-scw-09371579edf54552b0187a95\nkubernetes.io/os=linux\nnode.kubernetes.io/instance-type=DEV1-M\ntopology.csi.scaleway.com/zone=nl-ams-1\ntopology.kubernetes.io/region=nl-ams\ntopology.kubernetes.io/zone=nl-ams-1\n \nNODE NAME: scw-kosmos-worldwide-5ecdb6d02cf84d63937af45a6\nbeta.kubernetes.io/arch=amd64\nbeta.kubernetes.io/os=linux\nk8s.scw.cloud/disable-lifecycle=true\nk8s.scw.cloud/node-public-ip=151.115.36.196\nkubernetes.io/arch=amd64\nkubernetes.io/hostname=scw-kosmos-worldwide-5ecdb6d02cf84d63937af45a6\nkubernetes.io/os=linux\ntopology.kubernetes.io/region=scw-kosmos-worldwide-5ecdb6d02cf84d63937af45a6\n \nNODE NAME: scw-kosmos-worldwide-b2db708b0c474decb7447e0d6\nbeta.kubernetes.io/arch=amd64\nbeta.kubernetes.io/os=linux\nk8s.scw.cloud/disable-lifecycle=true\nk8s.scw.cloud/node-public-ip=65.21.146.191\nkubernetes.io/arch=amd64\nkubernetes.io/hostname=scw-kosmos-worldwide-b2db708b0c474decb7447e0d6\nkubernetes.io/os=linux\ntopology.kubernetes.io/region=scw-kosmos-worldwide-b2db708b0c474decb7447e0d6\n```\n\nFor each of our three nodes, we see many labels. 
The first node on the list has considerably more labels as it is managed by a Kubernetes Kosmos engine. In this case, more information about features and node management is added.\n\n### 🔥 Adding labels to distinguish Cloud providers\n\nAs it might not be easy to remember which node comes from which provider, and as it can help us distribute our workload across providers, we are going to label our nodes with a label called `provider` with values such as `scaleway` or `hetzner`.\n\n`kubectl label nodes scw-kosmos-kosmos-scw-09371579edf54552b0187a95 provider=scaleway`\n\n`kubectl label nodes scw-kosmos-worldwide-5ecdb6d02cf84d63937af45a6 provider=scaleway`\n\n`kubectl label nodes scw-kosmos-worldwide-b2db708b0c474decb7447e0d6 provider=hetzner`\n\nIn addition, we are also going to add label to our unmanaged Scaleway node to specify that it is, in fact, not managed by the engine. For that we use the same label used on the managed Scaleway node, but set to false: `k8s.scaleway.com/managed=false`.\n\n`kubectl label nodes scw-kosmos-worldwide-5ecdb6d02cf84d63937af45a6 k8s.scaleway.com/managed=false`\n\n### 🔥 Listing our labels\n\nLet's list our labels to ensure that the `provider` label is well set on our three nodes.\n\n🔥 `kubectl get nodes --show-labels --no-headers | awk '{print \"NODE NAME: \"$1\",\"$6\"\\n\"}' | tr \",\" \"\\n\"`\n\nOutput\n```js\nNODE NAME: 
scw-kosmos-kosmos-scw-09371579edf54552b0187a95\nbeta.kubernetes.io/arch=amd64\nbeta.kubernetes.io/instance-type=DEV1-M\nbeta.kubernetes.io/os=linux\nfailure-domain.beta.kubernetes.io/region=nl-ams\nfailure-domain.beta.kubernetes.io/zone=nl-ams-1\nk8s.scaleway.com/kapsule=b58ad1f6-2a4d-4c0b-8573-459fad62682f\nk8s.scaleway.com/managed=true\nk8s.scaleway.com/node=09371579-edf5-4552-b018-7a95e779b70e\nk8s.scaleway.com/pool-name=kosmos-scw\nk8s.scaleway.com/pool=313ccb19-0233-4dc9-b582-b1e687903b7a\nk8s.scaleway.com/runtime=containerd\nkubernetes.io/arch=amd64\nkubernetes.io/hostname=scw-kosmos-kosmos-scw-09371579edf54552b0187a95\nkubernetes.io/os=linux\nnode.kubernetes.io/instance-type=DEV1-M\nprovider=scaleway\ntopology.csi.scaleway.com/zone=nl-ams-1\ntopology.kubernetes.io/region=nl-ams\ntopology.kubernetes.io/zone=nl-ams-1\n\nNODE NAME: scw-kosmos-worldwide-5ecdb6d02cf84d63937af45a6\nbeta.kubernetes.io/arch=amd64\nbeta.kubernetes.io/os=linux\nk8s.scaleway.com/managed=false\nk8s.scw.cloud/disable-lifecycle=true\nk8s.scw.cloud/node-public-ip=151.115.36.196\nkubernetes.io/arch=amd64\nkubernetes.io/hostname=scw-kosmos-worldwide-5ecdb6d02cf84d63937af45a6\nkubernetes.io/os=linux\nprovider=scaleway\ntopology.kubernetes.io/region=scw-kosmos-worldwide-5ecdb6d02cf84d63937af45a6\n\nNODE NAME: scw-kosmos-worldwide-b2db708b0c474decb7447e0d6\nbeta.kubernetes.io/arch=amd64\nbeta.kubernetes.io/os=linux\nk8s.scw.cloud/disable-lifecycle=true\nk8s.scw.cloud/node-public-ip=65.21.146.191\nkubernetes.io/arch=amd64\nkubernetes.io/hostname=scw-kosmos-worldwide-b2db708b0c474decb7447e0d6\nkubernetes.io/os=linux\nprovider=hetzner\ntopology.kubernetes.io/region=scw-kosmos-worldwide-b2db708b0c474decb7447e0d6\n```\n\n## Deployment and observation: What happens in a Multi-Cloud cluster?\n\n### 🔥 A first very simple deployment\n\nTo better understand the behavior of a Multi-Cloud Kubernetes cluster, we are going to create a very simple deployment using `kubectl`. 
This deployment will run three replicas of the `busybox` image, each of which will print the date every ten seconds.\n\n`kubectl create deploy first-deployment --replicas=3 --image=busybox -- /bin/sh -c \"while true; do date; sleep 10; done\"`\n\nOnce the deployment has been created, we can observe what is actually happening on our cluster.\n\n`kubectl get all`\n\nOutput\n```js\nNAME READY STATUS RESTARTS AGE\npod/first-deployment-695f579bd4-cfg6l 1/1 Running 0 8s\npod/first-deployment-695f579bd4-jzft8 1/1 Running 0 8s\npod/first-deployment-695f579bd4-rt5jt 1/1 Running 0 8s\n\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nservice/kubernetes ClusterIP 10.32.0.1 \u003cnone 443/TCP 53m\n\nNAME READY UP-TO-DATE AVAILABLE AGE\ndeployment.apps/first-deployment 3/3 3 3 8s\n\nNAME DESIRED CURRENT READY AGE\nreplicaset.apps/first-deployment-695f579bd4 3 3 3 8s\n\n```\n\nOur first observation is that our `deployment` object is here, along with the three `pods` (replicas) we asked for. We can also observe that another \"unexpected\" object was also created, a `replicaset`. The `replicaset` is an intermediary object created by the `deployment` in charge of maintaining and monitoring the replicas.\n\nNow, let's have a quick look inside one of our `pods` to see if it performs normally.\n\n🔥 `kubectl logs pod/first-deployment-695f579bd4-cfg6l`\n\nOutput\n```js\nMon Sep 6 08:41:01 UTC 2021\nMon Sep 6 08:41:11 UTC 2021\nMon Sep 6 08:41:21 UTC 2021\nMon Sep 6 08:41:31 UTC 2021\n```\n\nWe can see that our pod is writing the date every ten seconds, which is exactly what we asked it to do.\n\nNow, the real question is, where are these `pods` running? 
We can use the `kubectl get pods` to give us the name of the node where they actually run.\n\n🔥 `kubectl get pods -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName`\n\nOutput\n```js\nNAME NODE\nfirst-deployment-695f579bd4-cfg6l scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-jzft8 scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-rt5jt scw-kosmos-kosmos-scw-0937\n```\n\nWhen listing our three pods and their location, it seems that they all run on the same managed Scaleway node (the one located in Amsterdam). That's unfortunate... Let's see if we can act on this behavior.\n\n### 🔥 Scaling up\n\nThe first thing we can try is to scale up our deployment and see where all our new replicas will be scheduled.\n\n🔥 `kubectl scale deployment first-deployment --replicas=15`\n\nThe scaling has been applied, we can list our pods again.\n\n🔥 `kubectl get pods -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName`\n\nOutput\n```js\nNAME NODE\nfirst-deployment-695f579bd4-5jq9q scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-5t6tw scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-5twcj scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-5xljr scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-8phq5 scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-cfg6l scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-jzft8 scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-nf9fg scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-nsxb6 scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-ptlkp scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-rgdqj scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-rt5jt scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-vrl95 scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-vwv7l scw-kosmos-kosmos-scw-0937\nfirst-deployment-695f579bd4-w9qqq scw-kosmos-kosmos-scw-0937\n```\n\nAnd they are still all running on the same node.\n\nTo go further, we are going to play with 
more complex configuration. In order to do so without getting mixed up with our other configurations, deployments and pods, it is best to clean our environment and delete our deployment.\n\n🔥 `kubectl delete deployment first-deployment`\n\nOutput \n`deployment.apps \"first-deployment\" deleted`\n\n### Yaml files\n\nKubectl commands are nice, but when it comes to managing multiple Kubernetes objects, configuration files are a better and more reliable fit‌. In Kubernetes, configurations are made in `yaml` format, always following a pattern similar to the one below:\n\n```yaml\n#example.yaml\n-—-\napiVersion: apps/v1 # version of the k8s api\nkind: Pod # type of the Kubernetes object we aim to describe\nmetadata: # additional options such as the object name, labels, annotations\n …\nspec: # parameters and options of the k8s object to create\n …\n```\n\n## Selecting where to run our pods\n\nIn Kubernetes, there are different options available to distribute our workload across nodes, namespaces, or depending on affinity, between `pods`. Working in a Multi-Cloud Kubernetes environmnent makes their usage mandatory and knowing them and their behavior can rapidly become crucial.\n\n### 🔥 NodeSelector\n\nA node selector is applied on a pod and will match labels that exist on the cluster nodes. 
The command below gives us all information about a given node, including labels, annotations, running pods, etc...\n\n🔥 `kubectl describe node scw-kosmos-kosmos-scw-09371579edf54552b0187a95`\n\nOutput\n```js\nName: scw-kosmos-kosmos-scw-09371579edf54552b0187a95\nRoles: \u003cnone\u003e\nLabels: beta.kubernetes.io/arch=amd64\n beta.kubernetes.io/instance-type=DEV1-M\n beta.kubernetes.io/os=linux\n failure-domain.beta.kubernetes.io/region=nl-ams\n failure-domain.beta.kubernetes.io/zone=nl-ams-1\n k8s.scaleway.com/kapsule=b58a[...]\n k8s.scaleway.com/managed=true\n k8s.scaleway.com/node=0937[...]\n k8s.scaleway.com/pool=313c[...]\n k8s.scaleway.com/pool-name=kosmos-scw\n k8s.scaleway.com/runtime=containerd\n kubernetes.io/arch=amd64\n kubernetes.io/hostname=scw-kosmos-kosmos-scw-0937[...]\n kubernetes.io/os=linux\n node.kubernetes.io/instance-type=DEV1-M\n provider=scaleway\n topology.csi.scaleway.com/zone=nl-ams-1\n topology.kubernetes.io/region=nl-ams\n topology.kubernetes.io/zone=nl-ams-1\nAnnotations: csi.volume.kubernetes.io/nodeid: {\"csi.scaleway.com\":\"[...]\"}\n kilo.squat.ai/discovered-endpoints: {}\n kilo.squat.ai/endpoint: 51.15.123.156:51820\n kilo.squat.ai/force-endpoint: 51.15.123.156:51820\n kilo.squat.ai/granularity: location\n kilo.squat.ai/internal-ip: 10.67.36.37/31\n kilo.squat.ai/key: cSP2[...]\n kilo.squat.ai/last-seen: 1630917821\n kilo.squat.ai/wireguard-ip: 10.4.0.1/16\n node.alpha.kubernetes.io/ttl: 0\n volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp: Mon, 06 Sep 2021 09:50:02 +0200\nTaints: \u003cnone\u003e\n[...]\n```\n_Parts in brackets \\[...\\] are truncation of the output for more lisibility_\n\nA `node selector` can be applied on a pod using an existing `label`, or a new `label` created by the Kubernetes user, such as the labels we previously added on our nodes.\n\nThis is a sample `yaml` file using a `node selector` in a `pod`:\n\n```yaml\n#example.yaml\n-—-\napiVersion: apps/v1\nkind: 
Pod\nmetadata:\n name: nginx\nspec:\n containers:\n - name: nginx\n image: nginx\n nodeSelector:\n provider: scaleway\n\n```\n\nThe `node selector` ensures that the defined pod will only be scheduled on a node matching the condition. In this example, the `nginx` pod will only be scheduled on nodes with the label `provider=scaleway`. \n\n### NodeAffinity\n\nNode affinity also matches labels existing on nodes, but provides more flexibility and option in terms of the rules that are applied.\n\nFirst of all, the `node affinity` accepts two different policies:\n\n* requiredDuringSchedulingIgnoredDuringExecution‌‌\n* preferredDuringSchedulingIgnoredDuringExecution\n\nAs their names are self explanatory, we can easily understand that if a condition is not matched, Kubernetes might still be able to schedule `pods` on nodes that do not match the conditions. It allows the definitions of preferences for `pod` scheduling instead of mandatory criterions.\n\nThe file here is an example of `requiredDuringSchedulingIgnoredDuringExecution‌‌` and `preferredDuringSchedulingIgnoredDuringExecution` configuration.\n\n```yaml\n#example.yaml\n-—-\napiVersion: apps/v1\nkind: Pod\nmetadata:\n name: nginx\nspec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: provider\n operator: In\n values:\n - scaleway\n - hetzner\n preferedDuringSchedulingIgnoredDuringExecution:\n - weight: 1\n preference:\n - matchExpressions:\n - key: topology.kubernetes.io/region\n operator: In\n values:\n - nl-ams\n Containers:\n - name: nginx\n image: nginx\n\n```\n\nIn this example, the `pod` is required to run on a node with `provider=scaleway` or `provider=hetzner` label, and should preferably be scheduled on a node with a label `topology.kubernetes.io/region=nl-ams`.\n\n### PodAffinity\n\nThe `pod affinity` constraint is applied on `pods` based on other `pods` labels. 
It benefits from the two same policies as the `node affinity`:\n\n* requiredDuringSchedulingIgnoredDuringExecution‌‌\n* preferredDuringSchedulingIgnoredDuringExecution\n\nThe difference with `node affinity` is that instead of defining rules for the cohabitation of pods on nodes, the `pod affinity` defines rules between pods, such as \"`pod 1` should run on the same node as `pod 2`\".\n\nIn the following sample file, we specify that an `nginx` pod must be scheduled on any nodes containing a pod with the label `app=one-per-provider`.\n\n```yaml\n#example.yaml\n-—-\napiVersion: apps/v1\nkind: Pod\nmetadata:\n name: nginx\nspec:\n affinity:\n podAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - labelSelector:\n matchExpressions:\n - key: app\n operator: In\n values:\n - one-per-provider\n topologyKey: provider\n containers:\n - name: nginx\n image: nginx\n```\n\n### PodAntiAffinity\n\nThe same way Kubernetes allows us to define `pod affinities`, we can also define `pod anti affinities`, thus defining preferences for `pods` to not cohabitate together under some conditions.\n\nThe same two policies are available:\n* requiredDuringSchedulingIgnoredDuringExecution‌‌\n* preferredDuringSchedulingIgnoredDuringExecution\n\nIn the following sample file, we define that `nginx` `pods` should ideally not be scheduled on `pods` with the `security=S1` label and on a node with a different value for `topology.kubernetes.io/zone` labels.\n\n```yaml\n#example.yaml\n-—-\napiVersion: apps/v1\nkind: Pod\nmetadata:\n name: nginx\nspec:\n affinity:\n podAntiAffinity:\n preferedDuringSchedulingIgnoredDuringExecution:\n - weight: 100\n podAffinityTerm:\n labelSelector:\n matchExpressions:\n - key: security\n operator: In\n values:\n - S1\n topologyKey: topology.kubernetes.io/zone\n containers:\n - name: nginx\n image: nginx\n```\n\n### 🔥 Deploy \u0026 see: Spread our deployment across different providers\n\nWe are going to try out deploying an application across our two 
providers: Scaleway and Hetzner.\n\n🔥 Let's create this `antiaffinity.yaml` configuration file to create a `deployment` where each `pod` will be deployed on nodes with a different `provider` label, and will not cohabitate with `pods` that have the `app=one-per-provider` label.\n\n🔥\n\n```yaml\n#antiaffinity.yaml\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: one-per-provider-deploy\nspec:\n replicas: 2\n selector:\n matchLabels:\n app: one-per-provider\n template:\n metadata:\n labels:\n app: one-per-provider\n spec:\n affinity:\n podAntiAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - labelSelector:\n matchExpressions:\n - key: app\n operator: In\n values:\n - one-per-provider\n topologyKey: provider\n containers:\n - name: busytime\n image: busybox\n command: [\"/bin/sh\",\"-c\",\"while true; do date; sleep 10; done\"]\n```\n\n🔥 Apply the `deployment` configuration on our cluster using the following command.\n\n🔥 `kubectl apply -f antiaffinity.yaml`\n\nOutput \n`deployment.apps/one-per-provider-deploy created`\n\nAnd observe `pods` that were generated and the `nodes` they run on.\n\n🔥 `kubectl get pods -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName`\n\nOutput\n```js\nNAME NODE\none-per-provider-deploy-75945bb589-6jb87 scw-kosmos-worldwide-b2db\none-per-provider-deploy-75945bb589-db25x scw-kosmos-kosmos-scw-0937\n```\n\nBy looking at the name of the different nodes, we can see the `pool` name in the middle, informing that our two `pods` are deployed on different `pools`, and thus different `nodes`.\n\nSince we have a Scaleway node in our **\"worldwide\"** `pool`, let's make sure that both our instances are on different providers. 
This information can be found in the `deployment` configuration, by fetching the `provider` `label` we set on it at the beginning of the workshop.\n\nFurthermore, one of the nodes in the **\"worldwide\"** `pool` is a Scaleway node, so we want to make sure that both our pods don't actually run on Scaleway.\n\n🔥 `kubectl get nodes scw-kosmos-worldwide-b2db708b0c474decb7447e0d6 -o custom-columns=NAME:.metadata.name,PROVIDER:.metadata.labels.provider`\n\nOutput\n```\nNAME PROVIDER\nscw-kosmos-worldwide-b2db708b0c474decb7447e0d6 hetzner\n```\n\n🔥 `kubectl get nodes scw-kosmos-kosmos-scw-09371579edf54552b0187a95 -o custom-columns=NAME:.metadata.name,PROVIDER:.metadata.labels.provider`\n\nOutput \n```js\nNAME PROVIDER\nscw-kosmos-kosmos-scw-09371579edf54552b0187a95 scaleway\n```\n\nWhen we ask for the `provider` `label` of our two nodes, we can confirm that our two `pods` from the deployment (that have the same `app=one-per-provider` `label`) were scheduled on different providers. \n\n### 🔥 Scaling up\n\nOur previous `deployment` defined two `replicas` where each generated `pod` is labeled `app=one-per-provider`. 
These `pods` should be scheduled on `nodes` with different values for their `provider label`( _`topologyKey` field_).\n\nAs our cluster has only two different providers, and all our `pods` were created within the `one-per-provider-deploy deployment`, scaling up the `deployment` should not result in the scheduling of a third `pod`.\n\nSo let's try it by adding only one more `replica`.\n\n🔥 `kubectl scale deployment one-per-provider-deploy --replicas=3`\n\nOutput \n`deployment.apps/one-per-provider-deploy scaled`\n\nOnce the `deployment` has scaled up, we can list our `pods` and see what happened.\n\n🔥 `kubectl get pods`\n\nOutput\n```\nNAME READY STATUS RESTARTS AGE\none-per-provider-deploy-7594-29wr7 0/1 Pending 0 7s\none-per-provider-deploy-7594-6jb87 1/1 Running 0 12m\none-per-provider-deploy-7594-db25x 1/1 Running 0 12m\n```\n\nThe third `pod` is present in our cluster as it was required by the `deployment replicaset`, but we can see that it is stuck in `pending` state, meaning the `pod` could not find a `node` to be scheduled on.\n\nThe reason behind this behavior is that the `pod` is waiting for a `node` to match all of its `pod anti-affinity constraints`, and there are currently no other nodes from a third Cloud provider in our cluster. Until a new `node` with this requirement is added to the cluster, our third `pod` will remain unavailable and in a `pending` state.\n\n### Taints\n\nA `taint` is a Kubernetes concept used to block `pods` from running on certain nodes.\n\nThe principle is to define key/value pairs completed by an `effect` (i.e. a taint policy). 
There are three possibilities:\n\n* **_NoSchedule‌‌_**: Forbids `pod` scheduling on the `node` but allows the execution.\n* **_PreferNoSchedule‌‌_**: Allows execution, and forbids `pod` scheduling on the `node`, except if no `node` can match this policy.\n* **_NoExecute_**: Forbids `pod` execution on the `node`, resulting in the eviction of unauthorized running `pods`.\n\n**Example** \n`user@local:~$ kubectl taint nodes tainted-node key1=value1:NoSchedule`\n\nIn this example, a `taint` is applied to the node named `tainted-node`, and set with the `effect` (i.e. constraint or policy) `NoSchedule`.\n\nThis means that no `pod` has permission to be scheduled on a `tainted-node`, except for `pods` with a specific authorization to do so. These authorizations are called `tolerations` and are covered in the next section of this article.\n\nIf a `pod` without the corresponding `toleration` is already running on a `tainted-node` at the time the `taint` is added (i.e. when the `kubectl` command above is executed), the `pod` will not be evicted and will keep running on this node.\n\nHowever, if the constraint was set to `NoExecute`, any `pods` without the corresponding `toleration` would not be allowed to run on the `tainted-node`, resulting in its eviction.\n\n### Tolerations\n\nAs stated before, `tolerations` are applied on `pods` to allow exceptions on tainted nodes. 
Two policies are available:\n\n* **_Equal_**: matches the `key` and `value` of a `node taint` exactly.\n* **_Exists_**: matches the existence of a `node taint` regardless of its value.\n\nIn this example, the `pod` named `busybox` is granted permission to be scheduled on a tainted node with two `taints`:\n\n* `key1=value1:NoSchedule`\n* `key2` with `NoSchedule effect` regardless of the `value` attributed to the `taint`.\n\n```yaml\n# example-equal.yaml\n---\napiVersion: v1\nkind: Pod\nmetadata:\n name: busybox\nspec:\n containers:\n - name: busybox\n image: busybox\n command: [\"/bin/sh\",\"-c\",\"sleep 3600\"]\n tolerations:\n - key: key1\n operator: Equal\n value: \"value1\"\n effect: NoSchedule\n - key: key2\n operator: Exists\n effect: NoSchedule\n```\n\n`Taints` and `tolerations` can converge to define very specific behaviors for the scheduling and execution of `pods`.\n\n### Forbidding execution\n\nTo experiment with `taints`, we are going to taint our managed Scaleway node with `autoscale=true:NoSchedule`. \nAs this node is part of a managed pool of our cluster, it benefits from the auto-scaling feature. We want to grant permission only to `pods` that are configured to run on an auto-scalable pool. \nWe also want to exclude (i.e. evict) all running pods which do not have the toleration from this node.\n\nLet's have a look at our cluster status by listing our `pods` and the `nodes` they run on.\n\n🔥 `kubectl get pods -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName`\n\nOutput\n```js\nNAME NODE\none-per-provider-deploy-7594-29wr7 \u003cnone\u003e\none-per-provider-deploy-7594-6jb87 scw-kosmos-worldwide-b2db\none-per-provider-deploy-7594-db25x scw-kosmos-kosmos-scw-0937\n```\n\nWe still have the same three `pods`, two of which are `running`, and one `pending` (as no node is attributed to it).\n\nOur managed Scaleway `pool` has auto-scaling activated, using the preset `label` `autoscale=true`. 
We are, therefore, going to use the same label to forbid scheduling on this specific node.\n\n🔥 `kubectl taint nodes scw-kosmos-kosmos-scw-09371579edf54552b0187a95 autoscale=true:NoSchedule`\n\nOutput \n`node/scw-kosmos-kosmos-scw-09371579edf54552b0187a95 tainted`\n\nSee what happened on our cluster after applying the `taint`, below:\n\n🔥 `kubectl get pods -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,STATUS:.status.phase`\n\nOutput\n```js\nNAME NODE STATUS\none-per-provider-deploy-75-29wr7 \u003cnone\u003e Pending\none-per-provider-deploy-75-6jb87 scw-kosmos-worldwide-b2db Running\none-per-provider-deploy-75-db25x scw-kosmos-kosmos-scw-0937 Running\n```\n\nThe state of our cluster has not changed. The reason for that is that the `taint` we added concerned scheduling, and our `pods` were already scheduled on our `nodes` at the time the `taint` was added.\n\nAlso, our `taint` forbid scheduling, but it did not forbid the execution of the `pod`.\n\nNow, let's add a new taint to the same node. This time, however, we will set its effect to `NoExecute`.\n\n🔥 `kubectl taint nodes scw-kosmos-kosmos-scw-09371579edf54552b0187a95 autoscale=true:NoExecute`\n\nOutput \n`node/scw-kosmos-kosmos-scw-09371579edf54552b0187a95 tainted`\n\nOnce this new `taint` is applied, we want to observe the behavior of our `pods`.\n\n🔥 `kubectl get pods -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,STATUS:.status.phase`\n\nOutput \n```js\nNAME NODE STATUS\none-per-provider-deploy-75-29wr7 \u003cnone\u003e Pending\none-per-provider-deploy-75-6jb87 scw-kosmos-worldwide-b2db Running\none-per-provider-deploy-75-vjxkq scw-kosmos-worldwide-5ecd Running\n```\n\nIf we look closely at the node column of this output, we can see that `node scw-kosmos-kosmos-scw-0937` no longer has a `pod` running on it. 
This `pod` was evicted when the `taint` with `NoExecute` effect was applied.\n\nTo maintain the stability of our cluster, the `replicaset` of our `one-per-provider-deploy deployment` rescheduled a new `pod` on a node without the incompatible `taints`.\n\nA new `pod` was created in pending state while the evicted `pod` was in a `Terminating` status. The `pod` was rescheduled to a `node` that matched the `taints`' conditions (the \"do not execute on `node` with the autoscaling\" `label`, but with the condition of being on a different provider using the `node selector` provider `label`).\n\nMoving on to `tolerations`, we will create a `pod` with a `toleration` which allows it to schedule on our managed Scaleway node based on its location `label [topology.kubernetes.io/region=nl-ams](http://topology.kubernetes.io/region=nl-ams)`(this label was setup directly by the Scaleway Kubernetes engine during the managed pool creation).\n\n🔥\n\n```yaml\n#toleration.yaml\n---\napiVersion: v1\nkind: Pod\nmetadata:\n name: busytolerant\nspec:\n containers:\n - name: busytolerant\n image: busybox\n command: [\"/bin/sh\",\"-c\",\"sleep 3600\"]\n tolerations:\n - key: autoscale\n operator: Equal\n value: \"true\"\n effect: NoSchedule\n - key: autoscale\n operator: Equal\n value: \"true\"\n effect: NoExecute\n nodeSelector:\n topology.kubernetes.io/region: \"nl-ams\"\n```\n\nThis `yaml` file defines a `pod` able to run on a node with the following conditions:\n\n* node has the `taint autoscale=true:NoSchedule`\n* node has the `taint autoscale=true:NoExecute`\n* node has the `label topology.kubernetes.io/region=nl-ams`\n\nLet's apply this configuration to our cluster and observe what happens.\n\n🔥 `kubectl apply -f toleration.yaml`\n\n\u003e Output \n\u003e `pod/busytolerant created`\n\n🔥 `kubectl get pods -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,STATUS:.status.phase`\n\nOutput\n```js\nNAME NODE STATUS\nbusytolerant scw-kosmos-kosmos-scw-0937 
Running\none-per-provider-deploy-75-29wr7 \u003cnone\u003e Pending\none-per-provider-deploy-75-6jb87 scw-kosmos-worldwide-b2db Running\none-per-provider-deploy-75-vjxkq scw-kosmos-worldwide-5ecd Running\n```\n\nWe can see that with the right `tolerations`, the `pod` named `busytolerant` was perfectly able to be scheduled and executed on the node we tainted previously.\n\nThe addition of the constraint on the `region label` is just a way to show how all the workload distribution features Kubernetes offers are cumulative.\n\n---\n\n### 🔥 Removing the taints before moving forward\n\nTo avoid scheduling issues while moving forward in this Hands-On, it is best to remove the `taints` applied on our node. The command to do so is the same as the one to add the `taint`, with just the addition of the `-` (_dash_) character at the end of the `taint` declaration .\n\n🔥 `kubectl taint nodes scw-kosmos-kosmos-scw-09371579edf54552b0187a95 autoscale=true:NoSchedule-`\n\nOutput \n`node/scw-kosmos-kosmos-scw-09371579edf54552b0187a95 untainted`\n\n🔥 `kubectl taint nodes scw-kosmos-kosmos-scw-09371579edf54552b0187a95 autoscale=true:NoExecute-`\n\nOutput \n`node/scw-kosmos-kosmos-scw-09371579edf54552b0187a95 untainted`\n\nWe can observe that removing the `taints` did not have an effect on the pods running in our cluster. 
This happens because `tolerations` are rules for authorization and not forbidding instructions (as opposed to `taints`).\n\n🔥 `kubectl get pods -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,STATUS:.status.phase`\n\nOutput\n```js\nNAME NODE STATUS\nbusytolerant scw-kosmos-kosmos-scw-0937 Running\none-per-provider-deploy-75-29wr7 \u003cnone\u003e Pending\none-per-provider-deploy-75-6jb87 scw-kosmos-worldwide-b2db Running\none-per-provider-deploy-75-vjxkq scw-kosmos-worldwide-5ecd Running\n```\n\nLet's keep cleaning our environment and remove our `busytolerant pod` and our `one-per-provider-deploy deployment` one by one.\n\n🔥 `kubectl delete pods busytolerant`\n\nOutput \n`pod \"busytolerant\" deleted`\n\n🔥 `kubectl delete deployment one-per-provider-deploy`\n\nOutput \n`deployment.apps \"one-per-provider-deploy\" deleted`\n\n🔥 `kubectl get all`\n\nOutput \n ```js\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nservice/kubernetes ClusterIP 10.32.0.1 \u003cnone\u003e 443/TCP 107m\n```\n\n### PodTopologySpread constraint\n\nThe `pod topology spread` constraint aims to evenly distribute `pods` across `nodes` based on specific rules and constraints.\n\nIt allows you to set a maximum difference in the number of similar `pods` between the nodes (`maxSkew` parameter) and to determine the action that should be performed if the constraint cannot be met:\n\n* **_DoNotSchedule_**: hard constraint, the `pod` cannot be scheduled\n* **_ScheduleAnyway_**: soft constraint, the `pod` can be scheduled if the conditions are not matched.\n\nThe sample file below shows the type of configuration to apply a `topologySpreadConstraint` on `pods` created from a `deployment`.\n\n```yaml\n# example.yaml\n---\n apiVersion: apps/v1\n kind: Deployment\n metadata:\n name: busy-topologyspread\n spec:\n replicas: 10\n selector:\n matchLabels:\n app: busybox-acrossproviders\n template:\n metadata:\n labels:\n app: busybox-acrossproviders\n spec:\n topologySpreadConstraints:\n - maxSkew: 1\n 
topologyKey: provider\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: busybox-acrossproviders\n containers:\n - name: busybox-everywhere\n image: busybox\n command: [\"/bin/sh\",\"-c\",\"sleep 3600\"]\n```\n\n### 🔥 Distributing our workload\n\nThe `topology spread constraint` is specifically useful to spread the workload of one or multiple applications evenly throughout a Kubernetes cluster.\n\n🔥 We are going to define a `spread.yaml` file to set up a deployment with ten replicas, but which should be scheduled evenly between nodes with the following `labels`: `provider=scaleway` and `provider=hetzner`. \n\nWe authorize a difference of only one `pod` between our matching nodes using the `maxSkew` parameter:\n\n🔥\n\n```yaml\n#spread.yaml\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: busyspread\nspec:\n replicas: 10\n selector:\n matchLabels:\n app: busyspread-providers\n template:\n metadata:\n labels:\n app: busyspread-providers\n spec:\n topologySpreadConstraints:\n - maxSkew: 1\n topologyKey: provider\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: busyspread-providers\n containers:\n - name: busyspread\n image: busybox\n command: [\"/bin/sh\",\"-c\",\"sleep 3600\"]\n\n```\n\n🔥 Let's apply this `deployment`.\n\n🔥 `kubectl apply -f spread.yaml`\n\nOutput \n`deployment.apps/busyspread created`\n\nTo see the distribution of our pods across the nodes of our cluster, we are going to list our `busyspread pods`, the `nodes` they run on, and count the number of occurrences.\n\n🔥 `kubectl get pods -o wide --no-headers | grep busyspread | awk '{print $7}' | sort | uniq -c`\n\nOutput \n`2 scw-kosmos-kosmos-scw-09371579edf54552b0187a95` \n`3 scw-kosmos-worldwide-5ecdb6d02cf84d63937af45a6` \n`5 scw-kosmos-worldwide-b2db708b0c474decb7447e0d6`\n\nKnowing that the first two nodes in this list have the `label provider=scaleway` and the third one has a `label provider=hetzner`, we have indeed an even 
distribution of our workload across our providers with five pods for each of them.\n\nThe next step of this Hands-On will be to set up Load Balancing and Storage management within a Multi-Cloud Kubernetes cluster.\n\n🔥 In order to avoid getting mixed up in all our pods and deployments, we are going to clean our environment by deleting our `busyspread deployment`.\n\n🔥 `kubectl delete deployment busyspread`\n\nOutput \n`deployment.apps \"busyspread\" deleted`\n","createdAt":"2023-01-18T16:32:11.093Z","updatedAt":"2023-03-13T08:13:03.642Z","publishedAt":"2023-01-18T16:35:21.099Z","locale":"en","tags":"Kubernetes\nMulti-cloud\nQuickstart\nIntroduction\nDiscover","popular":false,"articleOfTheMonth":false,"category":"Deploy","timeToRead":31,"excerpt":"This article will guide you through the best practices to deploy and distribute the workload on a multi-cloud Kubernetes environment on Scaleway's Kosmos.","author":"Emmanuelle Demompion","h1":"How to deploy and distribute the workload on a multi-cloud Kubernetes 
environment","createdOn":"2022-07-21","image":{"data":{"id":1681,"attributes":{"name":"Post-Kapsule-LoadBalancer-Illustration-Blog.webp","alternativeText":null,"caption":null,"width":1216,"height":752,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04.webp","hash":"large_Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04","mime":"image/webp","name":"large_Post-Kapsule-LoadBalancer-Illustration-Blog.webp","path":null,"size":"234.97","width":1000,"height":618},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04.webp","hash":"small_Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04","mime":"image/webp","name":"small_Post-Kapsule-LoadBalancer-Illustration-Blog.webp","path":null,"size":"82.49","width":500,"height":309},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04.webp","hash":"medium_Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04","mime":"image/webp","name":"medium_Post-Kapsule-LoadBalancer-Illustration-Blog.webp","path":null,"size":"154.95","width":750,"height":464},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04.webp","hash":"thumbnail_Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04","mime":"image/webp","name":"thumbnail_Post-Kapsule-LoadBalancer-Illustration-Blog.webp","path":null,"size":"29.56","width":245,"height":152}},"hash":"Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04","ext":".webp","mime":"image/webp","size":314.29,"url":"https://www-uploads.scaleway.com/Post_Kapsule_Load_Balancer_Illustration_Blog_bb36e17f04.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2023-01-18T16:32:58.385Z","updatedAt":"2023-01-18T16:32:58.385Z"}}},"recomme
ndedArticles":{"data":[{"id":246,"attributes":{"title":"best-practices-on-service-exposure-and-data-persistence-for-a","path":"best-practices-on-service-exposure-and-data-persistence-for-a/","description":"This article follows the first and second part of the Hands-On prepared for [Devoxx Poland 2021](https://devoxx.pl/).\n\n* [How to deploy and distribute the workload on a multi-cloud Kubernetes environment](https://www.scaleway.com/en/blog/how-to-deploy-and-distribute-the-workload-on-a-multi-cloud-kubernetes-environment/)\n\n⚠️ **Warning reminder:**\n\nThis article will balance between concept explanations and operations or commands that need to be performed by the reader.\n\nIf this icon 🔥 is present before an image, a command, or a file, you are required to perform an action.\n\nSo remember, when 🔥 is on, so are you!\n\n---\n\n## Load Balancing\n\nLoad Balancing traditionally allows service exposure using standard protocols such as HTTP or HTTPS. Often used to give an external access point to software end-users, Load Balancers are usually managed by Cloud providers.\n\nWhen working with Kubernetes, a specific Kubernetes component manages the creation, configuration, and lifecycle of Load Balancers within a cluster.\n\n### Cloud Controller Manager (CCM) component\n\nThe Cloud Controller Manager is the Kubernetes component that provides an interface for Cloud providers to manage Cloud resources from a cluster's configuration. 
\n\nThis component is especially in charge of the creation and deletion of Instances (in auto-healing and auto-scaling usage) and Load Balancers to expose the applications running in a Kubernetes cluster outside of it, making it available for external users.\n\n![Classic Kubernetes architecture with the CCM component within the Kubernetes Control-Plane](https://www-uploads.scaleway.com/Classic_Kubernetes_architecture_with_the_CCM_component_within_the_Kubernetes_Control_Plane_8e4d449304.svg)\n\nClassic Kubernetes architecture with the CCM component within the Kubernetes Control-Plane\n\n### 🔥 Adding a Load Balancer\n\nWe will create a Multi-Cloud Load Balancer with Scaleway's Cloud Controller Manager which allows us to expose services and redirect traffic across both of our two providers (Scaleway and Hetzner).\n\nFor this exercise, we will be using an [open source project called WhoAmI hosted on Github](https://github.com/kontena/k8s-client/blob/master/spec/fixtures/stacks/whoami.yaml). 
The two main advantages are that the project is available as a `docker` image, and that once called, it outputs the identifier of the `pod` it runs on.\n\n🔥 We are going to write a `lb.yaml` file which will contain:\n\n- the `deployment` of our `whoami` application, with two `replicas`, a `pod anti affinity` to distribute our two `pods` on different providers, and expose its port `8000` inside the cluster.\n- the `service` of type `LoadBalancer` which maps the `8000` port of our `pods` to the standard HTTP port (`80`) and expose our application outside of the cluster.\n\n🔥\n\n```bash\n#lb.yaml\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: whoami\n labels:\n app: whoami\nspec:\n replicas: 2\n selector:\n matchLabels:\n app: whoami\n template:\n metadata:\n labels:\n app: whoami\n spec:\n affinity:\n podAntiAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - labelSelector:\n matchExpressions:\n - key: app\n operator: In\n values:\n - whoami\n topologyKey: provider\n containers:\n - name: whoami\n image: jwilder/whoami\n ports:\n - containerPort: 8000\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: scw-multicloud-lb\n labels:\n app: whoami\nspec:\n selector:\n app: whoami\n ports:\n - port: 80\n targetPort: 8000\n type: LoadBalancer\n```\n\nNow that our `yaml` file is ready, we can create the `deployment` and the load balancer `service` at once.\n\n🔥 `kubectl apply -f lb.yaml`\n\nOutput \n`deployment.apps/whoami created` \n`service/scw-multicloud-lb created`\n\n🔥 We can list the services available in our cluster and see that our `service` of type `LoadBalancer` is ready and has been given an `external-ip`.\n\n🔥 `kubectl get services`\n\nOutput\n```bash\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S)\nkubernetes ClusterIP 10.32.0.1 \u003cnone\u003e 443/TCP\nscw-multicloud-lb LoadBalancer 10.35.41.56 51.159.11.31 80:31302/TCP\n```\n\nAs we stated before, creating a Load Balancer `service` within a Kubernetes cluster results in the creation 
of a Load Balancer resource on the user account on the Cloud provider managing the Kubernetes cluster (via the CCM component).\n\nIf we connect to our Scaleway Console, we can see that a Load Balancer resource has been added to our account with the same attributed `external-ip` that we were given by listing our Kubernetes `services`.\n\n![Load Balancer listing view in Scaleway Console](https://www-uploads.scaleway.com/blog-Screen-Shot-2021-09-06-at-11-43-59.webp)\n\n![Our Load Balancer overview in Scaleway Console](https://www-uploads.scaleway.com/Our_Load_Balancer_overview_in_Scaleway_Console_8ee90db1f5.webp)\n\nSince we created our Load Balancer `service` in the context of a Kubernetes Kosmos cluster (i.e. a Multi-Cloud environment), the CCM created a Multi-Cloud Scaleway Load Balancer to match our needs.\n\n### Checking the behavior of our Load Balancer\n\nThe project we deployed allows us to test the behavior of our Multi-Cloud Load Balancer. We already know that our two `whoami pods` run on different providers, but let's be sure that the traffic is redirected to both providers when it is called.\n\n🔥 To do so, we will create a simple `bash` command to call our given `external-ip`. 
The `whoami` project should answer with its identifier.\n\n🔥 `while true; do curl http://51.159.11.31; sleep 1; done`\n\n\u003e Output\n\u003e \n\u003e ```\n\u003e I'm whoami-59c74f5cf4-hxdwp\n\u003e I'm whoami-59c74f5cf4-hxdwp\n\u003e I'm whoami-59c74f5cf4-hxdwp\n\u003e I'm whoami-59c74f5cf4-kbfq4\n\u003e I'm whoami-59c74f5cf4-hxdwp\n\u003e I'm whoami-59c74f5cf4-hxdwp\n\u003e I'm whoami-59c74f5cf4-hxdwp\n\u003e I'm whoami-59c74f5cf4-kbfq4\n\u003e \n\u003e ```\n\nWhen calling our `service`, we can see that we indeed have two different identifiers returned by our calls, those identifiers corresponding to our two `pods` running on `nodes` from our two different providers.\n\nThe Multi-Cloud Load Balancer created directly within our Kubernetes cluster can redirect traffic between different nodes from different Cloud providers for the same application (i.e. `deployment`).\n\nIf we want to verify this assessment, we can simply list our `pods` again and the `nodes` they run on.\n\n🔥 ```kubectl get pods -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,STATUS:.status.phase```\n\nOutput\n```bash\nNAME NODE STATUS\nwhoami-59c74f5cf4-hxdwp scw-kosmos-kosmos-scw-0937 Running\nwhoami-59c74f5cf4-kbfq4 scw-kosmos-worldwide-b2db Running \n```\n\nThe last check we can do is to display the `provider label` associated with these two nodes, just to be sure they are on different Cloud providers. \n\n🔥 `kubectl get nodes scw-kosmos-kosmos-scw-09371579edf54552b0187a95 scw-kosmos-worldwide-b2db708b0c474decb7447e0d6 -o custom-columns=NAME:.metadata.name,PROVIDER:.metadata.labels.provider`\n\nOutput \n```bash\nNAME PROVIDER\nscw-kosmos-kosmos-scw-09371579edf54552b0187a95 scaleway\nscw-kosmos-worldwide-b2db708b0c474decb7447e0d6 hetzner\n```\n\n🔥 Now we can delete our deployment and Multi-Cloud Load Balancer service. 
To do so, we will start by deleting the `service`, which will result in the deletion of our Load Balancer resource in the Scaleway Console.\n\n🔥 `kubectl delete service scw-multicloud-lb`\n\nOutput \n`service \"scw-multicloud-lb\" deleted` \n![](https://www-uploads.scaleway.com/blog-Screen-Shot-2021-09-06-at-11-53-17.webp)\n\n🔥 And then we can delete our `deployment`.\n\n🔥 `kubectl delete deployment whoami`\n\nOutput \n`deployment.apps \"whoami\" deleted`\n\n## Managing Persistent Storage in a Multi-Cloud Kubernetes cluster\n\nIn Kubernetes, nodes are considered dispensable, meaning they can be deleted, created, or replaced. This implies that local storage for Kubernetes usage is not a reliable solution, since the data stored locally on an instance would be lost in case of the Instance deletion or replacement.\n\n### Container Storage Interface (CSI)\n\nThe Container Storage Interface (or CSI) is a Kubernetes component running on all the managed nodes of a Kubernetes cluster.\n\nIt provides an interface for persistent storage management‌‌ such as:\n- **_File Storage‌‌:_** remote filesystem storage type that can be shared between multiple `pods` within a cluster.\n- **_Block Storage‌‌:_** remote storage type that is attached to one Instance of the cluster and only one `pod`.\n\nEach Cloud Provider implements its own CSI plugin to create Storage resources on the user account. Additional storage resources created by Cloud providers CSI are usually visible in their Cloud Console with the instances information.\n\n### 🔥 Adding a block storage\n\nTo understand the behavior of CSI plugins, we are going to create `pods` with default `persistent volume claims` for Scaleway and Hetzner `nodes`.\n\n**🔥 Creating a Scaleway Block Storage**\n\nOur first step will be to define a `pod` called `csi-app-scw` and attach a `persistent volume claim` to it. 
This should result in the creation of a `pod` and a `block storage` attached to it.\n\n🔥\n\n```yaml\n#pvc_on_scaleway.yaml\n---\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: pvc-scw\nspec:\n accessModes: \n - ReadWriteOnce\n resources:\n requests:\n storage: 10Gi\n---\napiVersion: v1\nkind: Pod\nmetadata:\n name: csi-app-scw\nspec:\n nodeSelector: \n provider: scaleway\n containers: \n - name: busy-pvc-scw\n image: busybox\n volumeMounts:\n - mountPath: \"/data\"\n name: csi-volume-scw\n command: [\"sleep\",\"3600\"]\n volumes:\n - name: csi-volume-scw\n persistentVolumeClaim:\n claimName: pvc-scw\n```\n\n🔥 We can apply our configuration and see what happens.\n\n🔥 `kubectl apply -f pvc_on_scaleway.yaml`\n\nOutput \n`persistentvolumeclaim/pvc-scw created` \n`pod/csi-app-scw created`\n\nBy listing the pod, we see that it is running and in a healthy state.\n\n🔥 `kubectl get pod csi-app-scw`\n\nOutput \n```\nNAME READY STATUS RESTARTS AGE\ncsi-app-scw 1/1 Running 0 37s\n```\n\nDefault CSI storage class scw-bssd was found and created the persistent volume.\n\n🔥 Now if we list the `persistent volumes` and `persistent volume claims` of our cluster, we can see the status of our `block storage`.\n\n🔥 `kubectl get pv -o custom-columns=NAME:.metadata.name,CAPACITY:.spec.capacity.storage,CLAIM:.spec.claimRef.name,STORAGECLASS:.spec.storageClassName`\n\nOutput\n```\nNAME CAPACITY CLAIM STORAGECLASS\npvc-8793d69a-3fbd-4ccf-8e88-ef36 10Gi pvc-scw scw-bssd\n```\n\nWe can see that the default CSI Storage Class `scw-bssd` was found and created the persistent volume.\n\nOur block storage was even created in our Scaleway account on our managed Scaleway Instance located in Amsterdam.\n\n![](https://www-uploads.scaleway.com/blog-Screen-Shot-2021-09-06-at-11-56-37.webp)\n\n**🔥 Creating a Hetzner Block Storage**\n\n**🔥** In the same way as before, we are going to define a similar `pod` called `csi-app-hetzner` and attach a `persistent volume claim` to it. 
We should expect the same result as before with the Scaleway Instance.\n\n🔥\n\n```yaml\n#pvc_on_hetzner.yaml\n---\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: pvc-hetzner\nspec:\n accessModes: \n - ReadWriteOnce\n resources:\n requests:\n storage: 10Gi\n---\napiVersion: v1\nkind: Pod\nmetadata:\n name: csi-app-hetzner\nspec:\n nodeSelector: \n provider: hetzner\n containers: \n - name: busy-pvc-hetzner\n image: busybox\n volumeMounts:\n - mountPath: \"/data\"\n name: csi-volume-hetzner\n command: [\"sleep\",\"3600\"]\n volumes:\n - name: csi-volume-hetzner\n persistentVolumeClaim:\n claimName: pvc-hetzner\n\n```\n\n**🔥** Let's apply our configuration for Hetzner.\n\n🔥 `kubectl apply -f pvc_on_hetzner.yaml`\n\nOutput \n`persistentvolumeclaim/pvc-hetzner created` \n`pod/csi-app-hetzner created`\n\n🔥 And list our `csi-app-hetzner` pod.\n\n🔥 `kubectl get pod csi-app-hetzner`\n\nOutput\n```\nNAME READY STATUS RESTARTS AGE\ncsi-app-hetzner 0/1 Pending 0 76s\n ```\n\nThis time, we find ourselves with a `pod` stuck in `pending` state.\n\nTo understand what happened, we are going to have a closer look at our persistent volumes by listing their `name` and `storage class`.\n\n🔥 `kubectl get pv -o custom-columns=NAME:.metadata.name,CAPACITY:.spec.capacity.storage,CLAIM:.spec.claimRef.name,STORAGECLASS:.spec.storageClassName`\n\nOutput\n```\nNAME CAPACITY CLAIM STORAGECLASS\npvc-6f6d1aea-d8cd-43af-9c54-460a 10Gi pvc-hetzner scw-bssd\npvc-8793d69a-3fbd-4ccf-8e88-ef36 10Gi pvc-scw scw-bssd\n```\n\nThe default `storage class` of our Scaleway Managed Kubernetes engine being `scw-bssd` and the `csi-app-hetzner pod` being scheduled on the Hetzner Instance, the `persistent volume` could not be attached to the `pod`.\n\nIn fact, block storage can only be attached to Instances from the same Cloud provider.\n\n🔥 To see the `storage classes` available on a Kubernetes cluster, we can execute the following command:\n\n🔥 `kubectl get StorageClass -o 
custom-columns=NAME:.metadata.name`\n\nOutput\n```\nNAME\nscw-bssd\nscw-bssd-retain\n```\n\nWe see here that only Scaleway CSI is available, so the pod requiring to be on Hetzner Instance cannot schedule due to its incapacity to connect to a Scaleway Block Storage.\n\nThen, let's try to focus on Scaleway Instances and create Block Storage on our two Instances, one managed, and the other unmanaged, and we will get back to Hetzner later.\n\n### 🔥 StatefulSet on Scaleway Instances\n\nIn order to play with a lot of `pods` and `persistent volumes`, we are going to use Kubernetes `statefulset` object.\n\n🔥 We are going to write a `statefulset_on_scaleway.yaml` file to configure a `statefulset` with:\n\n- the name `statefulset-csi-scw`\n- ten `replicas`\n- a `node selector` for `pods` to run on nodes with `label provider=scaleway` only\n- a `volume claim template` to create block storages for each `pod`, using the `scw-bssd storage class`.\n\n🔥\n\n```yaml\n#statefulset_on_scaleway.yaml\n---\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: statefulset-csi-scw\nspec:\n serviceName: statefulset-csi-scw\n replicas: 10\n selector: \n matchLabels:\n app: statefulset-csi-scw\n provider: scaleway\n template:\n metadata:\n labels:\n app: statefulset-csi-scw\n provider: scaleway\n spec:\n containers: \n - name: busy-pvc-scw\n image: busybox\n volumeMounts:\n - mountPath: \"/data\"\n name: csi-vol-scw\n command: [\"sleep\",\"3600\"]\n volumeClaimTemplates:\n - metadata:\n name: csi-vol-scw\n spec:\n accessModes: [ \"ReadWriteOnce\" ]\n storageClassName: scw-bssd\n resources:\n requests:\n storage: 1Gi\n\n```\n\n🔥 Let's apply our file.\n\n🔥 `kubectl apply -f statefulset_on_scaleway.yaml`\n\nOutput \n`statefulset.apps/statefulset-csi-scw created`\n\n🔥 Each `pod` of our `statefulset` is being created progressively.\n\n🔥 `kubectl get statefulset -w`\n\nOutput\n```\nNAME READY AGE\nstatefulset-csi-scw 0/10 16s\nstatefulset-csi-scw 1/10 32s\nstatefulset-csi-scw 2/10 
62s\nstatefulset-csi-scw 3/10 90s\nstatefulset-csi-scw 4/10 2m1s\n[...]\n```\n\n🔥 Once all `pods` available, we can list them with the associated `node` they have been scheduled on.\n\n🔥 `kubectl get pods -o wide | awk '{print $1\"\\t\"$7}' | grep statefulset-csi-scw`\n\nOutput\n```bash\nstatefulset-csi-scw-0\tscw-kosmos-kosmos-scw-0937\nstatefulset-csi-scw-1\tscw-kosmos-kosmos-scw-0937\nstatefulset-csi-scw-2\tscw-kosmos-kosmos-scw-0937\nstatefulset-csi-scw-3\tscw-kosmos-kosmos-scw-0937\nstatefulset-csi-scw-4\tscw-kosmos-kosmos-scw-0937\nstatefulset-csi-scw-5\tscw-kosmos-kosmos-scw-0937\nstatefulset-csi-scw-6\tscw-kosmos-kosmos-scw-0937\nstatefulset-csi-scw-7\tscw-kosmos-kosmos-scw-0937\nstatefulset-csi-scw-8\tscw-kosmos-kosmos-scw-0937\nstatefulset-csi-scw-9\tscw-kosmos-kosmos-scw-0937\n ```\n\nUnfortunately, we can see that all `pods` are running on the same `node`, which is the `node` managed by Scaleway.\n\nThe reason for that is that Scaleway CSI only runs on managed Instances such as the one we created in Amsterdam, using a node selector on a specific `label` set by the Scaleway Kubernetes Control-Plane.\n\nSince our instance in Warsaw is not managed (i.e. 
it has been added manually), no `pod` could be scheduled on it because it did not have access to Scaleway CSI.\n\nIn fact, if we look at Scaleway Console and the list of Block Storages that have been created, we can see that they are all attached to the same Instance in the same Availability Zone.\n\n![](https://www-uploads.scaleway.com/blog-Screen-Shot-2021-09-06-at-12-11-46.webp)\n\nBlock Storage listing view in Scaleway Console\n\n\nIn order to fix this behevior, we will start by deleting our `statefulset` and `persistent volumes` .\n\n🔥 Delete our StatefulSet and our Persistent volumes:\n\n🔥 `kubectl delete statefulset statefulset-csi-scw`\n\n\u003e Output \n\u003e `statefulset.apps \"statefulset-csi-scw\" deleted`\n\nDelete all resources that should still be running on our cluster (`pods` and `persistent volume claims`)\n\n🔥 `kubectl delete pod $(kubectl get pod | awk '{print $1}')` \n🔥 `kubectl delete pvc $(kubectl get pvc | awk '{print $1}')`\n\nDeleting `Persistent Volume Claims` (PVC) will also delete `Persistent Volumes` (PV) objects\n\n### 🔥 Adding Scaleway CSI on our unmanaged node\n\nTo understand what is happening with our Scaleway CSI, let's have a look at the components running in the `kube-system namespace` of our cluster, and specifically have a look a the line concerning the `csi-node deamonset`.\n\n🔥 `kubectl -n kube-system get all`\n\nOutput (the part we are going to modify)\n ```\nNAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE\ndaemonset.apps/csi-node 1 1 1 1 1 k8s.scaleway.com/managed=true 150m\n```\n\nScaleway's CSI have a `node selector` on the label `k8s.scaleway.com/managed=true`, which is set to `false` on our unmanaged Scaleway server. Instead of changing our `label` to `true` and risking unwanted behavior (maybe this label is used for another Kubernetes component), we are going to change the `node selector` so it uses the `provider label` instead.\n\n🔥 To do so, we can edit the `deamonset` directly using `kubectl`. 
It will open your default `unix text editor`.\n\n🔥 `kubectl -n kube-system edit daemonset.apps/csi-node`\n\nWe need here to replace the occurence of the `nodeSelector` `k8s.scaleway.com/managed=true` by `provider=scaleway`. \nThere is only one occurrence to replace. \n🔥 Save the file and exit the edition mode.\n\n```\nnodeSelector:\n provider: scaleway\n```\n\nOutput \n`daemonset.apps/csi-node edited`\n\n🔥 Once edited, we can list the components of `kube-system namespace` again to check that the change was applied.\n\n🔥 `kubectl -n kube-system get all`\n\nOutput (the part we are going to modify)\n```\nNAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE\n\u003e daemonset.apps/csi-node 2 2 2 1 2 provider=scaleway 158m\n```\n\nWe can see that the `node selector` has been changed, and that instead of having only one occurence of it, we now have two `deamonset` available in our cluster, one for each of our Scaleway `node`.\n\n🔥 Now that our Scaleway CSI is applied correctly, let's reapply the exact `statefulset` and see what happens.\n\n🔥 `kubectl apply -f statefulset_on_scaleway.yaml`\n\nOutput \n`statefulset.apps/statefulset-csi-scw created`\n\n🔥 `kubectl get pods -o wide | awk '{print $1\"\\t\"$7}' | grep statefulset-csi-scw`\n\nOutput\n```\nstatefulset-csi-scw-0\tscw-kosmos-kosmos-scw-0937\nstatefulset-csi-scw-1\tscw-kosmos-worldwide-5ecd\nstatefulset-csi-scw-2\tscw-kosmos-kosmos-scw-0937\nstatefulset-csi-scw-3\tscw-kosmos-worldwide-5ecd\nstatefulset-csi-scw-4\tscw-kosmos-kosmos-scw-0937\nstatefulset-csi-scw-5\tscw-kosmos-worldwide-5ecd\nstatefulset-csi-scw-6\tscw-kosmos-kosmos-scw-0937\nstatefulset-csi-scw-7\tscw-kosmos-worldwide-5ecd\nstatefulset-csi-scw-8\tscw-kosmos-kosmos-scw-0937\nstatefulset-csi-scw-9\tscw-kosmos-worldwide-5ecd\n```\n\nOur two Scaleway nodes are now able to attach to Scaleway Block Storage, and we can see that our `statefulset` is now spread across our two Scaleway Instances, the managed one in **Amsterdam** and the unmanaged 
one in **Warsaw**.\n\nIf we look at Scaleway Console where the volumes are listed, we can see our Block Storages in **Warsaw 1** and **Amsterdam 1** Availability Zone.\n\n![](https://www-uploads.scaleway.com/blog-Screen-Shot-2021-09-06-at-12-31-13-1.webp)\n\n### Adding Hetzner CSI\n\nNow that we managed to reconfigure Scaleway CSI, let's come back to Hetzner use case.\n\nFirst, as described in the requirements of the first part of our Hands-On, we need a security token to be able to create resources such as Block Storages on our Hetzner account.\n\nOnce this token is generated, we need to store it in our Kubernetes cluster using a `secret` Kubernetes object.\n\n🔥 Let's create the secret and apply it to our cluster.\n\n🔥\n\n```yaml\n#secret-hetzner.yaml\n---\napiVersion:v1\nkind: Secret\nmetadata:\n name: hcloud-csi\n namespace: kube-system\nstringData:\n token: \u003cMY-HETZNER-TOKEN\u003e\n\n```\n\n🔥 `kubectl apply -f secret-hetzner.yaml`\n\nOutput \n`secret/hcloud-csi created`\n\n🔥 Since CSI plugin are open-source ([Hetzner CSI included](https://github.com/hetznercloud/csi-driver)), we can download the CSI plugin we need with the following command. Though, we will need to adapt it for our specific situation:\n\n🔥 `wget https://raw.githubusercontent.com/hetznercloud/csi-driver/master/deploy/kubernetes/hcloud-csi.yml .`\n\nOutput \n`Downloaded: 1 files, 9,4K in 0,002s (4,65 MB/s)`\n\nWe now have locally the `hcloud-csi.yml` file, containing Hetzner CSI plugin, and we need to add the right `node selector on the label provider=hetzner` so that Hetzner CSI is only installed on Hetzner Instances.\n\n🔥 Edit the `hcloud-csi.yml` downloaded previously to add a `nodeSelector` contraint. 
It needs to be applied on two places in the `hcloud-csi.yml`.\n\n🔥 First we need to find the StatefulSet called `hcloud-csi-controller` and add the `nodeSelector` as follows:\n\n```\n---\nkind: StatefulSet \napiVersion: apps/v1\nmetadata:\n name: hcloud-csi-controller\n [...]\n spec:\n serviceAccount: hcloud-csi\n nodeSelector: \n provider: hetzner\n containers:\n[...]\n\n```\n\n🔥 Then in the DeamonSet `hcloud-csi-node`, we need to perform the same addition of the `nodeSelector`, as follows:\n\n```\n[...]\n-—-\nkind: DeamonSet \napiVersion: apps/v1\nmetadata:\n name: hcloud-csi-node\n namespace: kube-system\nspec:\n selector: \n matchLabels:\n app: hcloud-csi\n template:\n metadata: \n labels: \n app: hcloud-csi\n spec:\n tolerations:\n [...]\n serviceAccount: hcloud-csi\n nodeSelector: \n provider: hetzner\n containers:\n[...]\n---\n```\n\n🔥 We can now save the file and apply the configuration. This will deploy Hetzner CSI with the `nodeSelector` on `provider=hetzner`.\n\n🔥 `kubectl apply -f hcloud-csi.yml`\n\nOutput\n```\ncsidriver.storage.k8s.io/csi.hetzner.cloud created\nstorageclass.storage.k8s.io/hcloud-volumes created\nserviceaccount/hcloud-csi created\nclusterrole.rbac.authorization.k8s.io/hcloud-csi created\nclusterrolebinding.rbac.authorization.k8s.io/hcloud-csi created\nstatefulset.apps/hcloud-csi-controller created\ndaemonset.apps/hcloud-csi-node created\nservice/hcloud-csi-controller-metrics created\nservice/hcloud-csi-node-metrics created\n```\n\n🔥 Let's see what happened once we applied Hetzner CSI on our `kube-system namespace`.\n\n🔥 `kubectl get all -n kube-system`\n\nOutput\n```\nNAME READY STATUS RESTARTS AGE\n[...]\npod/csi-node-jnr2h 2/2 Running 0 166m\npod/csi-node-sxct2 2/2 Running 0 14m\npod/hcloud-csi-controller-0 5/5 Running 0 42s\npod/hcloud-csi-node-ls6ls 3/3 Running 0 42s\n[...]\n \nNAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE\ndaemonset.apps/csi-node 2 2 2 1 2 provider=scaleway 168m\ndaemonset.apps/hcloud-csi-node 
1 1 1 1 1 provider=hetzner 42s\n[...]\nNAME READY AGE\nstatefulset.apps/hcloud-csi-controller 1/1 42s\n```\n\nWe can now see our two different CSI and their associated `node selectors`.\n\n🔥 Now, we should be able to create a pod on an Hetzner node and attach a `persistent volume` to it, while specifying the `storage class name`(`hcloud-volumes`).\n\n🔥\n\n```yaml\n#hpvc.yaml\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: hcsi-pvc\nspec:\n accessModes: \n - ReadWriteOnce\n resources:\n requests:\n storage: 10Gi\n storageClassName: hcloud-volumes\n---\napiVersion: v1\nkind: Pod\nmetadata:\n name: hcsi-app\nspec:\n nodeSelector: \n provider: hetzner\n containers: \n - name: busy-hetzner\n image: busybox\n volumeMounts:\n - mountPath: \"data\"\n name: hcsi-volume\n command: [\"sleep\",\"3600\"]\n volumes:\n - name: hcsi-volume\n persistentVolumeClaim:\n claimName: hcsi-pvc\n\n```\n\n🔥 Let's apply the configuration and check if our `pod` is running and our `persistent volume` created with the corresponding `storage class`.\n\n🔥 `kubectl apply -f hpvc.yaml`\n\nOutput \n`persistentvolumeclaim/hcsi-pvc created` \n`pod/hcsi-app created`\n\n🔥 `kubectl get pods | grep hcsi`\n\nOutput \n`hcsi-app 1/1 Running 0 55s`\n\nOur pod is `running`, which means it managed to create and attach a `persistent volume`.\n\nWe can check by listing the `persistent volume` and the `storage class` they are based on.\n\n🔥 `kubectl get pv -o custom-columns=NAME:.metadata.name,CAPACITY:.spec.capacity.storage,CLAIM:.spec.claimRef.name,STORAGECLASS:.spe c.storageClassName`\n\nOutput\n```\nNAME CAPACITY CLAIM STORAGECLASS\npvc-262442c4-6275-4a38-9be8-99c8dc5b5ff6 1Gi csi-vol-scw-statefulset-csi-scw-9 scw-bssd\npvc-2ea1d637-9b81-4e07-a342-9574082975d0 1Gi csi-vol-scw-statefulset-csi-scw-2 scw-bssd\npvc-381ddf6b-2baa-42a6-a474-2024fb891b91 1Gi csi-vol-scw-statefulset-csi-scw-1 scw-bssd\npvc-485b33d7-4740-46f9-b479-71db5eaf48dc 1Gi csi-vol-scw-statefulset-csi-scw-0 
scw-bssd\npvc-4aa708c4-162e-40cc-9b28-bd97dfbf2f3a 1Gi csi-vol-scw-statefulset-csi-scw-4 scw-bssd\npvc-50868e23-0dd4-48e1-86b8-3d9402393371 10Gi hcsi-pvc hcloud-volumes\npvc-81d8b6ef-a83e-4ca1-95b6-65c3a350a126 1Gi csi-vol-scw-statefulset-csi-scw-7 scw-bssd\npvc-e06affe0-d5e1-4ed2-b1ca-1f34a072772f 1Gi csi-vol-scw-statefulset-csi-scw-5 scw-bssd\npvc-e6a7791c-71c0-4b73-ac7c-e83c28efd259 1Gi csi-vol-scw-statefulset-csi-scw-3 scw-bssd\npvc-f4169ea9-9c22-46c9-b248-870d1859f813 1Gi csi-vol-scw-statefulset-csi-scw-6 scw-bssd\npvc-ffb10c3b-beba-4528-9d1a-6f09bb5926c9 1Gi csi-vol-scw-statefulset-csi-scw-8 scw-bssd\n ```\n\nOur `persistent volume` with `hcsi-pvc` claim is created, and if we check on our Hetzner Console, our `block storage` exists in **Helsinki**, linked to our **Helsinki** Hetzner Instance.\n\n![](https://www-uploads.scaleway.com/blog-Screen-Shot-2021-09-06-at-12-42-54.webp)\n\nVolumes listing view in Hetzner Cloud Console\n\n---\n\n## Data resilience across providers\n\nAs we saw, a `pod` in need of a remote storage solution needs a resource in the same Availability Zone as it is scheduled, using the right Container Storage Interface.\n\nAs it is true for Scaleway or Hetzner as we demonstrated it, it is also true for all Cloud providers.\n\nNonetheless, the next step would be to manage shared remote storage across providers. _We voluntarily excluded the usage of remote DataBases as a Service that is not managed within Kubernetes and is Cloud agnostic._\n\nThis solution is possible using a provider-independent Container Storage Interface allowing pods to schedule with associated volumes independently from the Cloud Provider they run on.\n\nThe future of data resilience in the Multi-Cloud environment will be led by Cloud-agnostic-based technologies and custom CSI implementation from the Kubernetes community. 
","createdAt":"2023-01-19T13:54:07.591Z","updatedAt":"2023-02-23T13:29:10.905Z","publishedAt":"2023-01-19T14:01:59.528Z","locale":"en","tags":"Best practices\nMulti-cloud\nKubernetes\nQuickstart","popular":false,"articleOfTheMonth":false,"category":"Scale","timeToRead":22,"excerpt":"When working with Kubernetes, a specific Kubernetes component manages the creation, configuration, and lifecycle of Load Balancers within a cluster.","author":"Emmanuelle Demompion","h1":"Best practices on service exposure and data persistence for a Multi-Cloud Kubernetes cluster","createdOn":"2022-03-18"}},{"id":186,"attributes":{"title":"multi-cloud-vs-hybrid-cloud-understanding-the-difference","path":"multi-cloud-vs-hybrid-cloud-understanding-the-difference/","description":"The rise of cloud computing brought about a multitude of cloud technologies, which in turn caused some confusion on core cloud paradigms. With the public cloud, hybrid cloud, multi cloud, etc., it became hard to navigate and understand the idea behind every concept. As [the multi cloud](https://www.scaleway.com/en/blog/10-cloud-trends-to-watch-in-2022/) is starting to topple the discussion as a core trend for the upcoming years, it is in fact nothing new: even without using the term “multi cloud”, 92% of [companies](https://info.flexera.com/CM-REPORT-State-of-the-Cloud) are already using a multi-cloud approach. So what is it exactly? \n\n## Public cloud vs private cloud\n\nTo understand the difference between the multi cloud and the hybrid cloud, you first need to understand the difference between the private cloud and the public cloud. \n\nA **public cloud** is a set of services and resources offering a complete virtual infrastructure that is available on-demand and billed based on actual usage.\n\nYou can run anything from basic instances to architectures within a complete, highly scalable, computing environment. 
\n\n**A private cloud** is used to deploy services and resources within your own data center or you own cloud on your provider’s bare metal. You own the environment, which is used by a relatively small number of employees and customers and provides limited services and scalability. In theory, private clouds offer the convenience of the public cloud while enabling the business to maintain direct control over the infrastructure. \n \n## What is a hybrid cloud?\n\nA hybrid cloud is simply a connection between a private and a public cloud. A business can use a hybrid cloud model to support workload migrations between public and private cloud environments, with a workload that could also run redundancy in both environments, and components running in each environment can communicate. \nA hybrid cloud offers more flexibility and control than a public or private cloud could do on its own. For example, cloud bursting allows infrastructure to move resources from the private to the public cloud when the demand is exciding the private cloud's capacities in order to support the overload. In practice, this is quite challenging technically to set up. \nA hybrid cloud is a subsection of multi cloud, as it runs different clouds architectures. \n\n![](https://www-uploads.scaleway.com/blog-Hybrid_cloud_schema.webp)\n \n## What is a multi cloud?\n\nA multi cloud environment incorporates services from at least two cloud providers and lets you choose the resources that best fit your needs between several providers.\n\n\n### Hyper personalized infrastructure\n\nTeams and products from the same company may have different IT needs. \nWhere a cloud provider might be recognized for its AI services which empower your team of data scientists, another cloud provider might provide greater diversity in terms of infrastructure. Organizations can use any public cloud services to support their own specific goals and customize their infrastructure to best fit their needs. 
\n\n### Avoid vendor lock-in\n\nIn theory, an organization builds its cloud environment to enable straightforward workload portability, should it ever decide to switch providers. But in practice, this does isn't enough to justify a formal multi-cloud strategy because public clouds are not interchangeable.\n\nDifferences in resources, services, APIs, and other components make it challenging to migrate complex applications without some amount of architecting.\n\nFinally, a multi-cloud environment typically involves two or more public cloud providers. \n\n![](https://www-uploads.scaleway.com/multicloud_schema_c9a58ae32d.webp)\n## The difference between hybrid cloud and multi cloud models kept simple\n\nHybrid and multi-clouds do overlap on certain aspects - in both cases, it involves using multiple clouds, but there is a subtle difference. \nThink of hybridization as going to your favorite restaurant for dinner, then going to your favorite bakery for dessert. The multi-cloud approach would be more like getting all of your favorite food from your favorite restaurants, then setting up one table with everything on it. \n\n## Why do many organizations use the hybrid and multi-cloud approach?\n\nMost companies already run multi cloud infrastructures today… without a frontal multi cloud strategy to enable its benefits. However, power is in the hands of the consumer: you. You can choose to weaken vendor lockin and get the most out of each cloud provider. Hyper personalized infrastructure based on a multi cloud approach enables a more rational billing, an empowered engineer's teams, and keeping with national laws and regulations regarding data simpler. 
","createdAt":"2023-01-18T16:37:14.563Z","updatedAt":"2023-02-02T16:20:21.963Z","publishedAt":"2023-01-18T16:39:29.820Z","locale":"en","tags":"Multi-cloud\nIntroduction\nCloud","popular":false,"articleOfTheMonth":false,"category":"Deploy","timeToRead":3,"excerpt":"Multi cloud is starting to topple the discussion, but it is nothing new: even without using the term “multi cloud”, 92% of companies are already using a multi-cloud approach. So what is it exactly?","author":"Hana Khelifa","h1":"Multi cloud vs. hybrid cloud: understanding the difference","createdOn":"2022-07-18"}},{"id":177,"attributes":{"title":"k8s-multi-cloud-best-practices","path":"k8s-multi-cloud-best-practices/","description":"Scaleway Kubernetes Kosmos is the first Multi-Cloud Kubernetes engine on the market. It offers the possibilities for multiple Cloud providers to coexist within the same Kubernetes cluster...\n\nUsing Kubernetes in a Multi-Cloud environment can be challenging and requires the implementation of best practices.\n\n## Labels\n\nLabelling resources significantly helps you manage your configuration as you can use [selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) and [affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). 
As a result, when working in a Multi-Cloud Kubernetes cluster, this step is strongly encouraged or even mandatory.\n\nIt is highly recommended to at least label nodes with information regarding their specificities, such as the provider managing them.\n\nFor example, our cluster nodes can be set with a `provider` label, such as `provider=scaleway`.\n\n## Isolating workload across providers\n\nSome specific workload might require running on specific hardware (such as GPUs) but it can also be preferable for applications to run on a dedicated Cloud Provider as well, may it be for ownership, legal, or technical reasons.\n\nIn that specific case, Kubernetes [taints and tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) will allow you to add very specific rules to nodes and applications.\n\n## One replica per provider\n\nIf an application needs to run on each cloud provider’s network, [anti-affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) can be set as follows.\n\n![antiaffinity.yml](https://www-uploads.scaleway.com/blog-image-2.webp)\n\nThis way, each cloud provider used within the cluster will have an instance running one replica of the deployment.\n\n## Distribute our workload across providers\n\nWhen a workload needs to be spread across multiple Cloud providers to ensure a very high availability of services, the usage of [pod topology spread](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) is relevant.\n\n![topologyspread.yml.webp](https://www-uploads.scaleway.com/image_3_14753e7915.webp)\n\nThis `yaml` file describes a balanced distribution of `busybox-everywhere` pods with labels `app=aroundtheworld` across nodes depending on their `provider` label value.\n\n## Using Scaleway pools to bufferize workload\n\nIt is not simple to benefit from Kubernetes node auto-scaling feature when running a Multi-Cloud Kubernetes cluster. 
Nonetheless, when using Scaleway Kubernetes Kosmos, this feature is available within Scaleway node pools.\n\nWith a minimum number of zero nodes, Scaleway node pools are an ideal solution to buffer any unexpected workload at a minimum cost.\n\nIt is highly recommended to use such pools with the node auto-scaling feature activated, to ensure the highest availability of any production system and prevent any potential issue across all Cloud providers.\n\n![Node auto-scaling feature in Scaleway Console](https://lh3.googleusercontent.com/8d3Z7pLf799Ye1Lcih8KJbGMNnPozmC0mBLKWSarnY4xDXx8d3Q79J8p72Svq6gDHuKqGKTJ4bru-zo66i2UHty1SuCc4rHUWWP2E5x-GER4-LFxI7XYXoaYZAW3sDrLeGP2gMcT)\n\n\n## Services exposure\n\nExposing HTTP services within a Multi-Cloud cluster is no different than in a regular cluster. In a managed Kubernetes cluster such as Kubernetes Kosmos, the [Cloud Controller Manager](https://kubernetes.io/docs/concepts/architecture/cloud-controller/) will create a Scaleway Multi-Cloud Load Balancer.\n\n## Storage - Deploying CSI\n\nCloud providers offer storage solutions that can only be attached to their own infrastructure services. In the case of Multi-Cloud environments, it is a real constraint that needs to be taken into account when designing a production architecture as well as a software. 
While using an external database solution is recommended and compatible with any Multi-Cloud solution, Kubernetes users sometimes require persistent storage within their Kubernetes clusters.\n\n[Persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) are managed by the [Container Storage Interface](https://kubernetes-csi.github.io/docs/) (CSI) component, and fortunately, almost all Cloud providers made their CSI open source, allowing customers to deploy them within their clusters.\n\nThe best practice remains to use the labels set on different nodes to target the instances of each provider and provide them with the corresponding CSI.\n\n### Points of concern when going Multi-Cloud with Kubernetes\n\nAs Multi-Cloud flexibility comes along with complexity, there are a few topics and behaviors to keep in mind when implementing a Cross-Cloud Kubernetes cluster.\n\nManaged Kubernetes engines implement many components that have different behaviors depending on the Cloud provider providing them. Each of which can have an impact on the behavior of software and applications running in a Kubernetes cluster if their principles are not understood.\n\n## Cloud Controller Manager\n\nThe [Cloud Controller Manager](https://kubernetes.io/docs/concepts/architecture/cloud-controller/) (CCM) is a component of Kubernetes control-plane implementing a cloud-specific logic.\n\nWhen using a managed Kubernetes engine from a Cloud provider, the creation of a Kubernetes Load Balancer service will more likely lead to the creation of a Load Balancer on the customer’s Cloud provider account. This logic is in fact implemented by the CCM, and every Cloud provider will configure it to connect to its own services APIs.\n\nWhat happens then in a Multi-Cloud Kubernetes cluster?\n\nThe Cloud Controller Manager being a component of the control-plane, its behavior is the same regardless of the host of each cluster node. 
It is then the responsibility of the CCM maintainer to implement the expected behavior.\n\nWhen using a Scaleway Kubernetes Kosmos cluster, the CCM implements the creation of a Scaleway Multi-Cloud Load Balancer and allows the exposure of HTTP services from a Multi-Cloud cluster.\n\n## Container Storage Interface\n\nThe [Container Storage Interface](https://github.com/scaleway/scaleway-csi) (CSI) component manages the interface between a Kubernetes node and storage solutions such as block storages or Network FileSystems (NFS).\n\nJust like for the CCM, the CSI is an interface between Kubernetes and a Cloud provider’s APIs. However, instead of having a global behavior over the whole cluster, a customer can install as many CSI as he wants.\n\nStorage management within a Multi-Cloud Kubernetes cluster then implies multiple CSI management; at least one per Cloud provider used in the cluster, if persistent storage is needed.\n\nIt also implies that persistent volumes hosted on provider A cannot be mounted on a node from provider B, thus reducing the possibility of redundancy, data access, and data recovery.\n\nFortunately for Kubernetes users, _almost_ all Cloud providers have made their CSI open source, making it easy to install the needed CSI on our Kubernetes clusters using Kubernetes selectors.\n\n[_The list of open source Kubernetes CSI can be found here._](https://kubernetes-csi.github.io/docs/drivers.html)\n\n## Autoscaler\n\nThe node auto-scaling feature offered by Kubernetes allows the automatic addition or removal of nodes from your Kubernetes cluster depending on its workload.\n\nWhen working in a Multi-Cloud Kubernetes cluster, two options can be considered. The first one is to completely deactivate the node auto-scaling feature, as managing a Cross-Cloud node auto-scaling strategy can quickly become too complex. It also implies managing all providers’ accounts and credentials. 
The second option would be to authorize auto-scaling on nodes from only one Cloud provider.\n\nWith Scaleway Kubernetes Kosmos the choice was made on the latter option.\n\nIn fact, a Kubernetes Kosmos cluster can contain multiple Scaleway node pools in multiple regions, each of them implementing the node auto-scaling feature.\n\n## Node auto-healing\n\nThe node auto-healing feature (managed by yet another Kubernetes component) has the same constraints as the node auto-scaling feature. It requires the permissions to perform sensible actions on a Cloud provider user account. For this reason, managing auto-healing in a Multi-Cloud cluster is complicated and implies the same choices as before.\n\nFor the sake of simplicity and consistency, Scaleway Kubernetes Kosmos only manages node auto-healing for Scaleway Instances, just as it would for any standard Kubernetes Kapsule cluster.\n\nWhile running a Multi-Cloud Kubernetes cluster, and by extension in a Kubernetes Kosmos cluster, it is advised to have a fallback strategy in case of the loss of another Cloud provider infrastructure. As such, we recommend keeping at least one Scaleway node pool with the auto-healing and auto-scaling features enabled to absorb any eventual workload in case of the failure of part of the infrastructure.\n\n## Network\n\nA Multi-Cloud architecture comes with complexity, but also constraints, as multiple Instances from different providers communicate with each other within the same network.\n\nFirst of all, it raises the question of low latency versus high availability. When one of the main purposes of having a Cross-Cloud cluster is obviously to have a highly available infrastructure, it implies a latency that can be hard to measure and anticipate. 
This is not necessarily a major concern, but it is an important point to consider.\n\nSecondly, as nodes are communicating with each other through a dedicated network, based on a VPN solution, the usage of a Cloud providers’ private network is out of the question. By design, using a provider’s private network requires that all resources available in this network are hosted and managed by the same provider.\n\n## Conclusion\nImplementing a Multi-Cloud strategy has always been a real challenge for every company as it requires in-depth knowledge of the infrastructure and the underlying technologies to successfully achieve its objective. Keeping simple logic rules and being mindful of the overall behavior of your architecture are essential to running a smooth Multi-Cloud Kubernetes environment while making the most of its many functionalities.","createdAt":"2023-01-18T15:49:25.953Z","updatedAt":"2023-02-08T15:38:56.412Z","publishedAt":"2023-01-18T15:50:37.267Z","locale":"en","tags":"Multi-cloud\nBest practices\nKubernetes\nMulti-Cloud","popular":false,"articleOfTheMonth":false,"category":"Deploy","timeToRead":6,"excerpt":"Using Kubernetes in a Multi-Cloud environment can be challenging and requires the implementation of best practices. 
Learn a few good practices to implement a concrete multi-cloud strategy.","author":"Emmanuelle Demompion","h1":"Multi-Cloud Kubernetes best practices","createdOn":"2022-07-21"}}]},"meta":{"id":691,"title":"How to deploy and distribute the workload on a multi-cloud Kubernetes environment","description":"This article will guide you through the best practices to deploy and distribute the workload on Kubernetes environment","ogtype":null,"ogtitle":null,"ogdescription":null,"noindex":false},"localizations":{"data":[]}}},{"id":159,"attributes":{"title":"understanding-kubernetes-autoscaling","path":"understanding-kubernetes-autoscaling/","description":"Kubernetes provides a series of features to ensure your clusters have the right size to handle any type of load. In this blog post, we will look into the different auto-scaling tools provided by Kubernetes and learn the difference between the horizontal pod autoscaler, the vertical pod autoscaler and Kubernetes Nodes autoscaler.\n\nDevelopers use Kubernetes to ship faster to their users and respond to their requests as quickly as possible. You design the capacity of your cluster on the estimated load your users will generate on it. But imagine your service went viral, and the number of requests grows faster than you ever imagined. You risk running out of compute resources, your service might slow down, and users may get frustrated. \n\nWhen you allocate resources manually, your responses may not be as quick as required by your application's changing needs. This is were Kubernetes Autoscaling comes in: Kubernetes provides multiple layers of autoscaling functionality: Pod-based scaling with the Horizontal Pod Autoscaler and the Vertical Pod Autoscaler, as well as node-based with the Cluster Autoscaler. It automatically scales up your cluster as soon as you need it and scales it back down to its regular size when the load is lower. 
These layers ensure that each pod and cluster has the right performance to serve your current needs.\n\n## Kubernetes Architecture\n\nIn Kubernetes, a set of machines for running containerized applications is called **Cluster**. A cluster contains, at minimum, a **Control Plane** and one or several **Nodes**. The control plane maintains the clusters' desired state, such as which applications run on them and which images they use. The nodes are either virtual or physical machines that run the applications and workloads, called **Pods**. Pods consist of containers that request compute resources such as CPU, Memory, or GPU.\n![Kubernetes Cluster Architecture](https://www-uploads.scaleway.com/kuber_custer_arch_27dd050ed5.webp)\nFor more information to the different Kubernetes components, refer to our dedicated blog post: _[An introduction to Kubernetes](https://www.scaleway.com/en/blog/an-introduction-to-kubernetes/)_\n\n## Horizontal vs. Vertical Scaling\n\n| Horizontal | Vertical | |\n| ---------- | --------------------- | ------------------------------------------------------- |\n| **Pod** | Adds or removes Pods | Modifies CPU and/or RAM resources allocated to the Pod |\n| **Node** | Adds or removes Nodes | Modifies CPU and/or RAM resources allocated to the Node |\n\n* **Horizontal Scaling** means modifying the compute resources of an existing cluster, for example, by adding new nodes to it or by adding new pods by increasing the replica count of pods (Horizontal Pod Autoscaler).\n* **Vertical Scaling** means to modify the attributed resources (like CPU or RAM) of each node in the cluster. In most cases, this means creating an entirely new node pool using machines that have different hardware configurations. 
Vertical scaling on pods means dynamically adjusting the resource requests and limits based on the current application requirements (Vertical Pod Autoscaler).\n\n### Horizontal Pod Autoscaler\n\nThe [Horizontal Pod Autoscaler (HPA)](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) is able to scale the number of pods available in a cluster to handle the current computational workload requirements of an application. It determines the number of pods needed based on metrics set by you and applies the creation or deletion of pods based on threshold sets. In most cases, these metrics are CPU and RAM usage, but it is also possible to specify your custom metrics. The HPA checks continuously the CPU and memory metrics generated by the `metrics-server` installed in the Kubernetes cluster.\n\nIf one of the specified thresholds is met, it updates the number of pod replicas inside the deployment controller. Following the updated number of pod replicas, the deployment controller will scale up or down the number of pods until the number of replicas matches the desired number. In case you want to use custom metrics to define rules on how the HPA handles scaling your pods, your cluster needs to be linked to a time-series database holding the metrics you want to use. Please note that Horizontal Pod Autoscaling can not be applied to objects that can not be scaled like, for example, DaemonSets.\n\n### Vertical Pod Autoscaler\n\nThe [Vertical Pod Autoscaler (VPA)](https://github.com/kubernetes/autoscaler/blob/master/vertical-pod-autoscaler/README.md) can allocate more (or less) CPU and memory resources to existing pods to modify the available compute resources for an application. This feature can be useful to monitor and adjust the allocated resources of each pod over its lifetime. 
The VPA comes with a tool called _VPA Recommender_, which monitors the current and past resource consumption and uses this data to provide recommended CPU and memory resources to be allocated for the containers. The Vertical Pod Autoscaler does not update resource configurations for existing pods. It checks which pods have the correct resource configuration and kills the ones that do not have the recommended configuration so that their controllers can recreate them with the updated configuration.\n\nWhen you want to use the HPA and VPA both at the same time to manage your container resources, you may put them in conflict with each other when using the same metrics (CPU and memory). Both of them will try to solve the situation simultaneously, resulting in a wrong allocation of resources. However, it is possible to use them both if they rely on different metrics. The VPA uses CPU and memory consumption as unique sources to gather the perfect resource allocation, but the HPA can be used with custom metrics so both tools can be used in parallel.\n\n### Kubernetes Nodes Autoscaler\n\nThe Kubernetes Nodes Autoscaler adds or removes nodes in a cluster based on **all pods' requested resources**. It is possible to define a minimum and a maximum number of nodes available to the cluster from the [Scaleway Elements console](https://console.scaleway.com/register).\n\nWhile the Horizontal and Vertical Pod Autoscalers allow you to scale pods, the Kubernetes Node Autoscaler scales your cluster's nodes, based on the number of pending pods. The CA checks to see whether there are any pending pods and increases the cluster's size so that these pods can be created. It also deallocates idle nodes to keep the cluster at the optimal size. 
The Nodes Autoscaler can request to deploy new nodes directly in your pool, within the given resource limits (if any).\n\n**Cluster upscaling** \nIf pods are scheduled for execution, the Kubernetes Autoscaler can increase the number of machines in the cluster to avoid resource shortage. The diagram below illustrates how a cluster can be automatically upscaled:\n\n![Kubernetes Nodes Autoscaler upscaling](https://www-uploads.scaleway.com/blog-image-1-1.webp)\n\nAs illustrated, two pods are scheduled for execution but the current node's compute capacity is reached. The cluster autoscaler automatically scans all nodes for scheduled pods. It requests provision of a new node if three conditions are met:\n\n* Some pods failed to schedule on any of the existing nodes due to insufficient available resources.\n* Adding a node with the same specifications as the current ones help to redistribute the load.\n* The cluster has not reached the user-defined maximum node count.\n\nOnce the node is deployed and detected by the Kubernetes Control Plane, the scheduler allocates the pending pods to the cluster's new node. In case there are still some pending pods, the autoscaler repeats these steps as often as required.\n\n**Cluster downscaling** \nThe Kubernetes Cluster Autoscaler decreases the number of nodes in a cluster when some are considered not necessary for a pre-defined amount of time. To be considered unnecessary, a node must have low utilization, and all of its important pods can be moved elsewhere without resource shortage. The node scaledown check takes into account the resource requests made by the pods, and if the Kubernetes scheduler decides that the pods can be moved somewhere else, it removes the node from the cluster to optimize resource usage and to reduce costs. 
If you have defined a minimum number of active nodes in the cluster, the autoscaler will not reduce the number of nodes below this threshold.\n\n## Configuring Autoscaling\n\nYou can configure **Cluster Autoscaling** directly from your [Scaleway Elements](https://console.scaleway.com/) console.\n\n**During Cluster creation:** \nTo enable Kubernetes Cluster Autoscaling during the creation of a new cluster, head to step 5 in the cluster creation form, toggle the switch, and set the minimum and maximum resources available for your cluster:\n![](https://www-uploads.scaleway.com/blog-image-2-1.webp)\n\n**On an existing Cluster:**\n\n1. From your cluster information page, click on the **Pools** tab and select the pool to modify. Click **Edit** in the pools drop-down menu to configure the pool:\n ![](https://www-uploads.scaleway.com/blog-Screenshot-2020-10-12-at-16-33-05.webp)\n2. Toggle on the **Autoscale the number of nodes** switch and set the desired number of minimum and maximum resources available for the pool:\n![](https://www-uploads.scaleway.com/blog-image-5-1.webp)\n3. 
Confirm the modification of the pool by clicking on **Update pool.**\n\n## Conclusion\n\nYou now understand the basics of Kubernetes Autoscaling features and how you can use them to configure your cluster for maximum performance.\n\nFor more information about the Kubernetes Cluster Autoscaler, please [refer to the official documentation](https://github.com/kubernetes/autoscaler/blob/cad686ebf3f2/cluster-autoscaler/FAQ.md).\n\nYou can also deploy your first [Kubernetes Kapsule Cluster](https://console.scaleway.com/kapsule/clusters) directly from your Scaleway console and try out the Autoscaling feature yourself!\n\n","createdAt":"2023-01-18T14:29:42.642Z","updatedAt":"2023-05-05T14:27:49.169Z","publishedAt":"2023-01-18T14:30:47.672Z","locale":"en","tags":"Kubernetes\nScaling","popular":false,"articleOfTheMonth":false,"category":"Scale","timeToRead":8,"excerpt":"Kubernetes provides a series of features to ensure your clusters have the right size to handle any load. Let's look into the different auto-scaling tools and learn the difference between them.","author":"Benedikt Rollik","h1":"Understanding Kubernetes 
Autoscaling","createdOn":"2022-08-01","image":{"data":{"id":1652,"attributes":{"name":"DigitalServices-Kapsule-Illustration-Blog.webp","alternativeText":null,"caption":null,"width":1216,"height":752,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Digital_Services_Kapsule_Illustration_Blog_8f87b431c2.webp","hash":"large_Digital_Services_Kapsule_Illustration_Blog_8f87b431c2","mime":"image/webp","name":"large_DigitalServices-Kapsule-Illustration-Blog.webp","path":null,"size":"442.31","width":1000,"height":618},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Digital_Services_Kapsule_Illustration_Blog_8f87b431c2.webp","hash":"small_Digital_Services_Kapsule_Illustration_Blog_8f87b431c2","mime":"image/webp","name":"small_DigitalServices-Kapsule-Illustration-Blog.webp","path":null,"size":"155.70","width":500,"height":309},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Digital_Services_Kapsule_Illustration_Blog_8f87b431c2.webp","hash":"medium_Digital_Services_Kapsule_Illustration_Blog_8f87b431c2","mime":"image/webp","name":"medium_DigitalServices-Kapsule-Illustration-Blog.webp","path":null,"size":"295.49","width":750,"height":464},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Digital_Services_Kapsule_Illustration_Blog_8f87b431c2.webp","hash":"thumbnail_Digital_Services_Kapsule_Illustration_Blog_8f87b431c2","mime":"image/webp","name":"thumbnail_DigitalServices-Kapsule-Illustration-Blog.webp","path":null,"size":"50.31","width":245,"height":152}},"hash":"Digital_Services_Kapsule_Illustration_Blog_8f87b431c2","ext":".webp","mime":"image/webp","size":578.34,"url":"https://www-uploads.scaleway.com/Digital_Services_Kapsule_Illustration_Blog_8f87b431c2.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2023-01-18T14:29:37.697Z","updatedAt":"2023-01-18T14:29:37.697Z"}}},"recommendedArticles":{"data":[{"id":
206,"attributes":{"title":"understanding-the-different-types-of-storage","path":"understanding-the-different-types-of-storage/","description":"Scaleway just released its [Block Storage in public beta](https://www.scaleway.com/en/betas/#block-storage) and it is a great opportunity for us to explain **the main differences between Block, File and Object storage**.\n\n![](https://www-uploads.scaleway.com/schema_storage_7805627481.svg)\n\n## What Is Block Storage? \n\nBlock storage is a technology that allows abstraction over a low-level storage device. The main advantage of this storage solution is to provide **low-latency operations**. When one orders a block volume, it is like ordering a virtual hard-drive that can be plugged-in/out of a cloud instance. As a user, you would treat this block device as a regular disk. When you plug it in, the operating system detects it as a raw disk. Then, you format it to create a file system on it (ext4, XFS, NTFS…) and start using it as a regular device upon which you can store your data.\n\nIn the background, a block device is managed by the cluster as a collection of smaller pieces (called chunks or simply blocks, hence its name). Each of these chunks can be stored across a storage cluster of several machines and under a unique address. In particular, in the case of a cloud block storage, those chunks are **replicated to avoid data loss in the event of a storage medium failure**. \n\nWhen the operating system of your instance asks for a particular file, it makes a request to the block device your file is stored in. This request is then translated in a block storage system request that will deliver the data to your operating system just like a real hard-drive would do. \n\nThis makes block storage ideal for latency-critical applications such as storage of **virtual machines and transactional databases**. 
It is also well suited for **business-critical applications** as data is stored redundantly across multiple physical disks and nodes. In case a disk failure occurs, the missing blocks can easily be recovered from other disks in the cluster. Block storage also provides consistent performance, no matter the amount of data stored, contrary to file storage, which might suffer performance issues when a certain number of files are stored. In addition, the block device is accessed over a network, making it **easy to detach a volume from a server A to attach it to a server B inside the same availability zone**. Block storage is convenient when using products such as [Kubernetes](https://www.scaleway.com/en/betas/#kubernetes-kapsule) or [Database](https://www.scaleway.com/en/database/).\n\n| **Block Storage Pros:** | **Block Storage Cons:** |\n| ------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- |\n| Adequate for applications that requires high-performance and optimized I/O bound | Block storage volumes are allocated with a fixed size.You may end up paying for unused storage. |\n| Highly redundant. The data is redundant across the volumeso if a disk fails, the data can be easily recovered without any impact on your applications. | |\n\u003cbr\u003e\n## What Is File Storage? \n\nFile storage is a solution to store data as files and present it to its final users as a hierarchical directories structure. The main advantage is to provide **a user-friendly solution to store and retrieve files**. To locate a file in file storage, the complete path of the file is required. For instance: `/home/myuser/myphotos/summer2019/italy/beach.webp`. \n\nFile storage is how final users and many applications interact with a storage solution. 
Data can either be stored **on a local computer hard drive or on a network-attached storage solution** for example through [network attached storage (NAS) devices](https://en.wikipedia.org/wiki/Network-attached%5Fstorage). \n\nThis type of storage system supports a range of file access management features, such as **ownership and permissions** across a set of authenticated users. It also supports **multiple concurrent writes**. Several users can mount the same file storage and edit it concurrently. \n \nBut it got a set of drawbacks in the field of scalability. This solution is limited in the number of files it can serve efficiently. Still, as the number of data we need to manage is continuously growing, file recovery can become a burdensome and time-consuming task. **Expanding the storage capacity requires the careful management of the underlying storage medium** which can be problematic in the case of a NAS with limited slots. \n\nCloud solutions exist to solve the physical constraints of file storage. These services allow users to store their files on servers in a remote datacenter (the cloud) and make them available through a network connection. Multiple users can simultaneously access their files while the cloud provider manages the physical devices storing the data. \n\n| **File Storage Pros:** | **File Storage Cons:** |\n| ---------------------------------------------------------------------------------------------------- | --------------------------------------------- |\n| Accessible to multiple runtimes. A single fileshare that has multiple servers accessing all at once. | Performance affected by network traffic. |\n| Simultaneous reads and writes without worryingabout your data being overwritten. | Performance suffers beyond a certain capacity |\n| Limited set of metadata | |\n\u003cbr\u003e\n## What Is Object Storage? \n\nObject storage is one of the most recent storage system. 
It was created in the cloud computing industry with the requirement of **storing vast amounts of unstructured data**. Instead of using file paths, data is stored as immutable objects addressed by a key. \n\nNo matter if these objects are log files, HTML websites, images, documents, or any kind of data. As there is no specific schema to follow, these objects are called unstructured. Data objects include an ID (instead of a file name and a file path), metadata (e.g., authors of the file, permissions set on the files, date on which it was created, etc.) and unstructured data (e.g., images, videos, websites backups, etc.). **The metadata is entirely customizable**, which allows you to add more information to each piece of data.\n\n**Access to the objects and their metadata is done using a standard HTTP API**, which is one of the reasons object storage became a massive success with all kinds of developers and in particular web developer. Storing and retrieving objects using standard HTTP requests made it easy to develop libraries for almost all programming languages. Most object storage service providers also allow accessing objects from a public link making it possible to **host static assets of websites** on the object storage service instead of using a dedicated web server for this task. 
\n\n| **Object Storage Pros:** | **Object Storage Cons:** |\n| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------- |\n| Massively scalable | Object Storage does not allow the modificationof a certain data blob, each object must be read and written completely whichmay lead to performance issues |\n| Customizable metadata and flat address space | Higher latency than block storage |\n| Easily accessible via HTTP requests | |\n| Billed per usage, no fixed costs or very low entry fee | |\n\u003cbr\u003e\n\nObject Storage is the ideal solution for storing large amounts of data that is not being altered once stored and where latency is secondary. For example, it can be used to provide storage for file-sharing services, backups or personal data storage like photos or videos.","createdAt":"2023-01-19T09:38:00.805Z","updatedAt":"2023-01-24T16:44:07.040Z","publishedAt":"2023-01-19T09:41:04.845Z","locale":"en","tags":"Storage\nIntroduction","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":11,"excerpt":"Scaleway just released its Block Storage in public beta and it is a great opportunity for us to explain the main differences between Block, File and Object storage.","author":"Océane Franc","h1":"Understanding the Different Types of Storage","createdOn":"2020-01-09"}},{"id":84,"attributes":{"title":"k8s-security-episode-7-kapslocked","path":"k8s-security-episode-7-kapslocked/","description":"We could not end this series without a short conclusion.\n\nSecuring a Kubernetes cluster is not an easy task, but it is not more complicated than securing any cloud environment or even dedicated servers.\n\nVulnerabilities didn't appear with cloud computing, they were there long before, but we are now equipped with the right tools to detect them and protect our infrastructures the best we can. 
Believe it or not, but it is even more simple to protect a production environment now than it was before.\n\nKnowing the flaws and risks is a huge first step into security, and by reading this series, you are already in the right direction. Applying some of the rules we talked about here should already make your environment safer than 50% of other production systems.\n\nIs it good enough though? That is for you to decide. Remember that you have to start somewhere, and it is never too late for a matter as critical as security for businesses.\n\n## Engage your team to protect your customer\n\nMaintaining a production environment is everyone's concern, and we hope that with this series, whatever your job description might be, you feel that you can do something to secure your software and applications.\n\n![an engaged team makes for a protected customer](https://www-uploads.scaleway.com/blog-Screenshot-2021-05-06-at-10-40-19.webp)\n\nEven if security is a critical topic, it does not mean that it needs to take a hundred percent of your time. Except if it is your core business, you have more value to add by working on it on the side, as a constant reminder. It is basically the same rule as for code test coverage. 
Looking for the highest score might not be the best way to go on a business perspective.\n\n\u003cQuote size=\"small\" \u003eCare, but don’t overthink it.\u003c/Quote\u003e\n\n\n### Official Kubernetes documentation\n\n* [Certificates](https://kubernetes.io/docs/setup/best-practices/certificates/)\n* [Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/)\n* [Service Account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)\n* [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/)\n\n## Take care and stay safe!\n\nThank you for taking this journey with us on a secured Kubernetes ride.\n\nHope to see you soon on our [#k8s slack community channel](https://slack.scaleway.com/).","createdAt":"2023-01-17T15:04:36.286Z","updatedAt":"2023-01-26T07:53:57.888Z","publishedAt":"2023-01-17T15:06:19.931Z","locale":"en","tags":"Kubernetes\nSecurity","popular":false,"articleOfTheMonth":false,"category":"Deploy","timeToRead":1,"excerpt":"Securing a Kubernetes cluster is not an easy task, but it is not more complicated than securing any cloud environment or even dedicated servers.","author":"Emmanuelle Demompion","h1":"K8s security - Episode 7: KAPSLOCKED","createdOn":"2021-03-09"}},{"id":143,"attributes":{"title":"an-introduction-to-kubernetes","path":"an-introduction-to-kubernetes/","description":"[Kubernetes (K8s)](https://kubernetes.io/) is an open-source platform for managing containerized workloads and services. Google initially developed the project and it has been made publicly available in 2014\\. Since then, it has a vast, rapidly growing ecosystem. 
The name Kubernetes derives from the ancient Greek word meaning helmsman or pilot.\n\n## From Traditional Deployment to Containerized Deployment\n\nTo understand why Kubernetes and containerized deployment is so useful for today's workloads, let us go back in time and have a view on how deployment has evolved:\n\nDuring the **traditional deployment era**, organizations ran applications directly on physical servers. There was no way to control the resources an application may consume, causing resource allocation issues. If an application consumed most of the resources of the server it ran on, this high load might have caused performance issues on other applications running on the same physical server. \nA solution would be to run each application on a dedicated server, but this would cause resources to be under-used and maintenance costs to increase.\n\nMultiple Virtual Machines (VMs) brought the beginning of a solution during the **virtualized deployment era**. Virtualization allowed applications to be isolated between different VMs running on the same physical server, providing a security layer and better resource allocation. \nWhile this solution reduces hardware costs, each VM still requires the same administration and maintenance tasks as a physical machine.\n\nThe **containerized deployment era** brought us the concept of containers. \nA container includes its running environment and all the required libraries for an application to run. Different containers with different needs can now run on the same VM or physical machine, sharing resources. 
Once configured, they are portable and can be easily run across different clouds and OS distributions, making software less and less dependent on hardware and reducing maintenance costs.\n\n### How Kubernetes can help you to manage Containerized Deployments\n\nIn a production environment, you may need to deal with huge amounts of containers, and you need to manage the containers running the applications to ensure there is no downtime. Managing thousands of simultaneously running containers on a cluster of machines by hand sounds like an unpleasant task.\n\nThis is what Kubernetes can do for you. It manages the lifecycle of containerized applications and services, defines how applications should run, how they are intended to interact with other applications on the outside world while providing predictability, scalability, and high availability.\n\n## Kubernetes Architecture\n\nKubernetes is able to manage a cluster of virtual or physical machines using a shared network to communicate between them. All Kubernetes components and workloads are configured on this cluster.\n\nEach machine in a Kubernetes cluster has a given role within the Kubernetes ecosystem. At least one of these servers acts as the master server, in production grade workloads usually a multi-master setup is being configured, meaning that multiple servers act as master for redundancy. The master setup is the “brain” of the cluster exposing the different APIs, performing health checks on other servers, scheduling the workloads and orchestrating communication between different components. It acts as the primary point of contact with the cluster.\n\nThe other machines in the cluster are called **nodes**. 
These machines are designed to run workloads in containers, meaning each of them requires a container runtime installed on it (for example [Docker](https://www.scaleway.com/en/docs/how-to-install-docker-community-edition-ubuntu-bionic-beaver/) or [CRI-O](https://cri-o.io/)).\n\nThe different underlying components running in the cluster ensure that the desired state of an application matches the actual state of the cluster. In case the given state of an application changes, the master server will take the actions required to restore the desired state of the application by creating or destroying containers on the nodes, as well as adjusting network rules to route and forward traffic as requested by the master.\n\nA user interacts with the master server either directly with the API or with additional clients by submitting a declarative plan in `JSON` or `YAML`. This plan, containing instructions about what to create and how to manage it, is interpreted by the master who decides how to deploy the application.\n\n## Kubernetes Components\n\n### Master Components\n\n**Master components** provide the cluster’s control plane. These components are making global decisions about the cluster as well as detecting and responding to cluster events.\n\nMultiple applications and processes are needed for a Kubernetes cluster to run. They are either components guaranteeing the cluster health and status, or processes allowing communication and control over the cluster.\n\n#### `etcd`\n\n[etcd](https://etcd.io/) is a consistent and highly-available key-value store that is used by Kubernetes to store its configuration data, its state, and its metadata.\n\n#### `kube-apiserver`\n\nThe `kube-apiserver` is a component on the master that exposes the Kubernetes API. It is the front-end for the Kubernetes control plane and the primary means for a user to interact with a cluster. 
The API server is the only component that communicates directly with the `etcd`.\n\n#### `kube-scheduler`\n\nThe `kube-scheduler` is a master component watching newly created pods that have no node assigned yet and assigns them a node to run on.\n\nIt assigns the node based on individual and collective resource requirements, hardware/software/policy constraints, and more.\n\n#### `kube-controller-manager`\n\nThe `kube-controller-manager` is a master component that runs controllers.\n\nTo reduce complexity, all controllers are compiled into a single binary and run in a single process.\n\n#### `cloud-controller-manager`\n\nThe [cloud-controller-manager](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/) is an add-on useful when your cluster is running on a cloud provider.\n\nIt “glues” the different capabilities, features, and APIs of different providers while maintaining relatively generic constructs internally. \n\nYou can check out how we implemented our [Cloud Controller Manager](https://github.com/scaleway/scaleway-cloud-controller-manager) on GitHub.\n\n### Node Components\n\nServers that perform workloads in Kubernetes (running containers) are called **nodes**. Nodes may be VMs or physical machines.\n\nNode components are maintaining pods and providing the Kubernetes runtime environment. These components run on every node in the cluster.\n\n#### `kubelet`\n\nThe `kubelet` is an agent running on each node and ensuring that containers are running in a pod. It makes sure that containers described in `PodSpecs` are running and healthy. The agent does not manage any containers that were not created by Kubernetes.\n\n#### `kube-proxy`\n\nThe `kube-proxy` is a network proxy running on each node in the cluster. It maintains the network rules on nodes to allow communication to the pods inside the cluster from internal or external connections. 
`kube-proxy` uses either the packet filtering layer of the operating system, if there is one, or forwards the traffic itself if there is none.\n\n### Container Runtime\n\nKubernetes is able to manage containers, but not capable to run them. Therefore a container runtime is required that is responsible for running containers. Kubernetes supports several container runtimes like Docker or containerd as well as any implementation of the [Kubernetes CRI (Container Runtime Interface)](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/container-runtime-interface.md).\n\n## Kubernetes Objects\n\nKubernetes uses containers to deploy applications, but it also uses additional layers of abstraction to provide scaling, resiliency, and life cycle management features. These abstractions are represented by **objects** in the Kubernetes API.\n\n### Pods\n\nA **Pod** is the smallest and simplest unit in the Kubernetes object model. Containers are not directly assigned to hosts in Kubernetes. Instead, one or multiple containers that are working closely together are bundled in a Pod together, sharing a unique network address, storage resources and information on how to govern the containers.\n\n### Services\n\nA **service** is an abstraction which defines a logical group of Pods that perform the same function and a policy on how to access them. The service provides a stable endpoint (IP address) and acts like a load balancer by redirecting requests to the different pods in the service. The service abstraction allows to scale out or to replace dead pods without making changes in the configuration of an application.\n\nBy default services are only available using internally routable IP addresses but can be exposed publicly. \nIt can be done either by using the `NodePort` configuration, which works by opening a static port on each nodes external networking interface. 
Otherwise, it is possible to use the `LoadBalancer` service, which creates an external load balancer at a cloud provider using Kubernetes load-balancer integration. This service works only if a cloud controller manager is present, however.\n\n### ReplicaSet\n\nA **ReplicaSet** contains information about how many Pods it can acquire, how many Pods it shall maintain and a Pod template specifying the data of new Pods to meet the number of replicas criteria. The task of a ReplicaSet is to create and delete Pods as needed to reach the desired status. Each Pod within a ReplicaSet can be identified via the `metadata.ownerReference` field, allowing the ReplicaSet to know the state of each of the Pods that it is maintaining and therefore it can schedule tasks according to the state of the Pods.\n\nHowever, `Deployments` are a higher-level concept managing ReplicaSets and providing declarative updates to Pods with many other useful features. It is therefore recommended to use Deployments unless you require some specific customized orchestration.\n\n### Deployments\n\nA Deployment represents a set of identical Pods with no individual identities, managed by a _deployment controller_.\n\nThe deployment controller runs multiple replicas of an application as specified in a _ReplicaSet_. In case any pods may fail or become unresponsive, the deployment controller replaces them until the actual state equals the desired state.\n\n### StatefulSets\n\nA StatefulSet is able to manage Pods like the deployment controller but maintains a sticky identity of each Pod. Pods are created from the same base but are not interchangeable.\n\nThe operating pattern of StatefulSet is the same as for any other Controllers. 
The StatefulSet controller maintains the desired state, defined in a StatefulSet object, by making the necessary update to go from the actual state of a cluster to the desired state.\n\nThe unique, number-based name of each pod in the StatefulSet persists, even if a pod is being moved to another node.\n\n### DaemonSets\n\nAnother type of pod controller is called DaemonSet. It ensures that all (or some) Nodes run a copy of a Pod. For most use cases, it does not matter where pods are running, but in some cases, it is required that a single pod runs on all nodes. This is useful for aggregating log files, collecting metrics, or running a network storage cluster.\n\n### Jobs and CronJobs\n\nJobs manage a task until it runs to completion.\n\nJobs are able to run multiple Pods in parallel, and they are useful for batch-orientated tasks.\n\nCronJobs in Kubernetes work like traditional cron jobs in Linux. They can be used to run tasks at a specific time or interval and may be useful for Jobs such as backups or cleanup tasks.\n\n### Volumes\n\nA Volume is a directory that is accessible to containers in a Pod. Kubernetes uses its own volumes abstraction, allowing data to be shared by all containers and remain available until the pod is terminated. A Kubernetes volume has an explicit lifetime - the same as the Pod that encloses it. This means data in a pod will be destroyed when a Pod ceases to exist. This also means volumes are not a good solution for storing persistent data.\n\n### Persistent Volumes\n\nTo avoid the constraints of the volume life cycle being tied to the pod life cycle, [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) allow configuring storage resources for a cluster that are independent of the life cycle of a pod. 
\nOnce a Pod is being terminated, the reclamation policy of the volume determines if the volume is kept until it gets deleted manually or if it is being terminated with the pod.","createdAt":"2023-01-18T13:05:20.402Z","updatedAt":"2023-01-24T23:42:01.951Z","publishedAt":"2023-01-18T13:13:58.955Z","locale":"en","tags":"Kubernetes\nIntroduction\nDiscover","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":8,"excerpt":"To understand why Kubernetes and containerized deployment is so useful for nowadays workloads, let us go back in time and have a view on how deployment has evolved\n","author":"Benedikt Rollik","h1":"An Introduction to Kubernetes","createdOn":"2020-11-05"}}]},"meta":{"id":663,"title":"Understanding Kubernetes Autoscaling","description":"Kubernetes provides a series of features to ensure your clusters have the right size to handle any type of load.","ogtype":null,"ogtitle":null,"ogdescription":null,"noindex":false},"localizations":{"data":[{"id":163,"attributes":{"title":"comprendre-autoscaling-de-kubernetes","path":"comprendre-autoscaling-de-kubernetes/","description":"Kubernetes fournit une série de fonctionnalités pour garantir que vos clusters ont la bonne dimension pour pouvoir gérer n'importe quel type de charge. Dans cet article, nous allons distinguer les différentes méthodes d'autoscaling (mise à l'échelle automatique en français) fournies par Kubernetes et comprendre les différences entre l'horizontal pod autoscaler, le vertical pod autoscaler et enfin le Kubernetes Nodes autoscaler.\n\nKubernetes est un outil d'orchestration qui permet aux développeurs de déployer leurs applications le plus sereinement et le plus rapidement possible. Dans une architecture classique, vous devez définir la capacité de votre cluster en vous basant sur la charge estimée générée par vos utilisateurs. Mais imaginez que votre service devient viral, et le nombre de demandes augmente plus vite que vous ne l'imaginiez. 
Vous risquez de manquer de ressources de calcul, votre service de ralentir et vos utilisateurs d'être frustrés.\n\nLorsque vous allouez des ressources manuellement, vos temps de réponse peuvent ne pas être aussi rapides que l'exige l'évolution des besoins de votre application. C'est pour répondre à cette problématique que la fonctionnalité Autoscaling de Kubernetes entre en jeu : Kubernetes fournit plusieurs couches de fonctionnalités de mise à l'échelle automatique : mise à l'échelle basée sur le POD avec l'**Horizontal Pod Autoscaler** et le **Vertical Pod Autoscaler**, ainsi que basée sur des nœuds avec le **Cluster Autoscaler**. Il met automatiquement à l'échelle votre cluster dès que vous en avez besoin et redescend à sa taille nominale lorsque la charge est plus faible. Ces couches garantissent que chaque **pod** et **cluster** dispose des performances adéquates pour répondre à vos besoins en temps réel!\n\n## Architecture de Kubernetes\n\nDans Kubernetes, un ensemble de machines pour exécuter des applications conteneurisées est appelé **Cluster**. Un cluster contient au minimum un **Control Plane** (plan de contrôle en français) et un ou plusieurs **nœuds**. Le Control Plane conserve l'état souhaité du cluster, par exemple quelles applications s'exécutent sur celui-ci et quelles images il utilise. Les nœuds sont des machines virtuelles ou physiques qui exécutent les applications et les charges de travail, appelées **Pods**. Les Pods sont constitués de conteneurs qui demandent des ressources de calcul telles que CPU, Mémoire ou même GPU.\n\n![Kubernetes Cluster Architecture](https://www-uploads.scaleway.com/blog-image--18-.webp)\n\nPour plus d'informations sur les différents composants de Kubernetes, consultez notre article de blog : [Une introduction à Kubernetes](https://www.scaleway.com/en/blog/an-introduction-to-kubernetes/) (en anglais)\n\n## Horizontal vs. 
Vertical Scaling\n\n| Horizontal | Vertical | |\n| ---------- | -------------------------- | ---------------------------------------------------- |\n| **Pod** | Ajoute ou retire des pods | Modifie les resources CPU et/ou RAM allouées au pod |\n| **Node** | Ajoute ou retire des nœuds | Modifie les resources CPU et/ou RAM allouées au nœud |\n\n* **Horizontal Scaling** signifie modifier les ressources de calcul d'un cluster existant, par exemple en y ajoutant de nouveaux nœuds ou en ajoutant de nouveaux pods en augmentant le nombre de réplicas de pods (Horizontal Pod Autoscaler).\n* **Vertical Scaling** signifie modifier les ressources attribuées (comme CPU ou RAM) de chaque nœud du cluster. Dans la plupart des cas, cela signifie créer un pool de nœuds entièrement nouveau à l'aide de machines ayant des configurations matérielles différentes. La mise à l'échelle verticale sur les pods signifie l'ajustement dynamique des demandes de ressources et des limites en fonction des exigences actuelles de l'application (Vertical Pod Autoscaler).\n\n## Horizontal Pod Autoscaler\n\nLe [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) est capable de mettre à l'échelle le nombre de pods disponibles dans un cluster pour pouvoir gérer la charge de travail (ou workload) d'une application. Il détermine le nombre de pods nécessaires en fonction des seuils de ressources que vous avez définies et crée ou supprime des pods basés sur des ensembles de ces seuils. Dans la plupart des cas, ces mesures sont l'utilisation du CPU et de la RAM, mais il est également possible de spécifier vos mesures personnalisées (trafic ou autres). L'HPA vérifie en permanence les mesures de CPU et de mémoire générées par le `metrics-server` installé sur le cluster Kubernetes.\n\nSi l'un des seuils spécifiés est atteint, l'outil met à jour le nombre de réplicas de pod à l'intérieur du contrôleur de déploiement. 
Après avoir mis à jour le nombre de réplicas de pod, le contrôleur de déploiement va augmenter ou diminuer le nombre de pods jusqu'à ce que le nombre de réplicas corresponde au nombre souhaité. Si vous souhaitez utiliser des mesures personnalisées pour définir des règles sur la façon dont le HPA gère la mise à l'échelle de vos modules, votre cluster doit être lié à une base de données chronologiques (time-series database) contenant les mesures que vous souhaitez utiliser. Veuillez noter que la mise à l'échelle automatique horizontale des pods ne peut pas être appliquée à des objets qui ne peuvent pas être mis à l'échelle comme, par exemple, les DaemonSets.\n\n## Vertical Pod Autoscaler\n\nLe [Vertical Pod Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/vertical-pod-autoscaler/README.md) (VPA) peut allouer plus (ou moins) de ressources CPU et mémoire aux pods existants afin de modifier les ressources de calcul disponibles pour une application. Cette fonctionnalité peut être utile pour surveiller et ajuster les ressources allouées de chaque pod tout au long de sa durée de vie. Le VPA est livré avec un outil appelé _VPA Recommander_, qui surveille la consommation actuelle et la consommation passée des ressources. Celui-ci utilise alors ces données pour fournir des valeurs recommandées pour allouer des ressources CPU et mémoire aux conteneurs. Pour rentrer légèrement dans le fonctionnement, le Vertical Pod Autoscaler ne met pas à jour les configurations de ressources pour les pods existants. Il se contente de vérifier si les pods bénéficient d'une bonne allocation des ressources et détruit ceux qui n'ont pas la configuration recommandée afin que leurs contrôleurs puissent les recréer avec la configuration adéquate.\n\nIl est important de noter que si vous voulez utiliser le HPA et le VPA en même temps pour gérer vos ressources de conteneur, il y a de fortes chances pour qu'ils soient en conflit si les mêmes variables utilisées (CPU et mémoire). 
Les deux essaieront de résoudre la situation simultanément, entraînant une mauvaise affectation des ressources. Cependant, il est possible de les utiliser tous les deux s'ils s'appuient sur des mesures différentes. Le VPA utilise la consommation de CPU et de mémoire comme sources uniques pour recueillir l'allocation parfaite des ressources, mais l'HPA peut être utilisé avec des mesures personnalisées afin que les deux outils puissent être utilisés en parallèle.\n\n## Kubernetes Nodes Autoscaler\n\nL'Autoscaler des nœuds Kubernetes ajoute ou supprime des nœuds dans un cluster en fonction des ressources demandées par **l'ensemble des pods**. Il est possible de définir un nombre minimum et un nombre maximal de nœuds disponibles pour le cluster à partir de la [console Scaleway Elements](https://console.scaleway.com/register).\n\nAlors que les _autoscalers_ des pods horizontaux et verticaux vous permettent la mise à l'échelle des pods, l'autoscaler de noeud Kubernetes quant à lui se concentre sur la mise à l'échelle des nœuds de vos clusters. L'outil vérifie s'il y a des pods en attente et augmente la taille du cluster afin que ceux-ci puissent être créés. Il libère également les nœuds inactifs pour garder une taille optimale de votre cluser. Le Node Autoscaler peut demander de déployer de nouveaux nœuds directement dans votre pool (un pool étant un ensemble de nœuds partageant les mêmes configurations), dans les limites de ressources indiquées.\n\n### Cluster upscaling\n\nSi des pods sont planifiés pour l'exécution, le Kubernetes Autoscaler peut augmenter le nombre de machines dans le cluster pour éviter le manque de ressources. Le schéma ci-dessous illustre comment un cluster peut être automatiquement mis à l'échelle :\n\n![Kubernetes Nodes Autoscaler upscaling](https://www-uploads.scaleway.com/blog-image-1-1.webp)\n\nComme illustré, deux pods sont planifiés pour l'exécution mais la capacité de calcul du nœud actuel est atteinte. 
L'autoscaler de cluster analyse automatiquement tous les nœuds à la recherche de pods planifiés. Il demande alors l'ajout d'un nouveau nœud si trois conditions sont remplies :\n\n* Certains pods n'ont pas pu être planifié sur l'un des nœuds existants en raison du manque de ressources disponibles.\n* L'ajout d'un nœud avec les mêmes spécifications que les spécifications actuelles permet de redistribuer la charge.\n* Le cluster n'a pas atteint le nombre maximal de nœuds défini par l'administrateur.\n\nUne fois que le nœud est déployé et détecté par le Control Plan de Kubernetes, le _scheduler_ alloue les _pending pods_ (ou pods en attente en français) au nouveau nœud du cluster. Dans le cas où il y a encore des modules en attente, l'autoscaler répète ces étapes aussi souvent que nécessaire.\n\n### Cluster downscaling\n\nLe Kubernetes Cluster Autoscaler diminue le nombre de nœuds dans un cluster lorsque certains sont considérés comme non nécessaires pendant une période prédéfinie. Pour être considéré comme inutile, un nœud doit avoir une faible utilisation, et tous ses modules importants peuvent être déplacés ailleurs sans créer une pénurie de ressources. La vérification de la mise à l'échelle du nœud prend en compte les demandes de ressources faites par les pods, et si le _scheduler_ décide que les pods peuvent être déplacés ailleurs, il supprime le nœud du cluster afin d'optimiser l'utilisation des ressources et de réduire les coûts. 
Si vous avez défini un nombre minimum de nœuds actifs dans le cluster, l'autoscaler ne réduira pas le nombre de nœuds au-dessous de ce seuil.\n\n## Configuration de la mise à l'échelle automatique chez Scaleway\n\nVous pouvez configurer la mise à l'échelle automatique de cluster directement à partir de la [console Scaleway](https://console.scaleway.com/register).\n\n**Creation d'un nouveau cluster**\n\nPour activer Kubernetes Cluster Autoscaling lors de la création d'un nouveau cluster, rendez-vous à l'étape 5, activez le bouton et définissez les ressources minimales et maximales disponibles pour votre cluster :\n\n![Cluster déjà existant](https://www-uploads.scaleway.com/blog-image-2-1.webp)\n\n1. Dans la page d'informations de votre cluster, cliquez sur l'onglet Pools et sélectionnez le pool à modifier. Cliquez sur Edit dans le menu déroulant Pools pour configurer le pool :\n ![](https://www-uploads.scaleway.com/blog-Screenshot-2020-10-12-at-16-33-05.webp)\n2. Activez le bouton **Autoscale the number of nodes** et définissez le nombre souhaité de ressources minimales et maximales disponibles pour le pool :\n![](https://www-uploads.scaleway.com/blog-image-5-1.webp)\n3. 
Confirmez la modification du pool en cliquant sur **Update Pool**.\n\n## Conclusion\n\nVous comprenez maintenant les bases des différentes méthodes d'Autoscaling de Kubernetes et comment vous pouvez les utiliser pour configurer votre cluster pour des performances maximales.\n\nDéployez votre premier [cluster Kapsule Kubernetes](https://console.scaleway.com/kapsule/clusters) directement à partir de votre console Scaleway et essayez la fonctionnalité Autoscaling.","createdAt":"2023-01-18T14:54:20.162Z","updatedAt":"2023-05-05T14:27:49.290Z","publishedAt":"2023-01-23T22:57:34.338Z","locale":"fr","tags":"Kubernetes\nScaling","popular":false,"articleOfTheMonth":false,"category":"Scale","timeToRead":8,"excerpt":"Dans cet article, nous allons distinguer les différentes méthodes d'autoscaling fournies par Kubernetes et comprendre les différences entre l'horizontal pod autoscaler et le vertical pod autoscaler.","author":"Benedikt Rollik","h1":"Comprendre l'autoscaling de Kubernetes","createdOn":"2022-08-01"}}]}}},{"id":41,"attributes":{"title":"understand-pam","path":"understand-pam/","description":"PAM is one of the Linux components you probably already heard of. You know that it is used someway to authenticate users. You don't exactly know how it works neither how to configure it, but at the very bottom of the todo list you made a few years ago, there's the line \"understand how PAM works\" written, so you will get it eventually!\n\nI had to understand PAM to configure the [torrents InstantApp](https://hub.scaleway.com/torrents.html) of Scaleway. You can use this app to create a [C1 server](https://www.scaleway.com/) in one click. 
It contains:\n\n* [rTorrent](https://rakshasa.github.io/rtorrent/) to download torrent files.\n* [ruTorrent](https://github.com/Novik/ruTorrent), a pretty cool web interface to visualize and manage your downloads.\n* a fancy web page to list your downloaded files using [h5ai](http://larsjung.de/h5ai/).\n\nI also wanted to install the FTP server [vsftpd](https://security.appspot.com/vsftpd.html) to allow the user to download his/her files from his favorite FTP client.\n\nThe web interface of the torrents InstantApp is protected by basic authentication (_i.e._ credentials are generated with `htpasswd -c \u003cfilename\u003e \u003cusername\u003e`) and I want to setup vsftpd to use this htpasswd file for authentication too.\n\n\n\n![PAM is a pain to understand and configure](https://www-uploads.scaleway.com/blog-crying.gif)\n\nNote that even if this tutorial explains how vsftpd is using PAM, the concepts are the same if you need to configure other softwares.\n\n## PAM authentication lifecycle\n\nWhen you read some documentation about PAM, it's often explained how to configure it and it's quite... complicated. Let's try another way, and explain first how vsftpd uses PAM to handle authentication. We'll discuss the configuration files a little bit later.\n\nIn `/etc/vsftpd.conf` you will find `pam_service_name=vsftpd`. It means vsftpd uses the PAM service named `vsftpd` to perform authentication (in other words, it uses PAM configured by `/etc/pam.d/vsftpd`).\n\nThe authentication process is done in four steps:\n* first, vsftpd calls [pam\\_authenticate](https://github.com/dagwieers/vsftpd/blob/master/sysdeputil.c#L375) to authenticate the user. PAM requires the user to provide an authentication token depending upon its configuration, usually this is a password, but could also be a finger print.\n* if it succeeds, vsftpd calls [pam\\_acct\\_mgmt](https://github.com/dagwieers/vsftpd/blob/master/sysdeputil.c#L393). PAM determines if the user's account is valid. 
It checks for the authentication token and account expiration and verifies access restrictions.\n* if it succeeds and `SESSION_SUPPORT=YES` in `vsftpd.conf` (default is `NO`), vsftpd calls [pam\\_open\\_session](https://github.com/dagwieers/vsftpd/blob/master/sysdeputil.c#L416). PAM sessions are used to do things that need to be done for the user before/after they can be given a service, for instance mounting directories.\n* finally, vsftpd calls [pam\\_close\\_session](https://github.com/dagwieers/vsftpd/blob/master/sysdeputil.c#L440) to close the PAM session.\n\nEach step (authentication, account management and session management) corresponds to a PAM type, named _auth_, _account_ and _session_. Another type exists, _password_, useful to update the authentication token associated with the user but won't be discussed here, though after this blog post you should be able to read and understand the manual of pam.conf easily if you need to use it.\n\n## PAM configuration\n\nPAM configuration files are located in `/etc/pam.d`. Each line (called _rule_) is composed as follow:\n\n`type` `control` `module-path` `module-arguments`\n\nFor example the following rule is successful only if the username is not in /etc/ftpusers:\n\n```auth required pam_listfile.so item=user sense=deny file=/etc/ftpusers onerr=succeed\n\nLet's see what is the meaning of each part:\n* we saw `type` previously. It can be `auth`, `account`, `password` or `session` depending on which step you want to configure.\n* `control` indicates if the module should fail or succeed in its authentication task. Since the syntax is a bit more complex, we will discuss what it can contain just after.\n* `module-path` is the PAM module used to perform the action. [A lot of PAM modules](https://github.com/linux-pam/linux-pam/) exist. 
For example, [pam\\_listfile.so](https://github.com/linux-pam/linux-pam/) is used to allow or deny the access if the username is (or is not) in a text file ; and [pam\\_unix.so](https://github.com/linux-pam/linux-pam/) is used to ask password and compare it against `/etc/shadow`. Note that certain modules cannot be used for every `type`. For instance, the module [pam\\_shells.so](https://github.com/linux-pam/linux-pam/) which allows access to the system if the users shell is listed in `/etc/shells` can only be used for the types `auth`, `account` and `password`.\n* Finally, `module-arguments` are the arguments given to the module. For example, it could be `item=user sense=deny file=/etc/ftpusers onerr=succeed` for `pam_listfile.so`. You need to read the module documentation to understand the meaning of each parameter.\n\nFor the authentication process to succeed, the set of rules for each type must be successful. First, `auth` rules are processed, then `account`, then optionally `session` rules.\n\nThere is a way to control the behavior of a rule, and tell PAM \"if this rule fails, then ignore it and go to the next one\" or \"if this rule succeeds, skip the next one and execute the one after\" or \"if this rule fails, do not process the next rules and return an error immediately to the application\". This is what `control` is for.\n\n## _control_ format in a PAM rule\n\n`control` is a square-bracketed selection of `value=action` pairs, for example `[success=ok new_authtok_reqd=ok ignore=ignore default=bad]`.\n\n`value` can be one of these: `success`, `cred_unavail`, `acct_expired`, or `default` (and actually muuuuuuch more, everything is detailed in `pam.conf`). It allows to configure the behavior of the rule if the module returns a success, or if it returns that credentials are unavailable, or... you get the idea. `default` is used to configure the behavior of the values not explicitely mentionned.\n\n`action` defines what to do if the PAM module returns `value`. 
It can be:\n\n* `ignore` to prevent returning an error to the application if the PAM module fails. In our case, the application is vsftpd, and we `ignore` when the authentication using the .htpasswd file fails to let later rules decide if we should grant access.\n* `done` to return the current status to the application, and stop processing further rules.\n* `N (an unsigned integer)` to skip the next `N` rules..\n\n`action` can also be `bad`, `die` or `ok`.\n\nLast thing about control: the square-bracket syntax is hard to read and you can use `required` (equivalent to `[success=ok new_authtok_reqd=ok ignore=ignore default=bad]`), `requisite`, `sufficient` or `optional` instead.\n\nEverything is detailed in the man of pam.conf ;)\n\n## The final PAM configuration file\n\nFrom now, you should be able to understand the configuration file.\n\n```bash\n# Ensure the FTP username is not in /etc/ftpusers\nauth required pam_listfile.so item=user sense=deny file=/etc/ftpusers onerr=succeed\n\n# If credentials match what is in the htpasswd file, return \n# success and do not process the other rules\nauth sufficient pam_pwdfile.so pwdfile=/var/www/credentials debug\n\n# Always return success. Note the type `account` will only\n# be called if `auth` previously returned a success.\naccount sufficient pam_permit.so\n\n# Standard PAM includes, to authenticate using /etc/passwd.\n@include common-account\n@include common-session\n@include common-auth\n\n# Ensure the user has a shell in /etc/passwd.\nauth required pam_shells.so","createdAt":"2023-01-17T11:03:57.976Z","updatedAt":"2023-02-23T13:02:38.661Z","publishedAt":"2023-01-17T11:10:16.341Z","locale":"en","tags":"Open-source\nInfrastructure\nIntroduction","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":6,"excerpt":"PAM is a Linux component to authenticate users. You don't know how it works, but at the bottom of the todo list you made years ago, it says \"understand how PAM works\". 
So here you go!","author":"Julien Castets","h1":"Understanding PAM - Pluggable Authentication Modules","createdOn":"2022-02-04","image":{"data":{"id":1513,"attributes":{"name":"UseCases-Illustration-Blog.webp","alternativeText":null,"caption":null,"width":1216,"height":752,"formats":{"large":{"ext":".webp","url":"https://www-uploads.scaleway.com/large_Use_Cases_Illustration_Blog_f066679bf7.webp","hash":"large_Use_Cases_Illustration_Blog_f066679bf7","mime":"image/webp","name":"large_UseCases-Illustration-Blog.webp","path":null,"size":"140.88","width":1000,"height":618},"small":{"ext":".webp","url":"https://www-uploads.scaleway.com/small_Use_Cases_Illustration_Blog_f066679bf7.webp","hash":"small_Use_Cases_Illustration_Blog_f066679bf7","mime":"image/webp","name":"small_UseCases-Illustration-Blog.webp","path":null,"size":"54.34","width":500,"height":309},"medium":{"ext":".webp","url":"https://www-uploads.scaleway.com/medium_Use_Cases_Illustration_Blog_f066679bf7.webp","hash":"medium_Use_Cases_Illustration_Blog_f066679bf7","mime":"image/webp","name":"medium_UseCases-Illustration-Blog.webp","path":null,"size":"97.53","width":750,"height":464},"thumbnail":{"ext":".webp","url":"https://www-uploads.scaleway.com/thumbnail_Use_Cases_Illustration_Blog_f066679bf7.webp","hash":"thumbnail_Use_Cases_Illustration_Blog_f066679bf7","mime":"image/webp","name":"thumbnail_UseCases-Illustration-Blog.webp","path":null,"size":"21.25","width":245,"height":152}},"hash":"Use_Cases_Illustration_Blog_f066679bf7","ext":".webp","mime":"image/webp","size":180.67,"url":"https://www-uploads.scaleway.com/Use_Cases_Illustration_Blog_f066679bf7.webp","previewUrl":null,"provider":"@website/strapi-provider-upload-scaleway-bucket","provider_metadata":null,"createdAt":"2023-01-17T11:07:36.217Z","updatedAt":"2023-01-17T11:07:36.217Z"}}},"recommendedArticles":{"data":[{"id":181,"attributes":{"title":"understanding-network-latency","path":"understanding-network-latency/","description":"In this blog post, we 
attempt to demystify the topic of network latency. We take a look at the relationship between **latency**, **bandwidth** and **packet drops** and the impact of all this on \"the speed of the internet\". Next, we examine factors that contribute to latency including **propagation**, **serialization**, **queuing** and **switching** **delays**. Finally, we consider the more advanced topic of the relationship between latency and common **bandwidth limitation techniques**.\n\nWhile the concept of bandwidth is easily understood by most IT engineers, latency has always been something of a mystery. At first glance, it might seem that it doesn't matter how long it takes for a chunk of data to cross the Atlantic ocean, as long as we have enough bandwidth between the source and destination. What's the big deal if our 100GE link is one kilometer or one thousand kilometers long, it's still 100GE isn't it?\n\nThis would be correct if we never experienced any [packet loss](https://en.wikipedia.org/wiki/Packet%5Floss) in the network. Latency impact is all about flow and congestion control (which these days is mainly part of TCP). When one end sends a chunk of data to the other, it usually needs an acknowledgment that the data has been received by the other end. If no acknowledgment is received, the source end must resend the data. The greater the latency, the longer it takes to detect a data loss.\n\nIn fact, packet drops are part of the very nature of IP networks. Packets may be dropped even in a perfectly-operating network that has no issues with congestion or anything else.\n\n## Latency and the Unknown \n\nWe can regard network latency as a buffer for the data \"in fly\" between the source and the destination, where packets could possibly disappear. The greater the latency, the larger this buffer is, and the more data is in fly. 
But being in fly, the data is like [Schrodinger's cat](https://en.wikipedia.org/wiki/Schr%C3%B6dinger%27s%5Fcat): neither sender nor receiver know if it's safely traveling towards its destination or already dropped somewhere along the way. The bigger the amount of data in this unknown state, the longer it takes to detect a loss event and recover from it. Consequently, the lower the effective bandwidth between the source and destination.\n\nThe maximum theoretical bandwidth that can be achieved is determined as follows:\n\n![](https://www-uploads.scaleway.com/blog-Bandwidth_formula_resized-1.webp)\n\nAs the **R**ound **T**rip **T**ime (RTT) can not be changed for a given source/destination pair, if we want to increase the available bandwidth we have to increase the amount of data in fly. Technically speaking, to achieve a given bandwidth over a route with a given RTT (**Delay**), the TCP transmission window (amount of data in fly) must be equal to the so-called **B**andwidth-**D**elay **P**roduct (**BDP**):\n![BDP_formula_resized.webp](https://www-uploads.scaleway.com/BDP_formula_resized_e5704e1564.webp)\n\nSo the bandwidth in packet networks comes at the cost of entropy (the amount of data in the unknown state being transported between the source and the destination) and is a linear function of network latency.\n\nAs noted above, packet drops are normal on the Internet due to the nature of packet networks. Modern, sophisticated congestion control schemes can increase the amount of data in fly (TCP transmission window) to large values quickly, in order to reach maximum bandwidth. However, when a drop is experienced, the time needed to detect that loss is still a function of latency.\n\nFinally, all this translates into \"the speed of the Internet\". 
We can see the difference by downloading a 1GB file from a test server in Paris and then the same file in a version with 90 ms of additional simulated latency:\n\n* \u003chttp://ping.online.net/1000Mo.dat\u003e\n* \u003chttp://ping-90ms.online.net/1000Mo.dat\u003e\n\n(You will need a good internet connection to carry out this test)\n\n## Latency User Experience Guidelines \n\nThe following guidelines can be used to _roughly_ estimate the impact of latency:\n\n| **RTT** | **User Experience** |\n| ---------- | ---------------------------------------------------------------------------------------------------------- |\n| \u003c30 ms | little or no impact on user experience |\n| 30-60 ms | still OK but noticeable for certain applications (gaming etc.) |\n| 60-100 ms | mostly acceptable, but users do start to feel it: websites a little slower, downloads not fast enough, etc |\n| 100-150 ms | user feels typically that \"the Internet is slow\". |\n| \\\u003e150 ms | \"it works\", but is not acceptable for most commercial applications these days |\n\nThese numbers are subjective and may vary for different applications and user groups. They also depend on the user's habitual location. For example, people living in New Zealand or North-East Russia are somewhat used to higher latency for the majority of internet resources. They can tolerate \"slower Internet\", while US West Coast users who are more used to \"fast internet\" are often unhappy with the RTT to the European resources.\n\n![](https://www-uploads.scaleway.com/blog-globe-supercropped-1.webp)\n\n## Where Latency Comes From\n\n### Propagation Delay\n\nIn modern networks, the primary source of latency is distance. This factor is also called _propagation delay_. 
The speed of light in a fiber is roughly 200,000 km per second, which gives us 5 ms per 1000 km single-direction and the mnemonic rule of **1 ms of round-trip time per 100 km.**\n\nHowever, fibers rarely follow as-the-crow-flies lines on the map, so the true distance is not always easy to estimate. While the routes of submarine cables are more or less straightforward, metro fiber paths in highly urbanized areas are anything but. And so, real-world RTT values can be roughly determined by coupling the above rule with the following considerations:\n\n* For multi-thousand kilometers long, transcontinental routes: as-the-crow-flies distance multiplied by a factor of 1.5\n* For shorter, terrestrial urban routes: as-the-crow-flies distance multiplied by a factor of 2.\n\nSee the examples below.\n\n**Paris—New York** \n5,800 km, 58×1.5 = 87 ms of RTT\n\n```js\npar1-instance$\nPING 157.230.229.24 (157.230.229.24) 56(84) bytes of data.\n64 bytes from 157.230.229.24: icmp_seq=1 ttl=52 time=84.0 ms\n64 bytes from 157.230.229.24: icmp_seq=2 ttl=52 time=83.9 ms\n64 bytes from 157.230.229.24: icmp_seq=3 ttl=52 time=84.0 ms\n \n--- 157.230.229.24 ping statistics ---\n3 packets transmitted, 3 received, 0% packet loss, time 2002ms\nrtt min/avg/max/mdev = 83.992/84.015/84.040/0.237 ms\n```\n\n**Paris—Singapore** \n11,000 km, 110×1.5 = 165 ms\n\n```js\npar1-instance$ ping 188.166.213.141 -c 3\nPING 188.166.213.141 (188.166.213.141) 56(84) bytes of data.\n64 bytes from 188.166.213.141: icmp_seq=1 ttl=49 time=158 ms\n64 bytes from 188.166.213.141: icmp_seq=2 ttl=49 time=158 ms\n64 bytes from 188.166.213.141: icmp_seq=3 ttl=49 time=158 ms\n \n--- 188.166.213.141 ping statistics ---\n3 packets transmitted, 3 received, 0% packet loss, time 2002ms\nrtt min/avg/max/mdev = 158.845/158.912/158.991/0.330 ms\n```\n\n**Paris—Moscow** \n2,500 km, 25×2 = 50 ms of RTT\n\n```js\npar1-instance$ ping www.nic.ru -c 3\nPING www.nic.ru (31.177.80.4) 56(84) bytes of data.\n64 bytes from www.nic.ru 
(31.177.80.4): icmp_seq=1 ttl=52 time=50.5 ms\n64 bytes from www.nic.ru (31.177.80.4): icmp_seq=2 ttl=52 time=50.0 ms\n64 bytes from www.nic.ru (31.177.80.4): icmp_seq=3 ttl=52 time=50.3 ms\n \n--- www.nic.ru ping statistics ---\n3 packets transmitted, 3 received, 0% packet loss, time 2002ms\nrtt min/avg/max/mdev = 50.041/50.314/50.522/0.328 ms\n```\n\n**Paris—Amsterdam** \n450 km, 4.5×2 = 9 ms of RTT\n\n```js\npar1-instance$ ping 51.15.53.77 -c 3\nPING 51.15.53.77 (51.15.53.77) 56(84) bytes of data.\n64 bytes from 51.15.53.77: icmp_seq=1 ttl=52 time=9.58 ms\n64 bytes from 51.15.53.77: icmp_seq=2 ttl=52 time=10.2 ms\n64 bytes from 51.15.53.77: icmp_seq=3 ttl=52 time=9.71 ms\n \n--- 51.15.53.77 ping statistics ---\n3 packets transmitted, 3 received, 0% packet loss, time 2003ms\nrtt min/avg/max/mdev = 9.589/9.852/10.250/0.308 ms\n\n```\n\n![](https://www-uploads.scaleway.com/blog-181207-Online-DC2-104_super_cropped-3.webp)\n\n## Other Sources of Latency\n\nWhile distance is the main source of latency, other factors can also delay data propagation within modern networks.\n\n### Serialization Delay\nThe second most crucial latency factor is the so-called serialization delay. Some time ago it was actually competing with distance for first prize, but nowadays is becoming less significant. \n \nBoth end-point and intermediate devices (routers, switches) are ultimately just computers, which store data chunks in memory before sending them to the transmission media (e.g. optical fiber, copper wires, or radio). In order to send these packets to the network interfaces, computers need to _serialize_ them, i.e. encode the data bits into a sequence of electromagnetic signals, suitable for transmission over the interface media at a constant rate. The time the data spends in a buffer before it gets sent down the wire is called **serialization delay**. 
\n \nLet's imagine that we have a 1500 byte-long Ethernet frame which needs to be sent down a 1GE interface:\n\n![](https://www-uploads.scaleway.com/blog-1500bytes_in_12us_resized.webp)\n\n12 µs (microseconds) are needed to put a 1500-byte frame onto the wire using a 1GE interface. For 1GB of data the time will be around 8 seconds in total. This is the time the data spends in buffers in order to get signaled to the transmission media. The higher the interface bandwidth, the lower the serialization delay. \n \nThe opposite is true, as well. For lower-rate interfaces like xDSL, WiFi, and others, the serialization delay will be higher and so this factor becomes more significant.\n\n**Queuing Delay** \nThe astute reader might notice that a 48-byte ICMP Echo Request packet (plus the Ethernet overhead) should take much less time to get serialized than a 1500-byte frame. This is correct, however, usually, each link is used for multiple data transfers \"at the same time\", so a ping via an \"empty\" ADSL or WiFi link will give very different RTT values than a ping in parallel with a large file download.\n\nThis phenomenon is known as **queuing delay**. At the time of low-speed interfaces, it would severely affect small packets used by latency-sensitive applications like VoIP, transmitted along with large packets carrying file transfers. When a large packet is being transferred, smaller packets behind are delayed in the queue.\n\nTypically, today's home and mobile internet users experience somewhere from 5 to 10 milliseconds of latency added by their home WiFi and last-mile connection, depending on the particular technology.\n\nSerialization and queuing delays can be compared with what people experience when getting onto an escalator. The escalator is moving at a constant speed, which can be compared with the interface bit-rate, and the crowd just behind it is the buffer. 
You can notice that even if there is nobody around, when a group of 3 or 4 people comes to the escalator they need to wait for each other to get onto it. A similar phenomenon can be observed on highway exits where vehicles must slow down. During busy hours this creates traffic jams.\n\nSticking with this analogy, we can imagine that you want to drive from your home to some place 1000 km away. The following factors will contribute to the total time it takes you to make this trip:\n\n* Distance-related propagation delay is added because you can't drive 1000 km in much less than half a day, no matter how expensive your car is (you can't overcome the speed of light in a fiber).\n* Serialization delay is added when you drive through urban areas, with intersections, traffic lights and one-way lanes (low bit-rate access interfaces).\n* Queuing delay is added when you get stuck in traffic jams (interface buffers).\n\n### Switching Delay\nThis one is something of a false latency factor in real-life computer networking. Once a router receives a packet, a forwarding decision must be taken in the router data-plane in order to choose the next interface to which the packet should be sent. The router extracts bits from the packet header and uses them as a key to perform the forwarding lookup. In fact, modern packet switching chipsets perform this lookup within sub-microsecond times. Most modern switches and routers operate at a near wire rate, which means that they can perform as many forwarding lookups per second as the number of packets they can receive on the interfaces. There might be special cases and exceptions, but generally when a router cannot perform enough lookups to make a forwarding decision for all arriving packets, those packets are dropped rather than delayed. 
So in practical applications, the switching delay is negligible as it is several orders of magnitude lower than the propagation delay.\n\nNevertheless, some vendors still manage to position their products as \"low-latency\", specifically highlighting switching delay.\n\n## Latency and Bandwidth Limitation Techniques \n\nTraditionally in residential broadband (home internet) and B2B access networks, the overall bandwidth per subscriber was determined by the access interface bit-rate (ADSL, DOCSIS, etc). However, Ethernet is now becoming more and more popular. In some countries where the legacy last-mile infrastructure was less developed, and aerial links were not prohibited in communities, copper Ethernet became the primary access technology used by broadband ISPs. In datacenter networking we also use 1GE, 10GE, 25GE, and higher rate Ethernet as access interfaces for servers. This creates a problem of artificial bandwidth limitation, where bandwidth must be limited to that which is stated in the service agreement.\n\nTwo main techniques exist to achieve this goal.\n\n### Shaping\n\nTraffic shaping technique is based on the so-called [leaky bucket algorithm](https://en.wikipedia.org/wiki/Leaky%5Fbucket), which delays packets in a buffer in order to force flow-control mechanisms to decrease the amount of data in fly and therefore reduce the effective bandwidth. When the buffer gets full, the packets are dropped, forcing TCP to slow down further.\n\nAs such, this technique effectively reduces bandwidth by increasing the queuing delay, which consequently increases end-to-end latency. 
Residential broadband Internet subscribers or B2B VPN customers can often observe an additional delay in the order of several milliseconds which is added by their ISP's broadband access router in order to enforce service agreement bandwidth rate.\n\n### Policing\n\nThis technique is considered more straightforward than the previous one, and is cheaper to implement in hardware as it doesn't require any additional buffer memory. Policing is based on the so-called [token bucket algorithm](https://en.wikipedia.org/wiki/Token%5Fbucket) and employs the notion of a **burst**. When the amount of transferred data bypasses a certain configured limit, packets get dropped, and the burst counter is incremented at constant rate, corresponding to the desired bandwidth.\n\nChoosing the right value for the **policer burst size** has always been a mystery for network engineers, leading to many misconfigurations and mistakes. While many guides still recommend setting the burst size as the amount of data, transferred in 5 or 10 ms at the outgoing interface rate, this recommendation is not applicable for modern high-speed interfaces like 10/100GE and higher. This is because it is based on the hypothesis that the serialization delay is the most important factor of the overall end-to-end latency. As discussed above, this hypothesis doesn't hold nowadays, as the interface rates have risen dramatically during the last decade, meaning that the most important latency factor now is propagation delay.\n\nSo if we want a customer to be able to communicate with internet resources, let's say, 200 ms away, the policer burst size must be equal or greater than the amount of data, transferred at the desired rate in 200 ms. 
If we want to limit the customer's bandwidth to 1 Gbps we should calculate the burst-size as follows:\n\n![](https://www-uploads.scaleway.com/blog-burst_200ms_resized.webp)\n\nSetting the burst size at less than this amount will impact communication with Internet resources hosted far away from customers. This is a known recurrent problem in some regions which are rather remote from major internet hubs: if an ISP somewhere in Kamchatka or South America sets a burst size which doesn't accommodate those 200-250 milliseconds of traffic, their customers have trouble communicating with resources hosted in Frankfurt or Seattle. The same applies to hosting providers: lower policer burst sizes applied to these servers will make their services unusable for end-users on remote continents.\n\n## Conclusion\n\n* Latency and bandwidth are closely connected in modern IP networks.\n* End-to-end latency is determined by several factors, the most important of which is propagation delay, in turn determined by light speed.\n* In the modern Internet, most source/destination pairs are within 200-250 ms.\n* RTT of up to 100 ms is acceptable from the customer experience point of view, \u003c60ms is considered \"good\" and \u003c30 ms perfect.","createdAt":"2023-01-18T16:05:44.838Z","updatedAt":"2023-01-26T22:30:31.969Z","publishedAt":"2023-01-18T16:11:17.334Z","locale":"en","tags":"Network\nIntroduction","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":14,"excerpt":"Learn about bandwidth and packet drops and the impact on \"the speed of the internet », the factors that contribute to latency including propagation, serialization, queuing, and switching delays.","author":"Pavel Lunin","h1":"Network Latency: how latency, bandwidth \u0026 packet drops impact your 
speed","createdOn":"2022-07-21"}},{"id":62,"attributes":{"title":"behind-the-scenes-of-the-night-our-transformer-shut-down-in-our-data-center","path":"behind-the-scenes-of-the-night-our-transformer-shut-down-in-our-data-center/","description":"One night in September, a power transformer shut down in one of our Parisian data centers. While we were writing this article, this situation happened again for the third time in ten years. Like the two other times, our two power backups ensured the power lineup worked while our team rallied to bring the situation back to normal. Read on to find out what happened during this tense night.\n\nWe equip all of our data centers with a Scaleway-made building management system tool called SiMA. Thanks to this tool, we can monitor and analyze hundreds of thousands of real-time data points from our equipment. This allows us to have a complete overview of our infrastructure at all times, and to be able to optimize it to be as close as possible to customers’ demands.\n\nWe build our software and hardware to monitor our equipment because manufacturers’ products do not come equipped with the technical level we require.\n\nIt is common to see building management system tools exceed one million euros in our business. \n\nSo, we built our own and integrated it as an internal chatbot. Thanks to SiMA, we started receiving notifications at 05:09 AM, alerting us that one of our power lineups was no longer being supplied by the grid. Our technicians immediately checked the [programmable logic controller](https://en.wikipedia.org/wiki/Programmable%5Flogic%5Fcontroller) and confirmed what we feared: SiMA was right, and we had a long night ahead of us. As soon as the failure occurred, the automatic switch to our generators had been made.\n\n### \nFirst step: synchronize and assess the situation\n\nWe synched with our on-call engineers and board members, and [notified our clients](https://status.scaleway.com/incidents/nycn0qdbm71g). 
At this point, we had five days of autonomy on fuel oil, and 20 minutes on battery.
\n\nAfter installing our new Buchholz relay, we added 30L of oil, and we then needed to purge any air from the system. There’s always a risk of fire with transformers, like batteries or power inverters. That is why we installed it in a fire-resistant retention tank, partitioned by a fire-proof wall. The tank also collects oil leaks - if the oil catches fire, the fire naturally gets suppressed by the tank. Even if the vegetable oil is pretty harmless for the environment, we ensure any residue and leaks are collected. \n\nFollowing the system purge, the Buchholz relay was ready to be pushed into production. Our team prepared to put the transformer live again, and switch the system over to stop using the generators. \n\n![](https://www-uploads.scaleway.com/E_R1_Xmp_WUAAH_LJ_29d0249fd7.webp)\n\n## The transformer sings at 50 Hertz again\n\n...and now all we had to do was to test it! \n\nTo monitor and test the new installation live, we must close its MV circuit breaker (20,000V). A special gas, SF6, is used to prevent the formation of an electric arc.\n\nUsually, this is inevitable in this voltage range. The operator must use the appropriate PPE - gloves, an anti-UV helmet, an insulating mat, and a stool. \n\n\u003ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/Ku1fnkc_t-g\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen\u003e\u003c/iframe\u003e\n\nThe Buchholz relay is a fire safety element, so we had to test it carefully: three engineers synched by radio were necessary to validate the alarm reports and that the circuit breaker would trip correctly. \n\nAfter 10 hours of intervention, dozens of people involved, and zero customers impacted, the issue was resolved. 
In 10 years, this exact same incident has occurred three times.\n\nTransparency is one of our core values, and that is why SiMA also provides live data like our real-time PUE reports for each of our data centers, as you can see, for [instance, here for DC3](https://pue.dc3.scaleway.com/en/).","createdAt":"2023-01-17T13:29:52.639Z","updatedAt":"2023-01-26T08:25:14.510Z","publishedAt":"2023-01-17T13:33:23.006Z","locale":"en","tags":"Story\nDatacenter\nPost mortem","popular":false,"articleOfTheMonth":false,"category":"Deploy","timeToRead":3,"excerpt":"One night in September, a power transformer shut down in one of our Parisian data centers. Read on to find out what happened during this tense night.","author":"Hana Khelifa","h1":"Behind the scenes of the night our transformer shut down in our data center","createdOn":"2021-11-29"}},{"id":126,"attributes":{"title":"building-a-globally-distributed-key-value-store","path":"building-a-globally-distributed-key-value-store/","description":"As a cloud provider, we handle a [considerable amount of data on a daily basis](https://www.scaleway.com/en/blog/improve-disaster-resiliency-with-highly-redundant-multi-az-object-storage/), especially with our [Object Storage products](https://www.scaleway.com/fr/object-storage/). So we needed a way to distribute this data globally, with various consistency, replication, and database sharding for linear read and write latency. \n\nWe designed the database we needed in-house. A platform that can scale up to millions of different databases with billions of entries, all the while maintaining client separation, good latency, and great performance. 
It also had to be:\n* Flexible because in some regions, we can have multi-datacenter replication\n* Consistent because there are strict operational requirements on our production\n* To support continuous growth, any platform needs to be highly scalable\n* Reliable because even the slightest outage has significant repercussions on client trust (and financial consequences)\n\n## Meet the reliability and scaling needs of Object Storage\n\n[Hive](https://papers.s3.fr-par.scw.cloud/hive.pdf) is a database that stores key-value pairs and shards data across many RAFT clusters, with replication to ensure global availability at the highest level of abstraction. We designed it to scale up to thousands of machines across multiple data centers worldwide and billions of database entries.\n\nFor any cloud provider, dealing with failures in infrastructure comprising millions of components is a standard mode of operation; there are always a significant number of failures at any given time. So, we designed Hive to treat failures as the typical case without having an impact on availability or performance. \n\n## Ensure consistency and data repartitioning\n\nClients can use Hive to store data safely, with specific optimizations for specific access patterns. A client can choose a consistency per read or write request.\n\nFor instance, for a DNS database engine, consistency might be preferable to have low write latencies. But for an Object Storage engine, strong consistency is paramount. \n\nRather than creating a generic database engine with a query language, we decided to create specific storage engines optimized for their dedicated use cases.\n\nHive’s main client is Scaleway’s Object Storage. It stores a reference to objects in the database and uses it for specific Object Storage operations (versioning, using delimiters, or prefix listing). \n\nIt also uses consistency features to ensure bucket unicity worldwide and strong consistency multi-datacenter replication to ensure safety. 
\n\nOur main problem with the previous Object Storage database architecture was the database sharding - the databases were growing larger and this impacted latency and sometimes even replication. We solved this by splitting objects lexicographically among many shards, automatically splitting and merging shards when needed. \n\nWe used a modified version of the [RAFT quorum protocol](https://raft.github.io/) to ensure consistency and replication for a shard. In order to avoid having one big RAFT cluster, we split all the shards into RAFT groups, which are RAFT state machines dedicated to specific data sets.\n\nThis also enabled us to avoid catastrophic failures in a quorum fail or some other internal error.\n\n## Design overview\n\nHive is composed of many different nodes on different machines, data centers, and regions.\n\nA node stores multiple clusters (thousands of clusters per node) and responds to queries for reading and writing on those clusters. A node can also take clients’ requests and redirect operations to the specific nodes holding the information. \n\nSo we split the cluster into two logical parts: Storage \u0026 API. \n\n### Designing the API\n\nAny node can respond to requests regarding whether or not the node has the data. The node does a cluster _resolve_ on each request, caching most of the results for the next one, and then knows which node it needs to talk to in order to fulfill said request. \n\nThis is the opposite of the traditional client redirection from RAFT architectures, as the node does the redirection for the client. This approach enables multiple optimizations, but the main ones are that we can cache the path and the nodes of the requests to avoid having to resolve it again, and we can put a simple load balancer in front of Hive without worrying about redirections. \n\nWe made multiple APIs for Hive: frontal ones, usually an HTTP API, that clients talk to, and an internal one, using protobuf for internal node communications. 
\n\nThis design allowed us to bind the client-facing server to a private IP address and use IPv6 internet addresses for node-to-node communication. To ensure the safety of communications between nodes, every packet is end-to-end encrypted. \n\n### Designing the storage\n\nEach Hive cluster is composed of at least one storage backend. They can have different database engines and maintain an in-memory state machine. A storage backend implements a subset of a RAFT cluster with a log application and its database engine. It does not know what other backends it is paired with, even though a cluster shares the same RAFT log for all of its backends. Each backend can use different storage engines for their storage: backend ’A’ can use SQLite, and backend ’B’ can use LMDB without any issues. \n\nA common RAFT log is shared by the cluster, storing all the cluster operations and some \"meta\" operations, like adding a node into a cluster or changing the default consistency of a cluster. \n\nEach node maintains a global cache to quickly resolve a cluster when needed. This cache is stored in RAM, with an AVL tree, and stores all the addresses of the nodes composing a cluster and which node was the last known leader. When a leader changes, any node can redirect the caller to it, and the caller then updates its cache. Upon cluster deletion or re-creation, the node will return a \"Does not exist\" code, resulting in a cache flush for this particular entry on the caller’s side.\n\n![Hive: bird’s eye view](https://lh4.googleusercontent.com/QKcKo7lVXxzO1LX4BWlK_yRWdQptQvSVf536V57g9CZXVA6uTq9miqzggsJX0Nql9CWqW58_LeGcIfPnuLPSHevb1HvJNeq3CX9fgWpIkWLZrH5S-GIrk6xoIIT-ZHwoqzumugHO)\n \n## Amazon S3 is a key-value store we exposed it to operate on \n\nAmazon S3 is an Amazon API, which is, in essence, a key-value store. The key is an object name, the value is binary content. Some metadata can be set by the user on an object: tags, access control lists, or simple flags. 
Alongside this, the actual backend has to store the data’s physical position if one does not store the object content alongside its metadata.\n\nHive was created for this purpose: it exposes a key-value store HTTP API to an Object Storage API gateway and handles most Object Storage operations under the hood. It also enables the use case of huge buckets, with billions of objects, with no performance penalty.\n\nHive is a database, but it is not designed to store the object content, only the metadata.\n\nLikewise, Hive is not an Object Storage API. Its HTTP API is related to Amazon S3 API calls but cannot be exposed directly to the client. A third party must handle some features like signature, ACLs and data storage. \n\n## Object storage API\n\nHive exposes an HTTP API for Object Storage in the form of a one-route, body-action JSON API. The value may depend on the type of call, but the global rule is that every client-facing Object Storage call has an equivalent Hive call. \n\nThe idea is for the gateway to do as little work as possible, leaving Hive the specific behavior of Object Storage in some cases: Versioning, Multipart Upload, and other operations. Even though HTTP comes with a bit of overhead, it was chosen to make the client implementation easier. Every language has a library to implement an HTTP client with JSON payloads. \n\n## Key unicity across multiple regions\n\nA key Object Storage feature is that a bucket name is unique across all Object Storage regions. We use Hive to ensure strongly consistent writing and unicity.\n\nThe safety calls and two-step-commits are automatically used on the ’CreateBucket’ API call, leaving almost no work to do for the gateway.\n\nIn order to ensure unicity, a Redis-like backend is used, with a set nx=1, which means a key is set only if it does not already exist. It exposes a Redis-like API over protobuf but cannot be accessed directly by the caller. 
The cluster is automatically created on the first bucket, with a worldwide failure domain configured. \n\n![](https://lh5.googleusercontent.com/YxKqeLhbyJjYfhJIRyfl0ZlcX5YeoTVTvRtYx-bb4u7o1jrvK8NnuyQTkPXocCAD4IA_6VFE0n_XJmrYKUn_OlVkEDCIUfYNpOAKdZVEgMceXiCIL_jTpE8zt3d4FOpnqvsfA0ZG)\n\n \n## Hive has been in production for months now\n\nSince Hive has been in production on the Scaleway Object Storage product, 5 billion entries have been written, with an average write P90 around 1ms and an average read of P90 around 150us.\n\nOur initial tests showed that the aggregate availability (return\\_code != 5xx / total\\_requests \\* 100) is 99.9998%. It is mainly due to bugs and crashes which are to be expected with a first deployment, but still higher than the proposal on our current platform. \nWe are pretty happy with those numbers, as the node crashes almost had no impact on client-facing calls, despite the occasional spikes in tail latency.","createdAt":"2023-01-18T10:26:18.379Z","updatedAt":"2024-10-25T14:03:20.611Z","publishedAt":"2023-01-18T10:28:02.407Z","locale":"en","tags":"Story\nEngineering\nStorage","popular":false,"articleOfTheMonth":false,"category":"Build","timeToRead":7,"excerpt":"For any cloud provider, dealing with failures in infrastructure comprising millions of components is a standard mode of operation; there are always a significant number of failures at any given time. ","author":"Louis Solofrizzo","h1":"Building a distributed Key-Value store to power Object Storage","createdOn":"2022-09-09"}}]},"meta":{"id":565,"title":"Understanding PAM - Pluggable Authentication Modules","description":"PAM is one of the Linux components you probably already heard of. 
You know that it is used someway to authenticate users.","ogtype":null,"ogtitle":null,"ogdescription":null,"noindex":false},"localizations":{"data":[]}}}]},"meta":{"id":661,"title":"Load Balancer at Scaleway","description":"Server applications failures and lack of scalability can cause real problems for a server. When server applications fail, a node is unavailable to answer requests. In that case, if there is no redundancy, the service is unavailable as long as the server is not back in service. ","ogtype":null,"ogtitle":null,"ogdescription":"Server applications failures and lack of scalability can cause real problems for a server. When server applications fail, a node is unavailable to answer requests. In that case, if there is no redundancy, the service is unavailable as long as the server is not back in service. ","noindex":false,"ogimage":{"data":null}},"localizations":{"data":[]}}},"_nextI18Next":{"initialI18nStore":{"en":{"common":{"open":"Open","close":"Close","backTo":"Back to {{page}}","seeMore":"See more","skip":"Skip {{to}}","toLogin":"to login","toMain":"to main content","toFooter":"to footer section","footer":{"followUs":"Follow us","subLinks":{"contracts":{"href":"/en/contracts/","title":"Contracts"},"legalNotice":{"href":"/en/legal-notice/","title":"Legal Notice"},"privacyPolicy":{"href":"/en/privacy-policy/","title":"Privacy Policy"},"cookie":{"href":"/en/cookie/","title":"Cookie"},"securityMeasures":{"href":"https://security.scaleway.com","title":"Security Measures"}}},"breadcrumb":{"homepageLink":{"home":{"href":"/","title":"Home"}}},"cookies":{"acceptAll":"Accept all","rejectAll":"Reject all","save":"Save settings","panelManagementTitle":"Manage cookies settings","panelConsent":{"title":"Cookie time!","description":"We use cookies in order to improve our website and to offer you a better experience. 
You can also consult our ","linkLabel":"Cookie policy","link":"/en/privacy-policy/","settings":"Manage your preferences"},"categories":{"functional":{"title":"Functional","subtitle":"Always active","description":"These cookies are required for the website to function properly and to allow you to use its services and features. Without these cookies, we would be unable to provide certain requested services or features."},"analytics":{"title":"Analytics","description":"These cookies are used to monitor the performance of our site and to enhance your browsing experience."},"marketing":{"title":"Marketing","description":"These cookies are used to understand user behavior in order to provide you with a more relevant browsing experience or personalize the content on our site."}}}},"blog":{"tagsAriaLabel":"Tags list. Click to choose as filter.","timeToRead":"{{min}} min read","recommendedArticles":"Recommended articles","pagination":{"next":"Forward to Next","previous":"Back to Previous","goToPage":"Go to page ","currentPage":"Current page: "},"copyButton":{"copied":"Copied!","defaultValue":"Copy","code":"Copy code"},"home":{"title":"Scaleway Blog - All posts","description":"Scaleway’s blog helps developers and startups to build, deploy and scale applications.","heading":"Scaleway Blog","articleOfMonth":"Must read","latestArticles":"Latest articles","popularArticles":"Most popular articles"},"categoryPage":{"build":{"title":"Build Projects with Scaleway","description":"Learn how to easily build and develop projects using Scaleway products."},"deploy":{"title":"Deploy Applications with Scaleway","description":"Discover how to deploy your applications smoothly with Scaleway."},"scale":{"title":"Scale Your Applications with Scaleway","description":"Find out how to efficiently scale your applications on Scaleway."},"incidents":{"title":"Incident Reports","description":"All the latest updates on Scaleway Cloud ecosystem incidents, and how they were 
resolved."}},"authorPage":{"title_one":"A {{author}}'s post","title_other":"All {{author}}'s posts","description_one":"Discover a blog post written by {{author}}.","description_other":"Discover all the blog posts written by {{author}}."}},"pages":{"available_zones":"Available zones:","city":"{{code}}:","AMS":"Amsterdam","PAR":"Paris","WAW":"Warsaw","yes":"Yes","no":"No","daily":"Daily","weekly":"Weekly","monthly":"Monthly","yearly":"Yearly","published":"Published on","seeMore":"See more","blocks":{"calculator":{"choose":"Choose your plan","availabilityZone":"Availability Zone","instanceType":"Instance Type","quantity":"Quantity","selectPlaceholder":"Select...","volumeSize":"Volume Size","volumeSizeHelper":"Min. 10 GB","volumeType":"Volume Type","sizeUnit":"GB","flexibleIp":"Flexible IPv4","ipHelper":"You need a Flexible IP if you want to get an Instance with a public IPv4.\n Uncheck this box if you already have one available on your account, or if you don’t need an IPv4.","noOtherType":"No other type available with this Instance"},"productFaq":{"title":"Frequently asked questions"},"productTutorials":{"title":"Get started with tutorials"},"customerStories":{"defaultTitle":"Customer success stories"}},"templates":{"beta":{"discovery":{"title":"Discovery","description":"Discovery products are prototypical versions of a product. This phase aims to validate an idea and to prove there is interest in the product. During the Discovery phase, customers can be contacted by the Product team to ask them to share their thoughts on the product and to help with the development of the new solution.\nProducts in the Discovery phase are not guaranteed to be released. The duration of the Discovery phase may vary depending on the product."},"early-access":{"title":"Early Access","description":""},"private":{"title":"Private Beta","description":"Private Beta products are early versions of future products or features. 
This phase allows users to test, validate a product in development, and give feedback to the Product team.\nPrivate Beta is limited to users selected by the Product Development team. Users can request access to the product via a form and the development team will grant access rights. The Private Beta stage usually lasts three to six months."},"public":{"title":"Public Beta","description":"Public Beta products are ready to be tested by all customers. Public Beta products may not include all of the final product’s features.\nPublic Beta is the last stage of testing before a product is released in General Availability.\nThese products are intended to be used in test environments unless specified otherwise. The Public Beta phase lasts six months on average."}},"domains":{"register":"Register","registerInfo":"Price before tax\nFirst year registration's price.","transfer":"Transfer","transferInfo":"Price before tax\nTransfer price for domain during first year.","renewing":"Renewing","renewingInfo":"Price before tax\nSecond year registration's price.","restoration":"Restoration","restorationInfo":"Price before tax\nPrice for renewing after expiration and/or redemption period."},"contact":{"titleForm":"Your contact","firstName":"First Name","lastName":"Last Name","email":"Email","jobRole":"Job Role","tel":"Phone (format: 33600000000)","digits":"Only digits","acme":"ACME","8ball":"8ball","emailPlaceholder":"username@domain.tld","john":"John","doe":"Doe","SocietyTitle":"Your société","company":"Company","industry":"Industry","country":"Country","legal":"Your data will be processed by Scaleway S.A.S. in order to manage your request. 
To know more, visit our","privacy":"Privacy Policy","ctaLabel":"Be contacted","optional":"(optional)","countries":{"labelCountry":"Select your country","france":"France","germany":"Germany","austria":"Austria","belgium":"Belgium","czechia":"Czechia","denmark":"Denmark","estonia":"Estonia","finland":"Finland","greece":"Greece","hungary":"Hungary","ireland":"Ireland","italy":"Italy","latvia":"Latvia","lithuania":"Lithuania","norway":"Norway","netherlands":"Netherlands","poland":"Poland","portugal":"Portugal","romania":"Romania","slovenia":"Slovenia","spain":"Spain","sweden":"Sweden","switzerland":"Switzerland","gb":"United Kingdom of Great Britain and Northern Ireland","usa":"United States of America","other":"Other"},"industries":{"labelIndustry":"Select your industry","computerSoftware":"Computer Software","consulting":"Consulting","ecommerce":"E-commerce","education":"Education","energy":"Energy","finance":"Finance,","gaming":"Gaming","hospitalHealthcare":"Hospital \u0026 Healthcare","infoTechnoServices":"Information Technology \u0026 Services","manufacturing":"Manufacturing","media":"Media","publicSector":"Public Sector","retail":"Retail","startups":"Startups","technology":"Technology","telecommunications":"Telecommunications","transportTruckRailroad":"Transportation/Trucking/Railroad"}},"contactPartner":{"partner":{"title":"Partner Identification","domain":"Your domain","placeholder":"example.com"},"opportunity":{"title":"Opportunity information","company":"Company name","project":"Project name","yourProject":"Your project name","name":"Contact name","firstName":"Contact first name","email":"Contact email","number":"Contact phone number","job":"Contact job title","onlyDigits":"Only digits","countries":{"title":"Country","labelCountry":"Select your 
country","france":"France","germany":"Germany","austria":"Austria","belgium":"Belgium","czechia":"Czechia","denmark":"Denmark","estonia":"Estonia","finland":"Finland","greece":"Greece","hungary":"Hungary","ireland":"Ireland","italy":"Italy","latvia":"Latvia","lithuania":"Lithuania","norway":"Norway","netherlands":"Netherlands","poland":"Poland","portugal":"Portugal","romania":"Romania","slovenia":"Slovenia","spain":"Spain","sweden":"Sweden","switzerland":"Switzerland","gb":"United Kingdom of Great Britain and Northern Ireland","usa":"United States of America","other":"Other"}},"product":{"title":"Product scope","label":"Description : Compute, Containers, AI …"},"timing":{"title":"Timing","timeScale":"Time scale","placeholder":"Select time scale","budget":"Budget","company":"Company name","event":"Compelling event","eventPlaceholder":"Reason to act","date":"Estimated close date"},"budget":{"title":"Budget","overallBudget":"Overall project budget","productBudget":"Product budget (MRR)","currency":"In euros €"},"legal":"Your data will be processed by Scaleway S.A.S. in order to manage your request. 
To know more, visit our","privacy":"Privacy Policy","ctaLabel":"Submit"},"testimonials":{"title":"Customer Success Story","readMore":"Read more"},"pricingPage":{"backButton":"Back to Pricing page","title":"All Range","legal":"Legal notice","backToProducts":"Return to products","openAll":"Open all","closeAll":"Close all","close":"Close","open":"Open","regions":"Regions","subtableToggleButtons":"Subtable open buttons","viewPricing":"View pricing"},"partnerProgram":{"showingPartners_zero":"No partners to show","showingPartners_one":"Showing {{count}} partner","showingPartners_other":"Showing {{count}} partners","type_one":"Partner type","type_other":"Partner types","expertise_one":"Expertise","expertise_other":"Expertises","industry_one":"Industry","industry_other":"Industries","location_one":"Location","location_other":"Locations","filtersTitle":"Filters","clearFiltersLabel":"Clear filters"},"partnerPage":{"partners":"Partners","details":"Details","activity":"Activity","contact":"Contact","viewWebsite":"View Website"}},"notFound":{"title":"Page not found","text":"It seems that the page you want to access does not exist. 
Please check your URL or renew your request later.","link":"Return to homepage"}}}},"initialLocale":"en","ns":["common","blog","pages"],"userConfig":{"i18n":{"locales":["default","en","fr"],"defaultLocale":"default","localeDetection":false},"default":{"i18n":{"locales":["default","en","fr"],"defaultLocale":"default","localeDetection":false}}}},"header":{"mainNavigationItems":[{"id":542,"title":"Dedibox and Bare Metal","menuAttached":false,"order":1,"path":"/DediboxBareMetal","type":"WRAPPER","uiRouterKey":"dedibox-and-bare-metal-1","slug":"dedibox-bare-metal","external":false,"items":[{"id":543,"title":"Dedibox - dedicated servers","menuAttached":false,"order":1,"path":"/DediboxBareMetal/Dedibox","type":"INTERNAL","uiRouterKey":"dedibox-dedicated-servers","slug":"dedibox-bare-metal-dedibox","external":false,"related":{"id":29,"title":"Dedibox","path":"/dedibox/","scheduledAt":null,"createdAt":"2022-04-19T15:29:02.488Z","updatedAt":"2024-11-24T18:03:28.395Z","publishedAt":"2022-04-28T17:05:07.122Z","locale":"en","__contentType":"api::page.page","navigationItemId":543,"__templateName":"Generic"},"items":[{"id":544,"title":"Start","menuAttached":false,"order":1,"path":"/DediboxBareMetal/Dedibox/Start","type":"INTERNAL","uiRouterKey":"start-2","slug":"dedibox-bare-metal-dedibox-start","external":false,"related":{"id":53,"title":"Start","path":"/dedibox/start/","scheduledAt":null,"createdAt":"2022-04-21T16:44:17.577Z","updatedAt":"2024-11-24T18:10:42.131Z","publishedAt":"2022-04-28T17:12:40.426Z","locale":"en","__contentType":"api::page.page","navigationItemId":544,"__templateName":"Generic"},"items":[],"description":"Affordable servers with the best price-performance ratio on the 
market"},{"id":545,"title":"Pro","menuAttached":false,"order":2,"path":"/DediboxBareMetal/Dedibox/Pro","type":"INTERNAL","uiRouterKey":"pro-4","slug":"dedibox-bare-metal-dedibox-pro","external":false,"related":{"id":9,"title":"Pro","path":"/dedibox/pro/","scheduledAt":null,"createdAt":"2022-04-07T13:51:48.537Z","updatedAt":"2024-11-24T18:11:51.190Z","publishedAt":"2022-04-28T17:04:00.983Z","locale":"en","__contentType":"api::page.page","navigationItemId":545,"__templateName":"Generic"},"items":[],"description":"Perfect balance of processing power, memory and storage"},{"id":546,"title":"Core","menuAttached":false,"order":3,"path":"/DediboxBareMetal/Dedibox/Core","type":"INTERNAL","uiRouterKey":"core-1","slug":"dedibox-bare-metal-dedibox-core","external":false,"related":{"id":14,"title":"Core","path":"/dedibox/core/","scheduledAt":null,"createdAt":"2022-04-11T09:05:58.588Z","updatedAt":"2024-11-24T18:12:59.955Z","publishedAt":"2022-04-28T17:04:22.560Z","locale":"en","__contentType":"api::page.page","navigationItemId":546,"__templateName":"Generic"},"items":[],"description":"The high performance backbone of your mission-critical infrastructure"},{"id":547,"title":"Store","menuAttached":false,"order":4,"path":"/DediboxBareMetal/Dedibox/Store","type":"INTERNAL","uiRouterKey":"store-2","slug":"dedibox-bare-metal-dedibox-store","external":false,"related":{"id":5,"title":"Store","path":"/dedibox/store/","scheduledAt":null,"createdAt":"2022-04-01T15:14:47.812Z","updatedAt":"2024-11-24T18:13:51.719Z","publishedAt":"2022-04-28T17:03:51.376Z","locale":"en","__contentType":"api::page.page","navigationItemId":547,"__templateName":"Generic"},"items":[],"description":"For mission-critical data, fast storage, backup and 
streaming"},{"id":832,"title":"GPU","menuAttached":false,"order":5,"path":"/DediboxBareMetal/Dedibox/GPU_ddx","type":"INTERNAL","uiRouterKey":"gpu-9","slug":"dedibox-bare-metal-dedibox-gpu-ddx","external":false,"related":{"id":1454,"title":"GPU","path":"/dedibox/gpu/","scheduledAt":null,"createdAt":"2024-10-31T10:01:24.876Z","updatedAt":"2024-11-24T18:18:19.841Z","publishedAt":"2024-11-07T07:38:37.573Z","locale":"en","__contentType":"api::page.page","navigationItemId":832,"__templateName":"Generic"},"items":[],"description":"Dedicated GPU power with reliable performance and stability"},{"id":548,"title":"Dedirack","menuAttached":false,"order":6,"path":"/DediboxBareMetal/Dedibox/Dedirack","type":"INTERNAL","uiRouterKey":"dedirack-1","slug":"dedibox-bare-metal-dedibox-dedirack","external":false,"related":{"id":155,"title":"Dedirack","path":"/dedibox/dedirack/","scheduledAt":null,"createdAt":"2022-05-02T10:08:21.002Z","updatedAt":"2024-11-24T18:03:29.095Z","publishedAt":"2022-05-02T10:46:06.212Z","locale":"en","__contentType":"api::page.page","navigationItemId":548,"__templateName":"Generic"},"items":[],"description":"Host your Hardware in our secured French datacenters"},{"id":742,"title":"Dedibox VPS","menuAttached":false,"order":7,"path":"/DediboxBareMetal/Dedibox/VPS","type":"INTERNAL","uiRouterKey":"dedibox-vps","slug":"dedibox-bare-metal-dedibox-vps","external":false,"related":{"id":1234,"title":"Dedibox VPS","path":"/dedibox-vps/","scheduledAt":null,"createdAt":"2024-05-08T16:42:21.258Z","updatedAt":"2024-11-24T18:37:48.947Z","publishedAt":"2024-05-14T16:28:25.184Z","locale":"en","__contentType":"api::page.page","navigationItemId":742,"__templateName":"Generic"},"items":[],"description":"60 locations worldwide, starting at €4,99/month"}],"description":""},{"id":553,"title":"Elastic Metal - bare metal 
cloud","menuAttached":false,"order":2,"path":"/DediboxBareMetal/elasticmetal","type":"INTERNAL","uiRouterKey":"elastic-metal-bare-metal-cloud-1","slug":"dedibox-bare-metal-elasticmetal","external":false,"related":{"id":87,"title":"Elastic Metal","path":"/elastic-metal/","scheduledAt":null,"createdAt":"2022-04-28T12:45:28.696Z","updatedAt":"2024-11-08T15:01:56.485Z","publishedAt":"2022-04-28T13:22:46.501Z","locale":"en","__contentType":"api::page.page","navigationItemId":553,"__templateName":"Generic"},"items":[{"id":554,"title":"Aluminium","menuAttached":false,"order":1,"path":"/DediboxBareMetal/elasticmetal/Aluminium","type":"INTERNAL","uiRouterKey":"aluminium-1","slug":"dedibox-bare-metal-elasticmetal-aluminium","external":false,"related":{"id":8,"title":"Aluminium","path":"/elastic-metal/aluminium/","scheduledAt":null,"createdAt":"2022-04-06T13:13:04.829Z","updatedAt":"2024-11-08T15:01:56.748Z","publishedAt":"2022-04-28T17:04:04.448Z","locale":"en","__contentType":"api::page.page","navigationItemId":554,"__templateName":"Generic"},"items":[],"description":"Fully dedicated bare metal servers with native cloud integration, at the best price"},{"id":557,"title":"Beryllium","menuAttached":false,"order":2,"path":"/DediboxBareMetal/elasticmetal/Beryllium","type":"INTERNAL","uiRouterKey":"beryllium-1","slug":"dedibox-bare-metal-elasticmetal-beryllium","external":false,"related":{"id":15,"title":"Beryllium","path":"/elastic-metal/beryllium/","scheduledAt":null,"createdAt":"2022-04-11T10:57:25.297Z","updatedAt":"2024-11-08T15:01:56.754Z","publishedAt":"2022-04-28T17:13:35.576Z","locale":"en","__contentType":"api::page.page","navigationItemId":557,"__templateName":"Generic"},"items":[],"description":"Powerful, balanced and reliable servers for production-grade 
applications"},{"id":556,"title":"Iridium","menuAttached":false,"order":3,"path":"/DediboxBareMetal/elasticmetal/Iridium","type":"INTERNAL","uiRouterKey":"iridium-1","slug":"dedibox-bare-metal-elasticmetal-iridium","external":false,"related":{"id":810,"title":"Iridium","path":"/elastic-metal/iridium/","scheduledAt":null,"createdAt":"2023-04-27T13:53:48.244Z","updatedAt":"2024-11-08T15:01:56.752Z","publishedAt":"2023-05-29T08:52:19.666Z","locale":"en","__contentType":"api::page.page","navigationItemId":556,"__templateName":"Generic"},"items":[],"description":"Powerful dedicated server designed to handle high-workload applications"},{"id":555,"title":"Lithium","menuAttached":false,"order":4,"path":"/DediboxBareMetal/elasticmetal/Lithium","type":"INTERNAL","uiRouterKey":"lithium-1","slug":"dedibox-bare-metal-elasticmetal-lithium","external":false,"related":{"id":16,"title":"Lithium","path":"/elastic-metal/lithium/","scheduledAt":null,"createdAt":"2022-04-11T11:15:36.538Z","updatedAt":"2024-11-08T15:01:56.753Z","publishedAt":"2022-04-28T17:13:30.074Z","locale":"en","__contentType":"api::page.page","navigationItemId":555,"__templateName":"Generic"},"items":[],"description":"Designed with huge local storage to keep, back up, and protect your data"},{"id":833,"title":"Titanium","menuAttached":false,"order":5,"path":"/DediboxBareMetal/elasticmetal/Titanium","type":"INTERNAL","uiRouterKey":"titanium","slug":"dedibox-bare-metal-elasticmetal-titanium","external":false,"related":{"id":1457,"title":"Titanium","path":"/elastic-metal/titanium/","scheduledAt":null,"createdAt":"2024-10-31T15:08:59.416Z","updatedAt":"2024-11-08T15:52:51.005Z","publishedAt":"2024-11-07T06:52:37.648Z","locale":"en","__contentType":"api::page.page","navigationItemId":833,"__templateName":"Generic"},"items":[],"description":"Power and stability of dedicated GPU hardware integrated into the Scaleway 
ecosystem"}],"description":""},{"id":558,"title":"Apple","menuAttached":false,"order":3,"path":"/DediboxBareMetal/Apple","type":"INTERNAL","uiRouterKey":"apple-2","slug":"dedibox-bare-metal-apple","external":false,"related":{"id":1088,"title":"Apple Mac mini","path":"/apple-mac-mini/","scheduledAt":null,"createdAt":"2024-01-31T15:28:49.276Z","updatedAt":"2024-11-06T08:30:29.831Z","publishedAt":"2024-08-02T07:56:22.454Z","locale":"en","__contentType":"api::page.page","navigationItemId":558,"__templateName":"Generic"},"items":[{"id":561,"title":"Mac mini M1","menuAttached":false,"order":1,"path":"/DediboxBareMetal/Apple/M1","type":"INTERNAL","uiRouterKey":"mac-mini-m1-1","slug":"dedibox-bare-metal-apple-m1","external":false,"related":{"id":91,"title":"Hello m1","path":"/hello-m1/","scheduledAt":null,"createdAt":"2022-04-28T15:24:50.963Z","updatedAt":"2024-11-06T08:29:13.324Z","publishedAt":"2023-10-16T14:15:59.310Z","locale":"en","__contentType":"api::page.page","navigationItemId":561,"__templateName":"Generic"},"items":[],"description":"Enjoy the Mac mini experience with great simplicity"},{"id":560,"title":"Mac mini M2","menuAttached":false,"order":2,"path":"/DediboxBareMetal/Apple/m2","type":"INTERNAL","uiRouterKey":"mac-mini-m2-2","slug":"dedibox-bare-metal-apple-m2","external":false,"related":{"id":1086,"title":"mac mini M2","path":"/mac-mini-m2/","scheduledAt":null,"createdAt":"2024-01-31T09:30:46.938Z","updatedAt":"2024-08-07T16:00:48.720Z","publishedAt":"2024-02-05T15:21:02.196Z","locale":"en","__contentType":"api::page.page","navigationItemId":560,"__templateName":"Generic"},"items":[],"description":"Perform your daily tasks with speed and efficiency"},{"id":559,"title":"Mac mini M2 Pro","menuAttached":false,"order":3,"path":"/DediboxBareMetal/Apple/M2pro","type":"INTERNAL","uiRouterKey":"mac-mini-m2-pro-1","slug":"dedibox-bare-metal-apple-m2pro","external":false,"related":{"id":991,"title":"mac mini M2 
pro","path":"/mac-mini-m2-pro/","scheduledAt":null,"createdAt":"2023-10-25T08:56:21.435Z","updatedAt":"2024-08-07T16:02:51.939Z","publishedAt":"2023-11-16T12:11:33.094Z","locale":"en","__contentType":"api::page.page","navigationItemId":559,"__templateName":"Generic"},"items":[],"description":"Realize your most ambitious projects thanks to a new level of power"}],"description":""}],"description":""},{"id":562,"title":"Compute","menuAttached":false,"order":2,"path":"/Compute","type":"WRAPPER","uiRouterKey":"compute-3","slug":"compute-4","external":false,"items":[{"id":563,"title":"Virtual Instances","menuAttached":false,"order":1,"path":"/Compute/VirtualInstances","type":"INTERNAL","uiRouterKey":"virtual-instances-1","slug":"compute-virtual-instances","external":false,"related":{"id":655,"title":"Virtual Instances","path":"/virtual-instances/","scheduledAt":null,"createdAt":"2023-02-20T10:48:52.279Z","updatedAt":"2024-08-28T07:01:50.413Z","publishedAt":"2023-02-28T08:32:03.960Z","locale":"en","__contentType":"api::page.page","navigationItemId":563,"__templateName":"Generic"},"items":[{"id":567,"title":"Production-Optimized","menuAttached":false,"order":1,"path":"/Compute/VirtualInstances/Prod","type":"INTERNAL","uiRouterKey":"production-optimized-2","slug":"compute-virtual-instances-prod","external":false,"related":{"id":657,"title":"Production-Optimized Instances","path":"/production-optimized-instances/","scheduledAt":null,"createdAt":"2023-02-20T15:13:14.415Z","updatedAt":"2024-10-30T14:59:58.375Z","publishedAt":"2023-02-28T08:34:34.739Z","locale":"en","__contentType":"api::page.page","navigationItemId":567,"__templateName":"Generic"},"items":[],"description":"Dedicated vCPU for the most demanding workloads 
(x86)"},{"id":566,"title":"Workload-Optimized","menuAttached":false,"order":2,"path":"/Compute/VirtualInstances/Workload-Optimized","type":"INTERNAL","uiRouterKey":"workload-optimized-1","slug":"compute-virtual-instances-workload-optimized","external":false,"related":{"id":802,"title":"Workload-Optimized Instances","path":"/workload-optimized-instances/","scheduledAt":null,"createdAt":"2023-04-25T12:38:13.577Z","updatedAt":"2024-08-28T12:05:29.294Z","publishedAt":"2023-05-26T13:36:52.797Z","locale":"en","__contentType":"api::page.page","navigationItemId":566,"__templateName":"Generic"},"items":[],"description":"Secure, scalable VMs, equipped for high memory and compute demands (x86)"},{"id":565,"title":"Cost-Optimized","menuAttached":false,"order":3,"path":"/Compute/VirtualInstances/Cost-Optimized","type":"INTERNAL","uiRouterKey":"cost-optimized-1","slug":"compute-virtual-instances-cost-optimized","external":false,"related":{"id":656,"title":"Cost-Optimized Instances","path":"/cost-optimized-instances/","scheduledAt":null,"createdAt":"2023-02-20T12:55:45.865Z","updatedAt":"2024-08-28T08:44:44.416Z","publishedAt":"2023-02-28T08:34:47.421Z","locale":"en","__contentType":"api::page.page","navigationItemId":565,"__templateName":"Generic"},"items":[],"description":"Highly reliable and priced affordably Instances with shared vCPUs (x86 and ARM)"},{"id":564,"title":"Learning","menuAttached":false,"order":4,"path":"/Compute/VirtualInstances/Learning","type":"INTERNAL","uiRouterKey":"learning-1","slug":"compute-virtual-instances-learning","external":false,"related":{"id":13,"title":"Stardust Instances","path":"/stardust-instances/","scheduledAt":null,"createdAt":"2022-04-11T09:03:33.397Z","updatedAt":"2024-05-15T13:51:19.969Z","publishedAt":"2022-04-28T17:04:10.708Z","locale":"en","__contentType":"api::page.page","navigationItemId":564,"__templateName":"Generic"},"items":[],"description":"A tiny instance to test and host your personal projects 
(x86)"}],"description":""},{"id":568,"title":"GPU","menuAttached":false,"order":2,"path":"/Compute/gpu","type":"INTERNAL","uiRouterKey":"gpu-8","slug":"compute-gpu","external":false,"related":{"id":1025,"title":"GPU Instances","path":"/gpu-instances/","scheduledAt":null,"createdAt":"2023-11-30T13:15:51.769Z","updatedAt":"2024-11-19T16:38:15.121Z","publishedAt":"2023-12-12T12:52:20.083Z","locale":"en","__contentType":"api::page.page","navigationItemId":568,"__templateName":"Generic"},"items":[{"id":571,"title":"L4 GPU Instance","menuAttached":false,"order":1,"path":"/Compute/gpu/L4","type":"INTERNAL","uiRouterKey":"l4-gpu-instance","slug":"compute-gpu-l4","external":false,"related":{"id":1108,"title":"L4 GPU Instance","path":"/l4-gpu-instance/","scheduledAt":null,"createdAt":"2024-02-28T16:20:43.240Z","updatedAt":"2024-11-20T14:49:27.542Z","publishedAt":"2024-03-04T13:37:45.809Z","locale":"en","__contentType":"api::page.page","navigationItemId":571,"__templateName":"Generic"},"items":[],"description":"Maximize your AI infrastructures with a versatile Instance"},{"id":572,"title":"L40S GPU Instance","menuAttached":false,"order":2,"path":"/Compute/gpu/L40s","type":"INTERNAL","uiRouterKey":"l40-s-gpu-instance","slug":"compute-gpu-l40s","external":false,"related":{"id":1221,"title":"L40S GPU Instance","path":"/l40s-gpu-instance/","scheduledAt":null,"createdAt":"2024-04-26T13:37:31.531Z","updatedAt":"2024-11-20T14:50:10.681Z","publishedAt":"2024-04-29T12:12:07.466Z","locale":"en","__contentType":"api::page.page","navigationItemId":572,"__templateName":"Generic"},"items":[],"description":"Universal Instance, faster than L4 and cheaper than H100 PCIe"},{"id":569,"title":"H100 PCIe GPU Instance","menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/h100-pcie-try-it-now/","type":"EXTERNAL","uiRouterKey":"h100-pc-ie-gpu-instance-4","slug":{},"external":true,"description":"Accelerate your model training with the most high-end AI chip"},{"id":570,"title":"GPU 3070 
Instances","menuAttached":false,"order":4,"path":"/Compute/gpu/3070","type":"INTERNAL","uiRouterKey":"gpu-3070-instances-1","slug":"compute-gpu-3070","external":false,"related":{"id":397,"title":"GPU 3070 Instances","path":"/gpu-3070-instances/","scheduledAt":null,"createdAt":"2022-05-30T11:52:26.506Z","updatedAt":"2023-11-16T16:38:12.184Z","publishedAt":"2022-05-30T12:33:10.212Z","locale":"en","__contentType":"api::page.page","navigationItemId":570,"__templateName":"Generic"},"items":[],"description":"Dedicated NVIDIA® RTX 3070 with the best price/performance ratio"},{"id":573,"title":"Render GPU Instances","menuAttached":false,"order":5,"path":"/Compute/gpu/render","type":"INTERNAL","uiRouterKey":"render-gpu-instances","slug":"compute-gpu-render","external":false,"related":{"id":52,"title":"GPU Render Instances","path":"/gpu-render-instances/","scheduledAt":null,"createdAt":"2022-04-21T16:00:29.592Z","updatedAt":"2024-09-25T09:40:12.404Z","publishedAt":"2022-04-28T17:12:46.136Z","locale":"en","__contentType":"api::page.page","navigationItemId":573,"__templateName":"Generic"},"items":[],"description":"Dedicated Tesla P100s for all your Machine Learning \u0026 Artificial Intelligence needs."}],"description":""},{"id":574,"title":"Serverless","menuAttached":false,"order":3,"path":"/Compute/Serverless","type":"WRAPPER","uiRouterKey":"serverless-7","slug":"compute-serverless","external":false,"items":[{"id":576,"title":"Serverless Functions","menuAttached":false,"order":1,"path":"/Compute/Serverless/Functions","type":"INTERNAL","uiRouterKey":"serverless-functions-1","slug":"compute-serverless-functions","external":false,"related":{"id":50,"title":"Serverless Functions","path":"/serverless-functions/","scheduledAt":null,"createdAt":"2022-04-21T15:28:10.687Z","updatedAt":"2024-07-05T11:44:44.356Z","publishedAt":"2022-04-28T17:12:49.569Z","locale":"en","__contentType":"api::page.page","navigationItemId":576,"__templateName":"Generic"},"items":[],"description":"Experience 
an easy way to run your code on the cloud"},{"id":575,"title":"Serverless Containers","menuAttached":false,"order":2,"path":"/Compute/Serverless/Containers","type":"INTERNAL","uiRouterKey":"serverless-containers-2","slug":"compute-serverless-containers","external":false,"related":{"id":7,"title":"Serverless Containers","path":"/serverless-containers/","scheduledAt":null,"createdAt":"2022-04-04T07:02:24.178Z","updatedAt":"2024-07-05T11:46:09.955Z","publishedAt":"2022-04-28T17:03:54.693Z","locale":"en","__contentType":"api::page.page","navigationItemId":575,"__templateName":"Generic"},"items":[],"description":"Easily run containers on the cloud with a single command"},{"id":579,"title":"Serverless Jobs","menuAttached":false,"order":3,"path":"/Compute/Serverless/Jobs","type":"INTERNAL","uiRouterKey":"serverless-jobs-1","slug":"compute-serverless-jobs","external":false,"related":{"id":980,"title":"Serverless Jobs","path":"/serverless-jobs/","scheduledAt":null,"createdAt":"2023-10-13T16:05:31.205Z","updatedAt":"2024-08-20T12:28:03.639Z","publishedAt":"2023-12-07T15:55:35.668Z","locale":"en","__contentType":"api::page.page","navigationItemId":579,"__templateName":"Generic"},"items":[],"description":"Run batches of tasks in the cloud"}],"description":""},{"id":580,"title":"Containers","menuAttached":false,"order":4,"path":"/Compute/Containers","type":"INTERNAL","uiRouterKey":"containers-4","slug":"compute-containers","external":false,"related":{"id":465,"title":"Containers","path":"/containers/","scheduledAt":null,"createdAt":"2022-07-29T15:09:20.535Z","updatedAt":"2024-08-28T07:05:23.005Z","publishedAt":"2023-02-27T13:53:48.270Z","locale":"en","__contentType":"api::page.page","navigationItemId":580,"__templateName":"Generic"},"items":[{"id":581,"title":"Kubernetes 
Kapsule","menuAttached":false,"order":1,"path":"/Compute/Containers/Kapsule","type":"INTERNAL","uiRouterKey":"kubernetes-kapsule-1","slug":"compute-containers-kapsule","external":false,"related":{"id":6,"title":"Kubernetes Kapsule","path":"/kubernetes-kapsule/","scheduledAt":null,"createdAt":"2022-04-01T15:40:18.523Z","updatedAt":"2024-04-30T14:13:12.823Z","publishedAt":"2022-11-02T17:14:27.738Z","locale":"en","__contentType":"api::page.page","navigationItemId":581,"__templateName":"Generic"},"items":[],"description":"Kubernetes exclusively for Scaleway products and resources"},{"id":582,"title":"Kubernetes Kosmos","menuAttached":false,"order":2,"path":"/Compute/Containers/Kosmos","type":"INTERNAL","uiRouterKey":"kubernetes-kosmos-1","slug":"compute-containers-kosmos","external":false,"related":{"id":43,"title":"Kubernetes Kosmos","path":"/kubernetes-kosmos/","scheduledAt":null,"createdAt":"2022-04-20T17:18:27.347Z","updatedAt":"2024-07-12T09:35:39.810Z","publishedAt":"2022-04-28T17:13:15.597Z","locale":"en","__contentType":"api::page.page","navigationItemId":582,"__templateName":"Generic"},"items":[],"description":"Multi-cloud Kubernetes for Scaleway and external providers resources"},{"id":583,"title":"Container Registry","menuAttached":false,"order":3,"path":"/Compute/Containers/containerregisrt","type":"INTERNAL","uiRouterKey":"container-registry-1","slug":"compute-containers-containerregisrt","external":false,"related":{"id":39,"title":"Container Registry","path":"/container-registry/","scheduledAt":null,"createdAt":"2022-04-20T14:07:31.417Z","updatedAt":"2023-11-15T08:49:34.191Z","publishedAt":"2022-04-28T17:06:10.179Z","locale":"en","__contentType":"api::page.page","navigationItemId":583,"__templateName":"Generic"},"items":[],"description":"An easy-to-use Docker 
repository"}],"description":""}],"description":""},{"id":584,"title":"AI","menuAttached":false,"order":3,"path":"/AI","type":"WRAPPER","uiRouterKey":"ai","slug":"ai-1","external":false,"items":[{"id":585,"title":"Clusters","menuAttached":false,"order":1,"path":"/AI/Clusters","type":"WRAPPER","uiRouterKey":"clusters-1","slug":"ai-clusters","external":false,"items":[{"id":588,"title":"Custom-built Clusters","menuAttached":false,"order":1,"path":"/AI/Clusters/AIsuper","type":"INTERNAL","uiRouterKey":"custom-built-clusters","slug":"ai-clusters-a-isuper","external":false,"related":{"id":953,"title":"Custom-built Clusters","path":"/custom-built-clusters/","scheduledAt":null,"createdAt":"2023-09-22T14:14:40.961Z","updatedAt":"2024-10-29T12:48:55.663Z","publishedAt":"2023-10-04T14:49:01.987Z","locale":"en","__contentType":"api::page.page","navigationItemId":588,"__templateName":"Generic"},"items":[],"description":"Build the next Foundation Model with one of the fastest and most energy-efficient supercomputers in the world"},{"id":776,"title":"On Demand Cluster","menuAttached":false,"order":2,"path":"/AI/Clusters/Clusterondemand","type":"INTERNAL","uiRouterKey":"on-demand-cluster","slug":"ai-clusters-clusterondemand","external":false,"related":{"id":1266,"title":"Cluster On Demand ","path":"/cluster-on-demand/","scheduledAt":null,"createdAt":"2024-05-16T15:00:19.723Z","updatedAt":"2024-11-08T08:52:40.598Z","publishedAt":"2024-05-21T14:10:00.511Z","locale":"en","__contentType":"api::page.page","navigationItemId":776,"__templateName":"Generic"},"items":[],"description":"Rent a GPU-cluster from 32 to more than a thousand GPUs to speed up distributed training"}],"description":""},{"id":592,"title":"Model-as-a-service","menuAttached":false,"order":2,"path":"/AI/ManagedServices","type":"WRAPPER","uiRouterKey":"model-as-a-service-1","slug":"ai-managed-services","external":false,"items":[{"id":593,"title":"Managed 
Inference","menuAttached":false,"order":1,"path":"/AI/ManagedServices/llm","type":"INTERNAL","uiRouterKey":"managed-inference-2","slug":"ai-managed-services-llm","external":false,"related":{"id":1303,"title":"Inference","path":"/inference/","scheduledAt":null,"createdAt":"2024-06-13T13:16:26.427Z","updatedAt":"2024-11-15T14:11:15.846Z","publishedAt":"2024-06-28T12:43:39.677Z","locale":"en","__contentType":"api::page.page","navigationItemId":593,"__templateName":"Generic"},"items":[],"description":"Deploy AI models in a dedicated inference infrastructure. Get tailored security and predictable throughput"},{"id":824,"title":"Generative APIs","menuAttached":false,"order":2,"path":"/AI/ManagedServices/GenerativeAPIs","type":"INTERNAL","uiRouterKey":"generative-ap-is-2","slug":"ai-managed-services-generative-ap-is","external":false,"related":{"id":1418,"title":"Generative APIs","path":"/generative-apis/","scheduledAt":null,"createdAt":"2024-10-10T16:23:00.732Z","updatedAt":"2024-11-20T17:52:03.232Z","publishedAt":"2024-10-11T12:17:56.286Z","locale":"en","__contentType":"api::page.page","navigationItemId":824,"__templateName":"Generic"},"items":[],"description":"Consume AI models instantly via a simple API call. 
All hosted in Europe"}],"description":""},{"id":586,"title":"GPU Instances","menuAttached":false,"order":3,"path":"/AI/gpu","type":"WRAPPER","uiRouterKey":"gpu-instances","slug":"ai-gpu","external":false,"items":[{"id":589,"title":"L40S GPU Instance","menuAttached":false,"order":1,"path":"https://www.scaleway.com/en/l40s-gpu-instance/","type":"EXTERNAL","uiRouterKey":"l40-s-gpu-instance-1","slug":{},"external":true,"description":"Accelerate the next generation of AI-enabled applications with the universal L40S GPU Instance, faster than L4 and cheaper than H100 PCIe"},{"id":590,"title":"L4 GPU Instance","menuAttached":false,"order":2,"path":"https://www.scaleway.com/en/l4-gpu-instance/","type":"EXTERNAL","uiRouterKey":"l4-gpu-instance-1","slug":{},"external":true,"description":"Maximize your AI infrastructure's potential with a versatile and cost-effective GPU Instance"},{"id":587,"title":"H100 PCIe GPU Instance","menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/h100-pcie-try-it-now/","type":"EXTERNAL","uiRouterKey":"h100-pc-ie-gpu-instance-2","slug":{},"external":true,"description":"Accelerate your model training with the most high-end AI chip"},{"id":591,"title":"Render GPU Instance","menuAttached":false,"order":4,"path":"https://www.scaleway.com/en/gpu-render-instances/","type":"EXTERNAL","uiRouterKey":"render-gpu-instance-1","slug":{},"external":true,"description":"Dedicated Tesla P100s for all your Machine Learning \u0026 Artificial Intelligence needs"}],"description":""}],"description":""},{"id":594,"title":"Storage","menuAttached":false,"order":4,"path":"/Storage","type":"WRAPPER","uiRouterKey":"storage-3","slug":"storage-2","external":false,"items":[{"id":602,"title":"Storage","menuAttached":false,"order":1,"path":"/Storage/storage","type":"WRAPPER","uiRouterKey":"storage-4","slug":"storage-storage","external":false,"items":[{"id":604,"title":"Object 
Storage","menuAttached":false,"order":1,"path":"/Storage/storage/ObjectStorage","type":"INTERNAL","uiRouterKey":"object-storage-4","slug":"storage-storage-object-storage","external":false,"related":{"id":652,"title":"Object Storage","path":"/object-storage/","scheduledAt":null,"createdAt":"2023-02-16T09:44:56.414Z","updatedAt":"2024-11-24T18:22:54.952Z","publishedAt":"2023-03-07T18:05:15.061Z","locale":"en","__contentType":"api::page.page","navigationItemId":604,"__templateName":"Generic"},"items":[],"description":"Amazon S3-compatible and Multi-AZ resilient object storage service. Ensuring high availability for your data"},{"id":605,"title":"Scaleway Glacier","menuAttached":false,"order":2,"path":"/Storage/storage/glacier","type":"INTERNAL","uiRouterKey":"scaleway-glacier-1","slug":"storage-storage-glacier","external":false,"related":{"id":17,"title":"Glacier Cold storage","path":"/glacier-cold-storage/","scheduledAt":null,"createdAt":"2022-04-11T11:58:13.079Z","updatedAt":"2024-10-25T13:13:55.154Z","publishedAt":"2022-04-28T17:13:24.608Z","locale":"en","__contentType":"api::page.page","navigationItemId":605,"__templateName":"Generic"},"items":[],"description":"Cold Storage class to secure long-term object storage. 
Ideal for deep archived data."},{"id":606,"title":"Block Storage","menuAttached":false,"order":3,"path":"/Storage/storage/BlockStorage","type":"INTERNAL","uiRouterKey":"block-storage-3","slug":"storage-storage-block-storage","external":false,"related":{"id":141,"title":"Block Storage","path":"/block-storage/","scheduledAt":null,"createdAt":"2022-05-02T08:20:39.280Z","updatedAt":"2024-10-30T16:13:44.480Z","publishedAt":"2022-05-02T08:28:12.783Z","locale":"en","__contentType":"api::page.page","navigationItemId":606,"__templateName":"Generic"},"items":[],"description":"Flexible and reliable storage for demanding workloads"}],"description":""}],"description":""},{"id":595,"title":"Network","menuAttached":false,"order":5,"path":"/Network","type":"WRAPPER","uiRouterKey":"network-3","slug":"network-4","external":false,"items":[{"id":603,"title":"Network","menuAttached":false,"order":1,"path":"/Network/Network","type":"WRAPPER","uiRouterKey":"network-4","slug":"network-network","external":false,"items":[{"id":607,"title":"Virtual Private Cloud","menuAttached":false,"order":1,"path":"/Network/Network/VPC","type":"INTERNAL","uiRouterKey":"virtual-private-cloud-1","slug":"network-network-vpc","external":false,"related":{"id":885,"title":"VPC","path":"/vpc/","scheduledAt":null,"createdAt":"2023-07-11T14:38:07.412Z","updatedAt":"2024-06-18T10:05:19.765Z","publishedAt":"2023-07-11T14:38:10.387Z","locale":"en","__contentType":"api::page.page","navigationItemId":607,"__templateName":"Generic"},"items":[],"description":"Secure your cloud resources with ease on a resilient regional network"},{"id":609,"title":"Public Gateway","menuAttached":false,"order":2,"path":"/Network/Network/public","type":"INTERNAL","uiRouterKey":"public-gateway-1","slug":"network-network-public","external":false,"related":{"id":54,"title":"Public 
Gateway","path":"/public-gateway/","scheduledAt":null,"createdAt":"2022-04-22T09:34:12.578Z","updatedAt":"2024-09-11T14:24:49.432Z","publishedAt":"2022-04-28T17:13:01.025Z","locale":"en","__contentType":"api::page.page","navigationItemId":609,"__templateName":"Generic"},"items":[],"description":" A single and secure entrance to your infrastructure"},{"id":608,"title":"Load Balancer","menuAttached":false,"order":3,"path":"/Network/Network/load","type":"INTERNAL","uiRouterKey":"load-balancer-1","slug":"network-network-load","external":false,"related":{"id":45,"title":"Load Balancer","path":"/load-balancer/","scheduledAt":null,"createdAt":"2022-04-21T07:46:46.140Z","updatedAt":"2024-07-24T14:48:37.806Z","publishedAt":"2022-11-18T08:58:30.309Z","locale":"en","__contentType":"api::page.page","navigationItemId":608,"__templateName":"Generic"},"items":[],"description":"Improve the performance of your services as you grow"},{"id":610,"title":"Domains and DNS","menuAttached":false,"order":4,"path":"/Network/Network/DomainsandDNS","type":"INTERNAL","uiRouterKey":"domains-and-dns-1","slug":"network-network-domainsand-dns","external":false,"related":{"id":44,"title":"Domains and DNS","path":"/domains-and-dns/","scheduledAt":null,"createdAt":"2022-04-21T07:26:18.059Z","updatedAt":"2024-03-05T17:01:32.782Z","publishedAt":"2022-04-28T17:13:12.082Z","locale":"en","__contentType":"api::page.page","navigationItemId":610,"__templateName":"Generic"},"items":[],"description":"Buy domain names and manage DNS. 
Find your favourite extensions at a fair price"},{"id":792,"title":"IPAM (IP Address Manager)","menuAttached":false,"order":5,"path":"/Network/Network/IPAM","type":"INTERNAL","uiRouterKey":"ipam-ip-address-manager","slug":"network-network-ipam","external":false,"related":{"id":1300,"title":"IPAM","path":"/ipam/","scheduledAt":null,"createdAt":"2024-06-07T13:07:18.728Z","updatedAt":"2024-07-12T10:47:10.965Z","publishedAt":"2024-07-10T07:39:07.627Z","locale":"en","__contentType":"api::page.page","navigationItemId":792,"__templateName":"Generic"},"items":[],"description":"Centralize and simplify your Scaleway IP address management"},{"id":820,"title":"Edge Services","menuAttached":false,"order":6,"path":"/Network/Network/EdgeServices","type":"INTERNAL","uiRouterKey":"edge-services","slug":"network-network-edge-services","external":false,"related":{"id":1399,"title":"Edge Services","path":"/edge-services/","scheduledAt":null,"createdAt":"2024-07-12T10:30:47.181Z","updatedAt":"2024-11-04T15:19:29.792Z","publishedAt":"2024-09-24T10:34:53.990Z","locale":"en","__contentType":"api::page.page","navigationItemId":820,"__templateName":"Generic"},"items":[],"description":"Expose your HTTP services to the internet with security, reliability, and efficiency by design."}],"description":""}],"description":""},{"id":596,"title":"Data \u0026 Tools","menuAttached":false,"order":6,"path":"/ManagedServices","type":"WRAPPER","uiRouterKey":"data-2","slug":"managed-services","external":false,"items":[{"id":611,"title":"Data","menuAttached":false,"order":1,"path":"/ManagedServices/Data","type":"WRAPPER","uiRouterKey":"data","slug":"managed-services-data","external":false,"items":[{"id":612,"title":"Managed Database for PostgreSQL \u0026 
MySQL","menuAttached":false,"order":1,"path":"/ManagedServices/Data/SQL","type":"INTERNAL","uiRouterKey":"managed-database-for-postgre-sql-2","slug":"managed-services-data-sql","external":false,"related":{"id":48,"title":"Database","path":"/database/","scheduledAt":null,"createdAt":"2022-04-21T14:06:34.262Z","updatedAt":"2024-07-02T15:50:10.807Z","publishedAt":"2022-04-28T17:12:57.201Z","locale":"en","__contentType":"api::page.page","navigationItemId":612,"__templateName":"Generic"},"items":[],"description":"New generation of Relational Databases designed to scale on-demand"},{"id":613,"title":"Managed Database for Redis™","menuAttached":false,"order":2,"path":"/ManagedServices/Data/Redis","type":"INTERNAL","uiRouterKey":"managed-database-for-redis-1","slug":"managed-services-data-redis","external":false,"related":{"id":427,"title":"Managed Database for Redis™","path":"/managed-database-for-redistm/","scheduledAt":null,"createdAt":"2022-06-10T13:30:28.356Z","updatedAt":"2024-06-18T10:05:41.869Z","publishedAt":"2022-07-27T15:29:59.282Z","locale":"en","__contentType":"api::page.page","navigationItemId":613,"__templateName":"Generic"},"items":[],"description":"Accelerate your web application with powerful caching of Memory Databases"},{"id":614,"title":"Managed MongoDB®","menuAttached":false,"order":3,"path":"/ManagedServices/Data/document","type":"INTERNAL","uiRouterKey":"managed-mongo-db","slug":"managed-services-data-document","external":false,"related":{"id":890,"title":"Managed MongoDB","path":"/managed-mongodb/","scheduledAt":null,"createdAt":"2023-07-25T07:58:39.536Z","updatedAt":"2024-11-14T08:16:03.033Z","publishedAt":"2023-10-03T08:31:21.477Z","locale":"en","__contentType":"api::page.page","navigationItemId":614,"__templateName":"Generic"},"items":[],"description":"Drive your own document-oriented database. 
Let us managed the engine"},{"id":781,"title":"Serverless SQL Database","menuAttached":false,"order":4,"path":"/ManagedServices/Data/Serverless_SQL","type":"INTERNAL","uiRouterKey":"serverless-sql-database-2","slug":"managed-services-data-serverless-sql","external":false,"related":{"id":823,"title":"Serverless Sql Database","path":"/serverless-sql-database/","scheduledAt":null,"createdAt":"2023-05-11T22:46:48.805Z","updatedAt":"2024-11-06T14:51:53.874Z","publishedAt":"2023-05-11T22:47:00.320Z","locale":"en","__contentType":"api::page.page","navigationItemId":781,"__templateName":"Generic"},"items":[],"description":"Go serverless with fully managed database"},{"id":780,"title":"Messaging and Queuing","menuAttached":false,"order":5,"path":"/ManagedServices/Data/m\u0026q","type":"INTERNAL","uiRouterKey":"messaging-and-queuing-1","slug":"managed-services-data-m-and-q","external":false,"related":{"id":642,"title":"Messaging and Queuing","path":"/messaging-and-queuing/","scheduledAt":null,"createdAt":"2023-02-09T16:38:42.456Z","updatedAt":"2024-05-21T14:34:56.011Z","publishedAt":"2023-02-09T16:46:35.902Z","locale":"en","__contentType":"api::page.page","navigationItemId":780,"__templateName":"Generic"},"items":[],"description":"Send messages and events without having to manage your message broker"},{"id":822,"title":"Distributed Data Lab","menuAttached":false,"order":6,"path":"/ManagedServices/Data/DataLab","type":"INTERNAL","uiRouterKey":"distributed-data-lab","slug":"managed-services-data-data-lab","external":false,"related":{"id":949,"title":"Distributed Data Lab ","path":"/distributed-data-lab/","scheduledAt":null,"createdAt":"2023-09-21T11:57:12.802Z","updatedAt":"2024-10-30T15:28:03.991Z","publishedAt":"2024-09-27T15:10:48.257Z","locale":"en","__contentType":"api::page.page","navigationItemId":822,"__templateName":"Generic"},"items":[],"description":"Speed up data processing over very large volumes of data with an Apache Spark™ managed 
solution."}],"description":""},{"id":619,"title":"Business Applications","menuAttached":false,"order":2,"path":"/ManagedServices/ManagedServices","type":"WRAPPER","uiRouterKey":"business-applications","slug":"managed-services-managed-services","external":false,"items":[{"id":620,"title":"Web Hosting","menuAttached":false,"order":1,"path":"/ManagedServices/ManagedServices/hosting","type":"INTERNAL","uiRouterKey":"web-hosting-4","slug":"managed-services-managed-services-hosting","external":false,"related":{"id":47,"title":"Web hosting","path":"/web-hosting/","scheduledAt":null,"createdAt":"2022-04-21T11:51:48.689Z","updatedAt":"2024-11-20T15:59:55.910Z","publishedAt":"2022-04-28T13:34:58.879Z","locale":"en","__contentType":"api::page.page","navigationItemId":620,"__templateName":"Generic"},"items":[],"description":"Hosting for individuals, professionals, and everyone in between."},{"id":621,"title":"Web Platform","menuAttached":false,"order":2,"path":"/ManagedServices/ManagedServices/WebPlatform","type":"INTERNAL","uiRouterKey":"web-platform-2","slug":"managed-services-managed-services-web-platform","external":false,"related":{"id":576,"title":"Web Platform - powered by Clever Cloud","path":"/web-platform-powered-by-clever-cloud/","scheduledAt":null,"createdAt":"2022-12-07T14:07:50.856Z","updatedAt":"2023-11-16T15:19:36.970Z","publishedAt":"2022-12-13T08:01:42.916Z","locale":"en","__contentType":"api::page.page","navigationItemId":621,"__templateName":"Generic"},"items":[],"description":"Ship your applications only in a few clicks."},{"id":622,"title":"Transactional Email","menuAttached":false,"order":3,"path":"/ManagedServices/ManagedServices/tem","type":"INTERNAL","uiRouterKey":"transactional-email-2","slug":"managed-services-managed-services-tem","external":false,"related":{"id":776,"title":"Transactional Email 
(TEM)","path":"/transactional-email-tem/","scheduledAt":null,"createdAt":"2023-04-05T16:33:35.536Z","updatedAt":"2024-10-21T14:45:56.496Z","publishedAt":"2023-04-06T10:30:43.491Z","locale":"en","__contentType":"api::page.page","navigationItemId":622,"__templateName":"Generic"},"items":[],"description":"Instant delivery of your transactional emails"},{"id":623,"title":"Cockpit","menuAttached":false,"order":4,"path":"/ManagedServices/ManagedServices/Cockpit","type":"INTERNAL","uiRouterKey":"cockpit-2","slug":"managed-services-managed-services-cockpit","external":false,"related":{"id":814,"title":"Cockpit","path":"/cockpit/","scheduledAt":null,"createdAt":"2023-05-02T08:04:46.085Z","updatedAt":"2024-07-05T11:54:39.588Z","publishedAt":"2023-05-04T16:18:10.562Z","locale":"en","__contentType":"api::page.page","navigationItemId":623,"__templateName":"Generic"},"items":[],"description":"Monitor infrastructures in minutes with a fully managed observability solution"},{"id":784,"title":"IoT Hub","menuAttached":false,"order":5,"path":"/ManagedServices/ManagedServices/iot","type":"INTERNAL","uiRouterKey":"io-t-hub","slug":"managed-services-managed-services-iot","external":false,"related":{"id":31,"title":"Iot hub","path":"/iot-hub/","scheduledAt":null,"createdAt":"2022-04-20T04:58:03.085Z","updatedAt":"2023-11-15T15:42:53.313Z","publishedAt":"2022-04-28T17:13:21.005Z","locale":"en","__contentType":"api::page.page","navigationItemId":784,"__templateName":"Generic"},"items":[],"description":"A purpose-built bridge between connected hardware and cloud."}],"description":""},{"id":615,"title":"Security \u0026 Organization","menuAttached":false,"order":3,"path":"/ManagedServices/SecurityandAccount","type":"WRAPPER","uiRouterKey":"security-3","slug":"managed-services-securityand-account","external":false,"items":[{"id":618,"title":"Identity and Access Management 
(IAM)","menuAttached":false,"order":1,"path":"/ManagedServices/SecurityandAccount/iam","type":"INTERNAL","uiRouterKey":"identity-and-access-management-iam-1","slug":"managed-services-securityand-account-iam","external":false,"related":{"id":569,"title":"IAM","path":"/iam/","scheduledAt":null,"createdAt":"2022-12-02T16:25:06.762Z","updatedAt":"2024-08-22T09:40:22.523Z","publishedAt":"2022-12-06T15:27:30.794Z","locale":"en","__contentType":"api::page.page","navigationItemId":618,"__templateName":"Generic"},"items":[],"description":"The easiest way to safely collaborate in the cloud"},{"id":616,"title":"Secret Manager","menuAttached":false,"order":2,"path":"/ManagedServices/SecurityandAccount/secretmanager","type":"INTERNAL","uiRouterKey":"secret-manager-1","slug":"managed-services-securityand-account-secretmanager","external":false,"related":{"id":779,"title":"Secret Manager","path":"/secret-manager/","scheduledAt":null,"createdAt":"2023-04-11T11:04:18.808Z","updatedAt":"2024-08-28T09:57:43.021Z","publishedAt":"2023-04-26T07:47:45.718Z","locale":"en","__contentType":"api::page.page","navigationItemId":616,"__templateName":"Generic"},"items":[],"description":"Protect your sensitive data across your cloud infrastructure"},{"id":617,"title":"Cost Manager","menuAttached":false,"order":3,"path":"/ManagedServices/SecurityandAccount/cost-manager","type":"INTERNAL","uiRouterKey":"cost-manager-1","slug":"managed-services-securityand-account-cost-manager","external":false,"related":{"id":1186,"title":"Cost Manager","path":"/cost-manager/","scheduledAt":null,"createdAt":"2024-04-08T07:36:07.839Z","updatedAt":"2024-04-08T09:14:21.699Z","publishedAt":"2024-04-08T09:14:21.666Z","locale":"en","__contentType":"api::page.page","navigationItemId":617,"__templateName":"Generic"},"items":[],"description":"Easily track your consumption in an all-in-one tool"},{"id":830,"title":"Environmental Footprint 
Calculator","menuAttached":false,"order":4,"path":"/ManagedServices/SecurityandAccount/Footprint","type":"INTERNAL","uiRouterKey":"environmental-footprint-calculator","slug":"managed-services-securityand-account-footprint","external":false,"related":{"id":1450,"title":"Environmental Footprint Calculator","path":"/environmental-footprint-calculator/","scheduledAt":null,"createdAt":"2024-10-28T14:47:30.518Z","updatedAt":"2024-11-05T16:23:53.555Z","publishedAt":"2024-11-04T12:12:34.311Z","locale":"en","__contentType":"api::page.page","navigationItemId":830,"__templateName":"Generic"},"items":[],"description":"Accurately track your environmental impact and make informed choices"}],"description":""},{"id":624,"title":"Developer Tools","menuAttached":false,"order":4,"path":"/ManagedServices/DeveloperTools","type":"WRAPPER","uiRouterKey":"developer-tools","slug":"managed-services-developer-tools","external":false,"items":[{"id":625,"title":"Scaleway API","menuAttached":false,"order":1,"path":"https://www.scaleway.com/en/developers/api/","type":"EXTERNAL","uiRouterKey":"scaleway-api-2","slug":{},"external":true,"description":"The Public Interface for developers"},{"id":626,"title":"CLI","menuAttached":false,"order":2,"path":"/ManagedServices/DeveloperTools/cli","type":"INTERNAL","uiRouterKey":"cli-2","slug":"managed-services-developer-tools-cli","external":false,"related":{"id":187,"title":"CLI","path":"/cli/","scheduledAt":null,"createdAt":"2022-05-03T08:37:17.214Z","updatedAt":"2024-08-22T05:35:23.543Z","publishedAt":"2022-05-03T11:43:09.246Z","locale":"en","__contentType":"api::page.page","navigationItemId":626,"__templateName":"Generic"},"items":[],"description":"Deploy and manage your infrastructure directly from the command 
line"},{"id":627,"title":"Terraform","menuAttached":false,"order":3,"path":"/ManagedServices/DeveloperTools/terraform","type":"INTERNAL","uiRouterKey":"terraform-1","slug":"managed-services-developer-tools-terraform","external":false,"related":{"id":40,"title":"Terraform","path":"/terraform/","scheduledAt":null,"createdAt":"2022-04-20T14:37:30.508Z","updatedAt":"2023-11-15T08:32:57.793Z","publishedAt":"2022-04-28T17:05:15.208Z","locale":"en","__contentType":"api::page.page","navigationItemId":627,"__templateName":"Generic"},"items":[],"description":"Securely and efficiently provision and manage Infrastructure as Code with Terraform"}],"description":""}],"description":""},{"id":597,"title":"Solutions","menuAttached":false,"order":7,"path":"/Solutions","type":"WRAPPER","uiRouterKey":"solutions-2","slug":"solutions-2","external":false,"items":[{"id":628,"title":"Industries","menuAttached":false,"order":1,"path":"/Solutions/Industries","type":"WRAPPER","uiRouterKey":"industries-1","slug":"solutions-industries","external":false,"items":[{"id":629,"title":"Artificial Intelligence","menuAttached":false,"order":1,"path":"https://www.scaleway.com/en/ai-solutions/","type":"EXTERNAL","uiRouterKey":"artificial-intelligence","slug":{},"external":true,"description":""},{"id":630,"title":"Public Sector","menuAttached":false,"order":2,"path":"/Solutions/Industries/PublicSector","type":"INTERNAL","uiRouterKey":"public-sector","slug":"solutions-industries-public-sector","external":false,"related":{"id":986,"title":"Public sector 
solutions","path":"/public-sector-solutions/","scheduledAt":null,"createdAt":"2023-10-20T14:23:52.057Z","updatedAt":"2024-09-30T17:00:38.498Z","publishedAt":"2023-11-30T14:58:23.419Z","locale":"en","__contentType":"api::page.page","navigationItemId":630,"__templateName":"Generic"},"items":[],"description":""},{"id":631,"title":"Gaming","menuAttached":false,"order":3,"path":"/Solutions/Industries/Gaming","type":"INTERNAL","uiRouterKey":"gaming-1","slug":"solutions-industries-gaming","external":false,"related":{"id":1024,"title":"Gaming Cloud Solutions","path":"/gaming-cloud-solutions/","scheduledAt":null,"createdAt":"2023-11-29T17:06:47.458Z","updatedAt":"2024-09-24T13:29:47.657Z","publishedAt":"2023-12-13T16:53:50.074Z","locale":"en","__contentType":"api::page.page","navigationItemId":631,"__templateName":"Generic"},"items":[],"description":""},{"id":633,"title":"Media and Entertainment","menuAttached":false,"order":4,"path":"/Solutions/Industries/MediaandEntertainment","type":"INTERNAL","uiRouterKey":"media-and-entertainment","slug":"solutions-industries-mediaand-entertainment","external":false,"related":{"id":1048,"title":"Media and Entertainment","path":"/media-and-entertainment/","scheduledAt":null,"createdAt":"2023-12-13T16:23:27.055Z","updatedAt":"2024-09-24T13:30:40.809Z","publishedAt":"2024-01-02T18:08:08.725Z","locale":"en","__contentType":"api::page.page","navigationItemId":633,"__templateName":"Generic"},"items":[],"description":""},{"id":632,"title":"Retail and E-commerce","menuAttached":false,"order":5,"path":"/Solutions/Industries/Retail","type":"INTERNAL","uiRouterKey":"retail-and-e-commerce-2","slug":"solutions-industries-retail","external":false,"related":{"id":1105,"title":"E-commerce retail 
Solutions","path":"/e-commerce-retail-solutions/","scheduledAt":null,"createdAt":"2024-02-28T09:44:45.583Z","updatedAt":"2024-09-24T13:12:26.843Z","publishedAt":"2024-04-02T14:56:24.762Z","locale":"en","__contentType":"api::page.page","navigationItemId":632,"__templateName":"Generic"},"items":[],"description":""},{"id":634,"title":"Startup Program","menuAttached":false,"order":6,"path":"/Solutions/Industries/Startup","type":"INTERNAL","uiRouterKey":"startup-program-1","slug":"solutions-industries-startup","external":false,"related":{"id":82,"title":"Startup program","path":"/startup-program/","scheduledAt":null,"createdAt":"2022-04-27T19:14:18.251Z","updatedAt":"2024-08-27T13:22:49.823Z","publishedAt":"2022-05-11T15:19:00.591Z","locale":"en","__contentType":"api::page.page","navigationItemId":634,"__templateName":"Generic"},"items":[],"description":""},{"id":794,"title":"Financial Services","menuAttached":false,"order":7,"path":"/Solutions/Industries/FinancialServices","type":"INTERNAL","uiRouterKey":"financial-services","slug":"solutions-industries-financial-services","external":false,"related":{"id":1381,"title":"Financial services solutions","path":"/financial-services-solutions/","scheduledAt":null,"createdAt":"2024-08-06T12:19:51.917Z","updatedAt":"2024-11-12T09:58:52.666Z","publishedAt":"2024-08-06T12:31:25.580Z","locale":"en","__contentType":"api::page.page","navigationItemId":794,"__templateName":"Generic"},"items":[],"description":""},{"id":826,"title":"Industrial","menuAttached":false,"order":8,"path":"/Solutions/Industries/Industrial","type":"INTERNAL","uiRouterKey":"industrial","slug":"solutions-industries-industrial","external":false,"related":{"id":1411,"title":"Industrial 
solutions","path":"/industrial-solutions/","scheduledAt":null,"createdAt":"2024-10-02T10:14:37.728Z","updatedAt":"2024-11-08T16:36:55.075Z","publishedAt":"2024-10-03T16:29:42.042Z","locale":"en","__contentType":"api::page.page","navigationItemId":826,"__templateName":"Generic"},"items":[],"description":""}],"description":""},{"id":635,"title":"Use Cases","menuAttached":false,"order":2,"path":"/Solutions/usecases","type":"WRAPPER","uiRouterKey":"use-cases","slug":"solutions-usecases","external":false,"items":[{"id":638,"title":"Cloud Storage Solutions","menuAttached":false,"order":1,"path":"/Solutions/usecases/cloudstorage","type":"INTERNAL","uiRouterKey":"cloud-storage-solutions","slug":"solutions-usecases-cloudstorage","external":false,"related":{"id":595,"title":"Cloud Storage Solutions","path":"/cloud-storage-solutions/","scheduledAt":null,"createdAt":"2022-12-19T13:31:12.676Z","updatedAt":"2024-10-25T13:40:34.304Z","publishedAt":"2023-01-31T10:48:28.580Z","locale":"en","__contentType":"api::page.page","navigationItemId":638,"__templateName":"Generic"},"items":[],"description":""},{"id":637,"title":"Kubernetes Solutions","menuAttached":false,"order":2,"path":"/Solutions/usecases/kub-sol","type":"INTERNAL","uiRouterKey":"kubernetes-solutions-1","slug":"solutions-usecases-kub-sol","external":false,"related":{"id":616,"title":"Kubernetes Solutions","path":"/kubernetes-solutions/","scheduledAt":null,"createdAt":"2023-01-10T16:25:48.652Z","updatedAt":"2024-11-20T16:45:40.105Z","publishedAt":"2023-03-28T07:49:24.834Z","locale":"en","__contentType":"api::page.page","navigationItemId":637,"__templateName":"Generic"},"items":[],"description":""},{"id":636,"title":"Serverless Applications","menuAttached":false,"order":3,"path":"/Solutions/usecases/ServerlessApplications","type":"INTERNAL","uiRouterKey":"serverless-applications-1","slug":"solutions-usecases-serverless-applications","external":false,"related":{"id":780,"title":"Build Scalable Applications With 
Serverless","path":"/build-scalable-applications-with-serverless/","scheduledAt":null,"createdAt":"2023-04-12T08:42:06.395Z","updatedAt":"2024-05-15T13:59:21.827Z","publishedAt":"2023-05-12T06:59:34.924Z","locale":"en","__contentType":"api::page.page","navigationItemId":636,"__templateName":"Generic"},"items":[],"description":""}],"description":""},{"id":639,"title":"Web Hosting","menuAttached":false,"order":3,"path":"/Solutions/WebHosting","type":"WRAPPER","uiRouterKey":"web-hosting-3","slug":"solutions-web-hosting","external":false,"items":[{"id":640,"title":"Managed Web Hosting","menuAttached":false,"order":1,"path":"/Solutions/WebHosting/ManagedWebHosting","type":"INTERNAL","uiRouterKey":"managed-web-hosting-1","slug":"solutions-web-hosting-managed-web-hosting","external":false,"related":{"id":827,"title":"Managed Web Hosting","path":"/managed-web-hosting/","scheduledAt":null,"createdAt":"2023-05-15T09:39:39.531Z","updatedAt":"2024-08-28T06:42:02.109Z","publishedAt":"2023-05-15T12:31:13.810Z","locale":"en","__contentType":"api::page.page","navigationItemId":640,"__templateName":"Generic"},"items":[],"description":""},{"id":641,"title":"Dedicated Web Hosting","menuAttached":false,"order":2,"path":"/Solutions/WebHosting/DedicatedWebHosting","type":"INTERNAL","uiRouterKey":"dedicated-web-hosting","slug":"solutions-web-hosting-dedicated-web-hosting","external":false,"related":{"id":798,"title":"Dedicated Web 
Hosting","path":"/dedicated-web-hosting/","scheduledAt":null,"createdAt":"2023-04-25T09:15:11.185Z","updatedAt":"2024-08-28T06:37:46.212Z","publishedAt":"2023-05-29T08:11:44.369Z","locale":"en","__contentType":"api::page.page","navigationItemId":641,"__templateName":"Generic"},"items":[],"description":""}],"description":""}],"description":""},{"id":744,"title":"Resources","menuAttached":false,"order":8,"path":"/Resources","type":"WRAPPER","uiRouterKey":"resources-2","slug":"resources-3","external":false,"items":[{"id":746,"title":"Ecosystem","menuAttached":false,"order":1,"path":"/Resources/Ecosystem","type":"WRAPPER","uiRouterKey":"ecosystem","slug":"resources-ecosystem","external":false,"items":[{"id":751,"title":"All products","menuAttached":false,"order":1,"path":"/Resources/Ecosystem/All_products","type":"INTERNAL","uiRouterKey":"all-products-2","slug":"resources-ecosystem-all-products","external":false,"related":{"id":223,"title":"All Products","path":"/all-products/","scheduledAt":null,"createdAt":"2022-05-09T13:56:36.517Z","updatedAt":"2024-10-28T10:43:19.295Z","publishedAt":"2022-05-09T14:37:46.378Z","locale":"en","__contentType":"api::page.page","navigationItemId":751,"__templateName":"Generic"},"items":[],"description":""},{"id":828,"title":"Product updates","menuAttached":false,"order":2,"path":"/Resources/Ecosystem/Productupdates","type":"INTERNAL","uiRouterKey":"product-updates","slug":"resources-ecosystem-productupdates","external":false,"related":{"id":1451,"title":"Product 
updates","path":"/product-updates/","scheduledAt":null,"createdAt":"2024-10-28T16:25:15.626Z","updatedAt":"2024-10-30T16:22:06.602Z","publishedAt":"2024-10-30T16:21:39.156Z","locale":"en","__contentType":"api::page.page","navigationItemId":828,"__templateName":"Generic"},"items":[],"description":""},{"id":750,"title":"Betas","menuAttached":false,"order":3,"path":"/Resources/Ecosystem/betas","type":"INTERNAL","uiRouterKey":"betas","slug":"resources-ecosystem-betas","external":false,"related":{"id":90,"title":"Betas","path":"/betas/","scheduledAt":null,"createdAt":"2022-04-28T14:06:08.789Z","updatedAt":"2024-11-05T16:26:58.483Z","publishedAt":"2022-04-28T14:39:18.717Z","locale":"en","__contentType":"api::page.page","navigationItemId":750,"__templateName":"Generic"},"items":[],"description":""},{"id":747,"title":"Changelog","menuAttached":false,"order":4,"path":"https://www.scaleway.com/en/docs/changelog/","type":"EXTERNAL","uiRouterKey":"changelog-2","slug":{},"external":true,"description":""},{"id":758,"title":"Blog","menuAttached":false,"order":5,"path":"https://www.scaleway.com/en/blog/","type":"EXTERNAL","uiRouterKey":"blog-2","slug":{},"external":true,"description":""}],"description":""},{"id":745,"title":"Community","menuAttached":false,"order":2,"path":"/Resources/Community","type":"WRAPPER","uiRouterKey":"community","slug":"resources-community","external":false,"items":[{"id":748,"title":"Slack Community","menuAttached":false,"order":1,"path":"https://slack.scaleway.com/","type":"EXTERNAL","uiRouterKey":"slack-community-2","slug":{},"external":true,"description":""},{"id":749,"title":"Feature Requests","menuAttached":false,"order":2,"path":"https://feature-request.scaleway.com/","type":"EXTERNAL","uiRouterKey":"feature-requests-2","slug":{},"external":true,"description":""},{"id":757,"title":"Scaleway 
Learning","menuAttached":false,"order":3,"path":"/Resources/Community/Scaleway_Learning","type":"INTERNAL","uiRouterKey":"scaleway-learning-2","slug":"resources-community-scaleway-learning","external":false,"related":{"id":597,"title":"Scaleway Learning","path":"/scaleway-learning/","scheduledAt":null,"createdAt":"2022-12-20T08:57:37.886Z","updatedAt":"2024-08-22T15:58:41.554Z","publishedAt":"2023-01-02T21:14:10.049Z","locale":"en","__contentType":"api::page.page","navigationItemId":757,"__templateName":"Generic"},"items":[],"description":""}],"description":""},{"id":752,"title":"Company","menuAttached":false,"order":3,"path":"/Resources/Company","type":"WRAPPER","uiRouterKey":"company-1","slug":"resources-company","external":false,"items":[{"id":756,"title":"Events","menuAttached":false,"order":1,"path":"/Resources/Company/Events","type":"INTERNAL","uiRouterKey":"events-1","slug":"resources-company-events","external":false,"related":{"id":699,"title":"Events","path":"/events/","scheduledAt":null,"createdAt":"2023-03-13T09:14:30.830Z","updatedAt":"2024-11-21T14:08:26.020Z","publishedAt":"2023-03-13T09:14:41.552Z","locale":"en","__contentType":"api::page.page","navigationItemId":756,"__templateName":"Generic"},"items":[],"description":""},{"id":796,"title":"Marketplace","menuAttached":false,"order":2,"path":"https://www.scaleway.com/en/marketplace/","type":"EXTERNAL","uiRouterKey":"marketplace","slug":{},"external":true,"description":""},{"id":755,"title":"Careers","menuAttached":false,"order":3,"path":"/Resources/Company/Careers","type":"INTERNAL","uiRouterKey":"careers-1","slug":"resources-company-careers","external":false,"related":{"id":766,"title":"Careers","path":"/careers/","scheduledAt":null,"createdAt":"2023-03-31T14:17:38.589Z","updatedAt":"2024-07-16T10:08:23.648Z","publishedAt":"2024-02-12T15:39:28.684Z","locale":"en","__contentType":"api::page.page","navigationItemId":755,"__templateName":"Generic"},"items":[],"description":""},{"id":753,"title":"About 
us","menuAttached":false,"order":4,"path":"/Resources/Company/Aboutus","type":"INTERNAL","uiRouterKey":"about-us-1","slug":"resources-company-aboutus","external":false,"related":{"id":195,"title":"About us","path":"/about-us/","scheduledAt":null,"createdAt":"2022-05-03T13:05:13.546Z","updatedAt":"2023-12-14T09:00:58.075Z","publishedAt":"2022-05-11T12:26:40.217Z","locale":"en","__contentType":"api::page.page","navigationItemId":753,"__templateName":"Generic"},"items":[],"description":""},{"id":788,"title":"Labs","menuAttached":false,"order":5,"path":"https://labs.scaleway.com/","type":"EXTERNAL","uiRouterKey":"labs-4","slug":{},"external":true,"description":""},{"id":754,"title":"Customer Testimonials","menuAttached":false,"order":6,"path":"/Resources/Company/customer-testimonials","type":"INTERNAL","uiRouterKey":"customer-testimonials","slug":"resources-company-customer-testimonials","external":false,"related":{"id":294,"title":"Customer testimonials","path":"/customer-testimonials/","scheduledAt":null,"createdAt":"2022-05-19T15:33:42.418Z","updatedAt":"2024-07-08T12:41:04.663Z","publishedAt":"2022-05-19T15:37:23.202Z","locale":"en","__contentType":"api::page.page","navigationItemId":754,"__templateName":"Generic"},"items":[],"description":""}],"description":""}],"description":""},{"id":598,"title":"Pricing","menuAttached":false,"order":9,"path":"/pricing","type":"INTERNAL","uiRouterKey":"pricing-2","slug":"pricing-1","external":false,"related":{"id":1236,"title":"Pricing","path":"/pricing/","scheduledAt":null,"createdAt":"2024-05-14T07:33:54.370Z","updatedAt":"2024-09-30T10:00:47.281Z","publishedAt":"2024-05-14T13:19:03.795Z","locale":"en","__contentType":"api::page.page","navigationItemId":598,"__templateName":"Generic"},"items":[],"description":""}],"topBarNavigationItems":[{"id":425,"title":"Docs","menuAttached":false,"order":1,"path":"https://www.scaleway.com/en/docs/","type":"EXTERNAL","uiRouterKey":"docs","slug":{},"external":true},{"id":427,"title":"Contact"
,"menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/contact/","type":"EXTERNAL","uiRouterKey":"contact-2","slug":{},"external":true,"description":""}],"MOTD":{"id":7803,"label":"NEW: Dedicated GPU power with Dedibox GPU!","url":null,"page":{"data":{"id":1454,"attributes":{"title":"GPU","path":"/dedibox/gpu/","scheduledAt":null,"createdAt":"2024-10-31T10:01:24.876Z","updatedAt":"2024-11-24T18:18:19.841Z","publishedAt":"2024-11-07T07:38:37.573Z","locale":"en"}}}},"ctaList":{"dediboxCTAList":[{"id":6611,"label":"Log in","url":"https://console.online.net/en/login","page":{"data":null}},{"id":6612,"label":"Sign up","url":"https://console.online.net/en/user/subscribe","page":{"data":null}}],"defaultCTAList":[{"id":6610,"label":"Log in","url":"https://console.scaleway.com/login","page":{"data":null}},{"id":6609,"label":"Sign up","url":"https://console.scaleway.com/register","page":{"data":null}}]}},"footer":[{"id":276,"title":"Products","menuAttached":false,"order":1,"path":"/products","type":"WRAPPER","uiRouterKey":"products","slug":"products-2","external":false,"items":[{"id":283,"title":"All Products","menuAttached":false,"order":1,"path":"/products/AllProducts","type":"INTERNAL","uiRouterKey":"all-products","slug":"products-all-products","external":false,"related":{"id":223,"title":"All 
Products","path":"/all-products/","scheduledAt":null,"createdAt":"2022-05-09T13:56:36.517Z","updatedAt":"2024-10-28T10:43:19.295Z","publishedAt":"2022-05-09T14:37:46.378Z","locale":"en","__contentType":"api::page.page","navigationItemId":283,"__templateName":"Generic"},"items":[],"description":""},{"id":759,"title":"Betas","menuAttached":false,"order":2,"path":"/products/betas","type":"INTERNAL","uiRouterKey":"betas-1","slug":"products-betas","external":false,"related":{"id":90,"title":"Betas","path":"/betas/","scheduledAt":null,"createdAt":"2022-04-28T14:06:08.789Z","updatedAt":"2024-11-05T16:26:58.483Z","publishedAt":"2022-04-28T14:39:18.717Z","locale":"en","__contentType":"api::page.page","navigationItemId":759,"__templateName":"Generic"},"items":[],"description":""},{"id":281,"title":"Bare Metal","menuAttached":false,"order":3,"path":"/products/BareMetal","type":"INTERNAL","uiRouterKey":"bare-metal-2","slug":"products-bare-metal","external":false,"related":{"id":961,"title":"Bare Metal","path":"/bare-metal/","scheduledAt":null,"createdAt":"2023-09-27T07:45:06.975Z","updatedAt":"2024-11-24T19:16:29.645Z","publishedAt":"2023-10-17T12:08:02.344Z","locale":"en","__contentType":"api::page.page","navigationItemId":281,"__templateName":"Generic"},"items":[],"description":""},{"id":284,"title":"Dedibox","menuAttached":false,"order":4,"path":"/products/Dedibox","type":"INTERNAL","uiRouterKey":"dedibox-4","slug":"products-dedibox","external":false,"related":{"id":29,"title":"Dedibox","path":"/dedibox/","scheduledAt":null,"createdAt":"2022-04-19T15:29:02.488Z","updatedAt":"2024-11-24T18:03:28.395Z","publishedAt":"2022-04-28T17:05:07.122Z","locale":"en","__contentType":"api::page.page","navigationItemId":284,"__templateName":"Generic"},"items":[],"description":""},{"id":282,"title":"Elastic 
Metal","menuAttached":false,"order":5,"path":"/products/ElasticMetal","type":"INTERNAL","uiRouterKey":"elastic-metal-4","slug":"products-elastic-metal","external":false,"related":{"id":87,"title":"Elastic Metal","path":"/elastic-metal/","scheduledAt":null,"createdAt":"2022-04-28T12:45:28.696Z","updatedAt":"2024-11-08T15:01:56.485Z","publishedAt":"2022-04-28T13:22:46.501Z","locale":"en","__contentType":"api::page.page","navigationItemId":282,"__templateName":"Generic"},"items":[],"description":""},{"id":285,"title":"Compute Instances","menuAttached":false,"order":6,"path":"/products/Compute","type":"INTERNAL","uiRouterKey":"compute-instances","slug":"products-compute","external":false,"related":{"id":655,"title":"Virtual Instances","path":"/virtual-instances/","scheduledAt":null,"createdAt":"2023-02-20T10:48:52.279Z","updatedAt":"2024-08-28T07:01:50.413Z","publishedAt":"2023-02-28T08:32:03.960Z","locale":"en","__contentType":"api::page.page","navigationItemId":285,"__templateName":"Generic"},"items":[],"description":""},{"id":286,"title":"GPU","menuAttached":false,"order":7,"path":"/products/GPu","type":"INTERNAL","uiRouterKey":"gpu-6","slug":"products-g-pu","external":false,"related":{"id":1025,"title":"GPU 
Instances","path":"/gpu-instances/","scheduledAt":null,"createdAt":"2023-11-30T13:15:51.769Z","updatedAt":"2024-11-19T16:38:15.121Z","publishedAt":"2023-12-12T12:52:20.083Z","locale":"en","__contentType":"api::page.page","navigationItemId":286,"__templateName":"Generic"},"items":[],"description":""},{"id":287,"title":"Containers","menuAttached":false,"order":8,"path":"/products/Containers","type":"INTERNAL","uiRouterKey":"containers-6","slug":"products-containers","external":false,"related":{"id":465,"title":"Containers","path":"/containers/","scheduledAt":null,"createdAt":"2022-07-29T15:09:20.535Z","updatedAt":"2024-08-28T07:05:23.005Z","publishedAt":"2023-02-27T13:53:48.270Z","locale":"en","__contentType":"api::page.page","navigationItemId":287,"__templateName":"Generic"},"items":[],"description":""},{"id":288,"title":"Object Storage","menuAttached":false,"order":9,"path":"/products/ObjectStorage","type":"INTERNAL","uiRouterKey":"object-storage-4","slug":"products-object-storage","external":false,"related":{"id":652,"title":"Object Storage","path":"/object-storage/","scheduledAt":null,"createdAt":"2023-02-16T09:44:56.414Z","updatedAt":"2024-11-24T18:22:54.952Z","publishedAt":"2023-03-07T18:05:15.061Z","locale":"en","__contentType":"api::page.page","navigationItemId":288,"__templateName":"Generic"},"items":[],"description":""},{"id":289,"title":"Block Storage","menuAttached":false,"order":10,"path":"/products/BlockStorage","type":"INTERNAL","uiRouterKey":"block-storage-4","slug":"products-block-storage","external":false,"related":{"id":141,"title":"Block 
Storage","path":"/block-storage/","scheduledAt":null,"createdAt":"2022-05-02T08:20:39.280Z","updatedAt":"2024-10-30T16:13:44.480Z","publishedAt":"2022-05-02T08:28:12.783Z","locale":"en","__contentType":"api::page.page","navigationItemId":289,"__templateName":"Generic"},"items":[],"description":""}],"description":""},{"id":275,"title":"Resources","menuAttached":false,"order":2,"path":"/resources","type":"WRAPPER","uiRouterKey":"resources","slug":"resources-3","external":false,"items":[{"id":290,"title":"Documentation","menuAttached":false,"order":1,"path":"https://www.scaleway.com/en/docs/","type":"EXTERNAL","uiRouterKey":"documentation","slug":{},"external":true,"description":""},{"id":292,"title":"Changelog","menuAttached":false,"order":2,"path":"https://www.scaleway.com/en/docs/changelog/","type":"EXTERNAL","uiRouterKey":"changelog","slug":{},"external":true,"description":""},{"id":291,"title":"Blog","menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/blog/","type":"EXTERNAL","uiRouterKey":"blog","slug":{},"external":true,"description":""},{"id":293,"title":"Feature Requests","menuAttached":false,"order":4,"path":"https://feature-request.scaleway.com/","type":"EXTERNAL","uiRouterKey":"feature-requests","slug":{},"external":true,"description":""},{"id":321,"title":"Slack Community","menuAttached":false,"order":5,"path":"https://slack.scaleway.com/","type":"EXTERNAL","uiRouterKey":"slack-community-2","slug":{},"external":true,"description":""}],"description":""},{"id":280,"title":"Contact","menuAttached":false,"order":3,"path":"/Contact","type":"WRAPPER","uiRouterKey":"contact-2","slug":"contact-4","external":false,"items":[{"id":294,"title":"Create a ticket","menuAttached":false,"order":1,"path":"https://console.scaleway.com/support/create/","type":"EXTERNAL","uiRouterKey":"create-a-ticket","slug":{},"external":true,"description":""},{"id":296,"title":"Report 
Abuse","menuAttached":false,"order":2,"path":"https://console.scaleway.com/support/abuses/create/","type":"EXTERNAL","uiRouterKey":"report-abuse","slug":{},"external":true,"description":""},{"id":295,"title":"Status","menuAttached":false,"order":3,"path":"https://status.scaleway.com/","type":"EXTERNAL","uiRouterKey":"status","slug":{},"external":true,"description":""},{"id":298,"title":"Dedibox Console online.net","menuAttached":false,"order":4,"path":"https://console.online.net/fr/login","type":"EXTERNAL","uiRouterKey":"dedibox-console-online-net","slug":{},"external":true,"description":""},{"id":407,"title":"Support plans","menuAttached":false,"order":5,"path":"/Contact/Support","type":"INTERNAL","uiRouterKey":"support-plans","slug":"contact-support","external":false,"related":{"id":493,"title":"Assistance","path":"/assistance/","scheduledAt":null,"createdAt":"2022-09-26T15:14:28.440Z","updatedAt":"2024-08-28T07:19:37.841Z","publishedAt":"2022-10-03T12:20:34.441Z","locale":"en","__contentType":"api::page.page","navigationItemId":407,"__templateName":"Generic"},"items":[],"description":""},{"id":409,"title":"Brand resources","menuAttached":false,"order":6,"path":"https://ultraviolet.scaleway.com/6dd9b5c45/p/62b4e2-ultraviolet","type":"EXTERNAL","uiRouterKey":"brand-resources","slug":{},"external":true,"description":""}],"description":""},{"id":436,"title":"Company","menuAttached":false,"order":4,"path":"/scw","type":"WRAPPER","uiRouterKey":"company","slug":"scw","external":false,"items":[{"id":440,"title":"About us","menuAttached":false,"order":1,"path":"/scw/About-us","type":"INTERNAL","uiRouterKey":"about-us","slug":"scw-about-us","external":false,"related":{"id":195,"title":"About 
us","path":"/about-us/","scheduledAt":null,"createdAt":"2022-05-03T13:05:13.546Z","updatedAt":"2023-12-14T09:00:58.075Z","publishedAt":"2022-05-11T12:26:40.217Z","locale":"en","__contentType":"api::page.page","navigationItemId":440,"__templateName":"Generic"},"items":[],"description":""},{"id":441,"title":"Events","menuAttached":false,"order":2,"path":"/scw/events","type":"INTERNAL","uiRouterKey":"events","slug":"scw-events","external":false,"related":{"id":699,"title":"Events","path":"/events/","scheduledAt":null,"createdAt":"2023-03-13T09:14:30.830Z","updatedAt":"2024-11-21T14:08:26.020Z","publishedAt":"2023-03-13T09:14:41.552Z","locale":"en","__contentType":"api::page.page","navigationItemId":441,"__templateName":"Generic"},"items":[],"description":""},{"id":798,"title":"Marketplace","menuAttached":false,"order":3,"path":"https://www.scaleway.com/en/marketplace/","type":"EXTERNAL","uiRouterKey":"marketplace-2","slug":{},"external":true,"description":""},{"id":439,"title":"Environment ","menuAttached":false,"order":4,"path":"/scw/environment","type":"INTERNAL","uiRouterKey":"environment","slug":"scw-environment","external":false,"related":{"id":59,"title":"Environmental leadership ","path":"/environmental-leadership/","scheduledAt":null,"createdAt":"2022-04-26T08:30:15.289Z","updatedAt":"2024-11-09T10:51:38.014Z","publishedAt":"2022-04-28T17:12:24.574Z","locale":"en","__contentType":"api::page.page","navigationItemId":439,"__templateName":"Generic"},"items":[],"description":""},{"id":790,"title":"Social Responsibility","menuAttached":false,"order":5,"path":"/scw/SocialResponsibility","type":"INTERNAL","uiRouterKey":"social-responsibility","slug":"scw-social-responsibility","external":false,"related":{"id":184,"title":"Social 
responsibility","path":"/social-responsibility/","scheduledAt":null,"createdAt":"2022-05-03T07:48:38.038Z","updatedAt":"2024-08-28T07:08:11.382Z","publishedAt":"2022-05-03T13:08:48.890Z","locale":"en","__contentType":"api::page.page","navigationItemId":790,"__templateName":"Generic"},"items":[],"description":""},{"id":438,"title":"Security","menuAttached":false,"order":6,"path":"/scw/security","type":"INTERNAL","uiRouterKey":"security-4","slug":"scw-security","external":false,"related":{"id":190,"title":"Security and resilience","path":"/security-and-resilience/","scheduledAt":null,"createdAt":"2022-05-03T10:22:40.696Z","updatedAt":"2024-08-28T08:56:56.744Z","publishedAt":"2022-05-11T12:39:01.810Z","locale":"en","__contentType":"api::page.page","navigationItemId":438,"__templateName":"Generic"},"items":[],"description":""},{"id":782,"title":"Shared Responsibility Model","menuAttached":false,"order":7,"path":"/scw/Model","type":"INTERNAL","uiRouterKey":"shared-responsibility-model","slug":"scw-model","external":false,"related":{"id":1180,"title":"Shared Responsibility 
Model","path":"/shared-responsibility-model/","scheduledAt":null,"createdAt":"2024-04-04T15:54:36.614Z","updatedAt":"2024-11-18T13:28:57.006Z","publishedAt":"2024-04-04T15:56:39.573Z","locale":"en","__contentType":"api::page.page","navigationItemId":782,"__templateName":"Generic"},"items":[],"description":""},{"id":442,"title":"News","menuAttached":false,"order":8,"path":"/scw/news","type":"INTERNAL","uiRouterKey":"news","slug":"scw-news","external":false,"related":{"id":263,"title":"News","path":"/news/","scheduledAt":null,"createdAt":"2022-05-19T10:28:45.212Z","updatedAt":"2022-05-31T07:47:17.728Z","publishedAt":"2022-05-19T10:29:13.394Z","locale":"en","__contentType":"api::page.page","navigationItemId":442,"__templateName":"Generic"},"items":[],"description":""},{"id":443,"title":"Careers","menuAttached":false,"order":9,"path":"/scw/career/","type":"INTERNAL","uiRouterKey":"careers","slug":"scw-career","external":false,"related":{"id":766,"title":"Careers","path":"/careers/","scheduledAt":null,"createdAt":"2023-03-31T14:17:38.589Z","updatedAt":"2024-07-16T10:08:23.648Z","publishedAt":"2024-02-12T15:39:28.684Z","locale":"en","__contentType":"api::page.page","navigationItemId":443,"__templateName":"Generic"},"items":[],"description":""},{"id":445,"title":"Scaleway Learning","menuAttached":false,"order":10,"path":"/scw/learning","type":"INTERNAL","uiRouterKey":"scaleway-learning","slug":"scw-learning","external":false,"related":{"id":597,"title":"Scaleway Learning","path":"/scaleway-learning/","scheduledAt":null,"createdAt":"2022-12-20T08:57:37.886Z","updatedAt":"2024-08-22T15:58:41.554Z","publishedAt":"2023-01-02T21:14:10.049Z","locale":"en","__contentType":"api::page.page","navigationItemId":445,"__templateName":"Generic"},"items":[],"description":""},{"id":444,"title":"Client Success 
Stories","menuAttached":false,"order":11,"path":"/scw/clientstor/","type":"INTERNAL","uiRouterKey":"client-success-stories","slug":"scw-clientstor","external":false,"related":{"id":294,"title":"Customer testimonials","path":"/customer-testimonials/","scheduledAt":null,"createdAt":"2022-05-19T15:33:42.418Z","updatedAt":"2024-07-08T12:41:04.663Z","publishedAt":"2022-05-19T15:37:23.202Z","locale":"en","__contentType":"api::page.page","navigationItemId":444,"__templateName":"Generic"},"items":[],"description":""},{"id":437,"title":"Labs","menuAttached":false,"order":12,"path":"https://labs.scaleway.com/en/","type":"EXTERNAL","uiRouterKey":"labs","slug":{},"external":true,"description":""}],"description":""}],"pageType":"post"},"__N_SSG":true},"page":"/blog/[slug]","query":{"slug":"load-balancer-scaleway-what-is-it"},"buildId":"RHFDf2FUsZGq9xdClkNw2","isFallback":false,"gsp":true,"locale":"en","locales":["default","en","fr"],"defaultLocale":"default","scriptLoader":[]}</script></body></html>

Pages: 1 2 3 4 5 6 7 8 9 10