diff --git a/latest/404.html b/latest/404.html index 4924ee7d..68de1421 100644 --- a/latest/404.html +++ b/latest/404.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../v1.9.1/404.html... + Redirecting to ../v1.9.2/404.html... \ No newline at end of file
diff --git a/latest/addons/argo-events/index.html b/latest/addons/argo-events/index.html index 0351dc14..f57f7ad3 100644 --- a/latest/addons/argo-events/index.html +++ b/latest/addons/argo-events/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/argo-events/... + Redirecting to ../../../v1.9.2/addons/argo-events/... \ No newline at end of file
diff --git a/latest/addons/argo-rollouts/index.html b/latest/addons/argo-rollouts/index.html index 08eab7a4..76af2e96 100644 --- a/latest/addons/argo-rollouts/index.html +++ b/latest/addons/argo-rollouts/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/argo-rollouts/... + Redirecting to ../../../v1.9.2/addons/argo-rollouts/... \ No newline at end of file
diff --git a/latest/addons/argo-workflows/index.html b/latest/addons/argo-workflows/index.html index bc99f9ee..51fdc706 100644 --- a/latest/addons/argo-workflows/index.html +++ b/latest/addons/argo-workflows/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/argo-workflows/... + Redirecting to ../../../v1.9.2/addons/argo-workflows/... \ No newline at end of file
diff --git a/latest/addons/argocd/index.html b/latest/addons/argocd/index.html index 7d50a691..8418e1c6 100644 --- a/latest/addons/argocd/index.html +++ b/latest/addons/argocd/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/argocd/... + Redirecting to ../../../v1.9.2/addons/argocd/... \ No newline at end of file
diff --git a/latest/addons/aws-cloudwatch-metrics/index.html b/latest/addons/aws-cloudwatch-metrics/index.html index 0b97e974..f08ccbdc 100644 --- a/latest/addons/aws-cloudwatch-metrics/index.html +++ b/latest/addons/aws-cloudwatch-metrics/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/aws-cloudwatch-metrics/... + Redirecting to ../../../v1.9.2/addons/aws-cloudwatch-metrics/... \ No newline at end of file
diff --git a/latest/addons/aws-efs-csi-driver/index.html b/latest/addons/aws-efs-csi-driver/index.html index 9412c043..899f0631 100644 --- a/latest/addons/aws-efs-csi-driver/index.html +++ b/latest/addons/aws-efs-csi-driver/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/aws-efs-csi-driver/... + Redirecting to ../../../v1.9.2/addons/aws-efs-csi-driver/... \ No newline at end of file
diff --git a/latest/addons/aws-for-fluentbit/index.html b/latest/addons/aws-for-fluentbit/index.html index e7c46d48..87706288 100644 --- a/latest/addons/aws-for-fluentbit/index.html +++ b/latest/addons/aws-for-fluentbit/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/aws-for-fluentbit/... + Redirecting to ../../../v1.9.2/addons/aws-for-fluentbit/... \ No newline at end of file
diff --git a/latest/addons/aws-fsx-csi-driver/index.html b/latest/addons/aws-fsx-csi-driver/index.html index dce83edb..41259ede 100644 --- a/latest/addons/aws-fsx-csi-driver/index.html +++ b/latest/addons/aws-fsx-csi-driver/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/aws-fsx-csi-driver/... + Redirecting to ../../../v1.9.2/addons/aws-fsx-csi-driver/... \ No newline at end of file
diff --git a/latest/addons/aws-gateway-api-controller/index.html b/latest/addons/aws-gateway-api-controller/index.html index 2e9d02c5..2c9f8c70 100644 --- a/latest/addons/aws-gateway-api-controller/index.html +++ b/latest/addons/aws-gateway-api-controller/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/aws-gateway-api-controller/... + Redirecting to ../../../v1.9.2/addons/aws-gateway-api-controller/... \ No newline at end of file
diff --git a/latest/addons/aws-load-balancer-controller/index.html b/latest/addons/aws-load-balancer-controller/index.html index 77a85dd2..896c5a44 100644 --- a/latest/addons/aws-load-balancer-controller/index.html +++ b/latest/addons/aws-load-balancer-controller/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/aws-load-balancer-controller/... + Redirecting to ../../../v1.9.2/addons/aws-load-balancer-controller/... \ No newline at end of file
diff --git a/latest/addons/aws-node-termination-handler/index.html b/latest/addons/aws-node-termination-handler/index.html index 3d6f129a..d6eaecf2 100644 --- a/latest/addons/aws-node-termination-handler/index.html +++ b/latest/addons/aws-node-termination-handler/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/aws-node-termination-handler/... + Redirecting to ../../../v1.9.2/addons/aws-node-termination-handler/... \ No newline at end of file
diff --git a/latest/addons/aws-private-ca-issuer/index.html b/latest/addons/aws-private-ca-issuer/index.html index b4f62f0d..5ed7e37f 100644 --- a/latest/addons/aws-private-ca-issuer/index.html +++ b/latest/addons/aws-private-ca-issuer/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/aws-private-ca-issuer/... + Redirecting to ../../../v1.9.2/addons/aws-private-ca-issuer/... \ No newline at end of file
diff --git a/latest/addons/cert-manager/index.html b/latest/addons/cert-manager/index.html index e7cd70b9..e8e60d5d 100644 --- a/latest/addons/cert-manager/index.html +++ b/latest/addons/cert-manager/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/cert-manager/... + Redirecting to ../../../v1.9.2/addons/cert-manager/... \ No newline at end of file
diff --git a/latest/addons/cluster-autoscaler/index.html b/latest/addons/cluster-autoscaler/index.html index a15c83e6..ad505dd3 100644 --- a/latest/addons/cluster-autoscaler/index.html +++ b/latest/addons/cluster-autoscaler/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/cluster-autoscaler/... + Redirecting to ../../../v1.9.2/addons/cluster-autoscaler/... \ No newline at end of file
diff --git a/latest/addons/cluster-proportional-autoscaler/index.html b/latest/addons/cluster-proportional-autoscaler/index.html index fbc86535..70b15e61 100644 --- a/latest/addons/cluster-proportional-autoscaler/index.html +++ b/latest/addons/cluster-proportional-autoscaler/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/cluster-proportional-autoscaler/... + Redirecting to ../../../v1.9.2/addons/cluster-proportional-autoscaler/... \ No newline at end of file
diff --git a/latest/addons/external-dns/index.html b/latest/addons/external-dns/index.html index 4d42da91..98361bdd 100644 --- a/latest/addons/external-dns/index.html +++ b/latest/addons/external-dns/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/external-dns/... + Redirecting to ../../../v1.9.2/addons/external-dns/... \ No newline at end of file
diff --git a/latest/addons/external-secrets/index.html b/latest/addons/external-secrets/index.html index c0954dc1..7f96aa80 100644 --- a/latest/addons/external-secrets/index.html +++ b/latest/addons/external-secrets/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/external-secrets/... + Redirecting to ../../../v1.9.2/addons/external-secrets/... \ No newline at end of file
diff --git a/latest/addons/fargate-fluentbit/index.html b/latest/addons/fargate-fluentbit/index.html index 38d29ff6..f49ed828 100644 --- a/latest/addons/fargate-fluentbit/index.html +++ b/latest/addons/fargate-fluentbit/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/fargate-fluentbit/... + Redirecting to ../../../v1.9.2/addons/fargate-fluentbit/... \ No newline at end of file
diff --git a/latest/addons/ingress-nginx/index.html b/latest/addons/ingress-nginx/index.html index 00d524d9..4d976f95 100644 --- a/latest/addons/ingress-nginx/index.html +++ b/latest/addons/ingress-nginx/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/ingress-nginx/... + Redirecting to ../../../v1.9.2/addons/ingress-nginx/... \ No newline at end of file
diff --git a/latest/addons/karpenter/index.html b/latest/addons/karpenter/index.html index d8486193..41e02b67 100644 --- a/latest/addons/karpenter/index.html +++ b/latest/addons/karpenter/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/karpenter/... + Redirecting to ../../../v1.9.2/addons/karpenter/... \ No newline at end of file
diff --git a/latest/addons/kube-prometheus-stack/index.html b/latest/addons/kube-prometheus-stack/index.html index 9688d497..061e8fb2 100644 --- a/latest/addons/kube-prometheus-stack/index.html +++ b/latest/addons/kube-prometheus-stack/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/kube-prometheus-stack/... + Redirecting to ../../../v1.9.2/addons/kube-prometheus-stack/... \ No newline at end of file
diff --git a/latest/addons/metrics-server/index.html b/latest/addons/metrics-server/index.html index 31eabc2f..a593c4c5 100644 --- a/latest/addons/metrics-server/index.html +++ b/latest/addons/metrics-server/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/metrics-server/... + Redirecting to ../../../v1.9.2/addons/metrics-server/... \ No newline at end of file
diff --git a/latest/addons/opa-gatekeeper/index.html b/latest/addons/opa-gatekeeper/index.html index b6b7bda4..16fe3a1d 100644 --- a/latest/addons/opa-gatekeeper/index.html +++ b/latest/addons/opa-gatekeeper/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/opa-gatekeeper/... + Redirecting to ../../../v1.9.2/addons/opa-gatekeeper/... \ No newline at end of file
diff --git a/latest/addons/secrets-store-csi-driver-provider-aws/index.html b/latest/addons/secrets-store-csi-driver-provider-aws/index.html index a4a3f49c..4ecf4f02 100644 --- a/latest/addons/secrets-store-csi-driver-provider-aws/index.html +++ b/latest/addons/secrets-store-csi-driver-provider-aws/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/secrets-store-csi-driver-provider-aws/... + Redirecting to ../../../v1.9.2/addons/secrets-store-csi-driver-provider-aws/... \ No newline at end of file
diff --git a/latest/addons/velero/index.html b/latest/addons/velero/index.html index 1d600549..a1ea47d3 100644 --- a/latest/addons/velero/index.html +++ b/latest/addons/velero/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/velero/... + Redirecting to ../../../v1.9.2/addons/velero/... \ No newline at end of file
diff --git a/latest/addons/vertical-pod-autoscaler/index.html b/latest/addons/vertical-pod-autoscaler/index.html index 19f063ff..2bf47a16 100644 --- a/latest/addons/vertical-pod-autoscaler/index.html +++ b/latest/addons/vertical-pod-autoscaler/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../../v1.9.1/addons/vertical-pod-autoscaler/... + Redirecting to ../../../v1.9.2/addons/vertical-pod-autoscaler/... \ No newline at end of file
diff --git a/latest/amazon-eks-addons/index.html b/latest/amazon-eks-addons/index.html index 2db1bde4..7f098486 100644 --- a/latest/amazon-eks-addons/index.html +++ b/latest/amazon-eks-addons/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../v1.9.1/amazon-eks-addons/... + Redirecting to ../../v1.9.2/amazon-eks-addons/... \ No newline at end of file
diff --git a/latest/architectures/index.html b/latest/architectures/index.html index 3b8c6901..2575afc3 100644 --- a/latest/architectures/index.html +++ b/latest/architectures/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../v1.9.1/architectures/... + Redirecting to ../../v1.9.2/architectures/... \ No newline at end of file
diff --git a/latest/aws-partner-addons/index.html b/latest/aws-partner-addons/index.html index 2eab6bc8..98076dc9 100644 --- a/latest/aws-partner-addons/index.html +++ b/latest/aws-partner-addons/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../v1.9.1/aws-partner-addons/... + Redirecting to ../../v1.9.2/aws-partner-addons/... \ No newline at end of file
diff --git a/latest/helm-release/index.html b/latest/helm-release/index.html index 5bcde855..5907ee14 100644 --- a/latest/helm-release/index.html +++ b/latest/helm-release/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../../v1.9.1/helm-release/... + Redirecting to ../../v1.9.2/helm-release/... \ No newline at end of file
diff --git a/latest/index.html b/latest/index.html index 5146d7d9..c64b5227 100644 --- a/latest/index.html +++ b/latest/index.html @@ -4,13 +4,13 @@ Redirecting - Redirecting to ../v1.9.1/... + Redirecting to ../v1.9.2/... \ No newline at end of file
diff --git a/v1.9.2/404.html b/v1.9.2/404.html new file mode 100644 index 00000000..cddbe89e --- /dev/null +++ b/v1.9.2/404.html @@ -0,0 +1,789 @@
+Amazon EKS Blueprints Addons
+404 - Not found
diff --git a/v1.9.2/addons/argo-events/index.html b/v1.9.2/addons/argo-events/index.html new file mode 100644 index 00000000..e7fa42a2 --- /dev/null +++ b/v1.9.2/addons/argo-events/index.html @@ -0,0 +1,892 @@
+Argo Events - Amazon EKS Blueprints Addons

Argo Events

+

Argo Events is an open source container-native event-driven workflow automation framework for Kubernetes which helps you trigger K8s objects, Argo Workflows, Serverless workloads, etc. on events from a variety of sources. Argo Events is implemented as a Kubernetes CRD (Custom Resource Definition).

+

Usage

+

Argo Events can be deployed by enabling the add-on via the following.

+
enable_argo_events = true
+
+

You can optionally customize the Helm chart that deploys Argo Events via the following configuration.

+
  enable_argo_events = true
+
+  argo_events = {
+    name          = "argo-events"
+    chart_version = "2.4.0"
+    repository    = "https://argoproj.github.io/argo-helm"
+    namespace     = "argo-events"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+
+

Verify argo-events pods are running.

+
$ kubectl get pods -n argo-events
+NAME                                                  READY   STATUS    RESTARTS   AGE
+argo-events-controller-manager-bfb894cdb-k8hzn        1/1     Running   0          11m
+
diff --git a/v1.9.2/addons/argo-rollouts/index.html b/v1.9.2/addons/argo-rollouts/index.html new file mode 100644 index 00000000..30b31733 --- /dev/null +++ b/v1.9.2/addons/argo-rollouts/index.html @@ -0,0 +1,892 @@
+Argo Rollouts - Amazon EKS Blueprints Addons

Argo Rollouts

+

Argo Rollouts is a Kubernetes controller and set of CRDs which provide advanced deployment capabilities such as blue-green, canary, canary analysis, experimentation, and progressive delivery features to Kubernetes.

+

Usage

+

Argo Rollouts can be deployed by enabling the add-on via the following.

+
enable_argo_rollouts = true
+
+

You can optionally customize the Helm chart that deploys Argo Rollouts via the following configuration.

+
  enable_argo_rollouts = true
+
+  argo_rollouts = {
+    name          = "argo-rollouts"
+    chart_version = "2.22.3"
+    repository    = "https://argoproj.github.io/argo-helm"
+    namespace     = "argo-rollouts"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+
+

Verify argo-rollouts pods are running.

+
$ kubectl get pods -n argo-rollouts
+NAME                             READY   STATUS    RESTARTS   AGE
+argo-rollouts-5db5688849-x89zb   0/1     Running   0          11s
+
diff --git a/v1.9.2/addons/argo-workflows/index.html b/v1.9.2/addons/argo-workflows/index.html new file mode 100644 index 00000000..c009972e --- /dev/null +++ b/v1.9.2/addons/argo-workflows/index.html @@ -0,0 +1,893 @@
+Argo Workflows - Amazon EKS Blueprints Addons

Argo Workflows

+

Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo Workflows is implemented as a Kubernetes CRD (Custom Resource Definition).

+

Usage

+

Argo Workflows can be deployed by enabling the add-on via the following.

+
enable_argo_workflows = true
+
+

You can optionally customize the Helm chart that deploys Argo Workflows via the following configuration.

+
  enable_argo_workflows = true
+
+  argo_workflows = {
+    name          = "argo-workflows"
+    chart_version = "0.28.2"
+    repository    = "https://argoproj.github.io/argo-helm"
+    namespace     = "argo-workflows"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+
+

Verify argo-workflows pods are running.

+
$ kubectl get pods -n argo-workflows
+NAME                                                  READY   STATUS    RESTARTS   AGE
+argo-workflows-server-68988cd864-22zhr                1/1     Running   0          6m32s
+argo-workflows-workflow-controller-7ff7b5658d-9q44f   1/1     Running   0          6m32s
+
diff --git a/v1.9.2/addons/argocd/index.html b/v1.9.2/addons/argocd/index.html new file mode 100644 index 00000000..8ce90f95 --- /dev/null +++ b/v1.9.2/addons/argocd/index.html @@ -0,0 +1,898 @@
+Argo CD - Amazon EKS Blueprints Addons

Argo CD

+

Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes.

+

Usage

+

Argo CD can be deployed by enabling the add-on via the following.

+
enable_argocd = true
+
+

You can optionally customize the Helm chart that deploys Argo CD via the following configuration.

+
  enable_argocd = true
+
+  argocd = {
+    name          = "argocd"
+    chart_version = "5.29.1"
+    repository    = "https://argoproj.github.io/argo-helm"
+    namespace     = "argocd"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+
+

Verify argocd pods are running.

+
$ kubectl get pods -n argocd
+NAME                                                        READY   STATUS    RESTARTS   AGE
+argo-cd-argocd-application-controller-0                     1/1     Running   0          146m
+argo-cd-argocd-applicationset-controller-678d85f77b-rmpcb   1/1     Running   0          146m
+argo-cd-argocd-dex-server-7b6c9b5969-zpqnl                  1/1     Running   0          146m
+argo-cd-argocd-notifications-controller-6d489b99c9-j6fdw    1/1     Running   0          146m
+argo-cd-argocd-redis-59dd95f5b5-8fx74                       1/1     Running   0          146m
+argo-cd-argocd-repo-server-7b9bd88c95-mh2fz                 1/1     Running   0          146m
+argo-cd-argocd-server-6f9cfdd4d5-8mfpc                      1/1     Running   0          146m
+
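Once the server is running, applications are registered declaratively. The following is a minimal, hypothetical sketch of an Argo CD Application resource managed through the Terraform kubernetes provider; the repository URL, path, and names are placeholders, not part of this module.

resource "kubernetes_manifest" "example_application" {
+  manifest = {
+    apiVersion = "argoproj.io/v1alpha1"
+    kind       = "Application"
+    metadata   = { name = "example", namespace = "argocd" }
+    spec = {
+      project = "default"
+      source = {
+        repoURL        = "https://github.com/example-org/example-repo" # placeholder repository
+        targetRevision = "HEAD"
+        path           = "manifests"
+      }
+      destination = {
+        server    = "https://kubernetes.default.svc" # in-cluster destination
+        namespace = "default"
+      }
+    }
+  }
+}
+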
diff --git a/v1.9.2/addons/aws-cloudwatch-metrics/index.html b/v1.9.2/addons/aws-cloudwatch-metrics/index.html new file mode 100644 index 00000000..e473b191 --- /dev/null +++ b/v1.9.2/addons/aws-cloudwatch-metrics/index.html @@ -0,0 +1,896 @@
+AWS CloudWatch Metrics - Amazon EKS Blueprints Addons

AWS CloudWatch Metrics

+

Use CloudWatch Container Insights to collect, aggregate, and summarize metrics and logs from your containerized applications and microservices. CloudWatch automatically collects metrics for many resources, such as CPU, memory, disk, and network. Container Insights also provides diagnostic information, such as container restart failures, to help you isolate issues and resolve them quickly. You can also set CloudWatch alarms on metrics that Container Insights collects.

+

Container Insights collects data as performance log events using embedded metric format. These performance log events are entries that use a structured JSON schema that enables high-cardinality data to be ingested and stored at scale. From this data, CloudWatch creates aggregated metrics at the cluster, node, pod, task, and service level as CloudWatch metrics. The metrics that Container Insights collects are available in CloudWatch automatic dashboards, and also viewable in the Metrics section of the CloudWatch console.

+
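To illustrate the shape of such an event, here is a hedged sketch of a single embedded metric format record, expressed with Terraform's jsonencode purely for illustration; the metric and dimension names are assumptions, not the agent's exact output.

locals {
+  # Illustrative EMF performance log event: the `_aws` block tells CloudWatch
+  # which fields to extract as metrics; the remaining keys are dimensions/values.
+  emf_event_example = jsonencode({
+    _aws = {
+      Timestamp = 1700000000000 # epoch milliseconds
+      CloudWatchMetrics = [{
+        Namespace  = "ContainerInsights"
+        Dimensions = [["ClusterName", "Namespace", "PodName"]]
+        Metrics    = [{ Name = "pod_cpu_utilization", Unit = "Percent" }]
+      }]
+    }
+    ClusterName         = "example-cluster"
+    Namespace           = "default"
+    PodName             = "example-pod"
+    pod_cpu_utilization = 12.5
+  })
+}
+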

Usage

+

aws-cloudwatch-metrics can be deployed by enabling the add-on via the following.

+
enable_aws_cloudwatch_metrics = true
+
+

You can also customize the Helm chart that deploys aws-cloudwatch-metrics via the following configuration:

+
  enable_aws_cloudwatch_metrics        = true
+
+  aws_cloudwatch_metrics_irsa_policies = ["IAM Policies"]
+  aws_cloudwatch_metrics   = {
+    role_policies = ["IAM Policies"]  # extra policies in addition of CloudWatchAgentServerPolicy
+    name          = "aws-cloudwatch-metrics"
+    repository    = "https://aws.github.io/eks-charts"
+    chart_version = "0.0.9"
+    namespace     = "amazon-cloudwatch"
+    values        = [templatefile("${path.module}/values.yaml", {})] # The value `clusterName` is already set to the EKS cluster name, no need to specify here
+  }
+
+

Verify aws-cloudwatch-metrics pods are running

+
$ kubectl get pods -n amazon-cloudwatch
+
+NAME                           READY   STATUS    RESTARTS   AGE
+aws-cloudwatch-metrics-2dt5h   1/1     Running   0          149m
+
diff --git a/v1.9.2/addons/aws-efs-csi-driver/index.html b/v1.9.2/addons/aws-efs-csi-driver/index.html new file mode 100644 index 00000000..bc0f35aa --- /dev/null +++ b/v1.9.2/addons/aws-efs-csi-driver/index.html @@ -0,0 +1,915 @@
+AWS EFS CSI Driver - Amazon EKS Blueprints Addons

AWS EFS CSI Driver

+

This add-on deploys the AWS EFS CSI driver into an EKS cluster.

+

Usage

+

The AWS EFS CSI driver can be deployed by enabling the add-on via the following. Check out the full example to deploy an EKS Cluster with EFS backing the dynamic provisioning of persistent volumes.

+
  enable_aws_efs_csi_driver = true
+
+

You can optionally customize the Helm chart that deploys the driver via the following configuration.

+
  enable_aws_efs_csi_driver = true
+
+  # Optional Helm chart customization
+  aws_efs_csi_driver = {
+    repository    = "https://kubernetes-sigs.github.io/aws-efs-csi-driver/"
+    chart_version = "2.4.1"
+    role_policies = ["<ADDITIONAL_IAM_POLICY_ARN>"]
+  }
+
+

Once deployed, you will be able to see a number of supporting resources in the kube-system namespace.

+
$ kubectl get deployment efs-csi-controller -n kube-system
+
+NAME                 READY   UP-TO-DATE   AVAILABLE   AGE
+efs-csi-controller   2/2     2            2           4m29s
+
+
$ kubectl get daemonset efs-csi-node -n kube-system
+
+NAME           DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR                 AGE
+efs-csi-node   3         3         3       3            3           beta.kubernetes.io/os=linux   4m32s
+
+

Validate EFS CSI Driver

+

Follow the static provisioning example described here to validate the CSI driver is working as expected.

diff --git a/v1.9.2/addons/aws-for-fluentbit/index.html b/v1.9.2/addons/aws-for-fluentbit/index.html new file mode 100644 index 00000000..5ba073c9 --- /dev/null +++ b/v1.9.2/addons/aws-for-fluentbit/index.html @@ -0,0 +1,955 @@
+AWS for Fluent Bit - Amazon EKS Blueprints Addons

AWS for Fluent Bit

+

AWS provides a Fluent Bit image with plugins for both CloudWatch Logs and Kinesis Data Firehose. We recommend using Fluent Bit as your log router because it has a lower resource utilization rate than Fluentd.

+

Usage

+

AWS for Fluent Bit can be deployed by enabling the add-on via the following.

+
enable_aws_for_fluentbit = true
+
+

You can optionally customize the Helm chart that deploys AWS for Fluent Bit via the following configuration.

+
  enable_aws_for_fluentbit = true
+  aws_for_fluentbit_cw_log_group = {
+    create          = true
+    use_name_prefix = true # Set this to true to enable name prefix
+    name_prefix     = "eks-cluster-logs-"
+    retention       = 7
+  }
+  aws_for_fluentbit = {
+    name          = "aws-for-fluent-bit"
+    chart_version = "0.1.28"
+    repository    = "https://aws.github.io/eks-charts"
+    namespace     = "kube-system"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+
+

If you want to enable Container Insights on Amazon EKS through Fluent Bit, you need to add the following parameter in your configuration:

+
  enable_aws_for_fluentbit = true
+  aws_for_fluentbit = {
+    enable_containerinsights = true
+  }
+
+

By default, the AWS for Fluent Bit integration will not enable the Container Insights kubelet monitoring feature, since this is an optional feature that is suggested to be enabled only on large clusters. To enable the Container Insights Use_Kubelet feature, you'll need to provide a few more parameters:

+
  enable_aws_for_fluentbit = true
+  aws_for_fluentbit = {
+    enable_containerinsights = true
+    kubelet_monitoring       = true
+    set = [{
+        name  = "cloudWatchLogs.autoCreateGroup"
+        value = true
+      },
+      {
+        name  = "hostNetwork"
+        value = true
+      },
+      {
+        name  = "dnsPolicy"
+        value = "ClusterFirstWithHostNet"
+      }
+    ]
+  }
+
+

Verify the Fluent Bit setup

+

Verify aws-for-fluentbit pods are running.

+
$ kubectl -n kube-system get pods -l app.kubernetes.io/name=aws-for-fluent-bit
+NAME                       READY   STATUS    RESTARTS   AGE
+aws-for-fluent-bit-6lhkj   1/1     Running   0          15m
+aws-for-fluent-bit-sbn9b   1/1     Running   0          15m
+aws-for-fluent-bit-svhwq   1/1     Running   0          15m
+
+

Open the CloudWatch console at https://console.aws.amazon.com/cloudwatch/

+

In the navigation pane, choose Log groups.

+

Make sure that you're in the Region where you deployed Fluent Bit.

+

Check the list of log groups in the Region. You should see the following:

+
/aws/eks/complete/aws-fluentbit-logs
+
+

If you enabled Container Insights, you should also see the following Log Groups in your CloudWatch Console.

+
/aws/containerinsights/Cluster_Name/application
+
+/aws/containerinsights/Cluster_Name/host
+
+/aws/containerinsights/Cluster_Name/dataplane
+
+

Navigate to one of these log groups and check the Last Event Time for the log streams. If it is recent relative to when you deployed Fluent Bit, the setup is verified.

+

There might be a slight delay in creating the /dataplane log group. This is normal as these log groups only get created when Fluent Bit starts sending logs for that log group.

diff --git a/v1.9.2/addons/aws-fsx-csi-driver/index.html b/v1.9.2/addons/aws-fsx-csi-driver/index.html new file mode 100644 index 00000000..805e3365 --- /dev/null +++ b/v1.9.2/addons/aws-fsx-csi-driver/index.html @@ -0,0 +1,1000 @@
+AWS FSx CSI Driver - Amazon EKS Blueprints Addons

AWS FSx CSI Driver

+

This add-on deploys the Amazon FSx CSI Driver into an Amazon EKS Cluster.

+

Usage

+

The Amazon FSx CSI Driver can be deployed by enabling the add-on via the following.

+
  enable_aws_fsx_csi_driver = true
+
+

Helm Chart customization

+

You can optionally customize the Helm chart deployment using a configuration like the following.

+
  enable_aws_fsx_csi_driver = true
+  aws_fsx_csi_driver = {
+    namespace     = "aws-fsx-csi-driver"
+    chart_version = "1.6.0"
+    role_policies = ["<ADDITIONAL_IAM_POLICY_ARN>"]
+  }
+
+

You can find all available Helm Chart parameter values here.

+

Validation

+

Once deployed, you will be able to see a number of supporting resources in the kube-system namespace.

+
$ kubectl -n kube-system get deployment fsx-csi-controller
+
+NAME                 READY   UP-TO-DATE   AVAILABLE   AGE
+fsx-csi-controller   2/2     2            2           4m29s
+
+$ kubectl -n kube-system get pods -l app=fsx-csi-controller
+NAME                                  READY   STATUS    RESTARTS   AGE
+fsx-csi-controller-56c6d9bbb8-89cpc   4/4     Running   0          3m30s
+fsx-csi-controller-56c6d9bbb8-9wnlh   4/4     Running   0          3m30s
+
+
$ kubectl -n kube-system get daemonset fsx-csi-node
+NAME           DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
+fsx-csi-node   3         3         3       3            3           kubernetes.io/os=linux   5m27s
+
+$ kubectl -n kube-system get pods -l  app=fsx-csi-node
+NAME                 READY   STATUS    RESTARTS   AGE
+fsx-csi-node-7c5z6   3/3     Running   0          5m29s
+fsx-csi-node-d5q28   3/3     Running   0          5m29s
+fsx-csi-node-hlg8q   3/3     Running   0          5m29s
+
+

Create a StorageClass. Replace the SubnetID and the SecurityGroupID with your own values. More details here.

+
$ cat <<EOF | kubectl apply -f -
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: fsx-sc
+provisioner: fsx.csi.aws.com
+parameters:
+  subnetId: <YOUR_SUBNET_IDs>
+  securityGroupIds: <YOUR_SG_ID>
+  perUnitStorageThroughput: "200"
+  deploymentType: PERSISTENT_1
+mountOptions:
+  - flock
+EOF
+
+
$ kubectl describe storageclass fsx-sc
+Name:            fsx-sc
+IsDefaultClass:  No
+Annotations:     kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"storage.k8s.io/v1","kind":"StorageClass","metadata":{"annotations":{},"name":"fsx-sc"},"mountOptions":null,"parameters":{"deploymentType":"PERSISTENT_1","perUnitStorageThroughput":"200","securityGroupIds":"sg-q1w2e3r4t5y6u7i8o","subnetId":"subnet-q1w2e3r4t5y6u7i8o"},"provisioner":"fsx.csi.aws.com"}
+
+Provisioner:           fsx.csi.aws.com
+Parameters:            deploymentType=PERSISTENT_1,perUnitStorageThroughput=200,securityGroupIds=sg-q1w2e3r4t5y6u7i8o,subnetId=subnet-q1w2e3r4t5y6u7i8o
+AllowVolumeExpansion:  <unset>
+MountOptions:          <none>
+ReclaimPolicy:         Delete
+VolumeBindingMode:     Immediate
+Events:                <none>
+
+

Create a PVC.

+
$ cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: fsx-claim
+spec:
+  accessModes:
+    - ReadWriteMany
+  storageClassName: fsx-sc
+  resources:
+    requests:
+      storage: 1200Gi
+EOF
+
+

Wait for the PV to be created and bound to your PVC.

+
$ kubectl get pvc
+NAME        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+fsx-claim   Bound    pvc-df385730-72d6-4b0c-8275-cc055a438760   1200Gi     RWX            fsx-sc         7m47s
+$ kubectl get pv
+NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS   REASON   AGE
+pvc-df385730-72d6-4b0c-8275-cc055a438760   1200Gi     RWX            Delete           Bound    default/fsx-claim   fsx-sc                  2m13s
+
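To exercise the volume end to end, a pod can mount the claim. The sketch below uses the Terraform kubernetes provider with an illustrative busybox image; it is not part of the upstream example.

resource "kubernetes_pod_v1" "fsx_app" {
+  metadata {
+    name = "fsx-app"
+  }
+  spec {
+    container {
+      name    = "app"
+      image   = "busybox"
+      command = ["sh", "-c", "echo hello >> /data/out.txt && sleep 3600"]
+      volume_mount {
+        name       = "persistent-storage"
+        mount_path = "/data"
+      }
+    }
+    volume {
+      name = "persistent-storage"
+      persistent_volume_claim {
+        claim_name = "fsx-claim" # the PVC created above
+      }
+    }
+  }
+}
+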
diff --git a/v1.9.2/addons/aws-gateway-api-controller/index.html b/v1.9.2/addons/aws-gateway-api-controller/index.html new file mode 100644 index 00000000..5e88f656 --- /dev/null +++ b/v1.9.2/addons/aws-gateway-api-controller/index.html @@ -0,0 +1,935 @@
+AWS Gateway API Controller - Amazon EKS Blueprints Addons

AWS Gateway API Controller

+

AWS Gateway API Controller lets you connect services across multiple Kubernetes clusters through the Kubernetes Gateway API interface. It is also designed to connect services running on EC2 instances, containers, and as serverless functions. It does this by leveraging Amazon VPC Lattice, which works with Kubernetes Gateway API calls to manage Kubernetes objects.

+

Usage

+

AWS Gateway API Controller can be deployed by enabling the add-on via the following.

+
  enable_aws_gateway_api_controller = true
+  aws_gateway_api_controller = {
+    repository_username = data.aws_ecrpublic_authorization_token.token.user_name
+    repository_password = data.aws_ecrpublic_authorization_token.token.password
+    set = [{
+      name  = "clusterVpcId"
+      value = "vpc-12345abcd"
+    }]
+}
+
+

You can optionally customize the Helm chart that deploys AWS Gateway API Controller via the following configuration.

+
  enable_aws_gateway_api_controller = true
+  aws_gateway_api_controller = {
+    name                = "aws-gateway-api-controller"
+    chart_version       = "v0.0.12"
+    repository          = "oci://public.ecr.aws/aws-application-networking-k8s"
+    repository_username = data.aws_ecrpublic_authorization_token.token.user_name
+    repository_password = data.aws_ecrpublic_authorization_token.token.password
+    namespace           = "aws-application-networking-system"
+    values              = [templatefile("${path.module}/values.yaml", {})]
+    set = [{
+      name  = "clusterVpcId"
+      value = "vpc-12345abcd"
+    }]
+  }
+
+

Verify aws-gateway-api-controller pods are running.

+
$ kubectl get pods -n aws-application-networking-system
+NAME                                                               READY   STATUS    RESTARTS   AGE
+aws-gateway-api-controller-aws-gateway-controller-chart-8f42q426   1/1     Running   0          40s
+aws-gateway-api-controller-aws-gateway-controller-chart-8f4tbl9g   1/1     Running   0          71s
+
+

Deploy example GatewayClass

+
$ kubectl apply -f https://raw.githubusercontent.com/aws/aws-application-networking-k8s/main/examples/gatewayclass.yaml
+gatewayclass.gateway.networking.k8s.io/amazon-vpc-lattice created
+
+

Describe GatewayClass

+
$ kubectl describe gatewayclass
+Name:         amazon-vpc-lattice
+Namespace:
+Labels:       <none>
+Annotations:  <none>
+API Version:  gateway.networking.k8s.io/v1beta1
+Kind:         GatewayClass
+Metadata:
+  Creation Timestamp:  2023-06-22T22:33:32Z
+  Generation:          1
+  Resource Version:    819021
+  UID:                 aac59195-8f37-4c23-a2a5-b0f363deda77
+Spec:
+  Controller Name:  application-networking.k8s.aws/gateway-api-controller
+Status:
+  Conditions:
+    Last Transition Time:  2023-06-22T22:33:32Z
+    Message:               Accepted
+    Observed Generation:   1
+    Reason:                Accepted
+    Status:                True
+    Type:                  Accepted
+Events:                    <none>
+
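With the GatewayClass accepted, a Gateway can reference it. The following is a hedged sketch via kubernetes_manifest; the gateway name and listener are illustrative, not part of the upstream examples.

resource "kubernetes_manifest" "example_gateway" {
+  manifest = {
+    apiVersion = "gateway.networking.k8s.io/v1beta1"
+    kind       = "Gateway"
+    metadata   = { name = "example-gateway", namespace = "default" }
+    spec = {
+      gatewayClassName = "amazon-vpc-lattice"
+      listeners = [{
+        name     = "http"
+        protocol = "HTTP"
+        port     = 80
+      }]
+    }
+  }
+}
+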
diff --git a/v1.9.2/addons/aws-load-balancer-controller/index.html b/v1.9.2/addons/aws-load-balancer-controller/index.html new file mode 100644 index 00000000..69147d90 --- /dev/null +++ b/v1.9.2/addons/aws-load-balancer-controller/index.html @@ -0,0 +1,1000 @@
+AWS Load Balancer Controller - Amazon EKS Blueprints Addons

AWS Load Balancer Controller

+

AWS Load Balancer Controller is a controller to help manage Elastic Load Balancers for a Kubernetes cluster. This Add-on deploys this controller in an Amazon EKS Cluster.

+

Usage

+

In order to deploy the AWS Load Balancer Controller Addon via EKS Blueprints Addons, reference the following parameters under the module.eks_blueprints_addons.

+
+

NOTE: In versions 2.5 and newer, the AWS Load Balancer Controller becomes the default controller for Kubernetes service resources with the type: LoadBalancer and makes an AWS Network Load Balancer (NLB) for each service. It does this by making a mutating webhook for services, which sets the spec.loadBalancerClass field to service.k8s.aws/nlb for new services of type: LoadBalancer. You can turn off this feature and revert to using the legacy Cloud Provider as the default controller, by setting the helm chart value enableServiceMutatorWebhook to false. The cluster won't provision new Classic Load Balancers for your services unless you turn off this feature. Existing Classic Load Balancers will continue to work.

+
+
module "eks_blueprints_addons" {
+
+  enable_aws_load_balancer_controller = true
+  aws_load_balancer_controller = {
+    set = [
+      {
+        name  = "vpcId"
+        value = module.vpc.vpc_id
+      },
+      {
+        name  = "podDisruptionBudget.maxUnavailable"
+        value = 1
+      },
+      {
+        name  = "enableServiceMutatorWebhook"
+        value = "false"
+      }
+    ]
+  }
+}
+
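For reference, a Service can also opt in to the controller explicitly instead of relying on the mutating webhook described above. This is a minimal sketch with the Terraform kubernetes provider; the selector label is an assumption.

resource "kubernetes_service_v1" "example_nlb" {
+  metadata {
+    name = "example-nlb"
+  }
+  spec {
+    type                = "LoadBalancer"
+    load_balancer_class = "service.k8s.aws/nlb" # reconciled by the AWS Load Balancer Controller
+    selector            = { app = "example" }   # assumed pod label
+    port {
+      port        = 80
+      target_port = 8080
+    }
+  }
+}
+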

Helm Chart customization

+

It's possible to customize your deployment using the Helm Chart parameters inside the aws_load_balancer_controller configuration block:

+
  aws_load_balancer_controller = {
+    set = [
+      {
+        name  = "vpcId"
+        value = module.vpc.vpc_id
+      },
+      {
+        name  = "podDisruptionBudget.maxUnavailable"
+        value = 1
+      },
+      {
+        name  = "resources.requests.cpu"
+        value = "100m"
+      },
+      {
+        name  = "resources.requests.memory"
+        value = "128Mi"
+      },
+    ]
+  }
+}
+
+

You can find all available Helm Chart parameter values here.

+

Validate

+
  1. To validate the deployment, check that the aws-load-balancer-controller Pods were created in the kube-system Namespace, as in the following example.
+
kubectl -n kube-system get pods | grep aws-load-balancer-controller
+NAMESPACE       NAME                                            READY   STATUS    RESTARTS   AGE
+kube-system     aws-load-balancer-controller-6cbdb58654-fvskt   1/1     Running   0          26m
+kube-system     aws-load-balancer-controller-6cbdb58654-sc7dk   1/1     Running   0          26m
+
+
  2. Create a Kubernetes Ingress, using the alb IngressClass, pointing to an existing Service. In this example we'll use a Service called example-svc.
+
kubectl create ingress example-ingress --class alb --rule="/*=example-svc:80" \
+--annotation alb.ingress.kubernetes.io/scheme=internet-facing \
+--annotation alb.ingress.kubernetes.io/target-type=ip
+
+
kubectl get ingress  
+NAME                CLASS   HOSTS   ADDRESS                                                                 PORTS   AGE
+example-ingress     alb     *       k8s-example-ingress-7e0d6f03e7-1234567890.us-west-2.elb.amazonaws.com   80      4m9s
+
+

Resources

+

GitHub Repo
+Helm Chart
+AWS Docs

diff --git a/v1.9.2/addons/aws-node-termination-handler/index.html b/v1.9.2/addons/aws-node-termination-handler/index.html new file mode 100644 index 00000000..9ce3d66f --- /dev/null +++ b/v1.9.2/addons/aws-node-termination-handler/index.html @@ -0,0 +1,983 @@
+AWS Node Termination Handler - Amazon EKS Blueprints Addons

AWS Node Termination Handler

+

This project ensures that the Kubernetes control plane responds appropriately to events that can cause your EC2 instance to become unavailable, such as EC2 maintenance events, EC2 Spot interruptions, ASG Scale-In, ASG AZ Rebalance, and EC2 Instance Termination via the API or Console. If not handled, your application code may not stop gracefully, take longer to recover full availability, or accidentally schedule work to nodes that are going down.

+

Usage

+

AWS Node Termination Handler can be deployed by enabling the add-on via the following.

+
enable_aws_node_termination_handler = true
+
+

You can optionally customize the Helm chart that deploys AWS Node Termination Handler via the following configuration.

+
  enable_aws_node_termination_handler = true
+
+  aws_node_termination_handler = {
+    name          = "aws-node-termination-handler"
+    chart_version = "0.21.0"
+    repository    = "https://aws.github.io/eks-charts"
+    namespace     = "aws-node-termination-handler"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+
+
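When worker nodes run in self-managed Auto Scaling groups, the handler also needs to know which ASGs to watch. The following is a hedged sketch: aws_node_termination_handler_asg_arns is assumed from the module's inputs, and the module.eks output shape is an assumption about your cluster definition.

  enable_aws_node_termination_handler = true
+
+  # Assumed wiring: hand the handler the ARNs of your self-managed node
+  # group ASGs so lifecycle hooks and event rules can target them.
+  aws_node_termination_handler_asg_arns = [
+    for asg in module.eks.self_managed_node_groups : asg.autoscaling_group_arn
+  ]
+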

Verify aws-node-termination-handler pods are running.

+
$ kubectl get pods -n aws-node-termination-handler
+NAME                                            READY   STATUS    RESTARTS      AGE
+aws-node-termination-handler-6f598b6b89-6mqgk   1/1     Running   1 (22h ago)   26h
+
+

Verify SQS Queue is created.

+
$ aws sqs list-queues
+
+{
+    "QueueUrls": [
+        "https://sqs.us-east-1.amazonaws.com/XXXXXXXXXXXXXX/aws_node_termination_handler20221123072051157700000004"
+    ]
+}
+
+

Verify Event Rules are created.

+
$ aws events list-rules
+{
+    "Rules": [
+        {
+            "Name": "NTH-ASGTerminiate-20230602191740664900000025",
+            "Arn": "arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-ASGTerminiate-20230602191740664900000025",
+            "EventPattern": "{\"detail-type\":[\"EC2 Instance-terminate Lifecycle Action\"],\"source\":[\"aws.autoscaling\"]}",
+            "State": "ENABLED",
+            "Description": "Auto scaling instance terminate event",
+            "EventBusName": "default"
+        },
+        {
+            "Name": "NTH-HealthEvent-20230602191740079300000022",
+            "Arn": "arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-HealthEvent-20230602191740079300000022",
+            "EventPattern": "{\"detail-type\":[\"AWS Health Event\"],\"source\":[\"aws.health\"]}",
+            "State": "ENABLED",
+            "Description": "AWS health event",
+            "EventBusName": "default"
+        },
+        {
+            "Name": "NTH-InstanceRebalance-20230602191740077100000021",
+            "Arn": "arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-InstanceRebalance-20230602191740077100000021",
+            "EventPattern": "{\"detail-type\":[\"EC2 Instance Rebalance Recommendation\"],\"source\":[\"aws.ec2\"]}",
+            "State": "ENABLED",
+            "Description": "EC2 instance rebalance recommendation",
+            "EventBusName": "default"
+        },
+        {
+            "Name": "NTH-InstanceStateChange-20230602191740165000000024",
+            "Arn": "arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-InstanceStateChange-20230602191740165000000024",
+            "EventPattern": "{\"detail-type\":[\"EC2 Instance State-change Notification\"],\"source\":[\"aws.ec2\"]}",
+            "State": "ENABLED",
+            "Description": "EC2 instance state-change notification",
+            "EventBusName": "default"
+        },
+        {
+            "Name": "NTH-SpotInterrupt-20230602191740077100000020",
+            "Arn": "arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-SpotInterrupt-20230602191740077100000020",
+            "EventPattern": "{\"detail-type\":[\"EC2 Spot Instance Interruption Warning\"],\"source\":[\"aws.ec2\"]}",
+            "State": "ENABLED",
+            "Description": "EC2 spot instance interruption warning",
+            "EventBusName": "default"
+        },
+        {
+            "Name": "NTHASGTermRule",
+            "Arn": "arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHASGTermRule",
+            "EventPattern": "{\"detail-type\":[\"EC2 Instance-terminate Lifecycle Action\"],\"source\":[\"aws.autoscaling\"]}",
+            "State": "ENABLED",
+            "EventBusName": "default"
+        },
+        {
+            "Name": "NTHInstanceStateChangeRule",
+            "Arn": "arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHInstanceStateChangeRule",
+            "EventPattern": "{\"detail-type\":[\"EC2 Instance State-change Notification\"],\"source\":[\"aws.ec2\"]}",
+            "State": "ENABLED",
+            "EventBusName": "default"
+        },
+        {
+            "Name": "NTHRebalanceRule",
+            "Arn": "arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHRebalanceRule",
+            "EventPattern": "{\"detail-type\":[\"EC2 Instance Rebalance Recommendation\"],\"source\":[\"aws.ec2\"]}",
+            "State": "ENABLED",
+            "EventBusName": "default"
+        },
+        {
+            "Name": "NTHScheduledChangeRule",
+            "Arn": "arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHScheduledChangeRule",
+            "EventPattern": "{\"detail-type\":[\"AWS Health Event\"],\"source\":[\"aws.health\"]}",
+            "State": "ENABLED",
+            "EventBusName": "default"
+        },
+        {
+            "Name": "NTHSpotTermRule",
+            "Arn": "arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHSpotTermRule",
+            "EventPattern": "{\"detail-type\":[\"EC2 Spot Instance Interruption Warning\"],\"source\":[\"aws.ec2\"]}",
+            "State": "ENABLED",
+            "EventBusName": "default"
+        }
+    ]
+}
+
diff --git a/v1.9.2/addons/aws-private-ca-issuer/index.html b/v1.9.2/addons/aws-private-ca-issuer/index.html new file mode 100644 index 00000000..16cc97ea --- /dev/null +++ b/v1.9.2/addons/aws-private-ca-issuer/index.html @@ -0,0 +1,999 @@
+AWS Private CA Issuer - Amazon EKS Blueprints Addons

AWS Private CA Issuer

+

AWS Private CA is an AWS service that can set up and manage private CAs, as well as issue private certificates. This add-on deploys the AWS Private CA Issuer as an external issuer to cert-manager that signs off certificate requests using AWS Private CA in an Amazon EKS Cluster.

+

Usage

+

Pre-requisites

+

To deploy the AWS PCA, you need to install cert-manager first; refer to this documentation to do it through EKS Blueprints Addons.

+

Deployment

+

With cert-manager in place, you can deploy the AWS Private CA Issuer Add-on via EKS Blueprints Addons by referencing the following parameters under the module.eks_blueprints_addons.

+
module "eks_blueprints_addons" {
+
+  enable_cert_manager         = true
+  enable_aws_privateca_issuer = true
+  aws_privateca_issuer = {
+    acmca_arn        = aws_acmpca_certificate_authority.this.arn
+  }
+}
+
+

Helm Chart customization

+

It's possible to customize your deployment using the Helm Chart parameters inside the aws_privateca_issuer configuration block:

+
  aws_privateca_issuer = {
+    acmca_arn        = aws_acmpca_certificate_authority.this.arn
+    namespace        = "aws-privateca-issuer"
+    create_namespace = true
+  }
+
+

You can find all available Helm Chart parameter values here.

+

Validation

+
  1. List all the pods running in the aws-privateca-issuer and cert-manager Namespaces.
+
kubectl get pods -n aws-privateca-issuer
+kubectl get pods -n cert-manager
+
+
  2. Check the certificate status; it should be in Ready state, and be pointing to a secret created in the same Namespace (a sketch for creating the issuer follows the output below).
+
kubectl get certificate -o wide
+NAME      READY   SECRET                  ISSUER                    STATUS                                          AGE
+example   True    example-clusterissuer   tls-with-aws-pca-issuer   Certificate is up to date and has not expired   41m
+
+kubectl get secret example-clusterissuer
+NAME                    TYPE                DATA   AGE
+example-clusterissuer   kubernetes.io/tls   3      43m
+
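The validation above assumes an issuer already exists. A hedged sketch of creating one with kubernetes_manifest is shown below; the resource shape follows the aws-privateca-issuer CRDs, and the name and region are illustrative.

resource "kubernetes_manifest" "pca_cluster_issuer" {
+  manifest = {
+    apiVersion = "awspca.cert-manager.io/v1beta1"
+    kind       = "AWSPCAClusterIssuer"
+    metadata   = { name = "tls-with-aws-pca-issuer" }
+    spec = {
+      arn    = aws_acmpca_certificate_authority.this.arn
+      region = "us-west-2" # illustrative region
+    }
+  }
+}
+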
+

Resources

+

GitHub Repo
+Helm Chart
+AWS Docs

diff --git a/v1.9.2/addons/cert-manager/index.html b/v1.9.2/addons/cert-manager/index.html new file mode 100644 index 00000000..1d4120a0 --- /dev/null +++ b/v1.9.2/addons/cert-manager/index.html @@ -0,0 +1,1000 @@
+Cert-Manager - Amazon EKS Blueprints Addons

Cert-Manager

+

Cert-manager is an X.509 certificate controller for Kubernetes-like workloads. It will obtain certificates from a variety of Issuers, both popular public Issuers as well as private Issuers, ensure the certificates are valid and up-to-date, and attempt to renew certificates at a configured time before expiry. This Add-on deploys this controller in an Amazon EKS Cluster.

+

Usage

+

To deploy cert-manager Add-on via EKS Blueprints Addons, reference the following parameters under the module.eks_blueprints_addons.

+
module "eks_blueprints_addons" {
+
+  enable_cert_manager         = true
+}
+
+
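If you intend to solve DNS01 challenges with Route 53, the module can scope the cert-manager IRSA policy to specific hosted zones. The input name below is taken from the module's variables, and the zone ARN is a placeholder.

  enable_cert_manager = true
+
+  # Placeholder hosted zone ARN; restricts the IRSA policy that
+  # cert-manager uses for Route 53 DNS01 challenges.
+  cert_manager_route53_hosted_zone_arns = [
+    "arn:aws:route53:::hostedzone/Z1234567890ABC"
+  ]
+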

Helm Chart customization

+

It's possible to customize your deployment using the Helm Chart parameters inside the cert_manager configuration block:

+
  cert_manager = {
+    chart_version    = "v1.11.1"
+    namespace        = "cert-manager"
+    create_namespace = true
+  }
+
+

You can find all available Helm Chart parameter values here.

+

Validation

+
  1. Validate that the Cert-Manager Pods are running.
+
kubectl -n cert-manager get pods
+NAME                                      READY   STATUS    RESTARTS   AGE
+cert-manager-5989bcc87-96qvf              1/1     Running   0          2m49s
+cert-manager-cainjector-9b44ddb68-8c7b9   1/1     Running   0          2m49s
+cert-manager-webhook-776b65456-k6br4      1/1     Running   0          2m49s
+
+
  2. Create a SelfSigned ClusterIssuer resource in the cluster.
+
apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: selfsigned-cluster-issuer
+spec:
+  selfSigned: {}
+
+
kubectl get clusterissuers -o wide selfsigned-cluster-issuer
+NAME                        READY   STATUS   AGE
+selfsigned-cluster-issuer   True             3m
+
+
  3. Create a Certificate in a given Namespace.
+
apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: example
+  namespace: default
+spec:
+  isCA: true
+  commonName: example
+  secretName: example-secret
+  privateKey:
+    algorithm: ECDSA
+    size: 256
+  issuerRef:
+    name: selfsigned-cluster-issuer
+    kind: ClusterIssuer
+    group: cert-manager.io
+
+
  4. Check the certificate status; it should be in Ready state, and be pointing to a secret created in the same Namespace.
+
kubectl get certificate -o wide
+NAME      READY   SECRET           ISSUER                      STATUS                                          AGE
+example   True    example-secret   selfsigned-cluster-issuer   Certificate is up to date and has not expired   44s
+
+kubectl get secret example-secret
+NAME             TYPE                DATA   AGE
+example-secret   kubernetes.io/tls   3      70s
+
+

Resources

+

GitHub Repo
+Helm Chart

diff --git a/v1.9.2/addons/cluster-autoscaler/index.html b/v1.9.2/addons/cluster-autoscaler/index.html new file mode 100644 index 00000000..c6cdfa19 --- /dev/null +++ b/v1.9.2/addons/cluster-autoscaler/index.html @@ -0,0 +1,892 @@
+Cluster Autoscaler - Amazon EKS Blueprints Addons

Cluster Autoscaler

+

The Kubernetes Cluster Autoscaler automatically adjusts the number of nodes in your cluster when pods fail or are rescheduled onto other nodes. The Cluster Autoscaler uses Auto Scaling groups. For more information, see Cluster Autoscaler on AWS.

+

Usage

+

Cluster Autoscaler can be deployed by enabling the add-on via the following.

+
enable_cluster_autoscaler = true
+
+

You can optionally customize the Helm chart that deploys Cluster Autoscaler via the following configuration.

+
  enable_cluster_autoscaler = true
+
+  cluster_autoscaler = {
+    name          = "cluster-autoscaler"
+    chart_version = "9.29.0"
+    repository    = "https://kubernetes.github.io/autoscaler"
+    namespace     = "kube-system"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+
+
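Individual chart values can also be overridden with set entries; for example (an illustrative choice, not a recommendation), switching the expander strategy:

  cluster_autoscaler = {
+    set = [{
+      name  = "extraArgs.expander"
+      value = "least-waste"
+    }]
+  }
+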

Verify cluster-autoscaler pods are running.

+
$ kubectl get pods -n kube-system
+NAME                                                         READY   STATUS    RESTARTS     AGE
+cluster-autoscaler-aws-cluster-autoscaler-7ff79bc484-pm8g9   1/1     Running   1 (2d ago)   2d5h
+
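To exercise the autoscaler, you can deploy a workload whose aggregate CPU requests exceed the current capacity of the Auto Scaling group. A minimal sketch using the pause image (the deployment name, replica count, and request size are illustrative):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: scale-test                 # illustrative name
spec:
  replicas: 10
  selector:
    matchLabels:
      app: scale-test
  template:
    metadata:
      labels:
        app: scale-test
    spec:
      containers:
        - name: pause
          image: public.ecr.aws/eks-distro/kubernetes/pause:3.7
          resources:
            requests:
              cpu: "1"             # keeps some pods Pending until capacity is added

Pods that cannot be scheduled remain Pending, which Cluster Autoscaler detects and resolves by growing the Auto Scaling group; kubectl get nodes -w shows the new nodes registering.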
diff --git a/v1.9.2/addons/cluster-proportional-autoscaler/index.html b/v1.9.2/addons/cluster-proportional-autoscaler/index.html
new file mode 100644
index 00000000..c8c2b82d
--- /dev/null
+++ b/v1.9.2/addons/cluster-proportional-autoscaler/index.html
@@ -0,0 +1,1015 @@

Cluster Proportional Autoscaler

+

Horizontal cluster-proportional-autoscaler watches over the number of schedulable nodes and cores of the cluster and resizes the number of replicas for the required resource. This functionality may be desirable for applications that need to be autoscaled with the size of the cluster, such as CoreDNS and other services that scale with the number of nodes/pods in the cluster.

+

The cluster-proportional-autoscaler scales applications managed by a Deployment, ReplicationController, or ReplicaSet, and is an alternative to Horizontal Pod Autoscaling. It is typically installed as a Deployment in your cluster.

+

Refer to the eks-best-practices-guides for additional configuration guidance.

+

Usage

+

This add-on requires both enable_cluster_proportional_autoscaler and cluster_proportional_autoscaler as mandatory fields.

+

The example below shows how to enable cluster-proportional-autoscaler for the CoreDNS Deployment. CoreDNS is not configured with an HPA, so this add-on scales it according to the number of nodes and cores in the cluster.

+

This Add-on can be used to scale any application with Deployment objects.

+
enable_cluster_proportional_autoscaler  = true
+cluster_proportional_autoscaler  = {
+    values = [
+      <<-EOT
+        nameOverride: kube-dns-autoscaler
+
+        # Formula for controlling the replicas. Adjust according to your needs
+        # replicas = max( ceil( cores * 1/coresPerReplica ) , ceil( nodes * 1/nodesPerReplica ) )
+        config:
+          linear:
+            coresPerReplica: 256
+            nodesPerReplica: 16
+            min: 1
+            max: 100
+            preventSinglePointFailure: true
+            includeUnschedulableNodes: true
+
+        # Target to scale. In format: deployment/*, replicationcontroller/* or replicaset/* (not case sensitive).
+        options:
+          target: deployment/coredns # Notice the target as `deployment/coredns`
+
+        serviceAccount:
+          create: true
+          name: kube-dns-autoscaler
+
+        podSecurityContext:
+          seccompProfile:
+            type: RuntimeDefault
+          supplementalGroups: [65534]
+          fsGroup: 65534
+
+        resources:
+          limits:
+            cpu: 100m
+            memory: 128Mi
+          requests:
+            cpu: 100m
+            memory: 128Mi
+
+        tolerations:
+          # Cluster Proportional Autoscaler for CoreDNS Service
+          - key: "CriticalAddonsOnly"
+            operator: "Exists"
+      EOT
+    ]
+}
+
+

Expected result

+

The cluster-proportional-autoscaler pod is running in the kube-system namespace.

kubectl -n kube-system get po -l app.kubernetes.io/instance=cluster-proportional-autoscaler
+NAME                                                              READY   STATUS    RESTARTS   AGE
+cluster-proportional-autoscaler-kube-dns-autoscaler-d8dc8477xx7   1/1     Running   0          21h
+
The cluster-proportional-autoscaler-kube-dns-autoscaler config map exists.
kubectl -n kube-system get cm cluster-proportional-autoscaler-kube-dns-autoscaler
+NAME                                                  DATA   AGE
+cluster-proportional-autoscaler-kube-dns-autoscaler   1      21h
+

+

Testing

+

To test that coredns pods scale, first take a baseline of how many nodes the cluster has and how many coredns pods are running.

kubectl get nodes
+NAME                          STATUS   ROLES    AGE   VERSION
+ip-10-0-19-243.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954
+ip-10-0-25-182.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954
+ip-10-0-40-138.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954
+ip-10-0-8-136.ec2.internal    Ready    <none>   21h   v1.26.4-eks-0a21954
+
+kubectl get po -n kube-system -l k8s-app=kube-dns
+NAME                       READY   STATUS    RESTARTS   AGE
+coredns-7975d6fb9b-dlkdd   1/1     Running   0          21h
+coredns-7975d6fb9b-xqqwp   1/1     Running   0          21h
+

+

Change the following parameters in the HCL code above so a scaling event can be easily triggered:

        config:
+          linear:
+            coresPerReplica: 4
+            nodesPerReplica: 2
+            min: 1
+            max: 4
+
and execute terraform apply.

+

Increase the managed node group desired size, in this example from 4 to 5. This can be done via the AWS Console.

+
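Why this should add exactly one coredns replica: in linear mode the controller computes replicas = max( ceil( cores × 1/coresPerReplica ), ceil( nodes × 1/nodesPerReplica ) ), clamped between min and max. Assuming for illustration that each node has 2 vCPUs, four nodes give max(ceil(8/4), ceil(4/2)) = 2 replicas, while five nodes give max(ceil(10/4), ceil(5/2)) = 3.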

Check that the new node came up and coredns scaled up.

kubectl get nodes
NAME                          STATUS   ROLES    AGE   VERSION
+ip-10-0-14-120.ec2.internal   Ready    <none>   10m   v1.26.4-eks-0a21954
+ip-10-0-19-243.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954
+ip-10-0-25-182.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954
+ip-10-0-40-138.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954
+ip-10-0-8-136.ec2.internal    Ready    <none>   21h   v1.26.4-eks-0a21954
+
+kubectl get po -n kube-system -l k8s-app=kube-dns
+NAME                       READY   STATUS    RESTARTS   AGE
+coredns-7975d6fb9b-dlkdd   1/1     Running   0          21h
+coredns-7975d6fb9b-ww64t   1/1     Running   0          10m
+coredns-7975d6fb9b-xqqwp   1/1     Running   0          21h
+

diff --git a/v1.9.2/addons/external-dns/index.html b/v1.9.2/addons/external-dns/index.html
new file mode 100644
index 00000000..3e583c9c
--- /dev/null
+++ b/v1.9.2/addons/external-dns/index.html
@@ -0,0 +1,903 @@

External DNS

+

ExternalDNS makes Kubernetes resources discoverable via public DNS servers. Like KubeDNS, it retrieves a list of resources (Services, Ingresses, etc.) from the Kubernetes API to determine a desired list of DNS records. Unlike KubeDNS, however, it's not a DNS server itself, but merely configures other DNS providers accordingly—e.g. AWS Route 53.

+

Usage

+

External DNS can be deployed by enabling the add-on via the following.

+
enable_external_dns = true
+
+

You can optionally customize the Helm chart that deploys External DNS via the following configuration.

+
  enable_external_dns = true
+
+  external_dns = {
+    name          = "external-dns"
+    chart_version = "1.12.2"
+    repository    = "https://kubernetes-sigs.github.io/external-dns/"
+    namespace     = "external-dns"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+  external_dns_route53_zone_arns = ["XXXXXXXXXXXXXXXXXXXXXXX"]
+
+

Verify external-dns pods are running.

+
$ kubectl get pods -n external-dns
+NAME                            READY   STATUS    RESTARTS     AGE
+external-dns-849b89c675-ffnf6   1/1     Running   1 (2d ago)   2d5h
+
+
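Once the controller is running, DNS records are driven by annotations on Services and Ingresses. A minimal sketch of a LoadBalancer Service requesting a record (the hostname and selector labels are illustrative):

apiVersion: v1
kind: Service
metadata:
  name: nginx                      # illustrative
  annotations:
    # external-dns reads this annotation and creates a record for the
    # provisioned load balancer in the matching Route 53 hosted zone
    external-dns.alpha.kubernetes.io/hostname: nginx.example.com
spec:
  type: LoadBalancer
  selector:
    app: nginx
  ports:
    - port: 80
      targetPort: 80

The record is only created if the hosted zone is among those granted by external_dns_route53_zone_arns.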

To further configure external-dns, refer to the examples.

diff --git a/v1.9.2/addons/external-secrets/index.html b/v1.9.2/addons/external-secrets/index.html
new file mode 100644
index 00000000..a5727764
--- /dev/null
+++ b/v1.9.2/addons/external-secrets/index.html
@@ -0,0 +1,894 @@

External Secrets

+

External Secrets Operator is a Kubernetes operator that integrates external secret management systems like AWS Secrets Manager, HashiCorp Vault, Google Secrets Manager, Azure Key Vault, IBM Cloud Secrets Manager, and many more. The operator reads information from external APIs and automatically injects the values into a Kubernetes Secret.

+

Usage

+

External Secrets can be deployed by enabling the add-on via the following.

+
enable_external_secrets = true
+
+

You can optionally customize the Helm chart that deploys External Secrets via the following configuration.

+
  enable_external_secrets = true
+
+  external_secrets = {
+    name          = "external-secrets"
+    chart_version = "0.8.1"
+    repository    = "https://charts.external-secrets.io"
+    namespace     = "external-secrets"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+
+

Verify external-secrets pods are running.

+
$ kubectl get pods -n external-secrets
+NAME                                               READY   STATUS    RESTARTS       AGE
+external-secrets-67bfd5b47c-xc5xf                  1/1     Running   1 (2d1h ago)   2d6h
+external-secrets-cert-controller-8f75c6f79-qcfx4   1/1     Running   1 (2d1h ago)   2d6h
+external-secrets-webhook-78f6bd456-76wmm           1/1     Running   1 (2d1h ago)   2d6h
+
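Secrets are then consumed by pairing a (Cluster)SecretStore that points at AWS Secrets Manager with an ExternalSecret that references it. A minimal sketch, assuming IRSA-based authentication is already in place and an illustrative Secrets Manager secret named my-app/credentials:

apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
  name: aws-secrets-manager
spec:
  provider:
    aws:
      service: SecretsManager
      region: us-west-2            # illustrative region
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: my-app-credentials
  namespace: default
spec:
  refreshInterval: 1h              # how often the external value is re-read
  secretStoreRef:
    name: aws-secrets-manager
    kind: ClusterSecretStore
  target:
    name: my-app-credentials       # Kubernetes Secret to create
  dataFrom:
    - extract:
        key: my-app/credentials    # Secrets Manager secret name (illustrative)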
diff --git a/v1.9.2/addons/fargate-fluentbit/index.html b/v1.9.2/addons/fargate-fluentbit/index.html
new file mode 100644
index 00000000..2c188fe5
--- /dev/null
+++ b/v1.9.2/addons/fargate-fluentbit/index.html
@@ -0,0 +1,990 @@

Fargate FluentBit

+

Amazon EKS on Fargate offers a built-in log router based on Fluent Bit. This means that you don't explicitly run a Fluent Bit container as a sidecar; Amazon runs it for you. All that you have to do is configure the log router. The configuration happens through a dedicated ConfigMap that is deployed via this add-on.

+

Usage

+

To configure the Fargate Fluentbit ConfigMap via the EKS Blueprints Addons, just reference the following parameters under the module.eks_blueprints_addons.

+
module "eks_blueprints_addons" {
+
+  enable_fargate_fluentbit = true
+  fargate_fluentbit = {
+    flb_log_cw = true
+  }
+}
+
+

It's possible to customize the CloudWatch Log Group parameters in the fargate_fluentbit_cw_log_group configuration block:

+
  fargate_fluentbit_cw_log_group = {
+    # `name` and `name_prefix` are mutually exclusive -- set one or the other
+    name              = "existing-log-group"
+    name_prefix       = "dev-environment-logs"
+    retention_in_days = 7
+    kms_key_id        = "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
+    skip_destroy      = true
+  }
+

Validation

+
  1. Check if the aws-logging ConfigMap for Fargate Fluentbit was created.
+
kubectl -n aws-observability get configmap aws-logging -o yaml
+apiVersion: v1
+data:
+  filters.conf: |
+    [FILTER]
+      Name parser
+      Match *
+      Key_Name log
+      Parser regex
+      Preserve_Key True
+      Reserve_Data True
+  flb_log_cw: "true"
+  output.conf: |
+    [OUTPUT]
+      Name cloudwatch_logs
+      Match *
+      region us-west-2
+      log_group_name /fargate-serverless/fargate-fluentbit-logs20230509014113352200000006
+      log_stream_prefix fargate-logs-
+      auto_create_group true
+  parsers.conf: |
+    [PARSER]
+      Name regex
+      Format regex
+      Regex ^(?<time>[^ ]+) (?<stream>[^ ]+) (?<logtag>[^ ]+) (?<message>.+)$
+      Time_Key time
+      Time_Format %Y-%m-%dT%H:%M:%S.%L%z
+      Time_Keep On
+      Decode_Field_As json message
+immutable: false
+kind: ConfigMap
+metadata:
+  creationTimestamp: "2023-05-08T21:14:52Z"
+  name: aws-logging
+  namespace: aws-observability
+  resourceVersion: "1795"
+  uid: d822bcf5-a441-4996-857e-7fb1357bc07e
+
+
  2. Validate if the CloudWatch LogGroup was created accordingly, and LogStreams were populated.
+
aws logs describe-log-groups --log-group-name-prefix "/fargate-serverless/fargate-fluentbit"
+{
+    "logGroups": [
+        {
+            "logGroupName": "/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006",
+            "creationTime": 1683580491652,
+            "retentionInDays": 90,
+            "metricFilterCount": 0,
+            "arn": "arn:aws:logs:us-west-2:111122223333:log-group:/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006:*",
+            "storedBytes": 0
+        }
+    ]
+}
+
+
aws logs describe-log-streams --log-group-name "/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006" --log-stream-name-prefix fargate-logs --query 'logStreams[].logStreamName'
+[
+    "fargate-logs-flblogs.var.log.fluent-bit.log",
+    "fargate-logs-kube.var.log.containers.aws-load-balancer-controller-7f989fc6c-grjsq_kube-system_aws-load-balancer-controller-feaa22b4cdaa71ecfc8355feb81d4b61ea85598a7bb57aef07667c767c6b98e4.log",
+    "fargate-logs-kube.var.log.containers.aws-load-balancer-controller-7f989fc6c-wzr46_kube-system_aws-load-balancer-controller-69075ea9ab3c7474eac2a1696d3a84a848a151420cd783d79aeef960b181567f.log",
+    "fargate-logs-kube.var.log.containers.coredns-7b7bddbc85-8cxvq_kube-system_coredns-9e4f3ab435269a566bcbaa606c02c146ad58508e67cef09fa87d5c09e4ac0088.log",
+    "fargate-logs-kube.var.log.containers.coredns-7b7bddbc85-gcjwp_kube-system_coredns-11016818361cd68c32bf8f0b1328f3d92a6d7b8cf5879bfe8b301f393cb011cc.log"
+]
+
+

Resources

+

- AWS Docs
- Fluent Bit for Amazon EKS on AWS Fargate Blog Post

diff --git a/v1.9.2/addons/ingress-nginx/index.html b/v1.9.2/addons/ingress-nginx/index.html
new file mode 100644
index 00000000..e38a942d
--- /dev/null
+++ b/v1.9.2/addons/ingress-nginx/index.html
@@ -0,0 +1,893 @@

Ingress Nginx

+

This add-on installs Ingress Nginx Controller on Amazon EKS. The Ingress Nginx controller uses Nginx as a reverse proxy and load balancer.

+

Other than handling Kubernetes ingress objects, this ingress controller can facilitate multi-tenancy and segregation of workload ingresses based on host name (host-based routing) and/or URL path (path-based routing); see the example below.

+

Usage

+

Ingress Nginx Controller can be deployed by enabling the add-on via the following.

+
enable_ingress_nginx = true
+
+

You can optionally customize the Helm chart that deploys ingress-nginx via the following configuration.

+
  enable_ingress_nginx = true
+
+  ingress_nginx = {
+    name          = "ingress-nginx"
+    chart_version = "4.6.1"
+    repository    = "https://kubernetes.github.io/ingress-nginx"
+    namespace     = "ingress-nginx"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+
+

Verify ingress-nginx pods are running.

+
$ kubectl get pods -n ingress-nginx
+NAME                                       READY   STATUS    RESTARTS   AGE
+ingress-nginx-controller-f6c55fdc8-8bt2z   1/1     Running   0          44m
+
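As an illustration of the host- and path-based routing described above, a minimal Ingress sketch (hostnames, backend services, and ports are illustrative):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: multi-tenant-routing       # illustrative
spec:
  ingressClassName: nginx
  rules:
    # Host-based routing: each tenant gets its own hostname
    - host: app-a.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: app-a
                port:
                  number: 80
    # Path-based routing: sub-paths of one host map to different services
    - host: shared.example.com
      http:
        paths:
          - path: /api
            pathType: Prefix
            backend:
              service:
                name: api-backend
                port:
                  number: 8080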
diff --git a/v1.9.2/addons/karpenter/index.html b/v1.9.2/addons/karpenter/index.html
new file mode 100644
index 00000000..6d4831e0
--- /dev/null
+++ b/v1.9.2/addons/karpenter/index.html
@@ -0,0 +1,970 @@

Karpenter

+

Prerequisites

+

If deploying a node template that uses spot, please ensure you have the Spot service-linked role available in your account. You can run the following command to ensure this role is available:

+
aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true
+
+

Validate

+

The following steps update the kubeconfig on your local machine and allow you to interact with your EKS cluster using kubectl to validate the Karpenter deployment.

+
  1. Run the update-kubeconfig command:
+
aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME>
+
+
  2. Test by listing all the pods running currently:
+
kubectl get pods -n karpenter
+
+# Output should look similar to below
+NAME                         READY   STATUS    RESTARTS   AGE
+karpenter-6f97df4f77-5nqsk   1/1     Running   0          3m28s
+karpenter-6f97df4f77-n7fkf   1/1     Running   0          3m28s
+
+
  3. View the current nodes - this example utilizes EKS Fargate for hosting the Karpenter controller, so only Fargate nodes are present currently:
+
kubectl get nodes
+
+# Output should look similar to below
+NAME                                                STATUS   ROLES    AGE     VERSION
+fargate-ip-10-0-29-25.us-west-2.compute.internal    Ready    <none>   2m56s   v1.26.3-eks-f4dc2c0
+fargate-ip-10-0-36-148.us-west-2.compute.internal   Ready    <none>   2m57s   v1.26.3-eks-f4dc2c0
+fargate-ip-10-0-42-30.us-west-2.compute.internal    Ready    <none>   2m34s   v1.26.3-eks-f4dc2c0
+fargate-ip-10-0-45-112.us-west-2.compute.internal   Ready    <none>   2m33s   v1.26.3-eks-f4dc2c0
+
+
  4. Create a sample pause deployment to demonstrate scaling:
+
kubectl apply -f - <<EOF
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: inflate
+spec:
+  replicas: 0
+  selector:
+    matchLabels:
+      app: inflate
+  template:
+    metadata:
+      labels:
+        app: inflate
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: inflate
+          image: public.ecr.aws/eks-distro/kubernetes/pause:3.7
+          resources:
+            requests:
+              cpu: 1
+EOF
+
+
  5. Scale up the sample pause deployment to see Karpenter respond by provisioning nodes to support the workload:
+
kubectl scale deployment inflate --replicas 5
+# To view logs
+# kubectl logs -f -n karpenter -l app.kubernetes.io/name=karpenter -c controller
+
+
  6. Re-check the nodes; you will now see a new EC2 node provisioned to support the scaled workload:
+
kubectl get nodes
+
+# Output should look similar to below
+NAME                                                STATUS   ROLES    AGE     VERSION
+fargate-ip-10-0-29-25.us-west-2.compute.internal    Ready    <none>   5m15s   v1.26.3-eks-f4dc2c0
+fargate-ip-10-0-36-148.us-west-2.compute.internal   Ready    <none>   5m16s   v1.26.3-eks-f4dc2c0
+fargate-ip-10-0-42-30.us-west-2.compute.internal    Ready    <none>   4m53s   v1.26.3-eks-f4dc2c0
+fargate-ip-10-0-45-112.us-west-2.compute.internal   Ready    <none>   4m52s   v1.26.3-eks-f4dc2c0
+ip-10-0-1-184.us-west-2.compute.internal            Ready    <none>   26s     v1.26.2-eks-a59e1f0 # <= new EC2 node launched
+
+
  7. Remove the sample pause deployment:
+
kubectl delete deployment inflate
+
diff --git a/v1.9.2/addons/kube-prometheus-stack/index.html b/v1.9.2/addons/kube-prometheus-stack/index.html
new file mode 100644
index 00000000..fae67cd9
--- /dev/null
+++ b/v1.9.2/addons/kube-prometheus-stack/index.html
@@ -0,0 +1,897 @@

Kube Prometheus Stack

+

Kube Prometheus Stack is a collection of Kubernetes manifests, Grafana dashboards, and Prometheus rules combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus Operator.

+

Usage

+

Kube Prometheus Stack can be deployed by enabling the add-on via the following.

+
enable_kube_prometheus_stack = true
+
+

You can optionally customize the Helm chart that deploys Kube Prometheus Stack via the following configuration.

+
  enable_kube_prometheus_stack = true
+
+  kube_prometheus_stack = {
+    name          = "kube-prometheus-stack"
+    chart_version = "51.2.0"
+    repository    = "https://prometheus-community.github.io/helm-charts"
+    namespace     = "kube-prometheus-stack"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+
+
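The values attribute is the usual place to tune individual components of the stack. A minimal values.yaml sketch (the keys shown come from the upstream kube-prometheus-stack chart and should be verified against your chart version):

# values.yaml -- passed via the `values` attribute above
prometheus:
  prometheusSpec:
    retention: 15d                 # how long Prometheus retains metrics
grafana:
  adminPassword: change-me         # illustrative; source from a secret in practice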

Verify kube-prometheus-stack pods are running.

+
$ kubectl get pods -n kube-prometheus-stack
+NAME                                                        READY   STATUS    RESTARTS       AGE
+alertmanager-kube-prometheus-stack-alertmanager-0           2/2     Running   3 (2d2h ago)   2d7h
+kube-prometheus-stack-grafana-5c6cf88fd9-8wc9k              3/3     Running   3 (2d2h ago)   2d7h
+kube-prometheus-stack-kube-state-metrics-584d8b5d5f-s6p8d   1/1     Running   1 (2d2h ago)   2d7h
+kube-prometheus-stack-operator-c74ddccb5-8cprr              1/1     Running   1 (2d2h ago)   2d7h
+kube-prometheus-stack-prometheus-node-exporter-vd8lw        1/1     Running   1 (2d2h ago)   2d7h
+prometheus-kube-prometheus-stack-prometheus-0               2/2     Running   2 (2d2h ago)   2d7h
+
diff --git a/v1.9.2/addons/metrics-server/index.html b/v1.9.2/addons/metrics-server/index.html
new file mode 100644
index 00000000..db081b61
--- /dev/null
+++ b/v1.9.2/addons/metrics-server/index.html
@@ -0,0 +1,893 @@

Metrics Server

+

Metrics Server is a scalable, efficient source of container resource metrics for Kubernetes built-in autoscaling pipelines.

+

Metrics Server collects resource metrics from Kubelets and exposes them in Kubernetes apiserver through Metrics API for use by Horizontal Pod Autoscaler and Vertical Pod Autoscaler. Metrics API can also be accessed by kubectl top, making it easier to debug autoscaling pipelines.

+

Usage

+

Metrics Server can be deployed by enabling the add-on via the following.

+
enable_metrics_server = true
+
+

You can optionally customize the Helm chart that deploys Metrics Server via the following configuration.

+
  enable_metrics_server = true
+
+  metrics_server = {
+    name          = "metrics-server"
+    chart_version = "3.10.0"
+    repository    = "https://kubernetes-sigs.github.io/metrics-server/"
+    namespace     = "kube-system"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+
+

Verify metrics-server pods are running.

+
$ kubectl get pods -n kube-system
+NAME                                   READY   STATUS    RESTARTS       AGE
+metrics-server-6f9cdd486c-njh8b        1/1     Running   1 (2d2h ago)   2d7h
+
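With the Metrics API served, the built-in Horizontal Pod Autoscaler can consume CPU and memory metrics directly. A minimal sketch (the target Deployment, replica bounds, and threshold are illustrative):

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: my-app                     # illustrative
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: my-app
  minReplicas: 2
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70   # scale out when average CPU crosses 70%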
diff --git a/v1.9.2/addons/opa-gatekeeper/index.html b/v1.9.2/addons/opa-gatekeeper/index.html
new file mode 100644
index 00000000..87fb8d47
--- /dev/null
+++ b/v1.9.2/addons/opa-gatekeeper/index.html
@@ -0,0 +1,889 @@

OPA Gatekeeper

+

Gatekeeper is an admission controller that validates requests to create and update Pods on Kubernetes clusters, using the Open Policy Agent (OPA). Using Gatekeeper allows administrators to define policies with a constraint, which is a set of conditions that permit or deny deployment behaviors in Kubernetes.

+

For complete project documentation, please visit Gatekeeper. For reference templates, refer to Templates.

+

Usage

+

Gatekeeper can be deployed by enabling the add-on via the following.

+
enable_gatekeeper = true
+
+

You can also customize the Helm chart that deploys gatekeeper via the following configuration:

+
  enable_gatekeeper = true
+
+  gatekeeper = {
+    name          = "gatekeeper"
+    chart_version = "3.12.0"
+    repository    = "https://open-policy-agent.github.io/gatekeeper/charts"
+    namespace     = "gatekeeper-system"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+
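Policies are then defined in two parts: a ConstraintTemplate carrying the Rego logic and a constraint applying it. A minimal sketch based on the canonical required-labels example from the Gatekeeper library (the label name and Namespace scope are illustrative):

apiVersion: templates.gatekeeper.sh/v1
kind: ConstraintTemplate
metadata:
  name: k8srequiredlabels
spec:
  crd:
    spec:
      names:
        kind: K8sRequiredLabels
      validation:
        openAPIV3Schema:
          type: object
          properties:
            labels:
              type: array
              items:
                type: string
  targets:
    - target: admission.k8s.gatekeeper.sh
      rego: |
        package k8srequiredlabels

        violation[{"msg": msg}] {
          provided := {label | input.review.object.metadata.labels[label]}
          required := {label | label := input.parameters.labels[_]}
          missing := required - provided
          count(missing) > 0
          msg := sprintf("you must provide labels: %v", [missing])
        }
---
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequiredLabels
metadata:
  name: ns-must-have-owner         # illustrative
spec:
  match:
    kinds:
      - apiGroups: [""]
        kinds: ["Namespace"]
  parameters:
    labels: ["owner"]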
diff --git a/v1.9.2/addons/secrets-store-csi-driver-provider-aws/index.html b/v1.9.2/addons/secrets-store-csi-driver-provider-aws/index.html
new file mode 100644
index 00000000..8ead16b1
--- /dev/null
+++ b/v1.9.2/addons/secrets-store-csi-driver-provider-aws/index.html
@@ -0,0 +1,895 @@

AWS Secrets Manager and Config Provider for Secret Store CSI Driver

+

AWS offers two services to manage secrets and parameters conveniently in your code. AWS Secrets Manager allows you to easily rotate, manage, and retrieve database credentials, API keys, certificates, and other secrets throughout their lifecycle. AWS Systems Manager Parameter Store provides hierarchical storage for configuration data. The AWS provider for the Secrets Store CSI Driver allows you to make secrets stored in Secrets Manager and parameters stored in Parameter Store appear as files mounted in Kubernetes pods.

+

Usage

+

AWS Secrets Store CSI Driver can be deployed by enabling the add-on via the following.

+
enable_secrets_store_csi_driver              = true
+enable_secrets_store_csi_driver_provider_aws = true
+
+

You can optionally customize the Helm chart via the following configuration.

+
  enable_secrets_store_csi_driver              = true
+  enable_secrets_store_csi_driver_provider_aws = true
+
+  secrets_store_csi_driver_provider_aws = {
+    name          = "secrets-store-csi-driver"
+    chart_version = "0.3.2"
+    repository    = "https://aws.github.io/secrets-store-csi-driver-provider-aws"
+    namespace     = "kube-system"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+
+

Verify the secrets-store-csi-driver and provider pods are running.

+
$ kubectl get pods -n kube-system
+NAME                                         READY   STATUS    RESTARTS       AGE
+secrets-store-csi-driver-9l2z8               3/3     Running   1 (2d5h ago)   2d9h
+secrets-store-csi-driver-provider-aws-2qqkk  1/1     Running   1 (2d5h ago)   2d9h
+
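Secrets are surfaced to pods through a SecretProviderClass and a CSI volume mount. A minimal sketch, assuming an illustrative Secrets Manager secret named my-secret and a service account whose IAM role can read it:

apiVersion: secrets-store.csi.x-k8s.io/v1
kind: SecretProviderClass
metadata:
  name: aws-secrets                # illustrative
spec:
  provider: aws
  parameters:
    objects: |
      - objectName: "my-secret"    # Secrets Manager secret name (illustrative)
        objectType: "secretsmanager"
---
apiVersion: v1
kind: Pod
metadata:
  name: secrets-demo               # illustrative
spec:
  serviceAccountName: my-app       # assumed to have IAM access to the secret
  containers:
    - name: app
      image: public.ecr.aws/docker/library/busybox:stable
      command: ["sleep", "3600"]
      volumeMounts:
        - name: secrets
          mountPath: /mnt/secrets
          readOnly: true
  volumes:
    - name: secrets
      csi:
        driver: secrets-store.csi.k8s.io
        readOnly: true
        volumeAttributes:
          secretProviderClass: aws-secrets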
diff --git a/v1.9.2/addons/velero/index.html b/v1.9.2/addons/velero/index.html
new file mode 100644
index 00000000..2f4b618d
--- /dev/null
+++ b/v1.9.2/addons/velero/index.html
@@ -0,0 +1,1028 @@

Velero

+

Velero is an open source tool to safely backup and restore, perform disaster recovery, and migrate Kubernetes cluster resources and persistent volumes.

+ +

Usage

+

Velero can be deployed by enabling the add-on via the following.

+
enable_velero           = true
+velero_backup_s3_bucket = "<YOUR_BUCKET_NAME>"
+velero = {
+  s3_backup_location = "<YOUR_S3_BUCKET_ARN>[/prefix]"
+}
+
+

You can also customize the Helm chart that deploys velero via the following configuration:

+
enable_velero           = true
+
+velero = {
+  name          = "velero"
+  description   = "A Helm chart for velero"
+  chart_version = "3.1.6"
+  repository    = "https://vmware-tanzu.github.io/helm-charts/"
+  namespace     = "velero"
+  values        = [templatefile("${path.module}/values.yaml", {})]
+}
+
+

To see a working example, see the stateful example blueprint.

+

Validate

+
  1. Run the update-kubeconfig command:
+
aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME>
+
+
  2. Test by listing velero resources provisioned:
+
kubectl get all -n velero
+
+# Output should look similar to below
+NAME                         READY   STATUS    RESTARTS   AGE
+pod/velero-7b8994d56-z89sl   1/1     Running   0          25h
+
+NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
+service/velero   ClusterIP   172.20.20.118   <none>        8085/TCP   25h
+
+NAME                     READY   UP-TO-DATE   AVAILABLE   AGE
+deployment.apps/velero   1/1     1            1           25h
+
+NAME                               DESIRED   CURRENT   READY   AGE
+replicaset.apps/velero-7b8994d56   1         1         1       25h
+
+
  3. Get the backup location using the velero CLI:
+
velero backup-location get
+
+# Output should look similar to below
+NAME      PROVIDER   BUCKET/PREFIX                                 PHASE       LAST VALIDATED                  ACCESS MODE   DEFAULT
+default   aws        stateful-20230503175301619800000005/backups   Available   2023-05-04 15:15:00 -0400 EDT   ReadWrite     true
+
+
  4. To demonstrate creating a backup and restoring, create a new namespace and run nginx using the commands below:
+
kubectl create namespace backupdemo
+kubectl run nginx --image=nginx -n backupdemo
+
+
  5. Create a backup of this namespace using velero:
+
velero backup create backup1 --include-namespaces backupdemo
+
+# Output should look similar to below
+Backup request "backup1" submitted successfully.
+Run `velero backup describe backup1` or `velero backup logs backup1` for more details.
+
+
  6. Describe the backup to check the backup status:
+
velero backup describe backup1
+
+# Output should look similar to below
+Name:         backup1
+Namespace:    velero
+Labels:       velero.io/storage-location=default
+Annotations:  velero.io/source-cluster-k8s-gitversion=v1.26.2-eks-a59e1f0
+              velero.io/source-cluster-k8s-major-version=1
+              velero.io/source-cluster-k8s-minor-version=26+
+
+Phase:  Completed
+
+
+Namespaces:
+  Included:  backupdemo
+  Excluded:  <none>
+
+Resources:
+  Included:        *
+  Excluded:        <none>
+  Cluster-scoped:  auto
+
+Label selector:  <none>
+
+Storage Location:  default
+
+Velero-Native Snapshot PVs:  auto
+
+TTL:  720h0m0s
+
+CSISnapshotTimeout:    10m0s
+ItemOperationTimeout:  0s
+
+Hooks:  <none>
+
+Backup Format Version:  1.1.0
+
+Started:    2023-05-04 15:16:31 -0400 EDT
+Completed:  2023-05-04 15:16:33 -0400 EDT
+
+Expiration:  2023-06-03 15:16:31 -0400 EDT
+
+Total items to be backed up:  9
+Items backed up:              9
+
+Velero-Native Snapshots: <none included>
+
+
  7. Delete the namespace - this will be restored using the backup created:
+
kubectl delete namespace backupdemo
+
+
  8. Restore the namespace from your backup:
+
velero restore create --from-backup backup1
+
+
  9. Verify that the namespace is restored:
+
kubectl get all -n backupdemo
+
+# Output should look similar to below
+NAME        READY   STATUS    RESTARTS   AGE
+pod/nginx   1/1     Running   0          21s
+
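Beyond one-off backups, Velero also supports recurring backups through Schedule resources, which reuse the same backup spec. A minimal sketch of a daily schedule for the demo namespace (the name and cron expression are illustrative):

apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: backupdemo-daily           # illustrative
  namespace: velero
spec:
  schedule: "0 3 * * *"            # every day at 03:00
  template:
    includedNamespaces:
      - backupdemo
    ttl: 720h0m0s                  # keep each backup for 30 days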
diff --git a/v1.9.2/addons/vertical-pod-autoscaler/index.html b/v1.9.2/addons/vertical-pod-autoscaler/index.html
new file mode 100644
index 00000000..09023ccd
--- /dev/null
+++ b/v1.9.2/addons/vertical-pod-autoscaler/index.html
@@ -0,0 +1,888 @@

Vertical Pod Autoscaler

+

Vertical Pod Autoscaler (VPA) automatically adjusts the CPU and memory reservations for your pods to help "right size" your applications. When configured, it will automatically request the necessary reservations based on usage and thus allow proper scheduling onto nodes so that the appropriate resource amount is available for each pod. It will also maintain ratios between limits and requests that were specified in initial container configuration.

+

NOTE: The Metrics Server add-on is a dependency for this add-on.

+

Usage

+

This step deploys the Vertical Pod Autoscaler with the default Helm chart config.

+
  enable_vpa            = true
+  enable_metrics_server = true
+
+

You can also customize the Helm chart that deploys vpa via the following configuration:

+
  enable_vpa = true
+  enable_metrics_server = true
+
+  vpa = {
+    name          = "vpa"
+    chart_version = "1.7.5"
+    repository    = "https://charts.fairwinds.com/stable"
+    namespace     = "vpa"
+    values        = [templatefile("${path.module}/values.yaml", {})]
+  }
+
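Once installed, scaling behavior is requested per workload through a VerticalPodAutoscaler object. A minimal sketch targeting a Deployment (the target name and update mode are illustrative):

apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: my-app-vpa                 # illustrative
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: my-app
  updatePolicy:
    updateMode: "Auto"             # use "Off" to only record recommendations

In Auto mode the VPA updater evicts pods so they can be re-admitted with the recommended requests; with updateMode set to Off, recommendations are still visible via kubectl describe vpa.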
diff --git a/v1.9.2/amazon-eks-addons/index.html b/v1.9.2/amazon-eks-addons/index.html
new file mode 100644
index 00000000..5e9c2113
--- /dev/null
+++ b/v1.9.2/amazon-eks-addons/index.html
@@ -0,0 +1,1133 @@

Amazon EKS Add-ons

+

The Amazon EKS add-on implementation is generic and can be used to deploy any add-on supported by the EKS API, whether native EKS add-ons or third-party add-ons supplied via the AWS Marketplace.

+

See the EKS documentation for more details on EKS add-ons, including the list of Amazon EKS add-ons from Amazon EKS, as well as Additional Amazon EKS add-ons from independent software vendors.

+

Architecture Support

+

The Amazon EKS provided add-ons listed below support both x86_64/amd64 and arm64 architectures. Third party add-ons that are available via the AWS Marketplace will vary based on the support provided by the add-on vendor. No additional changes are required to add-on configurations when switching between x86_64/amd64 and arm64 architectures; Amazon EKS add-ons utilize multi-architecture container images to support this functionality.

| Add-on              | x86_64/amd64 | arm64 |
|---------------------|--------------|-------|
| vpc-cni             | ✓            | ✓     |
| aws-ebs-csi-driver  | ✓            | ✓     |
| coredns             | ✓            | ✓     |
| kube-proxy          | ✓            | ✓     |
| adot                | ✓            | ✓     |
| aws-guardduty-agent | ✓            | ✓     |
+

Usage

+

The Amazon EKS add-ons are provisioned via a generic interface behind the eks_addons argument, which accepts a map of add-on configurations. The generic interface for an add-on is defined below for reference:

+
module "eks_blueprints_addons" {
+  source = "aws-ia/eks-blueprints-addons/aws"
+
+  # ... truncated for brevity
+
+  eks_addons = {
+    <key> = {
+      name = string # Optional - <key> is used if `name` is not set
+
+      most_recent          = bool
+      addon_version        = string # overrides `most_recent` if set
+      configuration_values = string # JSON string
+
+      preserve                    = bool # defaults to `true`
+      resolve_conflicts_on_create = string # defaults to `OVERWRITE`
+      resolve_conflicts_on_update = string # defaults to `OVERWRITE`
+
+      timeouts = {
+        create = string # optional
+        update = string # optional
+        delete = string # optional
+      }
+
+      tags = map(string)
+    }
+  }
+}
+
+

Example

+
module "eks_blueprints_addons" {
+  source = "aws-ia/eks-blueprints-addons/aws"
+
+  # ... truncated for brevity
+
+  eks_addons = {
+    # Amazon EKS add-ons
+    aws-ebs-csi-driver = {
+      most_recent              = true
+      service_account_role_arn = module.ebs_csi_driver_irsa.iam_role_arn
+    }
+
+    coredns = {
+      most_recent = true
+
+      timeouts = {
+        create = "25m"
+        delete = "10m"
+      }
+    }
+
+    vpc-cni = {
+      most_recent              = true
+      service_account_role_arn = module.vpc_cni_irsa.iam_role_arn
+    }
+
+    kube-proxy = {}
+
+    # Third party add-ons via AWS Marketplace
+    kubecost_kubecost = {
+      most_recent = true
+    }
+
+    teleport_teleport = {
+      most_recent = true
+    }
+  }
+}
+
+

Configuration Values

+

You can supply custom configuration values to each add-on via the configuration_values argument of the add-on definition. The value provided must be a JSON encoded string and adhere to the JSON schema provided by the version of the add-on. You can view this schema using the awscli by supplying the add-on name and version to the describe-addon-configuration command:

+
aws eks describe-addon-configuration \
+ --addon-name coredns \
+ --addon-version v1.8.7-eksbuild.2 \
+ --query 'configurationSchema' \
+ --output text | jq
+
+

Which returns the formatted JSON schema like below:

+
{
+  "$ref": "#/definitions/Coredns",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "definitions": {
+    "Coredns": {
+      "additionalProperties": false,
+      "properties": {
+        "computeType": {
+          "type": "string"
+        },
+        "corefile": {
+          "description": "Entire corefile contents to use with installation",
+          "type": "string"
+        },
+        "nodeSelector": {
+          "additionalProperties": {
+            "type": "string"
+          },
+          "type": "object"
+        },
+        "replicaCount": {
+          "type": "integer"
+        },
+        "resources": {
+          "$ref": "#/definitions/Resources"
+        }
+      },
+      "title": "Coredns",
+      "type": "object"
+    },
+    "Limits": {
+      "additionalProperties": false,
+      "properties": {
+        "cpu": {
+          "type": "string"
+        },
+        "memory": {
+          "type": "string"
+        }
+      },
+      "title": "Limits",
+      "type": "object"
+    },
+    "Resources": {
+      "additionalProperties": false,
+      "properties": {
+        "limits": {
+          "$ref": "#/definitions/Limits"
+        },
+        "requests": {
+          "$ref": "#/definitions/Limits"
+        }
+      },
+      "title": "Resources",
+      "type": "object"
+    }
+  }
+}
+
+

You can supply the configuration values to the add-on by passing a map of the values wrapped in the jsonencode() function as shown below:

+
module "eks_blueprints_addons" {
+  source = "aws-ia/eks-blueprints-addons/aws"
+
+  # ... truncated for brevity
+
+  eks_addons = {
+    coredns = {
+      most_recent = true
+
+      configuration_values = jsonencode({
+        replicaCount = 4
+        resources = {
+          limits = {
+            cpu    = "100m"
+            memory = "150Mi"
+          }
+          requests = {
+            cpu    = "100m"
+            memory = "150Mi"
+          }
+        }
+      })
+    }
+  }
+}
+
diff --git a/v1.9.2/architectures/index.html b/v1.9.2/architectures/index.html
new file mode 100644
index 00000000..5a2866b9
--- /dev/null
+++ b/v1.9.2/architectures/index.html
@@ -0,0 +1,1053 @@

Architectures

+ +

Addons

| Addon                                 | x86_64/amd64 | arm64 |
|---------------------------------------|--------------|-------|
| Argo Rollouts                         | ✓            | ✓     |
| Argo Workflows                        | ✓            | ✓     |
| Argo CD                               | ✓            | ✓     |
| AWS CloudWatch Metrics                | ✓            | ✓     |
| AWS EFS CSI Driver                    | ✓            | ✓     |
| AWS for FluentBit                     | ✓            | ✓     |
| AWS FSx CSI Driver                    | ✓            | ✓     |
| AWS Load Balancer Controller          | ✓            | ✓     |
| AWS Node Termination Handler          | ✓            | ✓     |
| AWS Private CA Issuer                 | ✓            | ✓     |
| Cert Manager                          | ✓            | ✓     |
| Cluster Autoscaler                    | ✓            | ✓     |
| Cluster Proportional Autoscaler       | ✓            | ✓     |
| External DNS                          | ✓            | ✓     |
| External Secrets                      | ✓            | ✓     |
| OPA Gatekeeper                        | ✓            | ✓     |
| Ingress Nginx                         | ✓            | ✓     |
| Karpenter                             | ✓            | ✓     |
| Kube-Prometheus Stack                 | ✓            | ✓     |
| Metrics Server                        | ✓            | ✓     |
| Secrets Store CSI Driver              | ✓            | ✓     |
| Secrets Store CSI Driver Provider AWS | ✓            | ✓     |
| Velero                                | ✓            | ✓     |
| Vertical Pod Autoscaler               | ✓            | ✓     |
+

Amazon EKS Addons

+

The Amazon EKS provided add-ons listed below support both x86_64/amd64 and arm64 architectures. Third party add-ons that are available via the AWS Marketplace will vary based on the support provided by the add-on vendor. No additional changes are required to add-on configurations when switching between x86_64/amd64 and arm64 architectures; Amazon EKS add-ons utilize multi-architecture container images to support this functionality. These addons are specified via the eks_addons input variable.

| Addon               | x86_64/amd64 | arm64 |
|---------------------|--------------|-------|
| AWS VPC CNI         | ✓            | ✓     |
| AWS EBS CSI Driver  | ✓            | ✓     |
| CoreDNS             | ✓            | ✓     |
| Kube-proxy          | ✓            | ✓     |
| ADOT Collector      | ✓            | ✓     |
| AWS GuardDuty Agent | ✓            | ✓     |
diff --git a/v1.9.2/assets/images/favicon.png b/v1.9.2/assets/images/favicon.png
new file mode 100644
index 00000000..1cf13b9f
Binary files /dev/null and b/v1.9.2/assets/images/favicon.png differ
diff --git a/v1.9.2/assets/javascripts/bundle.19047be9.min.js b/v1.9.2/assets/javascripts/bundle.19047be9.min.js
new file mode 100644
index 00000000..0e09ba9a
@@ -0,0 +1,29 @@
0&&(r=1/0),n===void 0&&(n=1/0),o===void 0&&(o=Et);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=n,i._timestampProvider=o,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=n===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,n),i}return t.prototype.next=function(r){var n=this,o=n.isStopped,i=n._buffer,s=n._infiniteTimeWindow,a=n._timestampProvider,c=n._windowTime;o||(i.push(r),!s&&i.push(a.now()+c)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var n=this._innerSubscribe(r),o=this,i=o._infiniteTimeWindow,s=o._buffer,a=s.slice(),c=0;c0?e.prototype.requestAsyncId.call(this,r,n,o):(r.actions.push(this),r._scheduled||(r._scheduled=ut.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,n,o){var i;if(o===void 0&&(o=0),o!=null?o>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,n,o);var s=r.actions;n!=null&&((i=s[s.length-1])===null||i===void 0?void 0:i.id)!==n&&(ut.cancelAnimationFrame(n),r._scheduled=void 0)},t}(Wt);var Tn=function(e){ie(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var n=this._scheduled;this._scheduled=void 0;var o=this.actions,i;r=r||o.shift();do if(i=r.execute(r.state,r.delay))break;while((r=o[0])&&r.id===n&&o.shift());if(this._active=!1,i){for(;(r=o[0])&&r.id===n&&o.shift();)r.unsubscribe();throw i}},t}(Dt);var Te=new Tn(Sn);var _=new F(function(e){return e.complete()});function Vt(e){return e&&C(e.schedule)}function Cr(e){return e[e.length-1]}function Ye(e){return C(Cr(e))?e.pop():void 0}function Oe(e){return Vt(Cr(e))?e.pop():void 0}function zt(e,t){return typeof Cr(e)=="number"?e.pop():t}var pt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Nt(e){return C(e==null?void 0:e.then)}function qt(e){return C(e[ft])}function Kt(e){return Symbol.asyncIterator&&C(e==null?void 0:e[Symbol.asyncIterator])}function Qt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Ni(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var Yt=Ni();function Gt(e){return C(e==null?void 0:e[Yt])}function Bt(e){return pn(this,arguments,function(){var r,n,o,i;return $t(this,function(s){switch(s.label){case 0:r=e.getReader(),s.label=1;case 1:s.trys.push([1,,9,10]),s.label=2;case 2:return[4,et(r.read())];case 3:return n=s.sent(),o=n.value,i=n.done,i?[4,et(void 0)]:[3,5];case 4:return[2,s.sent()];case 5:return[4,et(o)];case 6:return[4,s.sent()];case 7:return s.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function Jt(e){return C(e==null?void 0:e.getReader)}function U(e){if(e instanceof F)return e;if(e!=null){if(qt(e))return qi(e);if(pt(e))return Ki(e);if(Nt(e))return Qi(e);if(Kt(e))return On(e);if(Gt(e))return Yi(e);if(Jt(e))return Gi(e)}throw Qt(e)}function qi(e){return new F(function(t){var r=e[ft]();if(C(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Ki(e){return new F(function(t){for(var r=0;r=2;return function(n){return n.pipe(e?A(function(o,i){return e(o,i,n)}):de,ge(1),r?He(t):Vn(function(){return new Zt}))}}function zn(){for(var e=[],t=0;t=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new x}:t,n=e.resetOnError,o=n===void 0?!0:n,i=e.resetOnComplete,s=i===void 0?!0:i,a=e.resetOnRefCountZero,c=a===void 0?!0:a;return function(f){var u,p,m,d=0,h=!1,v=!1,G=function(){p==null||p.unsubscribe(),p=void 0},oe=function(){G(),u=m=void 0,h=v=!1},N=function(){var T=u;oe(),T==null||T.unsubscribe()};return y(function(T,Qe){d++,!v&&!h&&G();var De=m=m!=null?m:r();Qe.add(function(){d--,d===0&&!v&&!h&&(p=$r(N,c))}),De.subscribe(Qe),!u&&d>0&&(u=new rt({next:function($e){return De.next($e)},error:function($e){v=!0,G(),p=$r(oe,o,$e),De.error($e)},complete:function(){h=!0,G(),p=$r(oe,s),De.complete()}}),U(T).subscribe(u))})(f)}}function $r(e,t){for(var r=[],n=2;ne.next(document)),e}function K(e,t=document){return Array.from(t.querySelectorAll(e))}function z(e,t=document){let r=ce(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function ce(e,t=document){return t.querySelector(e)||void 0}function _e(){return document.activeElement instanceof HTMLElement&&document.activeElement||void 0}function tr(e){return L(b(document.body,"focusin"),b(document.body,"focusout")).pipe(ke(1),l(()=>{let t=_e();return typeof t!="undefined"?e.contains(t):!1}),V(e===_e()),B())}function Xe(e){return{x:e.offsetLeft,y:e.offsetTop}}function Qn(e){return L(b(window,"load"),b(window,"resize")).pipe(Ce(0,Te),l(()=>Xe(e)),V(Xe(e)))}function rr(e){return{x:e.scrollLeft,y:e.scrollTop}}function dt(e){return L(b(e,"scroll"),b(window,"resize")).pipe(Ce(0,Te),l(()=>rr(e)),V(rr(e)))}var Gn=function(){if(typeof Map!="undefined")return Map;function e(t,r){var n=-1;return t.some(function(o,i){return o[0]===r?(n=i,!0):!1}),n}return function(){function t(){this.__entries__=[]}return Object.defineProperty(t.prototype,"size",{get:function(){return this.__entries__.length},enumerable:!0,configurable:!0}),t.prototype.get=function(r){var n=e(this.__entries__,r),o=this.__entries__[n];return o&&o[1]},t.prototype.set=function(r,n){var o=e(this.__entries__,r);~o?this.__entries__[o][1]=n:this.__entries__.push([r,n])},t.prototype.delete=function(r){var 
n=this.__entries__,o=e(n,r);~o&&n.splice(o,1)},t.prototype.has=function(r){return!!~e(this.__entries__,r)},t.prototype.clear=function(){this.__entries__.splice(0)},t.prototype.forEach=function(r,n){n===void 0&&(n=null);for(var o=0,i=this.__entries__;o0},e.prototype.connect_=function(){!Dr||this.connected_||(document.addEventListener("transitionend",this.onTransitionEnd_),window.addEventListener("resize",this.refresh),ga?(this.mutationsObserver_=new MutationObserver(this.refresh),this.mutationsObserver_.observe(document,{attributes:!0,childList:!0,characterData:!0,subtree:!0})):(document.addEventListener("DOMSubtreeModified",this.refresh),this.mutationEventsAdded_=!0),this.connected_=!0)},e.prototype.disconnect_=function(){!Dr||!this.connected_||(document.removeEventListener("transitionend",this.onTransitionEnd_),window.removeEventListener("resize",this.refresh),this.mutationsObserver_&&this.mutationsObserver_.disconnect(),this.mutationEventsAdded_&&document.removeEventListener("DOMSubtreeModified",this.refresh),this.mutationsObserver_=null,this.mutationEventsAdded_=!1,this.connected_=!1)},e.prototype.onTransitionEnd_=function(t){var r=t.propertyName,n=r===void 0?"":r,o=va.some(function(i){return!!~n.indexOf(i)});o&&this.refresh()},e.getInstance=function(){return this.instance_||(this.instance_=new e),this.instance_},e.instance_=null,e}(),Bn=function(e,t){for(var r=0,n=Object.keys(t);r0},e}(),Xn=typeof WeakMap!="undefined"?new WeakMap:new Gn,Zn=function(){function e(t){if(!(this instanceof e))throw new TypeError("Cannot call a class as a function.");if(!arguments.length)throw new TypeError("1 argument required, but only 0 present.");var r=ya.getInstance(),n=new Aa(t,r,this);Xn.set(this,n)}return e}();["observe","unobserve","disconnect"].forEach(function(e){Zn.prototype[e]=function(){var t;return(t=Xn.get(this))[e].apply(t,arguments)}});var Ca=function(){return typeof nr.ResizeObserver!="undefined"?nr.ResizeObserver:Zn}(),eo=Ca;var to=new x,Ra=$(()=>k(new eo(e=>{for(let t of e)to.next(t)}))).pipe(g(e=>L(ze,k(e)).pipe(R(()=>e.disconnect()))),J(1));function he(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ye(e){return Ra.pipe(S(t=>t.observe(e)),g(t=>to.pipe(A(({target:r})=>r===e),R(()=>t.unobserve(e)),l(()=>he(e)))),V(he(e)))}function bt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function ar(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}var ro=new x,ka=$(()=>k(new IntersectionObserver(e=>{for(let t of e)ro.next(t)},{threshold:0}))).pipe(g(e=>L(ze,k(e)).pipe(R(()=>e.disconnect()))),J(1));function sr(e){return ka.pipe(S(t=>t.observe(e)),g(t=>ro.pipe(A(({target:r})=>r===e),R(()=>t.unobserve(e)),l(({isIntersecting:r})=>r))))}function no(e,t=16){return dt(e).pipe(l(({y:r})=>{let n=he(e),o=bt(e);return r>=o.height-n.height-t}),B())}var cr={drawer:z("[data-md-toggle=drawer]"),search:z("[data-md-toggle=search]")};function oo(e){return cr[e].checked}function Ke(e,t){cr[e].checked!==t&&cr[e].click()}function Ue(e){let t=cr[e];return b(t,"change").pipe(l(()=>t.checked),V(t.checked))}function Ha(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Pa(){return L(b(window,"compositionstart").pipe(l(()=>!0)),b(window,"compositionend").pipe(l(()=>!1))).pipe(V(!1))}function io(){let 
e=b(window,"keydown").pipe(A(t=>!(t.metaKey||t.ctrlKey)),l(t=>({mode:oo("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),A(({mode:t,type:r})=>{if(t==="global"){let n=_e();if(typeof n!="undefined")return!Ha(n,r)}return!0}),pe());return Pa().pipe(g(t=>t?_:e))}function le(){return new URL(location.href)}function ot(e){location.href=e.href}function ao(){return new x}function so(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)so(e,r)}function M(e,t,...r){let n=document.createElement(e);if(t)for(let o of Object.keys(t))typeof t[o]!="undefined"&&(typeof t[o]!="boolean"?n.setAttribute(o,t[o]):n.setAttribute(o,""));for(let o of r)so(n,o);return n}function fr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function co(){return location.hash.substring(1)}function Vr(e){let t=M("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function $a(e){return L(b(window,"hashchange"),e).pipe(l(co),V(co()),A(t=>t.length>0),J(1))}function fo(e){return $a(e).pipe(l(t=>ce(`[id="${t}"]`)),A(t=>typeof t!="undefined"))}function zr(e){let t=matchMedia(e);return er(r=>t.addListener(()=>r(t.matches))).pipe(V(t.matches))}function uo(){let e=matchMedia("print");return L(b(window,"beforeprint").pipe(l(()=>!0)),b(window,"afterprint").pipe(l(()=>!1))).pipe(V(e.matches))}function Nr(e,t){return e.pipe(g(r=>r?t():_))}function ur(e,t={credentials:"same-origin"}){return ue(fetch(`${e}`,t)).pipe(fe(()=>_),g(r=>r.status!==200?Tt(()=>new Error(r.statusText)):k(r)))}function We(e,t){return ur(e,t).pipe(g(r=>r.json()),J(1))}function po(e,t){let r=new DOMParser;return ur(e,t).pipe(g(n=>n.text()),l(n=>r.parseFromString(n,"text/xml")),J(1))}function pr(e){let t=M("script",{src:e});return $(()=>(document.head.appendChild(t),L(b(t,"load"),b(t,"error").pipe(g(()=>Tt(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(l(()=>{}),R(()=>document.head.removeChild(t)),ge(1))))}function lo(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function mo(){return L(b(window,"scroll",{passive:!0}),b(window,"resize",{passive:!0})).pipe(l(lo),V(lo()))}function ho(){return{width:innerWidth,height:innerHeight}}function bo(){return b(window,"resize",{passive:!0}).pipe(l(ho),V(ho()))}function vo(){return Q([mo(),bo()]).pipe(l(([e,t])=>({offset:e,size:t})),J(1))}function lr(e,{viewport$:t,header$:r}){let n=t.pipe(Z("size")),o=Q([n,r]).pipe(l(()=>Xe(e)));return Q([r,t,o]).pipe(l(([{height:i},{offset:s,size:a},{x:c,y:f}])=>({offset:{x:s.x-c,y:s.y-f+i},size:a})))}(()=>{function e(n,o){parent.postMessage(n,o||"*")}function t(...n){return n.reduce((o,i)=>o.then(()=>new Promise(s=>{let a=document.createElement("script");a.src=i,a.onload=s,document.body.appendChild(a)})),Promise.resolve())}var r=class extends EventTarget{constructor(n){super(),this.url=n,this.m=i=>{i.source===this.w&&(this.dispatchEvent(new MessageEvent("message",{data:i.data})),this.onmessage&&this.onmessage(i))},this.e=(i,s,a,c,f)=>{if(s===`${this.url}`){let u=new ErrorEvent("error",{message:i,filename:s,lineno:a,colno:c,error:f});this.dispatchEvent(u),this.onerror&&this.onerror(u)}};let o=document.createElement("iframe");o.hidden=!0,document.body.appendChild(this.iframe=o),this.w.document.open(),this.w.document.write(` + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + +
+ + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

AWS Partner Addons


The following add-ons are provided by AWS Partners for use with Amazon EKS Blueprints for Terraform. Please see the respective addon repository for more information about each add-on and its supported configuration values, as well as for questions, comments, and feature requests.

| Addon | Description |
|-------|-------------|
| Ondat | Ondat is a Kubernetes-native storage platform that enables stateful applications to run on Kubernetes. |
| Hashicorp - Consul | Consul is a service networking solution to automate network configurations, discover services, and enable secure connectivity across any cloud or runtime. |
| Hashicorp - Vault | Vault secures, stores, and tightly controls access to tokens, passwords, certificates, API keys, and other secrets in modern computing. |
| Sysdig | Sysdig CNAPP helps you stop cloud and container security attacks with no wasted time. |
| Tetrate Istio | Tetrate Istio Distro is an open source project from Tetrate that provides vetted builds of Istio tested against all major cloud platforms. |
| NetApp ONTAP Astra Trident | NetApp's Astra Trident provides dynamic storage orchestration for FSx for NetApp ONTAP using a Container Storage Interface (CSI) compliant driver. |
| Kong Konnect Gateway | Kong Gateway is the fastest and most adopted API gateway that integrates with Kong Konnect, the end-to-end SaaS API lifecycle management platform. |
| Kong Konnect Kong Ingress Controller | Kong Ingress Controller combines the powerful features of the widely popular Kong Gateway with Kubernetes in a truly Kubernetes-native manner, and is now integrated with Kong Konnect, the end-to-end SaaS API lifecycle management platform. |
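
Because these partner add-ons ship as standard Helm charts, one common way to provision them alongside your other add-ons is the helm_releases variable of this module (see the Helm Release Add-ons page). A hypothetical sketch follows; the chart name, version, and repository URL are placeholders that should be replaced with the values documented in the partner's repository:

```hcl
module "addons" {
  source  = "aws-ia/eks-blueprints-addons/aws"
  version = "~> 1.0"

  # ... cluster_name, cluster_endpoint, cluster_version, oidc_provider_arn ...

  helm_releases = {
    partner-addon = {
      description      = "Example partner add-on installed from its own Helm repository"
      namespace        = "partner-addon"
      create_namespace = true
      chart            = "partner-addon"              # placeholder chart name
      chart_version    = "1.0.0"                      # placeholder version
      repository       = "https://charts.example.com" # placeholder repository URL
    }
  }
}
```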
\ No newline at end of file
diff --git a/v1.9.2/helm-release/index.html b/v1.9.2/helm-release/index.html
new file mode 100644
index 00000000..bb401193
--- /dev/null
+++ b/v1.9.2/helm-release/index.html
@@ -0,0 +1,973 @@

Helm Release Add-ons

Starting with EKS Blueprints v5, we made the decision to support provisioning only a core set of add-ons. On an ongoing basis, we will evaluate the current list to see if more add-ons need to be supported via this repo. Typically, you can expect that any AWS-created add-on that is not yet available via Amazon EKS add-ons will be prioritized for provisioning through this repository.

In addition to these AWS add-ons, we will also support the provisioning of certain OSS add-ons that we think customers will benefit from. These are selected based on customer demand (e.g., metrics-server) and on certain patterns (e.g., GitOps) that are foundational elements for a complete blueprint of an EKS cluster.

One of the reasons customers pick Kubernetes is its strong commercial and open-source software ecosystem, and they often want to provision add-ons that are not natively supported by EKS Blueprints. For such add-ons, the options are as follows:


With helm_release Terraform Resource

The helm_release resource is the most fundamental way to provision a Helm chart with Terraform.

Use this resource if you need to control the lifecycle of your add-ons down to the level of each individual add-on resource.
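
For reference, below is a minimal sketch of provisioning a single chart this way, assuming the Helm provider is already configured against your cluster; the chart, repository, and overridden value are illustrative:

```hcl
# Minimal standalone Helm release managed directly by Terraform
resource "helm_release" "metrics_server" {
  name       = "metrics-server"
  repository = "https://kubernetes-sigs.github.io/metrics-server/"
  chart      = "metrics-server"
  namespace  = "kube-system"

  # Override an individual chart value
  set {
    name  = "replicas"
    value = "2"
  }
}
```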


With helm_releases Variable

You can use the helm_releases variable of EKS Blueprints Add-ons to provide a map of add-ons and their respective Helm configuration. Under the hood, we simply iterate over the provided map and pass each configuration to the Terraform helm_release resource.

For example:

```hcl
module "addons" {
  source  = "aws-ia/eks-blueprints-addons/aws"
  version = "~> 1.0"

  cluster_name      = "<cluster_name>"
  cluster_endpoint  = "<cluster_endpoint>"
  cluster_version   = "<cluster_version>"
  oidc_provider_arn = "<oidc_provider_arn>"

  # EKS add-ons
  eks_addons = {
    coredns    = {}
    vpc-cni    = {}
    kube-proxy = {}
  }

  # Blueprints add-ons
  enable_aws_efs_csi_driver     = true
  enable_aws_cloudwatch_metrics = true
  enable_cert_manager           = true
  ...

  # Pass in any number of Helm charts to be created for those that are not natively supported
  helm_releases = {
    prometheus-adapter = {
      description      = "A Helm chart for k8s prometheus adapter"
      namespace        = "prometheus-adapter"
      create_namespace = true
      chart            = "prometheus-adapter"
      chart_version    = "4.2.0"
      repository       = "https://prometheus-community.github.io/helm-charts"
      values = [
        <<-EOT
          replicas: 2
          podDisruptionBudget:
            enabled: true
        EOT
      ]
    }
    gpu-operator = {
      description      = "A Helm chart for NVIDIA GPU operator"
      namespace        = "gpu-operator"
      create_namespace = true
      chart            = "gpu-operator"
      chart_version    = "v23.3.2"
      repository       = "https://nvidia.github.io/gpu-operator"
      values = [
        <<-EOT
          operator:
            defaultRuntime: containerd
        EOT
      ]
    }
  }

  tags = local.tags
}
```

With this pattern, the lifecycle of all your add-ons is tied to that of the addons module. This allows you to easily target the addons module in your terraform apply and destroy commands. For example:

```sh
terraform apply -target=module.addons

terraform destroy -target=module.addons
```

With EKS Blueprints Addon Module

If you have an add-on that requires an IAM Role for Service Accounts (IRSA), we have created a new Terraform module, terraform-aws-eks-blueprints-addon, that can help provision a Helm chart along with an IAM role and policies with the permissions required for the add-on to function properly. We use this module for all of the add-ons that are provisioned by EKS Blueprints Add-ons today.
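
A rough sketch of consuming this module is shown below. The argument names follow the module's README at the time of writing, but treat them as indicative and verify against the module documentation; the chart details and the IAM policy reference are placeholders:

```hcl
module "cluster_autoscaler" {
  source  = "aws-ia/eks-blueprints-addon/aws"
  version = "~> 1.0"

  chart         = "cluster-autoscaler"
  chart_version = "9.29.0" # illustrative version
  repository    = "https://kubernetes.github.io/autoscaler"
  namespace     = "kube-system"

  # IRSA: create an IAM role, attach policies, and trust the cluster's OIDC provider
  create_role = true
  role_name   = "cluster-autoscaler"
  role_policies = {
    # Placeholder: an IAM policy defined elsewhere in your configuration
    this = aws_iam_policy.cluster_autoscaler.arn
  }

  oidc_providers = {
    this = {
      provider_arn    = module.eks.oidc_provider_arn
      service_account = "cluster-autoscaler"
    }
  }
}
```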

You can optionally use this module for add-ons that do not need IRSA, or even just to create the IAM resources for IRSA and skip the Helm release entirely (see the sketch below). Detailed usage of how to consume this module can be found in its README.
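
As an example of the latter, a short sketch that only creates the IRSA resources; the create_release flag name is taken from the module's README at the time of writing and should be verified, and all names below are placeholders:

```hcl
module "my_addon_irsa" {
  source  = "aws-ia/eks-blueprints-addon/aws"
  version = "~> 1.0"

  # Skip the Helm release; only create the IAM role and policy attachments for IRSA
  create_release = false

  create_role = true
  role_name   = "my-addon" # placeholder role name
  role_policies = {
    this = aws_iam_policy.my_addon.arn # placeholder policy
  }

  oidc_providers = {
    this = {
      provider_arn    = module.eks.oidc_provider_arn
      service_account = "my-addon" # placeholder service account
    }
  }
}
```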

This pattern can also be used to create a Terraform module that wraps a set of add-ons not supported by EKS Blueprints Add-ons today in a single module definition. An example of this is the ACK add-ons repository, which is a collection of ACK Helm chart deployments with IRSA for each of the ACK controllers.

\ No newline at end of file
diff --git a/v1.9.2/images/colored-logo.png b/v1.9.2/images/colored-logo.png
new file mode 100644
index 00000000..d49129e3
Binary files /dev/null and b/v1.9.2/images/colored-logo.png differ
diff --git a/v1.9.2/images/white-logo.png b/v1.9.2/images/white-logo.png
new file mode 100644
index 00000000..004fcf1a
Binary files /dev/null and b/v1.9.2/images/white-logo.png differ
diff --git a/v1.9.2/index.html b/v1.9.2/index.html
new file mode 100644
index 00000000..dd47e9a1
--- /dev/null
+++ b/v1.9.2/index.html
@@ -0,0 +1,2062 @@

Amazon EKS Blueprints Addons


Terraform module to deploy Kubernetes addons on Amazon EKS clusters.


Usage

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws"

  cluster_name    = "my-cluster"
  cluster_version = "1.27"

  ... truncated for brevity
}

module "eks_blueprints_addons" {
  source  = "aws-ia/eks-blueprints-addons/aws"
  version = "~> 1.0" # ensure to update this to the latest/desired version

  cluster_name      = module.eks.cluster_name
  cluster_endpoint  = module.eks.cluster_endpoint
  cluster_version   = module.eks.cluster_version
  oidc_provider_arn = module.eks.oidc_provider_arn

  eks_addons = {
    aws-ebs-csi-driver = {
      most_recent = true
    }
    coredns = {
      most_recent = true
    }
    vpc-cni = {
      most_recent = true
    }
    kube-proxy = {
      most_recent = true
    }
  }

  enable_aws_load_balancer_controller    = true
  enable_cluster_proportional_autoscaler = true
  enable_karpenter                       = true
  enable_kube_prometheus_stack           = true
  enable_metrics_server                  = true
  enable_external_dns                    = true
  enable_cert_manager                    = true
  cert_manager_route53_hosted_zone_arns  = ["arn:aws:route53:::hostedzone/XXXXXXXXXXXXX"]

  tags = {
    Environment = "dev"
  }
}
```

Requirements

| Name | Version |
|------|---------|
| terraform | >= 1.0 |
| aws | >= 5.0 |
| helm | >= 2.9 |
| kubernetes | >= 2.20 |
| time | >= 0.9 |

Providers

| Name | Version |
|------|---------|
| aws | >= 5.0 |
| helm | >= 2.9 |
| kubernetes | >= 2.20 |
| time | >= 0.9 |

Modules

| Name | Source | Version |
|------|--------|---------|
| argo_events | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| argo_rollouts | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| argo_workflows | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| argocd | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| aws_cloudwatch_metrics | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| aws_efs_csi_driver | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| aws_for_fluentbit | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| aws_fsx_csi_driver | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| aws_gateway_api_controller | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| aws_load_balancer_controller | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| aws_node_termination_handler | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| aws_node_termination_handler_sqs | terraform-aws-modules/sqs/aws | 4.0.1 |
| aws_privateca_issuer | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| cert_manager | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| cluster_autoscaler | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| cluster_proportional_autoscaler | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| external_dns | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| external_secrets | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| gatekeeper | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| ingress_nginx | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| karpenter | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| karpenter_sqs | terraform-aws-modules/sqs/aws | 4.0.1 |
| kube_prometheus_stack | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| metrics_server | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| secrets_store_csi_driver | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| secrets_store_csi_driver_provider_aws | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| velero | aws-ia/eks-blueprints-addon/aws | 1.1.1 |
| vpa | aws-ia/eks-blueprints-addon/aws | 1.1.1 |

Resources

| Name | Type |
|------|------|
| aws_autoscaling_group_tag.aws_node_termination_handler | resource |
| aws_autoscaling_lifecycle_hook.aws_node_termination_handler | resource |
| aws_cloudwatch_event_rule.aws_node_termination_handler | resource |
| aws_cloudwatch_event_rule.karpenter | resource |
| aws_cloudwatch_event_target.aws_node_termination_handler | resource |
| aws_cloudwatch_event_target.karpenter | resource |
| aws_cloudwatch_log_group.aws_for_fluentbit | resource |
| aws_cloudwatch_log_group.fargate_fluentbit | resource |
| aws_eks_addon.this | resource |
| aws_iam_instance_profile.karpenter | resource |
| aws_iam_policy.fargate_fluentbit | resource |
| aws_iam_role.karpenter | resource |
| aws_iam_role_policy_attachment.additional | resource |
| aws_iam_role_policy_attachment.karpenter | resource |
| helm_release.this | resource |
| kubernetes_config_map_v1.aws_logging | resource |
| kubernetes_config_map_v1_data.aws_for_fluentbit_containerinsights | resource |
| kubernetes_namespace_v1.aws_observability | resource |
| time_sleep.this | resource |
| aws_caller_identity.current | data source |
| aws_eks_addon_version.this | data source |
| aws_iam_policy_document.aws_efs_csi_driver | data source |
| aws_iam_policy_document.aws_for_fluentbit | data source |
| aws_iam_policy_document.aws_fsx_csi_driver | data source |
| aws_iam_policy_document.aws_gateway_api_controller | data source |
| aws_iam_policy_document.aws_load_balancer_controller | data source |
| aws_iam_policy_document.aws_node_termination_handler | data source |
| aws_iam_policy_document.aws_privateca_issuer | data source |
| aws_iam_policy_document.cert_manager | data source |
| aws_iam_policy_document.cluster_autoscaler | data source |
| aws_iam_policy_document.external_dns | data source |
| aws_iam_policy_document.external_secrets | data source |
| aws_iam_policy_document.fargate_fluentbit | data source |
| aws_iam_policy_document.karpenter | data source |
| aws_iam_policy_document.karpenter_assume_role | data source |
| aws_iam_policy_document.velero | data source |
| aws_partition.current | data source |
| aws_region.current | data source |

Inputs

| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| argo_events | Argo Events add-on configuration values | any | {} | no |
| argo_rollouts | Argo Rollouts add-on configuration values | any | {} | no |
| argo_workflows | Argo Workflows add-on configuration values | any | {} | no |
| argocd | ArgoCD add-on configuration values | any | {} | no |
| aws_cloudwatch_metrics | Cloudwatch Metrics add-on configuration values | any | {} | no |
| aws_efs_csi_driver | EFS CSI Driver add-on configuration values | any | {} | no |
| aws_for_fluentbit | AWS Fluentbit add-on configurations | any | {} | no |
| aws_for_fluentbit_cw_log_group | AWS Fluentbit CloudWatch Log Group configurations | any | {} | no |
| aws_fsx_csi_driver | FSX CSI Driver add-on configuration values | any | {} | no |
| aws_gateway_api_controller | AWS Gateway API Controller add-on configuration values | any | {} | no |
| aws_load_balancer_controller | AWS Load Balancer Controller add-on configuration values | any | {} | no |
| aws_node_termination_handler | AWS Node Termination Handler add-on configuration values | any | {} | no |
| aws_node_termination_handler_asg_arns | List of Auto Scaling group ARNs that AWS Node Termination Handler will monitor for EC2 events | list(string) | [] | no |
| aws_node_termination_handler_sqs | AWS Node Termination Handler SQS queue configuration values | any | {} | no |
| aws_privateca_issuer | AWS PCA Issuer add-on configurations | any | {} | no |
| cert_manager | cert-manager add-on configuration values | any | {} | no |
| cert_manager_route53_hosted_zone_arns | List of Route53 Hosted Zone ARNs that are used by cert-manager to create DNS records | list(string) | ["arn:aws:route53:::hostedzone/*"] | no |
| cluster_autoscaler | Cluster Autoscaler add-on configuration values | any | {} | no |
| cluster_endpoint | Endpoint for your Kubernetes API server | string | n/a | yes |
| cluster_name | Name of the EKS cluster | string | n/a | yes |
| cluster_proportional_autoscaler | Cluster Proportional Autoscaler add-on configurations | any | {} | no |
| cluster_version | Kubernetes `<major>.<minor>` version to use for the EKS cluster (e.g., 1.24) | string | n/a | yes |
| create_delay_dependencies | Dependency attribute which must be resolved before starting the create_delay_duration | list(string) | [] | no |
| create_delay_duration | The duration to wait before creating resources | string | "30s" | no |
| create_kubernetes_resources | Create Kubernetes resources with the Helm or Kubernetes provider | bool | true | no |
| eks_addons | Map of EKS add-on configurations to enable for the cluster. The add-on name can be the map key, or set with `name` | any | {} | no |
| eks_addons_timeouts | Create, update, and delete timeout configurations for the EKS add-ons | map(string) | {} | no |
| enable_argo_events | Enable Argo Events add-on | bool | false | no |
| enable_argo_rollouts | Enable Argo Rollouts add-on | bool | false | no |
| enable_argo_workflows | Enable Argo Workflows add-on | bool | false | no |
| enable_argocd | Enable Argo CD Kubernetes add-on | bool | false | no |
| enable_aws_cloudwatch_metrics | Enable AWS Cloudwatch Metrics add-on for Container Insights | bool | false | no |
| enable_aws_efs_csi_driver | Enable AWS EFS CSI Driver add-on | bool | false | no |
| enable_aws_for_fluentbit | Enable AWS for FluentBit add-on | bool | false | no |
| enable_aws_fsx_csi_driver | Enable AWS FSX CSI Driver add-on | bool | false | no |
| enable_aws_gateway_api_controller | Enable AWS Gateway API Controller add-on | bool | false | no |
| enable_aws_load_balancer_controller | Enable AWS Load Balancer Controller add-on | bool | false | no |
| enable_aws_node_termination_handler | Enable AWS Node Termination Handler add-on | bool | false | no |
| enable_aws_privateca_issuer | Enable AWS PCA Issuer | bool | false | no |
| enable_cert_manager | Enable cert-manager add-on | bool | false | no |
| enable_cluster_autoscaler | Enable Cluster Autoscaler add-on | bool | false | no |
| enable_cluster_proportional_autoscaler | Enable Cluster Proportional Autoscaler | bool | false | no |
| enable_external_dns | Enable external-dns operator add-on | bool | false | no |
| enable_external_secrets | Enable External Secrets operator add-on | bool | false | no |
| enable_fargate_fluentbit | Enable Fargate FluentBit add-on | bool | false | no |
| enable_gatekeeper | Enable Gatekeeper add-on | bool | false | no |
| enable_ingress_nginx | Enable Ingress Nginx | bool | false | no |
| enable_karpenter | Enable Karpenter controller add-on | bool | false | no |
| enable_kube_prometheus_stack | Enable Kube Prometheus Stack | bool | false | no |
| enable_metrics_server | Enable metrics server add-on | bool | false | no |
| enable_secrets_store_csi_driver | Enable CSI Secrets Store Provider | bool | false | no |
| enable_secrets_store_csi_driver_provider_aws | Enable AWS CSI Secrets Store Provider | bool | false | no |
| enable_velero | Enable Velero add-on | bool | false | no |
| enable_vpa | Enable Vertical Pod Autoscaler add-on | bool | false | no |
| external_dns | external-dns add-on configuration values | any | {} | no |
| external_dns_route53_zone_arns | List of Route53 zone ARNs which external-dns will have access to create/manage records (if using Route53) | list(string) | [] | no |
| external_secrets | External Secrets add-on configuration values | any | {} | no |
| external_secrets_kms_key_arns | List of KMS Key ARNs that are used by Secrets Manager that contain secrets to mount using External Secrets | list(string) | ["arn:aws:kms:::key/*"] | no |
| external_secrets_secrets_manager_arns | List of Secrets Manager ARNs that contain secrets to mount using External Secrets | list(string) | ["arn:aws:secretsmanager:::secret:*"] | no |
| external_secrets_ssm_parameter_arns | List of Systems Manager Parameter ARNs that contain secrets to mount using External Secrets | list(string) | ["arn:aws:ssm:::parameter/*"] | no |
| fargate_fluentbit | Fargate fluentbit add-on config | any | {} | no |
| fargate_fluentbit_cw_log_group | AWS Fargate Fluentbit CloudWatch Log Group configurations | any | {} | no |
| gatekeeper | Gatekeeper add-on configuration | any | {} | no |
| helm_releases | A map of Helm releases to create. This provides the ability to pass in an arbitrary map of Helm chart definitions to create | any | {} | no |
| ingress_nginx | Ingress Nginx add-on configurations | any | {} | no |
| karpenter | Karpenter add-on configuration values | any | {} | no |
| karpenter_enable_spot_termination | Determines whether to enable native node termination handling | bool | true | no |
| karpenter_node | Karpenter IAM role and IAM instance profile configuration values | any | {} | no |
| karpenter_sqs | Karpenter SQS queue for native node termination handling configuration values | any | {} | no |
| kube_prometheus_stack | Kube Prometheus Stack add-on configurations | any | {} | no |
| metrics_server | Metrics Server add-on configurations | any | {} | no |
| oidc_provider_arn | The ARN of the cluster OIDC Provider | string | n/a | yes |
| secrets_store_csi_driver | CSI Secrets Store Provider add-on configurations | any | {} | no |
| secrets_store_csi_driver_provider_aws | AWS CSI Secrets Store Provider add-on configurations | any | {} | no |
| tags | A map of tags to add to all resources | map(string) | {} | no |
| velero | Velero add-on configuration values | any | {} | no |
| vpa | Vertical Pod Autoscaler add-on configuration values | any | {} | no |

Outputs

| Name | Description |
|------|-------------|
| argo_events | Map of attributes of the Helm release created |
| argo_rollouts | Map of attributes of the Helm release created |
| argo_workflows | Map of attributes of the Helm release created |
| argocd | Map of attributes of the Helm release created |
| aws_cloudwatch_metrics | Map of attributes of the Helm release and IRSA created |
| aws_efs_csi_driver | Map of attributes of the Helm release and IRSA created |
| aws_for_fluentbit | Map of attributes of the Helm release and IRSA created |
| aws_fsx_csi_driver | Map of attributes of the Helm release and IRSA created |
| aws_gateway_api_controller | Map of attributes of the Helm release and IRSA created |
| aws_load_balancer_controller | Map of attributes of the Helm release and IRSA created |
| aws_node_termination_handler | Map of attributes of the Helm release and IRSA created |
| aws_privateca_issuer | Map of attributes of the Helm release and IRSA created |
| cert_manager | Map of attributes of the Helm release and IRSA created |
| cluster_autoscaler | Map of attributes of the Helm release and IRSA created |
| cluster_proportional_autoscaler | Map of attributes of the Helm release and IRSA created |
| eks_addons | Map of attributes for each EKS addons enabled |
| external_dns | Map of attributes of the Helm release and IRSA created |
| external_secrets | Map of attributes of the Helm release and IRSA created |
| fargate_fluentbit | Map of attributes of the configmap and IAM policy created |
| gatekeeper | Map of attributes of the Helm release and IRSA created |
| gitops_metadata | GitOps Bridge metadata |
| helm_releases | Map of attributes of the Helm release created |
| ingress_nginx | Map of attributes of the Helm release and IRSA created |
| karpenter | Map of attributes of the Helm release and IRSA created |
| kube_prometheus_stack | Map of attributes of the Helm release and IRSA created |
| metrics_server | Map of attributes of the Helm release and IRSA created |
| secrets_store_csi_driver | Map of attributes of the Helm release and IRSA created |
| secrets_store_csi_driver_provider_aws | Map of attributes of the Helm release and IRSA created |
| velero | Map of attributes of the Helm release and IRSA created |
| vpa | Map of attributes of the Helm release and IRSA created |
\ No newline at end of file
diff --git a/v1.9.2/search/search_index.json b/v1.9.2/search/search_index.json
new file mode 100644
index 00000000..ff85c6e2
--- /dev/null
+++ b/v1.9.2/search/search_index.json
@@ -0,0 +1 @@

Amazon EKS Add-ons

The Amazon EKS add-on implementation is generic and can be used to deploy any add-on supported by the EKS API, whether native Amazon EKS add-ons or third-party add-ons supplied via the AWS Marketplace.

See the EKS documentation for more details on EKS add-ons, including the list of Amazon EKS add-ons from Amazon EKS, as well as additional Amazon EKS add-ons from independent software vendors.

"},{"location":"amazon-eks-addons/#architecture-support","title":"Architecture Support","text":"

The Amazon EKS provided add-ons listed below support both x86_64/amd64 and arm64 architectures. Third party add-ons that are available via the AWS Marketplace will vary based on the support provided by the add-on vendor. No additional changes are required to add-on configurations when switching between x86_64/amd64 and arm64 architectures; Amazon EKS add-ons utilize multi-architecture container images to support this functionality.

| Add-on | x86_64/amd64 | arm64 |
|--------|--------------|-------|
| vpc-cni | ✅ | ✅ |
| aws-ebs-csi-driver | ✅ | ✅ |
| coredns | ✅ | ✅ |
| kube-proxy | ✅ | ✅ |
| adot | ✅ | ✅ |
| aws-guardduty-agent | ✅ | ✅ |
"},{"location":"amazon-eks-addons/#usage","title":"Usage","text":"

The Amazon EKS add-ons are provisioned via a generic interface behind the eks_addons argument, which accepts a map of add-on configurations. The generic interface for an add-on is defined below for reference:

module \"eks_blueprints_addons\" {\nsource = \"aws-ia/eks-blueprints-addons/aws\"\n  # ... truncated for brevity\neks_addons = {\n<key> = {\nname = string # Optional - <key> is used if `name` is not set\nmost_recent          = bool\naddon_version        = string # overrides `most_recent` if set\nconfiguration_values = string # JSON string\npreserve                    = bool # defaults to `true`\nresolve_conflicts_on_create = string # defaults to `OVERWRITE`\nresolve_conflicts_on_update = string # defaults to `OVERWRITE`\ntimeouts = {\ncreate = string # optional\nupdate = string # optional\ndelete = string # optional\n}\ntags = map(string)\n}\n}\n}\n
"},{"location":"amazon-eks-addons/#example","title":"Example","text":"
module \"eks_blueprints_addons\" {\nsource = \"aws-ia/eks-blueprints-addons/aws\"\n  # ... truncated for brevity\neks_addons = {\n    # Amazon EKS add-ons\naws-ebs-csi-driver = {\nmost_recent              = true\nservice_account_role_arn = module.ebs_csi_driver_irsa.iam_role_arn\n}\ncoredns = {\nmost_recent = true\ntimeouts = {\ncreate = \"25m\"\ndelete = \"10m\"\n}\n}\nvpc-cni = {\nmost_recent              = true\nservice_account_role_arn = module.vpc_cni_irsa.iam_role_arn\n}\nkube-proxy = {}\n    # Third party add-ons via AWS Marketplace\nkubecost_kubecost = {\nmost_recent = true\n}\nteleport_teleport = {\nmost_recent = true\n}\n}\n}\n
"},{"location":"amazon-eks-addons/#configuration-values","title":"Configuration Values","text":"

You can supply custom configuration values to each add-on via the configuration_values argument of the add-on definition. The value provided must be a JSON-encoded string and adhere to the JSON schema of the given add-on version. You can view this schema with the AWS CLI by supplying the add-on name and version to the describe-addon-configuration command:

aws eks describe-addon-configuration \\\n--addon-name coredns \\\n--addon-version v1.8.7-eksbuild.2 \\\n--query 'configurationSchema' \\\n--output text | jq\n

This returns the formatted JSON schema, as shown below:

{\n\"$ref\": \"#/definitions/Coredns\",\n\"$schema\": \"http://json-schema.org/draft-06/schema#\",\n\"definitions\": {\n\"Coredns\": {\n\"additionalProperties\": false,\n\"properties\": {\n\"computeType\": {\n\"type\": \"string\"\n},\n\"corefile\": {\n\"description\": \"Entire corefile contents to use with installation\",\n\"type\": \"string\"\n},\n\"nodeSelector\": {\n\"additionalProperties\": {\n\"type\": \"string\"\n},\n\"type\": \"object\"\n},\n\"replicaCount\": {\n\"type\": \"integer\"\n},\n\"resources\": {\n\"$ref\": \"#/definitions/Resources\"\n}\n},\n\"title\": \"Coredns\",\n\"type\": \"object\"\n},\n\"Limits\": {\n\"additionalProperties\": false,\n\"properties\": {\n\"cpu\": {\n\"type\": \"string\"\n},\n\"memory\": {\n\"type\": \"string\"\n}\n},\n\"title\": \"Limits\",\n\"type\": \"object\"\n},\n\"Resources\": {\n\"additionalProperties\": false,\n\"properties\": {\n\"limits\": {\n\"$ref\": \"#/definitions/Limits\"\n},\n\"requests\": {\n\"$ref\": \"#/definitions/Limits\"\n}\n},\n\"title\": \"Resources\",\n\"type\": \"object\"\n}\n}\n}\n

You can supply the configuration values to the add-on by passing a map of the values wrapped in the jsonencode() function as shown below:

module \"eks_blueprints_addons\" {\nsource = \"aws-ia/eks-blueprints-addons/aws\"\n  # ... truncated for brevity\neks_addons = {\ncoredns = {\nmost_recent = true\nconfiguration_values = jsonencode({\nreplicaCount = 4\nresources = {\nlimits = {\ncpu    = \"100m\"\nmemory = \"150Mi\"\n}\nrequests = {\ncpu    = \"100m\"\nmemory = \"150Mi\"\n}\n}\n})\n}\n}\n}\n
"},{"location":"architectures/","title":"Architectures","text":""},{"location":"architectures/#addons","title":"Addons","text":"Addon x86_64/amd64 arm64 Argo Rollouts \u2705 \u2705 Argo Workflows \u2705 \u2705 Argo CD \u2705 \u2705 AWS CloudWatch Metrics \u2705 \u2705 AWS EFS CSI Driver \u2705 \u2705 AWS for FluentBit \u2705 \u2705 AWS FSx CSI Driver \u2705 \u2705 AWS Load Balancer Controller \u2705 \u2705 AWS Node Termination Handler \u2705 \u2705 AWS Private CA Issuer \u2705 \u2705 Cert Manager \u2705 \u2705 Cluster Autoscaler \u2705 \u2705 Cluster Proportional Autoscaler \u2705 \u2705 External DNS \u2705 \u2705 External Secrets \u2705 \u2705 OPA Gatekeeper \u2705 \u2705 Ingress Nginx \u2705 \u2705 Karpenter \u2705 \u2705 Kube-Prometheus Stack \u2705 \u2705 Metrics Server \u2705 \u2705 Secrets Store CSI Driver \u2705 \u2705 Secrets Store CSI Driver Provider AWS \u2705 \u2705 Velero \u2705 \u2705 Vertical Pod Autoscaler \u2705 \u2705"},{"location":"architectures/#amazon-eks-addons","title":"Amazon EKS Addons","text":"

The Amazon EKS provided add-ons listed below support both x86_64/amd64 and arm64 architectures. Third party add-ons that are available via the AWS Marketplace will vary based on the support provided by the add-on vendor. No additional changes are required to add-on configurations when switching between x86_64/amd64 and arm64 architectures; Amazon EKS add-ons utilize multi-architecture container images to support this functionality. These add-ons are specified via the eks_addons input variable.

| Addon | x86_64/amd64 | arm64 |
|-------|--------------|-------|
| AWS VPC CNI | ✅ | ✅ |
| AWS EBS CSI Driver | ✅ | ✅ |
| CoreDNS | ✅ | ✅ |
| Kube-proxy | ✅ | ✅ |
| ADOT Collector | ✅ | ✅ |
| AWS GuardDuty Agent | ✅ | ✅ |
"},{"location":"aws-partner-addons/","title":"AWS Partner Addons","text":"

The following addons are provided by AWS Partners for use with Amazon EKS Blueprints for Terraform. Please see the respective addon repository for more information on the addon, its supported configuration values, as well as questions, comments, and feature requests.

| Addon | Description |
|-------|-------------|
| Ondat | Ondat is a Kubernetes-native storage platform that enables stateful applications to run on Kubernetes. |
| Hashicorp - Consul | Consul is a service networking solution to automate network configurations, discover services, and enable secure connectivity across any cloud or runtime. |
| Hashicorp - Vault | Vault secures, stores, and tightly controls access to tokens, passwords, certificates, API keys, and other secrets in modern computing. |
| Sysdig | Sysdig CNAPP helps you stop cloud and container security attacks with no wasted time. |
| Tetrate Istio | Tetrate Istio Distro is an open source project from Tetrate that provides vetted builds of Istio tested against all major cloud platforms. |
| NetApp ONTAP Astra Trident | NetApp's Astra Trident provides dynamic storage orchestration for FSx for NetApp ONTAP using a Container Storage Interface (CSI) compliant driver. |
| Kong Konnect Gateway | Kong Gateway is the fastest and most adopted API gateway that integrates with Kong Konnect, the end-to-end SaaS API lifecycle management platform. |
| Kong Konnect Kong Ingress Controller | Kong Ingress Controller combines the powerful features of the widely popular Kong Gateway with Kubernetes in a truly Kubernetes-native manner, now integrated with Kong Konnect, the end-to-end SaaS API lifecycle management platform. |
"},{"location":"helm-release/","title":"Helm Release Add-ons","text":"

Starting with EKS Blueprints v5, we have made a decision to only support the provisioning of a certain core set of add-ons. On an ongoing basis, we will evaluate the current list to see if more add-ons need to be supported via this repo. Typically, you can expect that any AWS-created add-on that is not yet available via the Amazon EKS add-ons will be prioritized to be provisioned through this repository.

In addition to these AWS add-ons, we will also support the provisioning of certain OSS add-ons that we think customers will benefit from. These are selected based on customer demand (e.g. metrics-server) and certain patterns (gitops) that are foundational elements for a complete blueprint of an EKS cluster.

One of the reasons customers pick Kubernetes is its strong commercial and open-source software ecosystem, and they may want to provision add-ons that are not natively supported by EKS Blueprints. For such add-ons, the options are as follows:

"},{"location":"helm-release/#with-helm_release-terraform-resource","title":"With helm_release Terraform Resource","text":"

The helm_release resource is the most fundamental way to provision a Helm chart via Terraform.

Use this resource if you need to control the lifecycle of add-ons down to the level of each individual add-on resource.
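For illustration, a minimal sketch of this approach, deploying metrics-server directly with helm_release (the chart version and replica value are assumptions, not vetted pins):

resource "helm_release" "metrics_server" {
  name       = "metrics-server"
  repository = "https://kubernetes-sigs.github.io/metrics-server/"
  chart      = "metrics-server"
  version    = "3.11.0" # assumption: pin the chart version you have validated
  namespace  = "kube-system"

  # Each `set` block overrides a single chart value
  set {
    name  = "replicas"
    value = "2"
  }
}

Because this is a plain Terraform resource, you can attach lifecycle blocks, depends_on edges, and a dedicated provider configuration to each add-on individually.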

"},{"location":"helm-release/#with-helm_releases-variable","title":"With helm_releases Variable","text":"

You can use the helm_releases variable in EKS Blueprints Add-ons to provide a map of add-ons and their respective Helm configuration. Under the hood, we just iterate through the provided map and pass each configuration to the Terraform helm_release resource.

E.g.

module \"addons\" {\nsource  = \"aws-ia/eks-blueprints-addons/aws\"\nversion = \"~> 1.0\"\ncluster_name      = \"<cluster_name>\"\ncluster_endpoint  = \"<cluster_endpoint>\"\ncluster_version   = \"<cluster_version>\"\noidc_provider_arn = \"<oidc_provider_arn>\"\n  # EKS add-ons\neks_addons = {\ncoredns = {}\nvpc-cni = {}\nkube-proxy = {}\n}\n  # Blueprints add-ons\nenable_aws_efs_csi_driver                    = true\nenable_aws_cloudwatch_metrics                = true\nenable_cert_manager                          = true\n...\n  # Pass in any number of Helm charts to be created for those that are not natively supported\nhelm_releases = {\nprometheus-adapter = {\ndescription      = \"A Helm chart for k8s prometheus adapter\"\nnamespace        = \"prometheus-adapter\"\ncreate_namespace = true\nchart            = \"prometheus-adapter\"\nchart_version    = \"4.2.0\"\nrepository       = \"https://prometheus-community.github.io/helm-charts\"\nvalues = [\n<<-EOT\n          replicas: 2\n          podDisruptionBudget:\n            enabled: true\n        EOT\n]\n}\ngpu-operator = {\ndescription      = \"A Helm chart for NVIDIA GPU operator\"\nnamespace        = \"gpu-operator\"\ncreate_namespace = true\nchart            = \"gpu-operator\"\nchart_version    = \"v23.3.2\"\nrepository       = \"https://nvidia.github.io/gpu-operator\"\nvalues = [\n<<-EOT\n          operator:\n            defaultRuntime: containerd\n        EOT\n]\n}\n}\ntags = local.tags\n}\n

With this pattern, the lifecycle of all your add-ons is tied to that of the addons module. This allows you to easily target the addon module in your Terraform apply and destroy commands. E.g.

terraform apply -target=module.addons\n\nterraform destroy -target=module.addons\n
"},{"location":"helm-release/#with-eks-blueprints-addon-module","title":"With EKS Blueprints Addon Module","text":"

If you have an add-on that requires an IAM Role for Service Accounts (IRSA), we have created a new Terraform module, terraform-aws-eks-blueprints-addon, that can help provision a Helm chart along with an IAM role and policies with the permissions required for the add-on to function properly. We use this module for all of the add-ons that are provisioned by EKS Blueprints Add-ons today.

You can optionally use this module for add-ons that do not need IRSA or even just to create the IAM resources for IRSA and skip the helm release. Detailed usage of how to consume this module can be found in its readme.
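As a minimal sketch, provisioning a Helm chart together with an IRSA role via this module might look like the following; the argument names (role_policies, oidc_providers, etc.) reflect the module's documented interface, so verify them against the module version you pin, and the IAM policy resource referenced here is hypothetical:

module "external_dns" {
  source  = "aws-ia/eks-blueprints-addon/aws"
  version = "~> 1.0"

  chart            = "external-dns"
  chart_version    = "1.12.2"
  repository       = "https://kubernetes-sigs.github.io/external-dns/"
  namespace        = "external-dns"
  create_namespace = true

  # IRSA: create an IAM role, attach policies, and trust the cluster's OIDC provider
  create_role = true
  role_name   = "external-dns"
  role_policies = {
    external_dns = aws_iam_policy.external_dns.arn # hypothetical policy resource
  }
  oidc_providers = {
    this = {
      provider_arn    = var.oidc_provider_arn
      service_account = "external-dns"
    }
  }
}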

This pattern can be used to create a Terraform module with a set of add-ons that are not supported in EKS Blueprints Add-ons today and wrap them in the same module definition. An example of this is the ACK add-ons repository, which is a collection of ACK Helm chart deployments with IRSA for each of the ACK controllers.

"},{"location":"addons/argo-events/","title":"Argo Events","text":"

Argo Events is an open source container-native event-driven workflow automation framework for Kubernetes which helps you trigger K8s objects, Argo Workflows, Serverless workloads, etc. on events from a variety of sources. Argo Events is implemented as a Kubernetes CRD (Custom Resource Definition).

"},{"location":"addons/argo-events/#usage","title":"Usage","text":"

Argo Events can be deployed by enabling the add-on via the following.

enable_argo_events = true\n

You can optionally customize the Helm chart that deploys Argo Events via the following configuration.

  enable_argo_events = true\nargo_events = {\nname          = \"argo-events\"\nchart_version = \"2.4.0\"\nrepository    = \"https://argoproj.github.io/argo-helm\"\nnamespace     = \"argo-events\"\nvalues        = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n

Verify argo-events pods are running.

$ kubectl get pods -n argo-events\nNAME                                                  READY   STATUS    RESTARTS   AGE\nargo-events-controller-manager-bfb894cdb-k8hzn        1/1     Running   0          11m\n
"},{"location":"addons/argo-rollouts/","title":"Argo Rollouts","text":"

Argo Rollouts is a Kubernetes controller and set of CRDs which provide advanced deployment capabilities such as blue-green, canary, canary analysis, experimentation, and progressive delivery features to Kubernetes.

"},{"location":"addons/argo-rollouts/#usage","title":"Usage","text":"

Argo Rollouts can be deployed by enabling the add-on via the following.

enable_argo_rollouts = true\n

You can optionally customize the Helm chart that deploys Argo Rollouts via the following configuration.

  enable_argo_rollouts = true\nargo_rollouts = {\nname          = \"argo-rollouts\"\nchart_version = \"2.22.3\"\nrepository    = \"https://argoproj.github.io/argo-helm\"\nnamespace     = \"argo-rollouts\"\nvalues        = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n

Verify argo-rollouts pods are running.

$ kubectl get pods -n argo-rollouts\nNAME                             READY   STATUS    RESTARTS   AGE\nargo-rollouts-5db5688849-x89zb   0/1     Running   0          11s\n
"},{"location":"addons/argo-workflows/","title":"Argo Workflows","text":"

Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo Workflows is implemented as a Kubernetes CRD (Custom Resource Definition).

"},{"location":"addons/argo-workflows/#usage","title":"Usage","text":"

Argo Workflows can be deployed by enabling the add-on via the following.

enable_argo_workflows = true\n

You can optionally customize the Helm chart that deploys Argo Workflows via the following configuration.

  enable_argo_workflows = true\nargo_workflows = {\nname          = \"argo-workflows\"\nchart_version = \"0.28.2\"\nrepository    = \"https://argoproj.github.io/argo-helm\"\nnamespace     = \"argo-workflows\"\nvalues        = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n

Verify argo-workflows pods are running.

$ kubectl get pods -n argo-workflows\nNAME                                                  READY   STATUS    RESTARTS   AGE\nargo-workflows-server-68988cd864-22zhr                1/1     Running   0          6m32s\nargo-workflows-workflow-controller-7ff7b5658d-9q44f   1/1     Running   0          6m32s\n
"},{"location":"addons/argocd/","title":"Argo CD","text":"

Argo CD is a declarative, GitOps continuous delivery tool for Kubernetes.

"},{"location":"addons/argocd/#usage","title":"Usage","text":"

Argo CD can be deployed by enabling the add-on via the following.

enable_argocd = true\n

You can optionally customize the Helm chart that deploys Argo CD via the following configuration.

  enable_argocd = true\nargocd = {\nname          = \"argocd\"\nchart_version = \"5.29.1\"\nrepository    = \"https://argoproj.github.io/argo-helm\"\nnamespace     = \"argocd\"\nvalues        = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n

Verify argocd pods are running.

$ kubectl get pods -n argocd\nNAME                                                        READY   STATUS    RESTARTS   AGE\nargo-cd-argocd-application-controller-0                     1/1     Running   0          146m\nargo-cd-argocd-applicationset-controller-678d85f77b-rmpcb   1/1     Running   0          146m\nargo-cd-argocd-dex-server-7b6c9b5969-zpqnl                  1/1     Running   0          146m\nargo-cd-argocd-notifications-controller-6d489b99c9-j6fdw    1/1     Running   0          146m\nargo-cd-argocd-redis-59dd95f5b5-8fx74                       1/1     Running   0          146m\nargo-cd-argocd-repo-server-7b9bd88c95-mh2fz                 1/1     Running   0          146m\nargo-cd-argocd-server-6f9cfdd4d5-8mfpc                      1/1     Running   0          146m\n
"},{"location":"addons/aws-cloudwatch-metrics/","title":"AWS CloudWatch Metrics","text":"

Use CloudWatch Container Insights to collect, aggregate, and summarize metrics and logs from your containerized applications and microservices. CloudWatch automatically collects metrics for many resources, such as CPU, memory, disk, and network. Container Insights also provides diagnostic information, such as container restart failures, to help you isolate issues and resolve them quickly. You can also set CloudWatch alarms on metrics that Container Insights collects.

Container Insights collects data as performance log events using embedded metric format. These performance log events are entries that use a structured JSON schema that enables high-cardinality data to be ingested and stored at scale. From this data, CloudWatch creates aggregated metrics at the cluster, node, pod, task, and service level as CloudWatch metrics. The metrics that Container Insights collects are available in CloudWatch automatic dashboards, and also viewable in the Metrics section of the CloudWatch console.

"},{"location":"addons/aws-cloudwatch-metrics/#usage","title":"Usage","text":"

aws-cloudwatch-metrics can be deployed by enabling the add-on via the following.

enable_aws_cloudwatch_metrics = true\n

You can also customize the Helm chart that deploys aws-cloudwatch-metrics via the following configuration:

  enable_aws_cloudwatch_metrics        = true\naws_cloudwatch_metrics_irsa_policies = [\"IAM Policies\"]\naws_cloudwatch_metrics   = {\nrole_policies = [\"IAM Policies\"]  # extra policies in addition to CloudWatchAgentServerPolicy\nname          = \"aws-cloudwatch-metrics\"\nrepository    = \"https://aws.github.io/eks-charts\"\nchart_version = \"0.0.9\"\nnamespace     = \"amazon-cloudwatch\"\nvalues        = [templatefile(\"${path.module}/values.yaml\", {})] # The value `clusterName` is already set to the EKS cluster name, no need to specify here\n}\n

Verify aws-cloudwatch-metrics pods are running.

$ kubectl get pods -n amazon-cloudwatch\n\nNAME                           READY   STATUS    RESTARTS   AGE\naws-cloudwatch-metrics-2dt5h   1/1     Running   0          149m\n
"},{"location":"addons/aws-efs-csi-driver/","title":"AWS EFS CSI Driver","text":"

This add-on deploys the AWS EFS CSI driver into an EKS cluster.

"},{"location":"addons/aws-efs-csi-driver/#usage","title":"Usage","text":"

The AWS EFS CSI driver can be deployed by enabling the add-on via the following. Check out the full example to deploy an EKS Cluster with EFS backing the dynamic provisioning of persistent volumes.

  enable_aws_efs_csi_driver = true\n

You can optionally customize the Helm chart that deploys the driver via the following configuration.

  enable_aws_efs_csi_driver = true\n  # Optional aws_efs_csi_driver Helm chart and IRSA configuration\naws_efs_csi_driver = {\nrepository    = \"https://kubernetes-sigs.github.io/aws-efs-csi-driver/\"\nchart_version = \"2.4.1\"\nrole_policies = [\"<ADDITIONAL_IAM_POLICY_ARN>\"]\n}\n

Once deployed, you will be able to see a number of supporting resources in the kube-system namespace.

$ kubectl get deployment efs-csi-controller -n kube-system\n\nNAME                 READY   UP-TO-DATE   AVAILABLE   AGE\nefs-csi-controller   2/2     2            2           4m29s\n
$ kubectl get daemonset efs-csi-node -n kube-system\n\nNAME           DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR                 AGE\nefs-csi-node   3         3         3       3            3           beta.kubernetes.io/os=linux   4m32s\n
"},{"location":"addons/aws-efs-csi-driver/#validate-efs-csi-driver","title":"Validate EFS CSI Driver","text":"

Follow the static provisioning example described here to validate the CSI driver is working as expected.
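If you prefer to drive the validation from Terraform rather than raw manifests, a minimal static-provisioning sketch using the Terraform kubernetes provider could look like the following (fs-12345678 is a placeholder for an existing EFS file system ID):

resource "kubernetes_persistent_volume" "efs" {
  metadata {
    name = "efs-pv"
  }

  spec {
    capacity = {
      storage = "5Gi" # EFS does not enforce capacity, but the field is required
    }
    access_modes                     = ["ReadWriteMany"]
    persistent_volume_reclaim_policy = "Retain"
    storage_class_name               = "efs-sc"

    persistent_volume_source {
      csi {
        driver        = "efs.csi.aws.com"
        volume_handle = "fs-12345678" # placeholder: existing EFS file system ID
      }
    }
  }
}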

"},{"location":"addons/aws-for-fluentbit/","title":"AWS for Fluent Bit","text":"

AWS provides a Fluent Bit image with plugins for both CloudWatch Logs and Kinesis Data Firehose. We recommend using Fluent Bit as your log router because it has a lower resource utilization rate than Fluentd.

"},{"location":"addons/aws-for-fluentbit/#usage","title":"Usage","text":"

AWS for Fluent Bit can be deployed by enabling the add-on via the following.

enable_aws_for_fluentbit = true\n

You can optionally customize the Helm chart that deploys AWS for Fluent Bit via the following configuration.

  enable_aws_for_fluentbit = true\naws_for_fluentbit_cw_log_group = {\ncreate          = true\nuse_name_prefix = true # Set this to true to enable name prefix\nname_prefix     = \"eks-cluster-logs-\"\nretention       = 7\n}\naws_for_fluentbit = {\nname          = \"aws-for-fluent-bit\"\nchart_version = \"0.1.28\"\nrepository    = \"https://aws.github.io/eks-charts\"\nnamespace     = \"kube-system\"\nvalues        = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n

If you want to enable Container Insights on Amazon EKS through Fluent Bit, you need to add the following parameter in your configuration:

  enable_aws_for_fluentbit = true\naws_for_fluentbit = {\nenable_containerinsights = true\n}\n

By default, Container Insights will not enable the kubelet monitoring feature with the AWS for Fluent Bit integration, since this is an optional feature that is suggested to be enabled only on large clusters. To enable the Container Insights Use_Kubelet feature, you'll need to provide a few more parameters:

  enable_aws_for_fluentbit = true\naws_for_fluentbit = {\nenable_containerinsights = true\nkubelet_monitoring       = true\nset = [{\nname  = \"cloudWatchLogs.autoCreateGroup\"\nvalue = true\n},\n{\nname  = \"hostNetwork\"\nvalue = true\n},\n{\nname  = \"dnsPolicy\"\nvalue = \"ClusterFirstWithHostNet\"\n}\n]\n}\n
"},{"location":"addons/aws-for-fluentbit/#verify-the-fluent-bit-setup","title":"Verify the Fluent Bit setup","text":"

Verify aws-for-fluentbit pods are running.

$ kubectl -n kube-system get pods -l app.kubernetes.io/name=aws-for-fluent-bit\nNAME                       READY   STATUS    RESTARTS   AGE\naws-for-fluent-bit-6lhkj   1/1     Running   0          15m\naws-for-fluent-bit-sbn9b   1/1     Running   0          15m\naws-for-fluent-bit-svhwq   1/1     Running   0          15m\n

Open the CloudWatch console at https://console.aws.amazon.com/cloudwatch/

In the navigation pane, choose Log groups.

Make sure that you're in the Region where you deployed Fluent Bit.

Check the list of log groups in the Region. You should see the following:

/aws/eks/complete/aws-fluentbit-logs\n

If you enabled Container Insights, you should also see the following Log Groups in your CloudWatch Console.

/aws/containerinsights/Cluster_Name/application\n\n/aws/containerinsights/Cluster_Name/host\n\n/aws/containerinsights/Cluster_Name/dataplane\n

Navigate to one of these log groups and check the Last Event Time for the log streams. If it is recent relative to when you deployed Fluent Bit, the setup is verified.

There might be a slight delay in creating the /dataplane log group. This is normal as these log groups only get created when Fluent Bit starts sending logs for that log group.

"},{"location":"addons/aws-fsx-csi-driver/","title":"AWS FSx CSI Driver","text":"

This add-on deploys the Amazon FSx CSI Driver into an Amazon EKS Cluster.

"},{"location":"addons/aws-fsx-csi-driver/#usage","title":"Usage","text":"

The Amazon FSx CSI Driver can be deployed by enabling the add-on via the following.

  enable_aws_fsx_csi_driver = true\n
"},{"location":"addons/aws-fsx-csi-driver/#helm-chart-customization","title":"Helm Chart customization","text":"

You can optionally customize the Helm chart deployment using a configuration like the following.

  enable_aws_fsx_csi_driver = true\naws_fsx_csi_driver = {\nnamespace     = \"aws-fsx-csi-driver\"\nchart_version = \"1.6.0\"\nrole_policies = [\"<ADDITIONAL_IAM_POLICY_ARN>\"]\n}\n

You can find all available Helm Chart parameter values here.

"},{"location":"addons/aws-fsx-csi-driver/#validation","title":"Validation","text":"

Once deployed, you will be able to see a number of supporting resources in the kube-system namespace.

$ kubectl -n kube-system get deployment fsx-csi-controller\n\nNAME                 READY   UP-TO-DATE   AVAILABLE   AGE\nfsx-csi-controller   2/2     2            2           4m29s\n\n$ kubectl -n kube-system get pods -l app=fsx-csi-controller\nNAME                                  READY   STATUS    RESTARTS   AGE\nfsx-csi-controller-56c6d9bbb8-89cpc   4/4     Running   0          3m30s\nfsx-csi-controller-56c6d9bbb8-9wnlh   4/4     Running   0          3m30s\n
$ kubectl -n kube-system get daemonset fsx-csi-node\nNAME           DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE\nfsx-csi-node   3         3         3       3            3           kubernetes.io/os=linux   5m27s\n\n$ kubectl -n kube-system get pods -l  app=fsx-csi-node\nNAME                 READY   STATUS    RESTARTS   AGE\nfsx-csi-node-7c5z6   3/3     Running   0          5m29s\nfsx-csi-node-d5q28   3/3     Running   0          5m29s\nfsx-csi-node-hlg8q   3/3     Running   0          5m29s\n

Create a StorageClass. Replace the SubnetID and the SecurityGroupID with your own values. More details here.

$ cat <<EOF | kubectl apply -f -\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\n  name: fsx-sc\nprovisioner: fsx.csi.aws.com\nparameters:\n  subnetId: <YOUR_SUBNET_IDs>\n  securityGroupIds: <YOUR_SG_ID>\n  perUnitStorageThroughput: \"200\"\n  deploymentType: PERSISTENT_1\nmountOptions:\n  - flock\nEOF\n
$ kubectl describe storageclass fsx-sc\nName:            fsx-sc\nIsDefaultClass:  No\nAnnotations:     kubectl.kubernetes.io/last-applied-configuration={\"apiVersion\":\"storage.k8s.io/v1\",\"kind\":\"StorageClass\",\"metadata\":{\"annotations\":{},\"name\":\"fsx-sc\"},\"mountOptions\":null,\"parameters\":{\"deploymentType\":\"PERSISTENT_1\",\"perUnitStorageThroughput\":\"200\",\"securityGroupIds\":\"sg-q1w2e3r4t5y6u7i8o\",\"subnetId\":\"subnet-q1w2e3r4t5y6u7i8o\"},\"provisioner\":\"fsx.csi.aws.com\"}\nProvisioner:           fsx.csi.aws.com\nParameters:            deploymentType=PERSISTENT_1,perUnitStorageThroughput=200,securityGroupIds=sg-q1w2e3r4t5y6u7i8o,subnetId=subnet-q1w2e3r4t5y6u7i8o\nAllowVolumeExpansion:  <unset>\nMountOptions:          <none>\nReclaimPolicy:         Delete\nVolumeBindingMode:     Immediate\nEvents:                <none>\n

Create a PVC.

$ cat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: fsx-claim\nspec:\n  accessModes:\n    - ReadWriteMany\n  storageClassName: fsx-sc\n  resources:\n    requests:\n      storage: 1200Gi\nEOF\n

Wait for the PV to be created and bound to your PVC.

$ kubectl get pvc\nNAME        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE\nfsx-claim   Bound    pvc-df385730-72d6-4b0c-8275-cc055a438760   1200Gi     RWX            fsx-sc         7m47s\n$ kubectl get pv\nNAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS   REASON   AGE\npvc-df385730-72d6-4b0c-8275-cc055a438760   1200Gi     RWX            Delete           Bound    default/fsx-claim   fsx-sc                  2m13s\n
"},{"location":"addons/aws-gateway-api-controller/","title":"AWS Gateway API Controller","text":"

AWS Gateway API Controller lets you connect services across multiple Kubernetes clusters through the Kubernetes Gateway API interface. It is also designed to connect services running on EC2 instances, containers, and as serverless functions. It does this by leveraging Amazon VPC Lattice, which works with Kubernetes Gateway API calls to manage Kubernetes objects.

"},{"location":"addons/aws-gateway-api-controller/#usage","title":"Usage","text":"

AWS Gateway API Controller can be deployed by enabling the add-on via the following.

  enable_aws_gateway_api_controller = true\naws_gateway_api_controller = {\nrepository_username = data.aws_ecrpublic_authorization_token.token.user_name\nrepository_password = data.aws_ecrpublic_authorization_token.token.password\nset = [{\nname  = \"clusterVpcId\"\nvalue = \"vpc-12345abcd\"\n}]\n}\n

You can optionally customize the Helm chart that deploys AWS Gateway API Controller via the following configuration.

  enable_aws_gateway_api_controller = true\naws_gateway_api_controller = {\nname                = \"aws-gateway-api-controller\"\nchart_version       = \"v0.0.12\"\nrepository          = \"oci://public.ecr.aws/aws-application-networking-k8s\"\nrepository_username = data.aws_ecrpublic_authorization_token.token.user_name\nrepository_password = data.aws_ecrpublic_authorization_token.token.password\nnamespace           = \"aws-application-networking-system\"\nvalues              = [templatefile(\"${path.module}/values.yaml\", {})]\nset = [{\nname  = \"clusterVpcId\"\nvalue = \"vpc-12345abcd\"\n}]\n}\n

Verify aws-gateway-api-controller pods are running.

$ kubectl get pods -n aws-application-networking-system\nNAME                                                               READY   STATUS    RESTARTS   AGE\naws-gateway-api-controller-aws-gateway-controller-chart-8f42q426   1/1     Running   0          40s\naws-gateway-api-controller-aws-gateway-controller-chart-8f4tbl9g   1/1     Running   0          71s\n

Deploy example GatewayClass

$ kubectl apply -f https://raw.githubusercontent.com/aws/aws-application-networking-k8s/main/examples/gatewayclass.yaml\ngatewayclass.gateway.networking.k8s.io/amazon-vpc-lattice created\n

Describe GatewayClass

$ kubectl describe gatewayclass\nName:         amazon-vpc-lattice\nNamespace:\nLabels:       <none>\nAnnotations:  <none>\nAPI Version:  gateway.networking.k8s.io/v1beta1\nKind:         GatewayClass\nMetadata:\n  Creation Timestamp:  2023-06-22T22:33:32Z\n  Generation:          1\n  Resource Version:    819021\n  UID:                 aac59195-8f37-4c23-a2a5-b0f363deda77\nSpec:\n  Controller Name:  application-networking.k8s.aws/gateway-api-controller\nStatus:\n  Conditions:\n    Last Transition Time:  2023-06-22T22:33:32Z\n    Message:               Accepted\n    Observed Generation:   1\n    Reason:                Accepted\n    Status:                True\n    Type:                  Accepted\nEvents:                    <none>\n
"},{"location":"addons/aws-load-balancer-controller/","title":"AWS Load Balancer Controller.","text":"

AWS Load Balancer Controller is a controller to help manage Elastic Load Balancers for a Kubernetes cluster. This Add-on deploys this controller in an Amazon EKS Cluster.

"},{"location":"addons/aws-load-balancer-controller/#usage","title":"Usage","text":"

In order to deploy the AWS Load Balancer Controller Addon via EKS Blueprints Addons, reference the following parameters under the module.eks_blueprints_addons.

NOTE: In versions 2.5 and newer, the AWS Load Balancer Controller becomes the default controller for Kubernetes Service resources of type: LoadBalancer and provisions an AWS Network Load Balancer (NLB) for each such Service. It does this via a mutating webhook for Services, which sets the spec.loadBalancerClass field to service.k8s.aws/nlb for new Services of type: LoadBalancer. You can turn off this feature and revert to using the legacy Cloud Provider as the default controller by setting the Helm chart value enableServiceMutatorWebhook to false. The cluster won't provision new Classic Load Balancers for your Services unless you turn off this feature; existing Classic Load Balancers will continue to work.

module \"eks_blueprints_addons\" {\nenable_aws_load_balancer_controller = true\naws_load_balancer_controller = {\nset = [\n{\nname  = \"vpcId\"\nvalue = module.vpc.vpc_id\n},\n{\nname  = \"podDisruptionBudget.maxUnavailable\"\nvalue = 1\n},\n{\nname  = \"enableServiceMutatorWebhook\"\nvalue = \"false\"\n}\n]\n}\n
"},{"location":"addons/aws-load-balancer-controller/#helm-chart-customization","title":"Helm Chart customization","text":"

It's possible to customize your deployment using the Helm Chart parameters inside the aws_load_balancer_controller configuration block:

  aws_load_balancer_controller = {\nset = [\n{\nname  = \"vpcId\"\nvalue = module.vpc.vpc_id\n},\n{\nname  = \"podDisruptionBudget.maxUnavailable\"\nvalue = 1\n},\n{\nname  = \"resources.requests.cpu\"\nvalue = \"100m\"\n},\n{\nname  = \"resources.requests.memory\"\nvalue = \"128Mi\"\n},\n]\n}\n

You can find all available Helm Chart parameter values here.

"},{"location":"addons/aws-load-balancer-controller/#validate","title":"Validate","text":"
  1. To validate the deployment, check if the aws-load-balancer-controller Pods were created in the kube-system Namespace, as in the following example.
kubectl -n kube-system get pods | grep aws-load-balancer-controller\nNAMESPACE       NAME                                            READY   STATUS    RESTARTS   AGE\nkube-system     aws-load-balancer-controller-6cbdb58654-fvskt   1/1     Running   0          26m\nkube-system     aws-load-balancer-controller-6cbdb58654-sc7dk   1/1     Running   0          26m\n
  1. Create a Kubernetes Ingress, using the alb IngressClass, pointing to an existing Service. In this example we'll use a Service called example-svc.
kubectl create ingress example-ingress --class alb --rule=\"/*=example-svc:80\" \\\n--annotation alb.ingress.kubernetes.io/scheme=internet-facing \\\n--annotation alb.ingress.kubernetes.io/target-type=ip\n
kubectl get ingress\nNAME                CLASS   HOSTS   ADDRESS                                                                 PORTS   AGE\nexample-ingress     alb     *       k8s-example-ingress-7e0d6f03e7-1234567890.us-west-2.elb.amazonaws.com   80      4m9s\n
"},{"location":"addons/aws-load-balancer-controller/#resources","title":"Resources","text":"

  • GitHub Repo
  • Helm Chart
  • AWS Docs

"},{"location":"addons/aws-node-termination-handler/","title":"AWS Node Termination Handler","text":"

This project ensures that the Kubernetes control plane responds appropriately to events that can cause your EC2 instance to become unavailable, such as EC2 maintenance events, EC2 Spot interruptions, ASG Scale-In, ASG AZ Rebalance, and EC2 Instance Termination via the API or Console. If not handled, your application code may not stop gracefully, take longer to recover full availability, or accidentally schedule work to nodes that are going down.

"},{"location":"addons/aws-node-termination-handler/#usage","title":"Usage","text":"

AWS Node Termination Handler can be deployed by enabling the add-on via the following.

enable_aws_node_termination_handler = true\n

You can optionally customize the Helm chart that deploys AWS Node Termination Handler via the following configuration.

  enable_aws_node_termination_handler = true\naws_node_termination_handler = {\nname          = \"aws-node-termination-handler\"\nchart_version = \"0.21.0\"\nrepository    = \"https://aws.github.io/eks-charts\"\nnamespace     = \"aws-node-termination-handler\"\nvalues        = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n

Verify aws-node-termination-handler pods are running.

$ kubectl get pods -n aws-node-termination-handler\nNAME                                            READY   STATUS    RESTARTS      AGE\naws-node-termination-handler-6f598b6b89-6mqgk   1/1     Running   1 (22h ago)   26h\n

Verify SQS Queue is created.

$ aws sqs list-queues\n\n{\n\"QueueUrls\": [\n\"https://sqs.us-east-1.amazonaws.com/XXXXXXXXXXXXXX/aws_node_termination_handler20221123072051157700000004\"\n]\n}\n

Verify Event Rules are created.

$ aws events list-rules\n{\n    \"Rules\": [\n{\n\"Name\": \"NTH-ASGTerminiate-20230602191740664900000025\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-ASGTerminiate-20230602191740664900000025\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance-terminate Lifecycle Action\\\"],\\\"source\\\":[\\\"aws.autoscaling\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"Auto scaling instance terminate event\",\n            \"EventBusName\": \"default\"\n},\n        {\n\"Name\": \"NTH-HealthEvent-20230602191740079300000022\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-HealthEvent-20230602191740079300000022\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"AWS Health Event\\\"],\\\"source\\\":[\\\"aws.health\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"AWS health event\",\n            \"EventBusName\": \"default\"\n},\n        {\n\"Name\": \"NTH-InstanceRebalance-20230602191740077100000021\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-InstanceRebalance-20230602191740077100000021\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance Rebalance Recommendation\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"EC2 instance rebalance recommendation\",\n            \"EventBusName\": \"default\"\n},\n        {\n\"Name\": \"NTH-InstanceStateChange-20230602191740165000000024\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-InstanceStateChange-20230602191740165000000024\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance State-change Notification\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"EC2 instance state-change notification\",\n            \"EventBusName\": \"default\"\n},\n        {\n\"Name\": \"NTH-SpotInterrupt-20230602191740077100000020\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTH-SpotInterrupt-20230602191740077100000020\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Spot Instance Interruption Warning\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"Description\": \"EC2 spot instance interruption warning\",\n            \"EventBusName\": \"default\"\n},\n        {\n\"Name\": \"NTHASGTermRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHASGTermRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance-terminate Lifecycle Action\\\"],\\\"source\\\":[\\\"aws.autoscaling\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n},\n        {\n\"Name\": \"NTHInstanceStateChangeRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHInstanceStateChangeRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance State-change Notification\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n},\n        {\n\"Name\": \"NTHRebalanceRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHRebalanceRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Instance Rebalance Recommendation\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n},\n        {\n\"Name\": 
\"NTHScheduledChangeRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHScheduledChangeRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"AWS Health Event\\\"],\\\"source\\\":[\\\"aws.health\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n},\n        {\n\"Name\": \"NTHSpotTermRule\",\n            \"Arn\": \"arn:aws:events:us-west-2:XXXXXXXXXXXXXX:rule/NTHSpotTermRule\",\n            \"EventPattern\": \"{\\\"detail-type\\\":[\\\"EC2 Spot Instance Interruption Warning\\\"],\\\"source\\\":[\\\"aws.ec2\\\"]}\",\n            \"State\": \"ENABLED\",\n            \"EventBusName\": \"default\"\n}\n]\n}\n
"},{"location":"addons/aws-private-ca-issuer/","title":"AWS Private CA Issuer","text":"

AWS Private CA is an AWS service that can set up and manage private CAs, as well as issue private certificates. This add-on deploys the AWS Private CA Issuer as an external issuer to cert-manager, which signs certificate requests using AWS Private CA in an Amazon EKS Cluster.

"},{"location":"addons/aws-private-ca-issuer/#usage","title":"Usage","text":""},{"location":"addons/aws-private-ca-issuer/#pre-requisites","title":"Pre-requisites","text":"

To deploy the AWS PCA, you need to install cert-manager first; refer to this documentation to do it through EKS Blueprints Addons.

"},{"location":"addons/aws-private-ca-issuer/#deployment","title":"Deployment","text":"

With cert-manager deployed, you can deploy the AWS Private CA Issuer Add-on via EKS Blueprints Addons by referencing the following parameters under module.eks_blueprints_addons.

module \"eks_blueprints_addons\" {\nenable_cert_manager         = true\nenable_aws_privateca_issuer = true\naws_privateca_issuer = {\nacmca_arn        = aws_acmpca_certificate_authority.this.arn\n}\n}\n
"},{"location":"addons/aws-private-ca-issuer/#helm-chart-customization","title":"Helm Chart customization","text":"

It's possible to customize your deployment using the Helm Chart parameters inside the aws_privateca_issuer configuration block:

  aws_privateca_issuer = {\nacmca_arn        = aws_acmpca_certificate_authority.this.arn\nnamespace        = \"aws-privateca-issuer\"\ncreate_namespace = true\n}\n

You can find all available Helm Chart parameter values here.

"},{"location":"addons/aws-private-ca-issuer/#validation","title":"Validation","text":"
  1. List all the pods running in the aws-privateca-issuer and cert-manager Namespaces.
kubectl get pods -n aws-privateca-issuer\nkubectl get pods -n cert-manager\n
  1. Check the certificate status; it should be in a Ready state and point to a secret created in the same Namespace.
kubectl get certificate -o wide\nNAME      READY   SECRET                  ISSUER                    STATUS                                          AGE\nexample   True    example-clusterissuer   tls-with-aws-pca-issuer   Certificate is up to date and has not expired   41m\n\nkubectl get secret example-clusterissuer\nNAME                    TYPE                DATA   AGE\nexample-clusterissuer   kubernetes.io/tls   3      43m\n
"},{"location":"addons/aws-private-ca-issuer/#resources","title":"Resources","text":"

  • GitHub Repo
  • Helm Chart
  • AWS Docs

"},{"location":"addons/cert-manager/","title":"Cert-Manager","text":"

Cert-manager is an X.509 certificate controller for Kubernetes-like workloads. It obtains certificates from a variety of issuers, both popular public issuers and private issuers, ensures the certificates are valid and up to date, and attempts to renew certificates at a configured time before expiry. This Add-on deploys this controller in an Amazon EKS Cluster.

"},{"location":"addons/cert-manager/#usage","title":"Usage","text":"

To deploy cert-manager Add-on via EKS Blueprints Addons, reference the following parameters under the module.eks_blueprints_addons.

module \"eks_blueprints_addons\" {\nenable_cert_manager         = true\n}\n
"},{"location":"addons/cert-manager/#helm-chart-customization","title":"Helm Chart customization","text":"

It's possible to customize your deployment using the Helm Chart parameters inside the cert-manager configuration block:

  cert_manager = {\nchart_version    = \"v1.11.1\"\nnamespace        = \"cert-manager\"\ncreate_namespace = true\n}\n

You can find all available Helm Chart parameter values here.

"},{"location":"addons/cert-manager/#validation","title":"Validation","text":"
  1. Validate that the Cert-Manager Pods are running.
kubectl -n cert-manager get pods\nNAME                                      READY   STATUS    RESTARTS   AGE\ncert-manager-5989bcc87-96qvf              1/1     Running   0          2m49s\ncert-manager-cainjector-9b44ddb68-8c7b9   1/1     Running   0          2m49s\ncert-manager-webhook-776b65456-k6br4      1/1     Running   0          2m49s\n
  1. Create a SelfSigned ClusterIssuer resource in the cluster.
apiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n  name: selfsigned-cluster-issuer\nspec:\n  selfSigned: {}\n
kubectl get clusterissuers -o wide selfsigned-cluster-issuer\nNAME                        READY   STATUS   AGE\nselfsigned-cluster-issuer   True             3m\n
  1. Create a Certificate in a given Namespace.
apiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\n  name: example\n  namespace: default\nspec:\n  isCA: true\n  commonName: example\n  secretName: example-secret\n  privateKey:\n    algorithm: ECDSA\n    size: 256\n  issuerRef:\n    name: selfsigned-cluster-issuer\n    kind: ClusterIssuer\n    group: cert-manager.io\n
  1. Check the certificate status; it should be in a Ready state and point to a secret created in the same Namespace.
kubectl get certificate -o wide\nNAME      READY   SECRET           ISSUER                      STATUS                                          AGE\nexample   True    example-secret   selfsigned-cluster-issuer   Certificate is up to date and has not expired   44s\n\nkubectl get secret example-secret\nNAME             TYPE                DATA   AGE\nexample-secret   kubernetes.io/tls   3      70s\n
"},{"location":"addons/cert-manager/#resources","title":"Resources","text":"

  • GitHub Repo
  • Helm Chart

"},{"location":"addons/cluster-autoscaler/","title":"Cluster Autoscaler","text":"

The Kubernetes Cluster Autoscaler automatically adjusts the number of nodes in your cluster when pods fail or are rescheduled onto other nodes. The Cluster Autoscaler uses Auto Scaling groups. For more information, see Cluster Autoscaler on AWS.

"},{"location":"addons/cluster-autoscaler/#usage","title":"Usage","text":"

Cluster Autoscaler can be deployed by enabling the add-on via the following.

enable_cluster_autoscaler = true\n

You can optionally customize the Helm chart that deploys Cluster Autoscaler via the following configuration.

  enable_cluster_autoscaler = true\ncluster_autoscaler = {\nname          = \"cluster-autoscaler\"\nchart_version = \"9.29.0\"\nrepository    = \"https://kubernetes.github.io/autoscaler\"\nnamespace     = \"kube-system\"\nvalues        = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n

Verify cluster-autoscaler pods are running.

$ kubectl get pods -n kube-system\nNAME                                                         READY   STATUS    RESTARTS     AGE\ncluster-autoscaler-aws-cluster-autoscaler-7ff79bc484-pm8g9   1/1     Running   1 (2d ago)   2d5h\n
"},{"location":"addons/cluster-proportional-autoscaler/","title":"Cluster Proportional Autoscaler","text":"

Horizontal cluster-proportional-autoscaler watches over the number of schedulable nodes and cores of the cluster and resizes the number of replicas for the required resource. This functionality may be desirable for applications that need to be autoscaled with the size of the cluster, such as CoreDNS and other services that scale with the number of nodes/pods in the cluster.

The cluster-proportional-autoscaler helps to scale the applications using deployment or replicationcontroller or replicaset. This is an alternative solution to Horizontal Pod Autoscaling. It is typically installed as a Deployment in your cluster.

Refer to the eks-best-practices-guides for additional configuration guidance.

"},{"location":"addons/cluster-proportional-autoscaler/#usage","title":"Usage","text":"

This add-on requires both enable_cluster_proportional_autoscaler and cluster_proportional_autoscaler as mandatory fields.

The example below shows how to enable cluster-proportional-autoscaler for the CoreDNS Deployment. The CoreDNS Deployment is not configured with HPA, so this add-on helps scale the CoreDNS Add-on according to the number of nodes and cores in the cluster.

This Add-on can be used to scale any application with Deployment objects.

enable_cluster_proportional_autoscaler  = true\ncluster_proportional_autoscaler  = {\nvalues = [\n<<-EOT\n        nameOverride: kube-dns-autoscaler\n        # Formula for controlling the replicas. Adjust according to your needs\n        # replicas = max( ceil( cores * 1/coresPerReplica ) , ceil( nodes * 1/nodesPerReplica ) )\n        config:\n          linear:\n            coresPerReplica: 256\n            nodesPerReplica: 16\n            min: 1\n            max: 100\n            preventSinglePointFailure: true\n            includeUnschedulableNodes: true\n        # Target to scale. In format: deployment/*, replicationcontroller/* or replicaset/* (not case sensitive).\n        options:\n          target: deployment/coredns # Notice the target as `deployment/coredns`\n        serviceAccount:\n          create: true\n          name: kube-dns-autoscaler\n        podSecurityContext:\n          seccompProfile:\n            type: RuntimeDefault\n            supplementalGroups: [65534]\n            fsGroup: 65534\n        resources:\n          limits:\n            cpu: 100m\n            memory: 128Mi\n          requests:\n            cpu: 100m\n            memory: 128Mi\n        tolerations:\n          - key: \"CriticalAddonsOnly\"\n            operator: \"Exists\"\n            description: \"Cluster Proportional Autoscaler for CoreDNS Service\"\n      EOT\n]\n}\n
"},{"location":"addons/cluster-proportional-autoscaler/#expected-result","title":"Expected result","text":"

The cluster-proportional-autoscaler pod should be running in the kube-system namespace.

kubectl -n kube-system get po -l app.kubernetes.io/instance=cluster-proportional-autoscaler\nNAME                                                              READY   STATUS    RESTARTS   AGE\ncluster-proportional-autoscaler-kube-dns-autoscaler-d8dc8477xx7   1/1     Running   0          21h\n
The cluster-proportional-autoscaler-kube-dns-autoscaler config map should exist.
kubectl -n kube-system get cm cluster-proportional-autoscaler-kube-dns-autoscaler\nNAME                                                  DATA   AGE\ncluster-proportional-autoscaler-kube-dns-autoscaler   1      21h\n

"},{"location":"addons/cluster-proportional-autoscaler/#testing","title":"Testing","text":"

To test that coredns pods scale, first take a baseline of how many nodes the cluster has and how many coredns pods are running.

kubectl get nodes\nNAME                          STATUS   ROLES    AGE   VERSION\nip-10-0-19-243.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-25-182.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-40-138.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-8-136.ec2.internal    Ready    <none>   21h   v1.26.4-eks-0a21954\n\nkubectl get po -n kube-system -l k8s-app=kube-dns\nNAME                       READY   STATUS    RESTARTS   AGE\ncoredns-7975d6fb9b-dlkdd   1/1     Running   0          21h\ncoredns-7975d6fb9b-xqqwp   1/1     Running   0          21h\n

Change the following parameters in the HCL code above so a scaling event can be easily triggered:

        config:\nlinear:\ncoresPerReplica: 4\nnodesPerReplica: 2\nmin: 1\nmax: 4\n
and execute terraform apply.

Increase the managed node group desired size, in this example from 4 to 5. This can be done via the AWS Console.
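With these settings and the formula above, and assuming for illustration that each node has 2 vCPUs, 5 nodes yield replicas = max(ceil(10/4), ceil(5/2)) = max(3, 3) = 3, so coredns should scale from 2 to 3 replicas.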

Check that the new node came up and coredns scaled up.

NAME                          STATUS   ROLES    AGE   VERSION\nip-10-0-14-120.ec2.internal   Ready    <none>   10m   v1.26.4-eks-0a21954\nip-10-0-19-243.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-25-182.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-40-138.ec2.internal   Ready    <none>   21h   v1.26.4-eks-0a21954\nip-10-0-8-136.ec2.internal    Ready    <none>   21h   v1.26.4-eks-0a21954\n\nkubectl get po -n kube-system -l k8s-app=kube-dns\nNAME                       READY   STATUS    RESTARTS   AGE\ncoredns-7975d6fb9b-dlkdd   1/1     Running   0          21h\ncoredns-7975d6fb9b-ww64t   1/1     Running   0          10m\ncoredns-7975d6fb9b-xqqwp   1/1     Running   0          21h\n

"},{"location":"addons/external-dns/","title":"External DNS","text":"

ExternalDNS makes Kubernetes resources discoverable via public DNS servers. Like KubeDNS, it retrieves a list of resources (Services, Ingresses, etc.) from the Kubernetes API to determine a desired list of DNS records. Unlike KubeDNS, however, it's not a DNS server itself, but merely configures other DNS providers accordingly, e.g. AWS Route 53.

"},{"location":"addons/external-dns/#usage","title":"Usage","text":"

External DNS can be deployed by enabling the add-on via the following.

enable_external_dns = true\n

You can optionally customize the Helm chart that deploys External DNS via the following configuration.

  enable_external_dns = true\nexternal_dns = {\nname          = \"external-dns\"\nchart_version = \"1.12.2\"\nrepository    = \"https://kubernetes-sigs.github.io/external-dns/\"\nnamespace     = \"external-dns\"\nvalues        = [templatefile(\"${path.module}/values.yaml\", {})]\n}\nexternal_dns_route53_zone_arns = [\"XXXXXXXXXXXXXXXXXXXXXXX\"]\n
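As a sketch, you can also wire the zone ARNs from a Route 53 zone managed in the same configuration (the zone name below is a placeholder):

resource "aws_route53_zone" "this" {
  name = "example.com" # placeholder domain
}

module "eks_blueprints_addons" {
  source = "aws-ia/eks-blueprints-addons/aws"
  # ... truncated for brevity

  enable_external_dns            = true
  external_dns_route53_zone_arns = [aws_route53_zone.this.arn]
}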

Verify external-dns pods are running.

$ kubectl get pods -n external-dns\nNAME                            READY   STATUS    RESTARTS     AGE\nexternal-dns-849b89c675-ffnf6   1/1     Running   1 (2d ago)   2d5h\n

To further configure external-dns, refer to the examples:

  • AWS Load Balancer Controller
  • Route53
    • Same domain for public and private Route53 zones
  • Cloud Map
  • Kube Ingress AWS Controller
"},{"location":"addons/external-secrets/","title":"External Secrets","text":"

External Secrets Operator is a Kubernetes operator that integrates external secret management systems like AWS Secrets Manager, HashiCorp Vault, Google Secrets Manager, Azure Key Vault, IBM Cloud Secrets Manager, and many more. The operator reads information from external APIs and automatically injects the values into a Kubernetes Secret.

"},{"location":"addons/external-secrets/#usage","title":"Usage","text":"

External Secrets can be deployed by enabling the add-on via the following.

enable_external_secrets = true\n

You can optionally customize the Helm chart that deploys External Secrets via the following configuration.

  enable_external_secrets = true\nexternal_secrets = {\nname          = \"external-secrets\"\nchart_version = \"0.8.1\"\nrepository    = \"https://charts.external-secrets.io\"\nnamespace     = \"external-secrets\"\nvalues        = [templatefile(\"${path.module}/values.yaml\", {})]\n}\n

Verify external-secrets pods are running.

$ kubectl get pods -n external-secrets\nNAME                                               READY   STATUS    RESTARTS       AGE\nexternal-secrets-67bfd5b47c-xc5xf                  1/1     Running   1 (2d1h ago)   2d6h\nexternal-secrets-cert-controller-8f75c6f79-qcfx4   1/1     Running   1 (2d1h ago)   2d6h\nexternal-secrets-webhook-78f6bd456-76wmm           1/1     Running   1 (2d1h ago)   2d6h\n
"},{"location":"addons/fargate-fluentbit/","title":"Fargate FluentBit","text":"

Amazon EKS on Fargate offers a built-in log router based on Fluent Bit. This means that you don't explicitly run a Fluent Bit container as a sidecar; Amazon runs it for you. All you have to do is configure the log router. The configuration happens through a dedicated ConfigMap that is deployed via this Add-on.

"},{"location":"addons/fargate-fluentbit/#usage","title":"Usage","text":"

To configure the Fargate Fluentbit ConfigMap via the EKS Blueprints Addons, set the following parameters under the module.eks_blueprints_addons block.

module \"eks_blueprints_addons\" {\nenable_fargate_fluentbit = true\nfargate_fluentbit = {\nflb_log_cw = true\n}\n}\n

It's possible to customize the CloudWatch Log Group parameters in the fargate_fluentbit_cw_log_group configuration block:

fargate_fluentbit_cw_log_group = {
  name              = "existing-log-group"
  name_prefix       = "dev-environment-logs"
  retention_in_days = 7
  kms_key_id        = "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
  skip_destroy      = true
}
"},{"location":"addons/fargate-fluentbit/#validation","title":"Validation","text":"
  1. Check that the aws-logging ConfigMap for Fargate Fluentbit was created:
kubectl -n aws-observability get configmap aws-logging -o yaml

apiVersion: v1
data:
  filters.conf: |
    [FILTER]
      Name parser
      Match *
      Key_Name log
      Parser regex
      Preserve_Key True
      Reserve_Data True
  flb_log_cw: "true"
  output.conf: |
    [OUTPUT]
      Name cloudwatch_logs
      Match *
      region us-west-2
      log_group_name /fargate-serverless/fargate-fluentbit-logs20230509014113352200000006
      log_stream_prefix fargate-logs-
      auto_create_group true
  parsers.conf: |
    [PARSER]
      Name regex
      Format regex
      Regex ^(?<time>[^ ]+) (?<stream>[^ ]+) (?<logtag>[^ ]+) (?<message>.+)$
      Time_Key time
      Time_Format %Y-%m-%dT%H:%M:%S.%L%z
      Time_Keep On
      Decode_Field_As json message
immutable: false
kind: ConfigMap
metadata:
  creationTimestamp: "2023-05-08T21:14:52Z"
  name: aws-logging
  namespace: aws-observability
  resourceVersion: "1795"
  uid: d822bcf5-a441-4996-857e-7fb1357bc07e
  2. Validate that the CloudWatch log group was created and the log streams were populated:
aws logs describe-log-groups --log-group-name-prefix "/fargate-serverless/fargate-fluentbit"

{
    "logGroups": [
        {
            "logGroupName": "/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006",
            "creationTime": 1683580491652,
            "retentionInDays": 90,
            "metricFilterCount": 0,
            "arn": "arn:aws:logs:us-west-2:111122223333:log-group:/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006:*",
            "storedBytes": 0
        }
    ]
}
aws logs describe-log-streams --log-group-name "/fargate-serverless/fargate-fluentbit-logs20230509014113352200000006" --log-stream-name-prefix fargate-logs --query 'logStreams[].logStreamName'

[
    "fargate-logs-flblogs.var.log.fluent-bit.log",
    "fargate-logs-kube.var.log.containers.aws-load-balancer-controller-7f989fc6c-grjsq_kube-system_aws-load-balancer-controller-feaa22b4cdaa71ecfc8355feb81d4b61ea85598a7bb57aef07667c767c6b98e4.log",
    "fargate-logs-kube.var.log.containers.aws-load-balancer-controller-7f989fc6c-wzr46_kube-system_aws-load-balancer-controller-69075ea9ab3c7474eac2a1696d3a84a848a151420cd783d79aeef960b181567f.log",
    "fargate-logs-kube.var.log.containers.coredns-7b7bddbc85-8cxvq_kube-system_coredns-9e4f3ab435269a566bcbaa606c02c146ad58508e67cef09fa87d5c09e4ac0088.log",
    "fargate-logs-kube.var.log.containers.coredns-7b7bddbc85-gcjwp_kube-system_coredns-11016818361cd68c32bf8f0b1328f3d92a6d7b8cf5879bfe8b301f393cb011cc.log"
]
"},{"location":"addons/fargate-fluentbit/#resources","title":"Resources","text":"

  • AWS Docs
  • Fluent Bit for Amazon EKS on AWS Fargate Blog Post

"},{"location":"addons/ingress-nginx/","title":"Ingress Nginx","text":"

This add-on installs Ingress Nginx Controller on Amazon EKS. The Ingress Nginx controller uses Nginx as a reverse proxy and load balancer.

Other than handling Kubernetes ingress objects, this ingress controller can facilitate multi-tenancy and segregation of workload ingresses based on host name (host-based routing) and/or URL path (path-based routing).
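As an illustration of both routing modes, a single Ingress can fan traffic out by host and by path. A minimal sketch (the hosts and backend Services are illustrative placeholders):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: multi-tenant-routing
spec:
  ingressClassName: nginx
  rules:
    - host: team-a.example.com        # host-based routing
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: team-a-svc      # hypothetical backend Service
                port:
                  number: 80
    - host: team-b.example.com
      http:
        paths:
          - path: /api                # path-based routing
            pathType: Prefix
            backend:
              service:
                name: team-b-api      # hypothetical backend Service
                port:
                  number: 80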

"},{"location":"addons/ingress-nginx/#usage","title":"Usage","text":"

Ingress Nginx Controller can be deployed by enabling the add-on via the following.

enable_ingress_nginx = true

You can optionally customize the Helm chart that deploys ingress-nginx via the following configuration.

enable_ingress_nginx = true

ingress_nginx = {
  name          = "ingress-nginx"
  chart_version = "4.6.1"
  repository    = "https://kubernetes.github.io/ingress-nginx"
  namespace    = "ingress-nginx"
  values        = [templatefile("${path.module}/values.yaml", {})]
}

Verify ingress-nginx pods are running.

$ kubectl get pods -n ingress-nginx
NAME                                       READY   STATUS    RESTARTS   AGE
ingress-nginx-controller-f6c55fdc8-8bt2z   1/1     Running   0          44m
"},{"location":"addons/karpenter/","title":"Karpenter","text":""},{"location":"addons/karpenter/#prerequisites","title":"Prerequisites","text":"

If deploying a node template that uses Spot instances, please ensure you have the Spot service-linked role available in your account. You can run the following command to ensure this role is available:

aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true
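With the role in place, allowing spot capacity is then expressed as a requirement on a Karpenter Provisioner. A minimal sketch, assuming the v1alpha5 API that was current for this release (newer Karpenter versions use the NodePool API instead; the providerRef name is an illustrative placeholder):

apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
  name: default
spec:
  requirements:
    # allow Karpenter to launch spot capacity, with on-demand as fallback
    - key: karpenter.sh/capacity-type
      operator: In
      values: ["spot", "on-demand"]
  limits:
    resources:
      cpu: 1000                  # cap total provisioned CPU
  providerRef:
    name: default                # hypothetical AWSNodeTemplate
  ttlSecondsAfterEmpty: 30       # scale empty nodes down quickly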
"},{"location":"addons/karpenter/#validate","title":"Validate","text":"

The following command will update the kubeconfig on your local machine and allow you to interact with your EKS cluster using kubectl to validate the Karpenter deployment.

  1. Run update-kubeconfig command:
aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME>
  2. Test by listing the pods currently running:
kubectl get pods -n karpenter

# Output should look similar to below
NAME                         READY   STATUS    RESTARTS   AGE
karpenter-6f97df4f77-5nqsk   1/1     Running   0          3m28s
karpenter-6f97df4f77-n7fkf   1/1     Running   0          3m28s
  3. View the current nodes; this example uses EKS Fargate to host the Karpenter controller, so only Fargate nodes are present initially:
kubectl get nodes

# Output should look similar to below
NAME                                                STATUS   ROLES    AGE     VERSION
fargate-ip-10-0-29-25.us-west-2.compute.internal    Ready    <none>   2m56s   v1.26.3-eks-f4dc2c0
fargate-ip-10-0-36-148.us-west-2.compute.internal   Ready    <none>   2m57s   v1.26.3-eks-f4dc2c0
fargate-ip-10-0-42-30.us-west-2.compute.internal    Ready    <none>   2m34s   v1.26.3-eks-f4dc2c0
fargate-ip-10-0-45-112.us-west-2.compute.internal   Ready    <none>   2m33s   v1.26.3-eks-f4dc2c0
  4. Create a sample pause deployment to demonstrate scaling:
kubectl apply -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: inflate
spec:
  replicas: 0
  selector:
    matchLabels:
      app: inflate
  template:
    metadata:
      labels:
        app: inflate
    spec:
      terminationGracePeriodSeconds: 0
      containers:
        - name: inflate
          image: public.ecr.aws/eks-distro/kubernetes/pause:3.7
          resources:
            requests:
              cpu: 1
EOF
  5. Scale up the sample pause deployment to see Karpenter respond by provisioning nodes to support the workload:
kubectl scale deployment inflate --replicas 5

# To view logs
# kubectl logs -f -n karpenter -l app.kubernetes.io/name=karpenter -c controller
  6. Re-check the nodes; you will now see a new EC2 node provisioned to support the scaled workload:
kubectl get nodes

# Output should look similar to below
NAME                                                STATUS   ROLES    AGE     VERSION
fargate-ip-10-0-29-25.us-west-2.compute.internal    Ready    <none>   5m15s   v1.26.3-eks-f4dc2c0
fargate-ip-10-0-36-148.us-west-2.compute.internal   Ready    <none>   5m16s   v1.26.3-eks-f4dc2c0
fargate-ip-10-0-42-30.us-west-2.compute.internal    Ready    <none>   4m53s   v1.26.3-eks-f4dc2c0
fargate-ip-10-0-45-112.us-west-2.compute.internal   Ready    <none>   4m52s   v1.26.3-eks-f4dc2c0
ip-10-0-1-184.us-west-2.compute.internal            Ready    <none>   26s     v1.26.2-eks-a59e1f0   # <= new EC2 node launched
  7. Remove the sample pause deployment:
kubectl delete deployment inflate
"},{"location":"addons/kube-prometheus-stack/","title":"Kube Prometheus Stack","text":"

Kube Prometheus Stack is a collection of Kubernetes manifests, Grafana dashboards, and Prometheus rules combined with documentation and scripts to provide easy-to-operate, end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus Operator.
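Once the stack is deployed, the Prometheus Operator discovers scrape targets declaratively; for example, a ServiceMonitor tells Prometheus which Services to scrape. A minimal sketch (the app label and port name are illustrative placeholders, and the release label assumes the chart's default serviceMonitorSelector):

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: my-app
  labels:
    release: kube-prometheus-stack   # must match the Prometheus selector
spec:
  selector:
    matchLabels:
      app: my-app                    # Services carrying this label are scraped
  endpoints:
    - port: metrics                  # named Service port exposing /metrics
      interval: 30s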

"},{"location":"addons/kube-prometheus-stack/#usage","title":"Usage","text":"

Kube Prometheus Stack can be deployed by enabling the add-on via the following.

enable_kube_prometheus_stack = true

You can optionally customize the Helm chart that deploys Kube Prometheus Stack via the following configuration.

enable_kube_prometheus_stack = true

kube_prometheus_stack = {
  name          = "kube-prometheus-stack"
  chart_version = "51.2.0"
  repository    = "https://prometheus-community.github.io/helm-charts"
  namespace     = "kube-prometheus-stack"
  values        = [templatefile("${path.module}/values.yaml", {})]
}

Verify kube-prometheus-stack pods are running.

$ kubectl get pods -n kube-prometheus-stack
NAME                                                        READY   STATUS    RESTARTS       AGE
alertmanager-kube-prometheus-stack-alertmanager-0           2/2     Running   3 (2d2h ago)   2d7h
kube-prometheus-stack-grafana-5c6cf88fd9-8wc9k              3/3     Running   3 (2d2h ago)   2d7h
kube-prometheus-stack-kube-state-metrics-584d8b5d5f-s6p8d   1/1     Running   1 (2d2h ago)   2d7h
kube-prometheus-stack-operator-c74ddccb5-8cprr              1/1     Running   1 (2d2h ago)   2d7h
kube-prometheus-stack-prometheus-node-exporter-vd8lw        1/1     Running   1 (2d2h ago)   2d7h
prometheus-kube-prometheus-stack-prometheus-0               2/2     Running   2 (2d2h ago)   2d7h
"},{"location":"addons/metrics-server/","title":"Metrics Server","text":"

Metrics Server is a scalable, efficient source of container resource metrics for Kubernetes built-in autoscaling pipelines.

Metrics Server collects resource metrics from kubelets and exposes them in the Kubernetes API server through the Metrics API for use by the Horizontal Pod Autoscaler and Vertical Pod Autoscaler. The Metrics API can also be accessed via kubectl top, making it easier to debug autoscaling pipelines.
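For instance, a HorizontalPodAutoscaler consuming the Metrics API might look like the following sketch (the target Deployment name and thresholds are illustrative placeholders):

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: my-app
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: my-app                 # hypothetical Deployment to scale
  minReplicas: 2
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu                # CPU utilization reported by Metrics Server
        target:
          type: Utilization
          averageUtilization: 70 # scale out above 70% of requested CPU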

"},{"location":"addons/metrics-server/#usage","title":"Usage","text":"

Metrics Server can be deployed by enabling the add-on via the following.

enable_metrics_server = true

You can optionally customize the Helm chart that deploys Metrics Server via the following configuration.

enable_metrics_server = true

metrics_server = {
  name          = "metrics-server"
  chart_version = "3.10.0"
  repository    = "https://kubernetes-sigs.github.io/metrics-server/"
  namespace     = "kube-system"
  values        = [templatefile("${path.module}/values.yaml", {})]
}

Verify metrics-server pods are running.

$ kubectl get pods -n kube-system
NAME                              READY   STATUS    RESTARTS       AGE
metrics-server-6f9cdd486c-njh8b   1/1     Running   1 (2d2h ago)   2d7h
"},{"location":"addons/opa-gatekeeper/","title":"OPA Gatekeeper","text":"

Gatekeeper is an admission controller that validates requests to create and update Pods on Kubernetes clusters, using the Open Policy Agent (OPA). Using Gatekeeper allows administrators to define policies with a constraint, which is a set of conditions that permit or deny deployment behaviors in Kubernetes.

For complete project documentation, please visit the Gatekeeper documentation. For reference templates, refer to Templates.
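As an illustration, once a ConstraintTemplate such as K8sRequiredLabels from the Gatekeeper walkthrough is installed, a constraint instantiates it with concrete parameters. A minimal sketch (the constraint name and required label are illustrative placeholders):

apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequiredLabels
metadata:
  name: ns-must-have-owner
spec:
  match:
    kinds:
      - apiGroups: [""]
        kinds: ["Namespace"]     # enforce on Namespace objects
  parameters:
    labels: ["owner"]            # deny Namespaces missing an owner label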

"},{"location":"addons/opa-gatekeeper/#usage","title":"Usage","text":"

Gatekeeper can be deployed by enabling the add-on via the following.

enable_gatekeeper = true

You can also customize the Helm chart that deploys gatekeeper via the following configuration:

enable_gatekeeper = true

gatekeeper = {
  name          = "gatekeeper"
  chart_version = "3.12.0"
  repository    = "https://open-policy-agent.github.io/gatekeeper/charts"
  namespace     = "gatekeeper-system"
  values        = [templatefile("${path.module}/values.yaml", {})]
}
"},{"location":"addons/secrets-store-csi-driver-provider-aws/","title":"AWS Secrets Manager and Config Provider for Secret Store CSI Driver","text":"

AWS offers two services to manage secrets and parameters conveniently in your code. AWS Secrets Manager allows you to easily rotate, manage, and retrieve database credentials, API keys, certificates, and other secrets throughout their lifecycle. AWS Systems Manager Parameter Store provides hierarchical storage for configuration data. The AWS provider for the Secrets Store CSI Driver allows you to make secrets stored in Secrets Manager and parameters stored in Parameter Store appear as files mounted in Kubernetes pods.
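For example, a SecretProviderClass selects which Secrets Manager secrets or Parameter Store parameters to expose, and pods then mount it through a CSI volume. A minimal sketch (the class name and object names are illustrative placeholders):

apiVersion: secrets-store.csi.x-k8s.io/v1
kind: SecretProviderClass
metadata:
  name: app-secrets
spec:
  provider: aws
  parameters:
    objects: |
      - objectName: "my-app/db-credentials"   # hypothetical Secrets Manager secret
        objectType: "secretsmanager"
      - objectName: "/my-app/config"          # hypothetical Parameter Store parameter
        objectType: "ssmparameter"

A pod would then reference this class via a csi volume using driver secrets-store.csi.k8s.io with the secretProviderClass volume attribute set to app-secrets.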

"},{"location":"addons/secrets-store-csi-driver-provider-aws/#usage","title":"Usage","text":"

AWS Secrets Store CSI Driver can be deployed by enabling the add-on via the following.

enable_secrets_store_csi_driver              = true
enable_secrets_store_csi_driver_provider_aws = true

You can optionally customize the Helm chart via the following configuration.

enable_secrets_store_csi_driver              = true
enable_secrets_store_csi_driver_provider_aws = true

secrets_store_csi_driver_provider_aws = {
  name          = "secrets-store-csi-driver"
  chart_version = "0.3.2"
  repository    = "https://aws.github.io/secrets-store-csi-driver-provider-aws"
  namespace     = "kube-system"
  values        = [templatefile("${path.module}/values.yaml", {})]
}

Verify secrets-store-csi-driver pods are running.

$ kubectl get pods -n kube-system
NAME                                          READY   STATUS    RESTARTS       AGE
secrets-store-csi-driver-9l2z8                3/3     Running   1 (2d5h ago)   2d9h
secrets-store-csi-driver-provider-aws-2qqkk   1/1     Running   1 (2d5h ago)   2d9h
"},{"location":"addons/velero/","title":"Velero","text":"

Velero is an open source tool to safely backup and restore, perform disaster recovery, and migrate Kubernetes cluster resources and persistent volumes.

  • Helm chart
  • Plugin for AWS
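Beyond the one-off backups shown in the validation steps below, recurring backups are typically declared with a Schedule resource. A minimal sketch (the schedule name and cron expression are illustrative placeholders):

apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: daily-backup
  namespace: velero
spec:
  schedule: "0 3 * * *"          # run every day at 03:00
  template:
    includedNamespaces:
      - "*"                      # back up all namespaces
    ttl: 720h0m0s                # retain backups for 30 days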
"},{"location":"addons/velero/#usage","title":"Usage","text":"

Velero can be deployed by enabling the add-on via the following.

enable_velero           = true
velero_backup_s3_bucket = "<YOUR_BUCKET_NAME>"

velero = {
  s3_backup_location = "<YOUR_S3_BUCKET_ARN>[/prefix]"
}

You can also customize the Helm chart that deploys velero via the following configuration:

enable_velero = true

velero = {
  name          = "velero"
  description   = "A Helm chart for velero"
  chart_version = "3.1.6"
  repository    = "https://vmware-tanzu.github.io/helm-charts/"
  namespace     = "velero"
  values        = [templatefile("${path.module}/values.yaml", {})]
}

To see a working example, see the stateful example blueprint.

"},{"location":"addons/velero/#validate","title":"Validate","text":"
  1. Run update-kubeconfig command:
aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME>
  2. Test by listing the Velero resources provisioned:
kubectl get all -n velero

# Output should look similar to below
NAME                         READY   STATUS    RESTARTS   AGE
pod/velero-7b8994d56-z89sl   1/1     Running   0          25h

NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
service/velero   ClusterIP   172.20.20.118   <none>        8085/TCP   25h

NAME                     READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/velero   1/1     1            1           25h

NAME                               DESIRED   CURRENT   READY   AGE
replicaset.apps/velero-7b8994d56   1         1         1       25h
  3. Get the backup location using the Velero CLI:
velero backup-location get

# Output should look similar to below
NAME      PROVIDER   BUCKET/PREFIX                                 PHASE       LAST VALIDATED                  ACCESS MODE   DEFAULT
default   aws        stateful-20230503175301619800000005/backups   Available   2023-05-04 15:15:00 -0400 EDT   ReadWrite     true
  4. To demonstrate creating a backup and restoring it, create a new namespace and run nginx in it using the commands below:
kubectl create namespace backupdemo
kubectl run nginx --image=nginx -n backupdemo
  5. Create a backup of this namespace using Velero:
velero backup create backup1 --include-namespaces backupdemo

# Output should look similar to below
Backup request "backup1" submitted successfully.
Run `velero backup describe backup1` or `velero backup logs backup1` for more details.
  6. Describe the backup to check its status:
velero backup describe backup1

# Output should look similar to below
Name:         backup1
Namespace:    velero
Labels:       velero.io/storage-location=default
Annotations:  velero.io/source-cluster-k8s-gitversion=v1.26.2-eks-a59e1f0
              velero.io/source-cluster-k8s-major-version=1
              velero.io/source-cluster-k8s-minor-version=26+

Phase:  Completed

Namespaces:
  Included:  backupdemo
  Excluded:  <none>

Resources:
  Included:        *
  Excluded:        <none>
  Cluster-scoped:  auto

Label selector:  <none>

Storage Location:  default

Velero-Native Snapshot PVs:  auto

TTL:  720h0m0s

CSISnapshotTimeout:    10m0s
ItemOperationTimeout:  0s

Hooks:  <none>

Backup Format Version:  1.1.0

Started:    2023-05-04 15:16:31 -0400 EDT
Completed:  2023-05-04 15:16:33 -0400 EDT

Expiration:  2023-06-03 15:16:31 -0400 EDT

Total items to be backed up:  9
Items backed up:              9

Velero-Native Snapshots: <none included>
  7. Delete the namespace; it will be restored from the backup created above:
kubectl delete namespace backupdemo
  8. Restore the namespace from your backup:
velero restore create --from-backup backup1
  9. Verify that the namespace was restored:
kubectl get all -n backupdemo

# Output should look similar to below
NAME        READY   STATUS    RESTARTS   AGE
pod/nginx   1/1     Running   0          21s
"},{"location":"addons/vertical-pod-autoscaler/","title":"Vertical Pod Autoscaler","text":"

Vertical Pod Autoscaler (VPA) automatically adjusts the CPU and memory reservations for your pods to help "right-size" your applications. When configured, it will automatically request the necessary reservations based on usage, thus allowing proper scheduling onto nodes so that the appropriate resource amount is available for each pod. It will also maintain ratios between limits and requests that were specified in the initial container configuration.

NOTE: The Metrics Server add-on is a dependency for this add-on.
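For reference, a VerticalPodAutoscaler object targets an existing workload and lets the recommender set its resource requests. A minimal sketch (the target Deployment name is an illustrative placeholder):

apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: my-app
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: my-app                 # hypothetical Deployment to right-size
  updatePolicy:
    updateMode: "Auto"           # apply recommendations by evicting pods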

"},{"location":"addons/vertical-pod-autoscaler/#usage","title":"Usage","text":"

This step deploys the Vertical Pod Autoscaler with the default Helm chart configuration:

enable_vpa            = true
enable_metrics_server = true

You can also customize the Helm chart that deploys vpa via the following configuration:

enable_vpa            = true
enable_metrics_server = true

vpa = {
  name          = "vpa"
  chart_version = "1.7.5"
  repository    = "https://charts.fairwinds.com/stable"
  namespace     = "vpa"
  values        = [templatefile("${path.module}/values.yaml", {})]
}
"}]} \ No newline at end of file diff --git a/v1.9.2/sitemap.xml b/v1.9.2/sitemap.xml new file mode 100644 index 00000000..e9821a41 --- /dev/null +++ b/v1.9.2/sitemap.xml @@ -0,0 +1,158 @@ + + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/amazon-eks-addons/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/architectures/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/aws-partner-addons/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/helm-release/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/argo-events/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/argo-rollouts/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/argo-workflows/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/argocd/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/aws-cloudwatch-metrics/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/aws-efs-csi-driver/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/aws-for-fluentbit/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/aws-fsx-csi-driver/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/aws-gateway-api-controller/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/aws-load-balancer-controller/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/aws-node-termination-handler/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/aws-private-ca-issuer/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/cert-manager/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/cluster-autoscaler/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/cluster-proportional-autoscaler/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/external-dns/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/external-secrets/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/fargate-fluentbit/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/ingress-nginx/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/karpenter/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/kube-prometheus-stack/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/metrics-server/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/opa-gatekeeper/ + 2023-10-04 + daily + + + 
https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/secrets-store-csi-driver-provider-aws/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/velero/ + 2023-10-04 + daily + + + https://aws-ia.github.io/terraform-aws-eks-blueprints-addons/v1.9.2/addons/vertical-pod-autoscaler/ + 2023-10-04 + daily + + \ No newline at end of file diff --git a/v1.9.2/sitemap.xml.gz b/v1.9.2/sitemap.xml.gz new file mode 100644 index 00000000..ff20d189 Binary files /dev/null and b/v1.9.2/sitemap.xml.gz differ diff --git a/versions.json b/versions.json index 9c226b51..45b503c7 100644 --- a/versions.json +++ b/versions.json @@ -1 +1 @@ -[{"version": "v1.9.1", "title": "v1.9.1", "aliases": ["latest"]}, {"version": "v1.9.0", "title": "v1.9.0", "aliases": []}, {"version": "v1.8.1", "title": "v1.8.1", "aliases": []}, {"version": "v1.8.0", "title": "v1.8.0", "aliases": []}, {"version": "v1.7.2", "title": "v1.7.2", "aliases": []}, {"version": "v1.7.1", "title": "v1.7.1", "aliases": []}, {"version": "v1.7.0", "title": "v1.7.0", "aliases": []}, {"version": "v1.6.0", "title": "v1.6.0", "aliases": []}, {"version": "v1.5.1", "title": "v1.5.1", "aliases": []}, {"version": "v1.5.0", "title": "v1.5.0", "aliases": []}, {"version": "v1.4.0", "title": "v1.4.0", "aliases": []}, {"version": "v1.3.0", "title": "v1.3.0", "aliases": []}, {"version": "v1.2.2", "title": "v1.2.2", "aliases": []}, {"version": "v1.2.1", "title": "v1.2.1", "aliases": []}, {"version": "v1.2.0", "title": "v1.2.0", "aliases": []}, {"version": "v1.1.1", "title": "v1.1.1", "aliases": []}, {"version": "v1.1.0", "title": "v1.1.0", "aliases": []}, {"version": "v1.0.0", "title": "v1.0.0", "aliases": []}, {"version": "v0.2.0", "title": "v0.2.0", "aliases": []}, {"version": "v0.1.0", "title": "v0.1.0", "aliases": []}, {"version": "main", "title": "main", "aliases": []}] \ No newline at end of file +[{"version": "v1.9.2", "title": "v1.9.2", "aliases": ["latest"]}, {"version": "v1.9.1", "title": "v1.9.1", "aliases": []}, {"version": "v1.9.0", "title": "v1.9.0", "aliases": []}, {"version": "v1.8.1", "title": "v1.8.1", "aliases": []}, {"version": "v1.8.0", "title": "v1.8.0", "aliases": []}, {"version": "v1.7.2", "title": "v1.7.2", "aliases": []}, {"version": "v1.7.1", "title": "v1.7.1", "aliases": []}, {"version": "v1.7.0", "title": "v1.7.0", "aliases": []}, {"version": "v1.6.0", "title": "v1.6.0", "aliases": []}, {"version": "v1.5.1", "title": "v1.5.1", "aliases": []}, {"version": "v1.5.0", "title": "v1.5.0", "aliases": []}, {"version": "v1.4.0", "title": "v1.4.0", "aliases": []}, {"version": "v1.3.0", "title": "v1.3.0", "aliases": []}, {"version": "v1.2.2", "title": "v1.2.2", "aliases": []}, {"version": "v1.2.1", "title": "v1.2.1", "aliases": []}, {"version": "v1.2.0", "title": "v1.2.0", "aliases": []}, {"version": "v1.1.1", "title": "v1.1.1", "aliases": []}, {"version": "v1.1.0", "title": "v1.1.0", "aliases": []}, {"version": "v1.0.0", "title": "v1.0.0", "aliases": []}, {"version": "v0.2.0", "title": "v0.2.0", "aliases": []}, {"version": "v0.1.0", "title": "v0.1.0", "aliases": []}, {"version": "main", "title": "main", "aliases": []}] \ No newline at end of file