diff --git a/manifests/base-application/carts/deployment.yaml b/manifests/base-application/carts/deployment.yaml index 7683305f2..79560c8cf 100644 --- a/manifests/base-application/carts/deployment.yaml +++ b/manifests/base-application/carts/deployment.yaml @@ -44,7 +44,7 @@ spec: readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000 - image: "public.ecr.aws/aws-containers/retail-store-sample-cart:0.4.0" + image: "public.ecr.aws/aws-containers/retail-store-sample-cart:0.7.0" imagePullPolicy: IfNotPresent ports: - name: http diff --git a/manifests/modules/security/eks-pod-identity/.workshop/cleanup.sh b/manifests/modules/security/eks-pod-identity/.workshop/cleanup.sh new file mode 100644 index 000000000..7afc04177 --- /dev/null +++ b/manifests/modules/security/eks-pod-identity/.workshop/cleanup.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e + +POD_ASSOCIATION_ID=$(aws eks list-pod-identity-associations --region $AWS_REGION --cluster-name $EKS_CLUSTER_NAME --service-account carts --namespace carts --output text --query 'associations[0].associationId') + +if [ ! -z "$POD_ASSOCIATION_ID" ]; then + logmessage "Deleting EKS Pod Identity Association..." + + aws eks delete-pod-identity-association --region $AWS_REGION --association-id $POD_ASSOCIATION_ID --cluster-name $EKS_CLUSTER_NAME + +fi + +check=$(aws eks list-addons --cluster-name $EKS_CLUSTER_NAME --region $AWS_REGION --query "addons[? @ == 'eks-pod-identity-agent']" --output text) + +if [ ! -z "$check" ]; then + logmessage "Deleting EKS Pod Identity Agent addon..." + + aws eks delete-addon --cluster-name $EKS_CLUSTER_NAME --addon-name eks-pod-identity-agent --region $AWS_REGION + + aws eks wait addon-deleted --cluster-name $EKS_CLUSTER_NAME --addon-name eks-pod-identity-agent --region $AWS_REGION +fi diff --git a/manifests/modules/security/eks-pod-identity/.workshop/terraform/addon.tf b/manifests/modules/security/eks-pod-identity/.workshop/terraform/addon.tf new file mode 100644 index 000000000..88d87caf0 --- /dev/null +++ b/manifests/modules/security/eks-pod-identity/.workshop/terraform/addon.tf @@ -0,0 +1,223 @@ +module "eks_blueprints_addons" { + source = "aws-ia/eks-blueprints-addons/aws" + version = "~> 1.0" + + enable_aws_load_balancer_controller = true + aws_load_balancer_controller = { + wait = true + } + + cluster_name = local.addon_context.eks_cluster_id + cluster_endpoint = local.addon_context.aws_eks_cluster_endpoint + cluster_version = local.eks_cluster_version + oidc_provider_arn = local.addon_context.eks_oidc_provider_arn +} + +resource "time_sleep" "wait" { + depends_on = [module.eks_blueprints_addons] + + create_duration = "10s" +} + +resource "kubernetes_manifest" "ui_nlb" { + manifest = { + "apiVersion" = "v1" + "kind" = "Service" + "metadata" = { + "name" = "ui-nlb" + "namespace" = "ui" + "annotations" = { + "service.beta.kubernetes.io/aws-load-balancer-type" = "external " + "service.beta.kubernetes.io/aws-load-balancer-scheme" = "internet-facing" + "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type" = "instance" + } + } + "spec" = { + "type" = "LoadBalancer" + "ports" = [{ + "port" = 80 + "targetPort" = 8080 + "name" = "http" + }] + "selector" = { + "app.kubernetes.io/name" = "ui" + "app.kubernetes.io/instance" = "ui" + "app.kubernetes.io/component" = "service" + } + } + } +} + +resource "aws_dynamodb_table" "carts" { + #checkov:skip=CKV2_AWS_28:Point in time backup not required for workshop + name = "${local.addon_context.eks_cluster_id}-carts" + hash_key = "id" + billing_mode = "PAY_PER_REQUEST" + 
stream_enabled = true + stream_view_type = "NEW_AND_OLD_IMAGES" + + server_side_encryption { + enabled = true + kms_key_arn = aws_kms_key.cmk_dynamodb.arn + } + + attribute { + name = "id" + type = "S" + } + + attribute { + name = "customerId" + type = "S" + } + + global_secondary_index { + name = "idx_global_customerId" + hash_key = "customerId" + projection_type = "ALL" + } + + tags = local.tags +} + +module "iam_assumable_role_carts" { + source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role" + version = "~> v5.5.5" + create_role = true + role_requires_mfa = false + role_name = "${local.addon_context.eks_cluster_id}-carts-dynamo" + trusted_role_services = ["pods.eks.amazonaws.com"] + custom_role_policy_arns = [aws_iam_policy.carts_dynamo.arn] + trusted_role_actions = ["sts:AssumeRole", "sts:TagSession"] + + tags = local.tags +} + +resource "aws_iam_policy" "carts_dynamo" { + name = "${local.addon_context.eks_cluster_id}-carts-dynamo" + path = "/" + description = "Dynamo policy for carts application" + + policy = <&2 echo "Load balancer did not become available or return HTTP 500 for 180 seconds" + exit 1 + fi +} + +"$@" diff --git a/website/docs/security/amazon-eks-pod-identity/tests/hook-enable-pod-identity.sh b/website/docs/security/amazon-eks-pod-identity/tests/hook-enable-pod-identity.sh new file mode 100644 index 000000000..66edc6939 --- /dev/null +++ b/website/docs/security/amazon-eks-pod-identity/tests/hook-enable-pod-identity.sh @@ -0,0 +1,27 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + sleep 10 + + kubectl wait --for=condition=available --timeout=120s deployment/carts -n carts + + export endpoint=$(kubectl -n kube-system get svc -n ui ui-nlb -o json | jq -r '.status.loadBalancer.ingress[0].hostname') + + EXIT_CODE=0 + + timeout -s TERM 180 bash -c \ + 'while [[ "$(curl -s -o /dev/null -L -w ''%{http_code}'' ${endpoint}/home)" != "200" ]];\ + do echo "Waiting for ${endpoint}" && sleep 30;\ + done' || EXIT_CODE=$? + + if [ $EXIT_CODE -ne 0 ]; then + >&2 echo "Load balancer did not become available or return HTTP 200 for 180 seconds" + exit 1 + fi +} + +"$@" diff --git a/website/docs/security/amazon-eks-pod-identity/tests/hook-suite.sh b/website/docs/security/amazon-eks-pod-identity/tests/hook-suite.sh new file mode 100644 index 000000000..8b5a4baea --- /dev/null +++ b/website/docs/security/amazon-eks-pod-identity/tests/hook-suite.sh @@ -0,0 +1,11 @@ +set -e + +before() { + echo "noop" +} + +after() { + prepare-environment +} + +"$@" diff --git a/website/docs/security/amazon-eks-pod-identity/understanding.md b/website/docs/security/amazon-eks-pod-identity/understanding.md new file mode 100644 index 000000000..95f0d404a --- /dev/null +++ b/website/docs/security/amazon-eks-pod-identity/understanding.md @@ -0,0 +1,23 @@ +--- +title: "Understanding Pod IAM" +sidebar_position: 33 +--- + +The first place to look for the issue is the logs of the `carts` service: + +```bash +$ kubectl -n carts logs deployment/carts +... 
omitted output
```

This will return a lot of logs, so let's filter it to get a succinct view of the problem:

```bash
$ kubectl -n carts logs deployment/carts | grep -i Exception
2024-02-12T20:20:47.547Z ERROR 1 --- [nio-8080-exec-7] o.a.c.c.C.[.[.[.[dispatcherServlet] : Servlet.service() for servlet [dispatcherServlet] in context with path [] threw exception [Request processing failed: com.amazonaws.services.dynamodbv2.model.AmazonDynamoDBException: User: arn:aws:sts::123456789000:assumed-role/eksctl-eks-workshop-nodegroup-defa-NodeInstanceRole-Q1p0w2o9e3i8/i-0p1qaz2wsx3edc4rfv is not authorized to perform: dynamodb:Query on resource: arn:aws:dynamodb:us-west-2:123456789000:table/Items/index/idx_global_customerId because no identity-based policy allows the dynamodb:Query action (Service: AmazonDynamoDBv2; Status Code: 400; Error Code: AccessDeniedException; Request ID: MA54K0UDUOCLJ96UP6PT76VTBBVV4KQNSO5AEMVJF66Q9ASUAAJG; Proxy: null)] with root cause
com.amazonaws.services.dynamodbv2.model.AmazonDynamoDBException: User: arn:aws:sts::123456789000:assumed-role/eksctl-eks-workshop-nodegroup-defa-NodeInstanceRole-Q1p0w2o9e3i8/i-0p1qaz2wsx3edc4rfv is not authorized to perform: dynamodb:Query on resource: arn:aws:dynamodb:us-west-2:123456789000:table/Items/index/idx_global_customerId because no identity-based policy allows the dynamodb:Query action (Service: AmazonDynamoDBv2; Status Code: 400; Error Code: AccessDeniedException; Request ID: MA54K0UDUOCLJ96UP6PT76VTBBVV4KQNSO5AEMVJF66Q9ASUAAJG; Proxy: null)
```

The application is generating an `AccessDeniedException`, which indicates that the IAM Role our Pod uses to access DynamoDB does not have the required permissions. This happens because, by default, when no IAM Role or Policy is linked to a Pod, it uses the IAM Role attached to the Instance Profile of the EC2 instance it runs on, and in this case that Role does not have an IAM Policy that allows access to DynamoDB.

One way we could solve this is to expand the IAM permissions of the EC2 Instance Profile, but that would allow every Pod running on those instances to access our DynamoDB table, which is neither secure nor in line with the principle of least privilege. Instead, we'll use EKS Pod Identity to grant only the access required by the `carts` application, scoped to the Pod level. diff --git a/website/docs/security/amazon-eks-pod-identity/use-pod-identity.md b/website/docs/security/amazon-eks-pod-identity/use-pod-identity.md new file mode 100644 index 000000000..2ae31a212 --- /dev/null +++ b/website/docs/security/amazon-eks-pod-identity/use-pod-identity.md @@ -0,0 +1,125 @@
---
title: "Using EKS Pod Identity"
sidebar_position: 34
hide_table_of_contents: true
---

To use EKS Pod Identity in your cluster, the `EKS Pod Identity Agent` addon must be installed on your EKS cluster. Let's install it with the command below.
```bash timeout=300 wait=60
$ aws eks create-addon --cluster-name $EKS_CLUSTER_NAME --addon-name eks-pod-identity-agent
{
  "addon": {
    "addonName": "eks-pod-identity-agent",
    "clusterName": "eks-workshop",
    "status": "CREATING",
    "addonVersion": "v1.1.0-eksbuild.1",
    "health": {
      "issues": []
    },
    "addonArn": "arn:aws:eks:us-west-2:123456789000:addon/eks-workshop/eks-pod-identity-agent/9ec6cfbd-8c9f-7ff4-fd26-640dda75bcea",
    "createdAt": "2024-01-12T22:41:01.414000+00:00",
    "modifiedAt": "2024-01-12T22:41:01.434000+00:00",
    "tags": {}
  }
}

$ aws eks wait addon-active --cluster-name $EKS_CLUSTER_NAME --addon-name eks-pod-identity-agent
```

Now take a look at what the new addon has created in your EKS cluster. You can see a DaemonSet deployed in the `kube-system` Namespace, which runs a Pod on each Node in our cluster.

```bash
$ kubectl -n kube-system get daemonset eks-pod-identity-agent
NAME                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
eks-pod-identity-agent   3         3         3       3            3           <none>          3d21h
$ kubectl -n kube-system get pods -l app.kubernetes.io/name=eks-pod-identity-agent
NAME                           READY   STATUS    RESTARTS   AGE
eks-pod-identity-agent-4tn28   1/1     Running   0          3d21h
eks-pod-identity-agent-hslc5   1/1     Running   0          3d21h
eks-pod-identity-agent-thvf5   1/1     Running   0          3d21h
```

An IAM Role that provides the required permissions for the `carts` service to read and write to the DynamoDB table was created when you ran the `prepare-environment` script in the first step of this module. You can view the policy as shown below.

```bash
$ aws iam get-policy-version \
  --policy-arn arn:aws:iam::${AWS_ACCOUNT_ID}:policy/${EKS_CLUSTER_NAME}-carts-dynamo \
  --version-id v1 \
  --query 'PolicyVersion.Document' | jq .
{
  "Statement": [
    {
      "Action": "dynamodb:*",
      "Effect": "Allow",
      "Resource": [
        "arn:aws:dynamodb:us-west-2:1234567890:table/eks-workshop-carts",
        "arn:aws:dynamodb:us-west-2:1234567890:table/eks-workshop-carts/index/*"
      ],
      "Sid": "AllAPIActionsOnCart"
    }
  ],
  "Version": "2012-10-17"
}
```

The role has also been configured with the appropriate trust relationship, which allows the EKS Service Principal to assume this role for Pod Identity. You can view it with the command below.

```bash
$ aws iam get-role \
  --query 'Role.AssumeRolePolicyDocument' \
  --role-name ${EKS_CLUSTER_NAME}-carts-dynamo | jq .
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "pods.eks.amazonaws.com"
      },
      "Action": [
        "sts:AssumeRole",
        "sts:TagSession"
      ]
    }
  ]
}
```

Next, we will use the Amazon EKS Pod Identity feature to associate an AWS IAM role with the Kubernetes service account used by our deployment. To create the association, run the following command.
```bash wait=30
$ aws eks create-pod-identity-association --cluster-name ${EKS_CLUSTER_NAME} \
  --role-arn arn:aws:iam::${AWS_ACCOUNT_ID}:role/${EKS_CLUSTER_NAME}-carts-dynamo \
  --namespace carts --service-account carts
{
  "association": {
    "clusterName": "eks-workshop",
    "namespace": "carts",
    "serviceAccount": "carts",
    "roleArn": "arn:aws:iam::123456789000:role/eks-workshop-carts-dynamo",
    "associationArn": "arn:aws::123456789000:podidentityassociation/eks-workshop/a-abcdefghijklmnop1",
    "associationId": "a-abcdefghijklmnop1",
    "tags": {},
    "createdAt": "2024-01-09T16:16:38.163000+00:00",
    "modifiedAt": "2024-01-09T16:16:38.163000+00:00"
  }
}
```

All that's left is to verify that the `carts` Deployment is using the `carts` Service Account.

```bash
$ kubectl -n carts describe deployment carts | grep 'Service Account'
  Service Account:  carts
```

With the Service Account verified, recycle the `carts` Pods.

```bash hook=enable-pod-identity hookTimeout=430
$ kubectl -n carts rollout restart deployment/carts
deployment.apps/carts restarted
$ kubectl -n carts rollout status deployment/carts
Waiting for deployment "carts" rollout to finish: 1 old replicas are pending termination...
deployment "carts" successfully rolled out
```
diff --git a/website/docs/security/amazon-eks-pod-identity/using-dynamo.md b/website/docs/security/amazon-eks-pod-identity/using-dynamo.md new file mode 100644 index 000000000..535da6fa2 --- /dev/null +++ b/website/docs/security/amazon-eks-pod-identity/using-dynamo.md @@ -0,0 +1,82 @@
---
title: "Using Amazon DynamoDB"
sidebar_position: 32
---

The first step in this process is to re-configure the carts service to use a DynamoDB table that has already been created for us. The application loads most of its configuration from a ConfigMap, so let's take a look at it:

```bash
$ kubectl -n carts get -o yaml cm carts
apiVersion: v1
data:
  AWS_ACCESS_KEY_ID: key
  AWS_SECRET_ACCESS_KEY: secret
  CARTS_DYNAMODB_CREATETABLE: true
  CARTS_DYNAMODB_ENDPOINT: http://carts-dynamodb:8000
  CARTS_DYNAMODB_TABLENAME: Items
kind: ConfigMap
metadata:
  name: carts
  namespace: carts
```

Also, check the current status of the application using the browser. A `LoadBalancer` type service named `ui-nlb` is provisioned in the `ui` namespace, from which the application's UI can be accessed.

```bash
$ kubectl -n ui get service ui-nlb -o jsonpath='{.status.loadBalancer.ingress[*].hostname}{"\n"}'
k8s-ui-uinlb-647e781087-6717c5049aa96bd9.elb.us-west-2.amazonaws.com
```

Use the URL generated by the command above to open the UI in your browser. It should open the Retail Store as shown below.

![Home](../../../static/img/sample-app-screens/home.png)

Now, the following kustomization overwrites the ConfigMap, removing the DynamoDB endpoint configuration, which tells the SDK to default to the real DynamoDB service instead of our test Pod. We've also provided it with the name of the DynamoDB table that's already been created for us, which is pulled from the environment variable `CARTS_DYNAMODB_TABLENAME`.
```kustomization
modules/security/eks-pod-identity/dynamo/kustomization.yaml
ConfigMap/carts
```

Let's check the value of `CARTS_DYNAMODB_TABLENAME`, then run Kustomize to use the real DynamoDB service:

```bash
$ echo $CARTS_DYNAMODB_TABLENAME
eks-workshop-carts
$ kubectl kustomize ~/environment/eks-workshop/modules/security/eks-pod-identity/dynamo \
  | envsubst | kubectl apply -f-
```

This will overwrite our ConfigMap with new values:

```bash
$ kubectl -n carts get cm carts -o yaml
apiVersion: v1
data:
  CARTS_DYNAMODB_TABLENAME: eks-workshop-carts
kind: ConfigMap
metadata:
  labels:
    app: carts
  name: carts
  namespace: carts
```

We'll need to recycle all the Pods of the `carts` application to pick up the new ConfigMap contents.

```bash hook=enable-dynamo hookTimeout=430
$ kubectl -n carts rollout restart deployment/carts
deployment.apps/carts restarted
$ kubectl -n carts rollout status deployment/carts
```

So now our application should be using DynamoDB, right? Try to load it up in the browser using the URL output by the previous command, and navigate to the shopping cart.

```bash
$ kubectl -n ui get service ui-nlb -o jsonpath='{.status.loadBalancer.ingress[*].hostname}{"\n"}'
k8s-ui-uinlb-647e781087-6717c5049aa96bd9.elb.us-west-2.amazonaws.com
```

![Error500](../../../static/img/sample-app-screens/error-500.png)

The shopping cart page is not accessible! What's gone wrong? diff --git a/website/docs/security/amazon-eks-pod-identity/verifying-dynamo.md b/website/docs/security/amazon-eks-pod-identity/verifying-dynamo.md new file mode 100644 index 000000000..1447285b8 --- /dev/null +++ b/website/docs/security/amazon-eks-pod-identity/verifying-dynamo.md @@ -0,0 +1,34 @@
---
title: "Verifying DynamoDB Access"
sidebar_position: 35
---

Now, with the `carts` Service Account associated with the authorized IAM Role, the `carts` Pod has permission to access the DynamoDB table. Access the web store again and navigate to the shopping cart.

```bash
$ kubectl -n ui get service ui-nlb -o jsonpath='{.status.loadBalancer.ingress[*].hostname}{"\n"}'
k8s-ui-uinlb-647e781087-6717c5049aa96bd9.elb.us-west-2.amazonaws.com
```

The `carts` Pod is able to reach the DynamoDB service and the shopping cart is now accessible!

![Cart](../../../static/img/sample-app-screens/shopping-cart.png)

After the AWS IAM role is associated with the Service Account, any newly created Pods using that Service Account will be intercepted by the [EKS Pod Identity webhook](https://github.com/aws/amazon-eks-pod-identity-webhook). This webhook runs on the Amazon EKS cluster’s control plane and is fully managed by AWS. Take a closer look at the new `carts` Pod to see the new environment variables.
```bash
$ kubectl -n carts exec deployment/carts -- env | grep AWS
AWS_STS_REGIONAL_ENDPOINTS=regional
AWS_DEFAULT_REGION=us-west-2
AWS_REGION=us-west-2
AWS_CONTAINER_CREDENTIALS_FULL_URI=http://169.254.170.23/v1/credentials
AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE=/var/run/secrets/pods.eks.amazonaws.com/serviceaccount/eks-pod-identity-token
```

Things worth noting are:

* `AWS_DEFAULT_REGION` is set automatically to the same region as our EKS cluster
* `AWS_STS_REGIONAL_ENDPOINTS` configures regional STS endpoints to avoid putting too much pressure on the global endpoint in `us-east-1`
* `AWS_CONTAINER_CREDENTIALS_FULL_URI` tells AWS SDKs how to obtain credentials using the [HTTP credential provider](https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html). This means that EKS Pod Identity does not need to inject credentials via something like an `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY` pair; instead the SDKs have temporary credentials vended to them via the EKS Pod Identity mechanism. You can read more about how this works in the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html).

You have successfully configured Pod Identity for your application! diff --git a/website/docs/security/guardduty/cleanup.md b/website/docs/security/guardduty/cleanup.md index f08066c7d..c7eb717ad 100644 --- a/website/docs/security/guardduty/cleanup.md +++ b/website/docs/security/guardduty/cleanup.md @@ -1,6 +1,6 @@ --- title: "Cleanup" -sidebar_position: 600 +sidebar_position: 60 --- To disable GuardDuty run the following command: diff --git a/website/docs/security/guardduty/enabling.md b/website/docs/security/guardduty/enabling.md index 144a9a6d9..0462e3cc1 100644 --- a/website/docs/security/guardduty/enabling.md +++ b/website/docs/security/guardduty/enabling.md @@ -1,6 +1,6 @@ --- title: "Enable GuardDuty Protection on EKS" -sidebar_position: 41 +sidebar_position: 51 --- In this lab, we'll enable Amazon GuardDuty EKS Protection. This will provide threat detection coverage for EKS Audit Log Monitoring and EKS Runtime Monitoring to help you protect your clusters.
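For reference, outside of this workshop's scripted setup EKS Protection is enabled by turning on the corresponding features of a GuardDuty detector. The sketch below is illustrative only: it assumes a single existing detector in the current region, and feature names and flags can vary by AWS CLI version.

```bash
# Illustrative sketch: enable EKS Audit Log Monitoring and EKS Runtime Monitoring
# on an existing GuardDuty detector (assumes exactly one detector in this region).
DETECTOR_ID=$(aws guardduty list-detectors --query 'DetectorIds[0]' --output text)

aws guardduty update-detector --detector-id $DETECTOR_ID \
  --features '[{"Name":"EKS_AUDIT_LOGS","Status":"ENABLED"},{"Name":"EKS_RUNTIME_MONITORING","Status":"ENABLED"}]'
```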
diff --git a/website/docs/security/guardduty/index.md b/website/docs/security/guardduty/index.md index 6af9be608..c2ed22100 100644 --- a/website/docs/security/guardduty/index.md +++ b/website/docs/security/guardduty/index.md @@ -1,6 +1,6 @@ --- title: "Amazon GuardDuty for EKS" -sidebar_position: 40 +sidebar_position: 50 sidebar_custom_props: {"module": true} --- diff --git a/website/docs/security/guardduty/log-monitoring/admin-default-sa.md b/website/docs/security/guardduty/log-monitoring/admin-default-sa.md index 9438fd8d7..f947c87ee 100644 --- a/website/docs/security/guardduty/log-monitoring/admin-default-sa.md +++ b/website/docs/security/guardduty/log-monitoring/admin-default-sa.md @@ -1,6 +1,6 @@ --- title: "Admin access to default Service Account" -sidebar_position: 422 +sidebar_position: 522 --- diff --git a/website/docs/security/guardduty/log-monitoring/exposed_dashboard.md b/website/docs/security/guardduty/log-monitoring/exposed_dashboard.md index 8c169bbbf..c3b3fa78c 100644 --- a/website/docs/security/guardduty/log-monitoring/exposed_dashboard.md +++ b/website/docs/security/guardduty/log-monitoring/exposed_dashboard.md @@ -1,6 +1,6 @@ --- title: "Exposed Kubernetes dashboard" -sidebar_position: 423 +sidebar_position: 523 --- This finding informs you that your EKS Cluster dashboard was exposed to the internet by a Load Balancer service. An exposed dashboard makes the management interface of your cluster publicly accessible from the internet and allows bad actors to exploit any authentication and access control gaps that may be present. diff --git a/website/docs/security/guardduty/log-monitoring/index.md b/website/docs/security/guardduty/log-monitoring/index.md index 87de48d75..967195a59 100644 --- a/website/docs/security/guardduty/log-monitoring/index.md +++ b/website/docs/security/guardduty/log-monitoring/index.md @@ -1,6 +1,6 @@ --- title: "EKS Log Monitoring" -sidebar_position: 420 +sidebar_position: 520 --- EKS Audit Log Monitoring when enabled, immediately begins to monitor Kubernetes audit logs from your clusters and analyze them to detect potentially malicious and suspicious activity. It consumes Kubernetes audit log events directly from the Amazon EKS control plane logging feature through an independent and duplicative stream of flow logs. diff --git a/website/docs/security/guardduty/log-monitoring/privileged_container_mount.md b/website/docs/security/guardduty/log-monitoring/privileged_container_mount.md index 504c26afd..5994b4306 100644 --- a/website/docs/security/guardduty/log-monitoring/privileged_container_mount.md +++ b/website/docs/security/guardduty/log-monitoring/privileged_container_mount.md @@ -1,6 +1,6 @@ --- title: "Privileged Container with sensitive mount" -sidebar_position: 424 +sidebar_position: 524 --- In this lab you will be creating a container with `privileged` Security Context, with root level access in the `default` Namespace of your EKS Cluster. This privileged container will also have a sensitive directory from the host, mounted and accessible as a volume within your container. 
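To make that scenario concrete, a Pod along the lines of the following sketch would match the description. This is a hypothetical illustration of the pattern the finding detects, not the exact manifest used in the lab, and it should only be run in a disposable lab cluster.

```bash
# Hypothetical example: a privileged container mounting a sensitive host directory.
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: privileged-demo
  namespace: default
spec:
  containers:
    - name: shell
      image: ubuntu
      command: ["sleep", "infinity"]
      securityContext:
        privileged: true
      volumeMounts:
        - name: host-etc
          mountPath: /host-etc
  volumes:
    - name: host-etc
      hostPath:
        path: /etc
EOF
```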
diff --git a/website/docs/security/guardduty/log-monitoring/unsafe-kube-system.md b/website/docs/security/guardduty/log-monitoring/unsafe-kube-system.md index 9b062c45e..6b93b0e8f 100644 --- a/website/docs/security/guardduty/log-monitoring/unsafe-kube-system.md +++ b/website/docs/security/guardduty/log-monitoring/unsafe-kube-system.md @@ -1,6 +1,6 @@ --- title: "Unsafe execution in kube-system Namespace" -sidebar_position: 421 +sidebar_position: 521 --- This finding indicates that a command was executed inside a Pod in the `kube-system` Namespace on EKS Cluster. diff --git a/website/docs/security/guardduty/runtime-monitoring/crypto-runtime.md b/website/docs/security/guardduty/runtime-monitoring/crypto-runtime.md index 70af31ae6..cf7c92156 100644 --- a/website/docs/security/guardduty/runtime-monitoring/crypto-runtime.md +++ b/website/docs/security/guardduty/runtime-monitoring/crypto-runtime.md @@ -1,6 +1,6 @@ --- title: "Crypto Currency Runtime" -sidebar_position: 431 +sidebar_position: 531 --- This finding indicates that a container tried to do a cryto mining inside a Pod. diff --git a/website/docs/security/guardduty/runtime-monitoring/index.md b/website/docs/security/guardduty/runtime-monitoring/index.md index 9647e6749..798c3fd4e 100644 --- a/website/docs/security/guardduty/runtime-monitoring/index.md +++ b/website/docs/security/guardduty/runtime-monitoring/index.md @@ -1,6 +1,6 @@ --- title: "EKS Runtime Monitoring" -sidebar_position: 430 +sidebar_position: 530 --- EKS Runtime Monitoring provides runtime threat detection coverage for Amazon EKS nodes and containers. It uses the GuardDuty security agent (EKS add-on) that adds runtime visibility into individual EKS workloads, for example, file access, process execution, privilege escalation, and network connections identifying specific containers that may be potentially compromised. 
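Once Runtime Monitoring with automated agent management is enabled, you can confirm the security agent is running in the cluster. The add-on, namespace, and DaemonSet names below reflect how the agent is commonly deployed and are assumptions; they may differ in your environment.

```bash
# Check whether the GuardDuty agent managed add-on is installed on the cluster
# (mirrors the list-addons query style used elsewhere in this workshop).
aws eks list-addons --cluster-name $EKS_CLUSTER_NAME --region $AWS_REGION \
  --query "addons[? @ == 'aws-guardduty-agent']" --output text

# Inspect the agent DaemonSet and its Pods.
kubectl -n amazon-guardduty get daemonset aws-guardduty-agent
kubectl -n amazon-guardduty get pods -o wide
```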
diff --git a/website/docs/security/iam-roles-for-service-accounts/understanding.md b/website/docs/security/iam-roles-for-service-accounts/understanding.md index bf5ea6d55..ba276e401 100644 --- a/website/docs/security/iam-roles-for-service-accounts/understanding.md +++ b/website/docs/security/iam-roles-for-service-accounts/understanding.md @@ -13,9 +13,9 @@ This will return a lot of logs, so lets filter it to get a succinct view of the ```bash $ kubectl -n carts logs deployment/carts \ - | grep AmazonDynamoDBException -2022-08-01 20:46:40.648 ERROR 1 --- [nio-8080-exec-1] o.a.c.c.C.[.[.[/].[dispatcherServlet] : Servlet.service() for servlet [dispatcherServlet] in context with path [] threw exception [Request processing failed; nested exception is com.amazonaws.services.dynamodbv2.model.AmazonDynamoDBException: User: arn:aws:sts::1234567890:assumed-role/eks-workshop-managed-ondemand/i-09e2e801deff1197a is not authorized to perform: dynamodb:Query on resource: arn:aws:dynamodb:us-west-2:1234567890:table/eks-workshop-carts/index/idx_global_customerId because no identity-based policy allows the dynamodb:Query action (Service: AmazonDynamoDBv2; Status Code: 400; Error Code: AccessDeniedException; Request ID: BDDGUIJ5N8PSEI03F4U15NI727VV4KQNSO5AEMVJF66Q9ASUAAJG; Proxy: null)] with root cause -com.amazonaws.services.dynamodbv2.model.AmazonDynamoDBException: User: arn:aws:sts::1234567890:assumed-role/eks-workshop-managed-ondemand/i-09e2e801deff1197a is not authorized to perform: dynamodb:Query on resource: arn:aws:dynamodb:us-west-2:1234567890:table/eks-workshop-carts/index/idx_global_customerId because no identity-based policy allows the dynamodb:Query action (Service: AmazonDynamoDBv2; Status Code: 400; Error Code: AccessDeniedException; Request ID: BDDGUIJ5N8PSEI03F4U15NI727VV4KQNSO5AEMVJF66Q9ASUAAJG; Proxy: null) + | grep DynamoDbException +2024-01-09T18:54:10.818Z ERROR 1 --- ${sys:LOGGED_APPLICATION_NAME}[nio-8080-exec-1] o.a.c.c.C.[.[.[.[dispatcherServlet] : Servlet.service() for servlet [dispatcherServlet] in context with path [] threw exception [Request processing failed: software.amazon.awssdk.services.dynamodb.model.DynamoDbException: User: arn:aws:sts::123456789012:assumed-role/eksctl-eks-workshop-nodegroup-defa-NodeInstanceRole-vniVa7QtGHXO/i-075976199b049a358 is not authorized to perform: dynamodb:Query on resource: arn:aws:dynamodb:us-west-2:123456789012:table/eks-workshop-carts/index/idx_global_customerId because no identity-based policy allows the dynamodb:Query action (Service: DynamoDb, Status Code: 400, Request ID: QEQBV8R44MI1DSRQFGIAAAOS8FVV4KQNSO5AEMVJF66Q9ASUAAJG)] with root cause +software.amazon.awssdk.services.dynamodb.model.DynamoDbException: User: arn:aws:sts::123456789012:assumed-role/eksctl-eks-workshop-nodegroup-defa-NodeInstanceRole-vniVa7QtGHXO/i-075976199b049a358 is not authorized to perform: dynamodb:Query on resource: arn:aws:dynamodb:us-west-2:123456789012:table/eks-workshop-carts/index/idx_global_customerId because no identity-based policy allows the dynamodb:Query action (Service: DynamoDb, Status Code: 400, Request ID: QEQBV8R44MI1DSRQFGIAAAOS8FVV4KQNSO5AEMVJF66Q9ASUAAJG) ``` Our application is generating an `AccessDeniedException` which indicates that the IAM Role our Pod is using to access DynamoDB does not have the required permissions. This is happening because our Pod is by default using the IAM Role assigned to the EC2 worker node on which its running, which does not have an IAM Policy that allows access to DynamoDB. 
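If you want to confirm this for yourself, you can inspect the IAM policies attached to the node role that appears in the error message. The role name below is a placeholder copied from the sample log output; substitute the `assumed-role` name from your own logs.

```bash
# Placeholder node role name taken from the AccessDeniedException above.
NODE_ROLE_NAME="eksctl-eks-workshop-nodegroup-defa-NodeInstanceRole-vniVa7QtGHXO"

# None of the attached managed policies grant DynamoDB access.
aws iam list-attached-role-policies --role-name $NODE_ROLE_NAME \
  --query 'AttachedPolicies[].PolicyName'
```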
diff --git a/website/docs/security/kyverno/baseline-pss.md b/website/docs/security/kyverno/baseline-pss.md index 1f3794c24..ea57b552b 100644 --- a/website/docs/security/kyverno/baseline-pss.md +++ b/website/docs/security/kyverno/baseline-pss.md @@ -1,6 +1,6 @@ --- title: "Enforcing Pod Security Standards" -sidebar_position: 62 +sidebar_position: 72 --- As discussed in the introduction for [Pod Security Standards (PSS)](../pod-security-standards/) section, there are 3 pre-defined Policy levels, **Privileged**, **Baseline**, and **Restricted**. While it is recommended to setup a Restricted PSS, it can cause unintended behavior on the application level unless properly set. To get started it is recommended to setup a Baseline Policy that will prevent known Privileged escalations such as Containers accessing HostProcess, HostPath, HostPorts or allow traffic snooping for example, being possible to setup individual policies to restrict or disallow those privileged access to containers. diff --git a/website/docs/security/kyverno/creating-policy.md b/website/docs/security/kyverno/creating-policy.md index e4aacb7fc..d37aef140 100644 --- a/website/docs/security/kyverno/creating-policy.md +++ b/website/docs/security/kyverno/creating-policy.md @@ -1,6 +1,6 @@ --- title: "Creating a Simple Policy" -sidebar_position: 61 +sidebar_position: 71 --- To get an understanding of Kyverno Policies, we will start our lab with a simple Pod Label requirement. As you may know, Labels in Kubernetes can be used to tag objects and resources in the Cluster. diff --git a/website/docs/security/kyverno/index.md b/website/docs/security/kyverno/index.md index 09fa69a5e..789f1e8b4 100644 --- a/website/docs/security/kyverno/index.md +++ b/website/docs/security/kyverno/index.md @@ -1,6 +1,6 @@ --- title: "Policy management with Kyverno" -sidebar_position: 60 +sidebar_position: 70 sidebar_custom_props: {"module": true} --- diff --git a/website/docs/security/kyverno/reports.md b/website/docs/security/kyverno/reports.md index 6de122241..4bdeba108 100644 --- a/website/docs/security/kyverno/reports.md +++ b/website/docs/security/kyverno/reports.md @@ -1,6 +1,6 @@ --- title: "Reports & Auditing" -sidebar_position: 64 +sidebar_position: 74 --- Kyverno also includes a [Policy Reporting](https://kyverno.io/docs/policy-reports/) tool, using the open format defined by the Kubernetes Policy Working Group and deployed as custom resources in the cluster. Kyverno emits these reports when admission actions like *CREATE*, *UPDATE*, and *DELETE* are performed in the cluster, they are also generated as a result of background scans that validate policies on already existing resources. diff --git a/website/docs/security/kyverno/restricting-images.md b/website/docs/security/kyverno/restricting-images.md index a3d2c8fa8..8b23187ba 100644 --- a/website/docs/security/kyverno/restricting-images.md +++ b/website/docs/security/kyverno/restricting-images.md @@ -1,6 +1,6 @@ --- title: "Restricting Image Registries" -sidebar_position: 63 +sidebar_position: 73 --- Using container images form unknown sources on your EKS Clusters, that may not be a scanned for Common Vulnerabilities and Exposure (CVE), represent a risk factor for the overall security of your environment. When chossing container images sources, you need to ensure that they are originated from Trusted Registries, in order to reduce the threat exposure and exploits of vulnerabilities. 
Some larger organizations also have Security Guidelines that limit containers to use images from their own hosted private image registry. diff --git a/website/docs/security/pod-security-standards/baseline.md b/website/docs/security/pod-security-standards/baseline.md index 4163560c4..dd82a14cf 100644 --- a/website/docs/security/pod-security-standards/baseline.md +++ b/website/docs/security/pod-security-standards/baseline.md @@ -1,6 +1,6 @@ --- title: "Baseline PSS Profile" -sidebar_position: 52 +sidebar_position: 62 --- What if we want to restrict the permissions that a Pod can request? For example the `privileged` permissions we provided to the assets Pod in the previous section can be dangerous, allowing an attacker access to the hosts resources outside of the container. diff --git a/website/docs/security/pod-security-standards/index.md b/website/docs/security/pod-security-standards/index.md index 21b99eb1a..9fc4e869f 100644 --- a/website/docs/security/pod-security-standards/index.md +++ b/website/docs/security/pod-security-standards/index.md @@ -1,6 +1,6 @@ --- title: "Pod Security Standards" -sidebar_position: 50 +sidebar_position: 60 sidebar_custom_props: {"module": true} --- diff --git a/website/docs/security/pod-security-standards/privileged.md b/website/docs/security/pod-security-standards/privileged.md index 122d5a268..77ea81992 100644 --- a/website/docs/security/pod-security-standards/privileged.md +++ b/website/docs/security/pod-security-standards/privileged.md @@ -1,6 +1,6 @@ --- title: "Privileged PSS profile" -sidebar_position: 51 +sidebar_position: 61 --- We'll start looking at PSS by exploring the Privileged profile, which is the most permissive and allows for known privilege escalations. diff --git a/website/docs/security/pod-security-standards/restricted.md b/website/docs/security/pod-security-standards/restricted.md index 4dfcc4d03..8cfa2e95e 100644 --- a/website/docs/security/pod-security-standards/restricted.md +++ b/website/docs/security/pod-security-standards/restricted.md @@ -1,6 +1,6 @@ --- title: "Restricted PSS Profile" -sidebar_position: 53 +sidebar_position: 63 --- Finally we can take a look at the Restricted profile, which is the most heavily restricted policy following current Pod hardening best practices. Add labels to the `assets` namespace to enable all PSA modes for the Restricted PSS profile: diff --git a/website/docs/security/secrets-management/exploring-secrets.md b/website/docs/security/secrets-management/exploring-secrets.md index 679be0a4e..cbf432e92 100644 --- a/website/docs/security/secrets-management/exploring-secrets.md +++ b/website/docs/security/secrets-management/exploring-secrets.md @@ -1,6 +1,6 @@ --- title: "Exploring Secrets" -sidebar_position: 31 +sidebar_position: 41 --- Kubernetes secrets can be exposed to the Pods in different ways such as via environment variables and volumes. diff --git a/website/docs/security/secrets-management/index.md b/website/docs/security/secrets-management/index.md index 8fc0d9834..e0784e152 100644 --- a/website/docs/security/secrets-management/index.md +++ b/website/docs/security/secrets-management/index.md @@ -1,6 +1,6 @@ --- title: "Secrets Management" -sidebar_position: 30 +sidebar_position: 40 --- [Kubernetes Secret](https://kubernetes.io/docs/concepts/configuration/secret/) is a resource that helps cluster operators manage the deployment of sensitive information such as passwords, OAuth tokens, and ssh keys etc. 
These secrets can be mounted as data volumes or exposed as environment variables to the containers in a Pod, thus decoupling Pod deployment from managing sensitive data needed by the containerized applications within a Pod. diff --git a/website/docs/security/secrets-management/sealed-secrets/index.md b/website/docs/security/secrets-management/sealed-secrets/index.md index da5646e7c..e4871fb65 100644 --- a/website/docs/security/secrets-management/sealed-secrets/index.md +++ b/website/docs/security/secrets-management/sealed-secrets/index.md @@ -1,6 +1,6 @@ --- title: "Securing Secrets Using Sealed Secrets" -sidebar_position: 70 +sidebar_position: 430 sidebar_custom_props: {"module": true} --- diff --git a/website/docs/security/secrets-management/sealed-secrets/installing-sealed-secrets.md b/website/docs/security/secrets-management/sealed-secrets/installing-sealed-secrets.md index 765e8c4ee..ac359507d 100644 --- a/website/docs/security/secrets-management/sealed-secrets/installing-sealed-secrets.md +++ b/website/docs/security/secrets-management/sealed-secrets/installing-sealed-secrets.md @@ -1,6 +1,6 @@ --- title: "Installing Sealed Secrets" -sidebar_position: 72 +sidebar_position: 432 --- The `kubeseal` CLI is used to interact with the sealed secrets controller, and has already been installed in Cloud9. diff --git a/website/docs/security/secrets-management/sealed-secrets/managing-sealing-keys.md b/website/docs/security/secrets-management/sealed-secrets/managing-sealing-keys.md index 2ad757413..3f669e6a2 100644 --- a/website/docs/security/secrets-management/sealed-secrets/managing-sealing-keys.md +++ b/website/docs/security/secrets-management/sealed-secrets/managing-sealing-keys.md @@ -1,6 +1,6 @@ --- title: "Managing the Sealing Key" -sidebar_position: 74 +sidebar_position: 434 --- The only way to decrypt the encrypted data within a SealedSecret is with the sealing key that is managed by the controller. There could be situations where you are trying to restore the original state of a cluster after a disaster or you want to leverage GitOps workflow to deploy the Kubernetes resources, including SealedSecrets, from a Git repository and create a new EKS cluster. The controller deployed in the new EKS cluster must use the same sealing key to be able to unseal the SealedSecrets. 
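As a rough sketch of what that means operationally, the sealing key can be exported from the original cluster and applied to the new one before the controller starts. The namespace, label selector, and Deployment name below assume a default installation; adjust them for your setup.

```bash
# Back up the active sealing key(s) from the original cluster.
kubectl -n kube-system get secret \
  -l sealedsecrets.bitnami.com/sealed-secrets-key=active \
  -o yaml > sealing-key-backup.yaml

# Restore the key into the new cluster and restart the controller so it is picked up
# (the Deployment name depends on how the controller was installed).
kubectl -n kube-system apply -f sealing-key-backup.yaml
kubectl -n kube-system rollout restart deployment sealed-secrets-controller
```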
diff --git a/website/docs/security/secrets-management/sealed-secrets/sealing-secrets.md b/website/docs/security/secrets-management/sealed-secrets/sealing-secrets.md index 6139c7a22..f64ea2fa2 100644 --- a/website/docs/security/secrets-management/sealed-secrets/sealing-secrets.md +++ b/website/docs/security/secrets-management/sealed-secrets/sealing-secrets.md @@ -1,6 +1,6 @@ --- title: "Sealing your Secrets" -sidebar_position: 73 +sidebar_position: 433 --- ### Exploring the catalog Pod diff --git a/website/docs/security/secrets-management/sealed-secrets/working.md b/website/docs/security/secrets-management/sealed-secrets/working.md index 4259221f2..60caa8ece 100644 --- a/website/docs/security/secrets-management/sealed-secrets/working.md +++ b/website/docs/security/secrets-management/sealed-secrets/working.md @@ -1,6 +1,6 @@ --- title: "Sealed Secrets for Kubernetes" -sidebar_position: 71 +sidebar_position: 431 --- Sealed Secrets is composed of two parts: diff --git a/website/docs/security/secrets-management/secrets-manager/ascp.md b/website/docs/security/secrets-management/secrets-manager/ascp.md index 831817e43..852b9840d 100644 --- a/website/docs/security/secrets-management/secrets-manager/ascp.md +++ b/website/docs/security/secrets-management/secrets-manager/ascp.md @@ -1,6 +1,6 @@ --- title: "AWS Secrets and Configuration Provider (ASCP)" -sidebar_position: 322 +sidebar_position: 422 --- When we ran the `prepare-environment` script detailed in a [previous step](./index.md), it has already installed the AWS Secrets and Configuration Provider (ASCP) for the Kubernetes Secrets Store CSI Driver that's required for this lab. diff --git a/website/docs/security/secrets-management/secrets-manager/create-secret.md b/website/docs/security/secrets-management/secrets-manager/create-secret.md index 47080081f..8946a6566 100644 --- a/website/docs/security/secrets-management/secrets-manager/create-secret.md +++ b/website/docs/security/secrets-management/secrets-manager/create-secret.md @@ -1,6 +1,6 @@ --- title: "Storing secrets in AWS Secrets Manager" -sidebar_position: 321 +sidebar_position: 421 --- First, we need to store a secret in AWS Secrets Manager, lets do that using the AWS CLI: diff --git a/website/docs/security/secrets-management/secrets-manager/external-secrets.md b/website/docs/security/secrets-management/secrets-manager/external-secrets.md index e244fe08d..dee7e6520 100644 --- a/website/docs/security/secrets-management/secrets-manager/external-secrets.md +++ b/website/docs/security/secrets-management/secrets-manager/external-secrets.md @@ -1,6 +1,6 @@ --- title: "External Secrets Operator" -sidebar_position: 324 +sidebar_position: 424 --- Now we can explore integrating with Secrets Managed using the External Secrets operator. 
This has already been installed in our EKS cluster: diff --git a/website/docs/security/secrets-management/secrets-manager/index.md b/website/docs/security/secrets-management/secrets-manager/index.md index a99793db7..085109f72 100644 --- a/website/docs/security/secrets-management/secrets-manager/index.md +++ b/website/docs/security/secrets-management/secrets-manager/index.md @@ -1,6 +1,6 @@ --- title: "Managing Secrets with AWS Secrets Manager" -sidebar_position: 320 +sidebar_position: 420 sidebar_custom_props: {"module": true} --- diff --git a/website/docs/security/secrets-management/secrets-manager/mounting-secrets.md b/website/docs/security/secrets-management/secrets-manager/mounting-secrets.md index 0aba4e08b..bbb05458d 100644 --- a/website/docs/security/secrets-management/secrets-manager/mounting-secrets.md +++ b/website/docs/security/secrets-management/secrets-manager/mounting-secrets.md @@ -1,6 +1,6 @@ --- title: "Mounting AWS Secrets Manager secret on Kubernetes Pod" -sidebar_position: 323 +sidebar_position: 423 --- Now that we have a secret stored in AWS Secrets Manager and synchronized with a Kubernetes Secret let's mount it inside the Pod. First we should take a look at the `catalog` Deployment and the existing Secrets in the `catalog` namespace.
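For example, a quick way to do that inspection (using the `catalog` names from this workshop) could look like the following; the exact output will depend on your environment.

```bash
# List the Secrets that currently exist in the catalog namespace.
kubectl -n catalog get secrets

# Review how the catalog Deployment currently consumes its configuration and credentials.
kubectl -n catalog describe deployment catalog
```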