diff --git a/.github/workflows/ci-latest-release.yml b/.github/workflows/ci-latest-release.yml index 804861c843..2c414dfb73 100644 --- a/.github/workflows/ci-latest-release.yml +++ b/.github/workflows/ci-latest-release.yml @@ -47,7 +47,7 @@ jobs: runs-on: ubuntu-latest-16-cores permissions: id-token: write - timeout-minutes: 120 + timeout-minutes: 150 steps: - uses: actions/checkout@v3 with: @@ -109,7 +109,7 @@ jobs: - name: Test KubeArmor using Ginkgo run: | go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo - make + ginkgo --vv --flake-attempts=10 --timeout=10m smoke/ working-directory: ./tests/k8s_env timeout-minutes: 30 diff --git a/.github/workflows/ci-test-ginkgo.yml b/.github/workflows/ci-test-ginkgo.yml index 553b5e68f7..6b1752963b 100644 --- a/.github/workflows/ci-test-ginkgo.yml +++ b/.github/workflows/ci-test-ginkgo.yml @@ -17,6 +17,7 @@ on: - "tests/**" - "protobuf/**" - ".github/workflows/ci-test-ginkgo.yml" + - "examples/multiubuntu/build/**" - "pkg/KubeArmorOperator/**" - "deployments/helm/**" diff --git a/.github/workflows/ci-test-ubi-image.yml b/.github/workflows/ci-test-ubi-image.yml index 0c9978963a..a1ea9594b2 100644 --- a/.github/workflows/ci-test-ubi-image.yml +++ b/.github/workflows/ci-test-ubi-image.yml @@ -17,6 +17,7 @@ on: - "tests/**" - "protobuf/**" - ".github/workflows/ci-test-ginkgo.yml" + - "examples/multiubuntu/build/**" - "pkg/KubeArmorOperator/**" - "deployments/helm/**" diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 6b6d4e0283..eb93f641ed 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -41,11 +41,6 @@ jobs: with: results_file: results.sarif results_format: sarif - # (Optional) "write" PAT token. 
Uncomment the `repo_token` line below if: - # - you want to enable the Branch-Protection check on a *public* repository, or - # - you are installing Scorecard on a *private* repository - # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. - repo_token: ${{ secrets.SCORECARD_TOKEN }} # Public repositories: # - Publish results to OpenSSF REST API for easy access by consumers diff --git a/Dockerfile b/Dockerfile index b0ace30dc1..39d50a1c22 100644 --- a/Dockerfile +++ b/Dockerfile @@ -83,6 +83,7 @@ ENV KUBEARMOR_UBI=true LABEL name="kubearmor" \ vendor="Accuknox" \ + maintainer="Barun Acharya, Ramakant Sharma" \ version=${VERSION} \ release=${VERSION} \ summary="kubearmor container image based on redhat ubi" \ @@ -119,6 +120,7 @@ ENV KUBEARMOR_UBI=true LABEL name="kubearmor" \ vendor="Accuknox" \ + maintainer="Barun Acharya, Ramakant Sharma" \ version=${VERSION} \ release=${VERSION} \ summary="kubearmor container image based on redhat ubi" \ diff --git a/Dockerfile.init b/Dockerfile.init index 290f81788f..b61e7c1be7 100644 --- a/Dockerfile.init +++ b/Dockerfile.init @@ -8,6 +8,7 @@ ARG VERSION=latest LABEL name="kubearmor-init" \ vendor="Accuknox" \ + maintainer="Barun Acharya, Ramakant Sharma" \ version=${VERSION} \ release=${VERSION} \ summary="kubearmor-init container image based on redhat ubi" \ diff --git a/KubeArmor/BPF/enforcer.bpf.c b/KubeArmor/BPF/enforcer.bpf.c index 02f45a23ea..8b8344b8fe 100644 --- a/KubeArmor/BPF/enforcer.bpf.c +++ b/KubeArmor/BPF/enforcer.bpf.c @@ -75,7 +75,10 @@ int BPF_PROG(enforce_proc, struct linux_binprm *bprm, int ret) { if (src_offset == NULL) fromSourceCheck = false; - void *src_ptr = &src_buf->buf[*src_offset]; + void *src_ptr; + if (src_buf->buf[*src_offset]) { + src_ptr = &src_buf->buf[*src_offset]; + } if (src_ptr == NULL) fromSourceCheck = false; @@ -152,10 +155,9 @@ int BPF_PROG(enforce_proc, struct linux_binprm *bprm, int ret) { goto decision; } - // match exec name 
struct qstr d_name; - d_name = BPF_CORE_READ(f_path.dentry,d_name); + d_name = BPF_CORE_READ(f_path.dentry, d_name); bpf_map_update_elem(&bufk, &two, z, BPF_ANY); bpf_probe_read_str(pk->path, MAX_STRING_SIZE, d_name.name); diff --git a/KubeArmor/BPF/shared.h b/KubeArmor/BPF/shared.h index 5051d2ed4e..2dbd0d4b8b 100644 --- a/KubeArmor/BPF/shared.h +++ b/KubeArmor/BPF/shared.h @@ -272,6 +272,9 @@ static inline void get_outer_key(struct outer_key *pokey, struct task_struct *t) { pokey->pid_ns = get_task_pid_ns_id(t); pokey->mnt_ns = get_task_mnt_ns_id(t); + // TODO: Use cgroup ns as well for host process identification to support enforcement on deployments using hostpidns + // u32 cg_ns = BPF_CORE_READ(t, nsproxy, cgroup_ns, ns).inum; + // if (pokey->pid_ns == PROC_PID_INIT_INO && cg_ns == PROC_CGROUP_INIT_INO) { if (pokey->pid_ns == PROC_PID_INIT_INO) { pokey->pid_ns = 0; pokey->mnt_ns = 0; @@ -288,20 +291,13 @@ static __always_inline u32 init_context(event *event_data) { event_data->host_ppid = get_task_ppid(task); event_data->host_pid = bpf_get_current_pid_tgid() >> 32; - u32 pid = get_task_ns_tgid(task); - if (event_data->host_pid == pid) { // host - event_data->pid_id = 0; - event_data->mnt_id = 0; - - event_data->ppid = get_task_ppid(task); - event_data->pid = bpf_get_current_pid_tgid() >> 32; - } else { // container - event_data->pid_id = get_task_pid_ns_id(task); - event_data->mnt_id = get_task_mnt_ns_id(task); + struct outer_key okey; + get_outer_key(&okey, task); + event_data->pid_id = okey.pid_ns; + event_data->mnt_id = okey.mnt_ns; - event_data->ppid = get_task_ns_ppid(task); - event_data->pid = pid; - } + event_data->ppid = get_task_ppid(task); + event_data->pid = get_task_ns_tgid(task); event_data->uid = bpf_get_current_uid_gid(); @@ -487,10 +483,15 @@ static inline int match_and_enforce_path_hooks(struct path *f_path, u32 id, if (src_offset == NULL) fromSourceCheck = false; - void *ptr = &src_buf->buf[*src_offset]; + void *src_ptr; + if 
(src_buf->buf[*src_offset]) { + src_ptr = &src_buf->buf[*src_offset]; + } + if (src_ptr == NULL) + fromSourceCheck = false; if (fromSourceCheck) { - bpf_probe_read_str(store->source, MAX_STRING_SIZE, ptr); + bpf_probe_read_str(store->source, MAX_STRING_SIZE, src_ptr); val = bpf_map_lookup_elem(inner, store); diff --git a/KubeArmor/BPF/system_monitor.c b/KubeArmor/BPF/system_monitor.c index 1682f88572..fe239f29e5 100644 --- a/KubeArmor/BPF/system_monitor.c +++ b/KubeArmor/BPF/system_monitor.c @@ -609,48 +609,68 @@ static __always_inline int save_context_to_buffer(bufs_t *bufs_p, void *ptr) return 0; } -static __always_inline int save_str_to_buffer(bufs_t *bufs_p, void *ptr) -{ - +static __always_inline int save_str_to_buffer(bufs_t *bufs_p, void *ptr) { u32 *off = get_buffer_offset(DATA_BUF_TYPE); - - if (off == NULL) - { + if (off == NULL) { return -1; } - if (*off > MAX_BUFFER_SIZE - MAX_STRING_SIZE - sizeof(int)) - { - return 0; // no enough space + if (*off >= MAX_BUFFER_SIZE) { + return 0; } - u8 type = STR_T; - bpf_probe_read(&(bufs_p->buf[*off & (MAX_BUFFER_SIZE - 1)]), 1, &type); + u32 type_pos = *off; + if (type_pos >= MAX_BUFFER_SIZE || type_pos + 1 > MAX_BUFFER_SIZE) { + return 0; + } - *off += 1; + if (MAX_BUFFER_SIZE - type_pos < (1 + sizeof(int) + 1)) { + return 0; + } - if (*off > MAX_BUFFER_SIZE - MAX_STRING_SIZE - sizeof(int)) - { - return 0; // no enough space + u32 size_pos = type_pos + 1; + if (size_pos >= MAX_BUFFER_SIZE || + size_pos + sizeof(int) > MAX_BUFFER_SIZE) { + return 0; } - int sz = bpf_probe_read_str(&(bufs_p->buf[*off + sizeof(int)]), MAX_STRING_SIZE, ptr); - if (sz > 0) - { - if (*off > MAX_BUFFER_SIZE - sizeof(int)) - { - return 0; // no enough space - } + u8 type_val = STR_T; + if (bpf_probe_read(&(bufs_p->buf[type_pos]), sizeof(u8), &type_val) < 0) { + return 0; + } + + u32 str_pos = size_pos + sizeof(int); + if (str_pos >= MAX_BUFFER_SIZE || str_pos + MAX_STRING_SIZE > MAX_BUFFER_SIZE) { + return 0; + } - 
bpf_probe_read(&(bufs_p->buf[*off]), sizeof(int), &sz); + u32 remaining_space = MAX_BUFFER_SIZE - str_pos; + u32 read_size = remaining_space; + if (read_size > MAX_STRING_SIZE) { + read_size = MAX_STRING_SIZE; + } - *off += sz + sizeof(int); - set_buffer_offset(DATA_BUF_TYPE, *off); + if (read_size < MAX_STRING_SIZE) { + return 0; + } - return sz + sizeof(int); + int sz = bpf_probe_read_str(&(bufs_p->buf[str_pos]), read_size, ptr); + if (sz <= 0) { + return 0; } - return 0; + if (bpf_probe_read(&(bufs_p->buf[size_pos]), sizeof(int), &sz) < 0) { + return 0; + } + + u32 new_off = str_pos + sz; + if (new_off > MAX_BUFFER_SIZE) { + return 0; + } + + set_buffer_offset(DATA_BUF_TYPE, new_off); + + return sz + sizeof(int); } static __always_inline bool prepend_path(struct path *path, bufs_t *string_p, int buf_type) @@ -1019,7 +1039,7 @@ static __always_inline u32 init_context(sys_context_t *context) } } -#if (defined(BTF_SUPPORTED)) +#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 2, 0) // min version that supports 1 million instructions struct fs_struct *fs; fs = READ_KERN(task->fs); struct path path = READ_KERN(fs->pwd); @@ -1046,6 +1066,13 @@ static __always_inline u32 init_context(sys_context_t *context) // To check if subsequent alerts should be dropped per container static __always_inline bool should_drop_alerts_per_container(sys_context_t *context, struct pt_regs *ctx, u32 types, args_t *args) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 2, 0) + + // throttling for host in case of apparmor is handled in userspace + if (context->pid_id == 0 && context->mnt_id == 0) { + return false; + } + u64 current_timestamp = bpf_ktime_get_ns(); struct outer_key key = { @@ -1112,6 +1139,7 @@ static __always_inline bool should_drop_alerts_per_container(sys_context_t *cont } bpf_map_update_elem(&kubearmor_alert_throttle, &key, state, BPF_ANY); +#endif return false; } diff --git a/KubeArmor/common/common.go b/KubeArmor/common/common.go index 284a549838..8f06cc70d8 100644 --- 
a/KubeArmor/common/common.go +++ b/KubeArmor/common/common.go @@ -18,6 +18,7 @@ import ( "sort" "strconv" "strings" + "sync" "time" kc "github.com/kubearmor/KubeArmor/KubeArmor/config" @@ -291,7 +292,11 @@ func GetCommandOutputWithoutErr(cmd string, args []string) string { return "" } + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() defer func() { if err = stdin.Close(); err != nil { kg.Warnf("Error closing stdin %s\n", err) @@ -300,6 +305,9 @@ func GetCommandOutputWithoutErr(cmd string, args []string) string { _, _ = io.WriteString(stdin, "values written to stdin are passed to cmd's standard input") }() + // Wait for the stdin writing to complete + wg.Wait() + out, err := res.CombinedOutput() if err != nil { return "" diff --git a/KubeArmor/config/config.go b/KubeArmor/config/config.go index a00d49874c..1a667f95a9 100644 --- a/KubeArmor/config/config.go +++ b/KubeArmor/config/config.go @@ -56,10 +56,12 @@ type KubearmorConfig struct { StateAgent bool // enable KubeArmor state agent - AlertThrottling bool // Enable/Disable Alert Throttling - MaxAlertPerSec int // Maximum alerts allowed per second - ThrottleSec int // Number of seconds for which subsequent alerts will be dropped - AnnotateResources bool // enable annotations by kubearmor if kubearmor-controller is not present + AlertThrottling bool // Enable/Disable Alert Throttling + MaxAlertPerSec int32 // Maximum alerts allowed per second + ThrottleSec int32 // Number of seconds for which subsequent alerts will be dropped + AnnotateResources bool // enable annotations by kubearmor if kubearmor-controller is not present + + ProcFsMount string // path where procfs is hosted } // GlobalCfg Global configuration for Kubearmor @@ -105,6 +107,7 @@ const ( ConfigMaxAlertPerSec string = "maxAlertPerSec" ConfigThrottleSec string = "throttleSec" ConfigAnnotateResources string = "annotateResources" + ConfigProcFsMount string = "procfsMount" ) func readCmdLineParams() { @@ -161,6 +164,8 @@ func 
readCmdLineParams() { annotateResources := flag.Bool(ConfigAnnotateResources, false, "for kubearmor deployment without kubearmor-controller") + procFsMount := flag.String(ConfigProcFsMount, "/proc", "Path to the BPF filesystem to use for storing maps") + flags := []string{} flag.VisitAll(func(f *flag.Flag) { kv := fmt.Sprintf("%s:%v", f.Name, f.Value) @@ -222,6 +227,8 @@ func readCmdLineParams() { viper.SetDefault(ConfigThrottleSec, *throttleSec) viper.SetDefault(ConfigAnnotateResources, *annotateResources) + + viper.SetDefault(ConfigProcFsMount, *procFsMount) } // LoadConfig Load configuration @@ -297,6 +304,8 @@ func LoadConfig() error { GlobalCfg.AnnotateResources = viper.GetBool(ConfigAnnotateResources) + GlobalCfg.ProcFsMount = viper.GetString(ConfigProcFsMount) + LoadDynamicConfig() kg.Printf("Final Configuration [%+v]", GlobalCfg) @@ -329,6 +338,6 @@ func LoadDynamicConfig() { GlobalCfg.DefaultPostureLogs = viper.GetBool(ConfigDefaultPostureLogs) GlobalCfg.AlertThrottling = viper.GetBool(ConfigAlertThrottling) - GlobalCfg.MaxAlertPerSec = viper.GetInt(ConfigMaxAlertPerSec) - GlobalCfg.ThrottleSec = viper.GetInt(ConfigThrottleSec) + GlobalCfg.MaxAlertPerSec = int32(viper.GetInt(ConfigMaxAlertPerSec)) + GlobalCfg.ThrottleSec = int32(viper.GetInt(ConfigThrottleSec)) } diff --git a/KubeArmor/core/containerPolicy_fuzz_test.go b/KubeArmor/core/containerPolicy_fuzz_test.go new file mode 100644 index 0000000000..dc2a5a90f9 --- /dev/null +++ b/KubeArmor/core/containerPolicy_fuzz_test.go @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 Authors of KubeArmor +package core + +import ( + "context" + "testing" + + "github.com/kubearmor/KubeArmor/KubeArmor/policy" + pb "github.com/kubearmor/KubeArmor/protobuf" +) + +func FuzzContainerPolicy(f *testing.F) { + Data1 := &pb.Policy{ + Policy: []byte(` + apiVersion: security.kubearmor.com/v1 + kind: KubeArmorPolicy + metadata: + name: ksp-group-1-proc-path-block + namespace: multiubuntu + spec: + 
selector: + matchLabels: + group: group-1 + process: + matchPaths: + - path: /bin/sleep + action: + Block + `), + } + //ksp-group-2-allow-file-path-from-source-path.yaml + Data2 := &pb.Policy{ + Policy: []byte(` + apiVersion: security.kubearmor.com/v1 +kind: KubeArmorPolicy +metadata: + name: ksp-group-2-allow-file-path-from-source-path + namespace: multiubuntu +spec: + severity: 5 + message: "allow /bin/cat to access /secret.txt" + selector: + matchLabels: + group: group-2 + process: + matchDirectories: + - dir: /bin/ + recursive: true + file: + matchPaths: + - path: /secret.txt + fromSource: + - path: /bin/cat + - path: /dev/tty + - path: /lib/terminfo/x/xterm + matchDirectories: + - dir: /pts/ + recursive: true + - dir: /proc/ + recursive: true + - dir: /dev/ + recursive: true + - dir: /lib/x86_64-linux-gnu/ + - dir: /bin/ + action: + Allow + `), + } + Data3 := &pb.Policy{ + Policy: []byte(` + apiVersion: security.kubearmor.com/v1 +kind: KubeArmorPolicy +metadata: + name: ksp-ubuntu-1-allow-net-tcp-from-source + namespace: multiubuntu +spec: + severity: 8 + selector: + matchLabels: + container: ubuntu-1 + network: + matchProtocols: + - protocol: tcp + fromSource: + - path: /usr/bin/curl + action: Allow + `), + } + + f.Add(Data1.Policy) + f.Add(Data2.Policy) + f.Add(Data3.Policy) + dm := NewKubeArmorDaemon() + + f.Fuzz(func(t *testing.T, data []byte) { + p := &policy.PolicyServer{ + UpdateContainerPolicy: dm.ParseAndUpdateContainerSecurityPolicy, + ContainerPolicyEnabled: true, + } + policy := &pb.Policy{ + Policy: data, + } + res, err := p.ContainerPolicy(context.Background(), policy) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if res.Status != pb.PolicyStatus_Invalid && res.Status != pb.PolicyStatus_Applied && res.Status != pb.PolicyStatus_Modified { + t.Errorf("Unexpected status: %v, %v", res.Status, data) + } + }) +} diff --git a/KubeArmor/core/containerdHandler.go b/KubeArmor/core/containerdHandler.go index 23c3e8a5e8..66ca50db4d 100644 
--- a/KubeArmor/core/containerdHandler.go +++ b/KubeArmor/core/containerdHandler.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "os" + "path/filepath" "strconv" "strings" "time" @@ -193,13 +194,13 @@ func (ch *ContainerdHandler) GetContainerInfo(ctx context.Context, containerID s pid := strconv.Itoa(int(taskRes.Processes[0].Pid)) - if data, err := os.Readlink("/proc/" + pid + "/ns/pid"); err == nil { + if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/pid")); err == nil { if _, err := fmt.Sscanf(data, "pid:[%d]\n", &container.PidNS); err != nil { kg.Warnf("Unable to get PidNS (%s, %s, %s)", containerID, pid, err.Error()) } } - if data, err := os.Readlink("/proc/" + pid + "/ns/mnt"); err == nil { + if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/mnt")); err == nil { if _, err := fmt.Sscanf(data, "mnt:[%d]\n", &container.MntNS); err != nil { kg.Warnf("Unable to get MntNS (%s, %s, %s)", containerID, pid, err.Error()) } diff --git a/KubeArmor/core/crioHandler.go b/KubeArmor/core/crioHandler.go index 89cc41e0d7..b1828da6c7 100644 --- a/KubeArmor/core/crioHandler.go +++ b/KubeArmor/core/crioHandler.go @@ -8,6 +8,7 @@ import ( "encoding/json" "fmt" "os" + "path/filepath" "strconv" "time" @@ -130,7 +131,7 @@ func (ch *CrioHandler) GetContainerInfo(ctx context.Context, containerID string, pid := strconv.Itoa(containerInfo.Pid) - if data, err := os.Readlink("/proc/" + pid + "/ns/pid"); err == nil { + if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/pid")); err == nil { if _, err := fmt.Sscanf(data, "pid:[%d]\n", &container.PidNS); err != nil { kg.Warnf("Unable to get PidNS (%s, %s, %s)", containerID, pid, err.Error()) } @@ -138,7 +139,7 @@ func (ch *CrioHandler) GetContainerInfo(ctx context.Context, containerID string, return container, err } - if data, err := os.Readlink("/proc/" + pid + "/ns/mnt"); err == nil { + if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, 
"/ns/mnt")); err == nil { if _, err := fmt.Sscanf(data, "mnt:[%d]\n", &container.MntNS); err != nil { kg.Warnf("Unable to get MntNS (%s, %s, %s)", containerID, pid, err.Error()) } diff --git a/KubeArmor/core/dockerHandler.go b/KubeArmor/core/dockerHandler.go index 87980df627..35f3caab30 100644 --- a/KubeArmor/core/dockerHandler.go +++ b/KubeArmor/core/dockerHandler.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "os" + "path/filepath" "slices" "strconv" "strings" @@ -144,13 +145,13 @@ func (dh *DockerHandler) GetContainerInfo(containerID string, OwnerInfo map[stri pid := strconv.Itoa(inspect.State.Pid) - if data, err := os.Readlink("/proc/" + pid + "/ns/pid"); err == nil { + if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/pid")); err == nil { if _, err := fmt.Sscanf(data, "pid:[%d]\n", &container.PidNS); err != nil { kg.Warnf("Unable to get PidNS (%s, %s, %s)", containerID, pid, err.Error()) } } - if data, err := os.Readlink("/proc/" + pid + "/ns/mnt"); err == nil { + if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/mnt")); err == nil { if _, err := fmt.Sscanf(data, "mnt:[%d]\n", &container.MntNS); err != nil { kg.Warnf("Unable to get MntNS (%s, %s, %s)", containerID, pid, err.Error()) } diff --git a/KubeArmor/core/hostPolicy_fuzz_test.go b/KubeArmor/core/hostPolicy_fuzz_test.go new file mode 100644 index 0000000000..57c4371ca6 --- /dev/null +++ b/KubeArmor/core/hostPolicy_fuzz_test.go @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 Authors of KubeArmor +package core + +import ( + "context" + "github.com/kubearmor/KubeArmor/KubeArmor/policy" + pb "github.com/kubearmor/KubeArmor/protobuf" + "testing" +) + +func FuzzHostPolicy(f *testing.F) { + data := &pb.Policy{ + Policy: []byte(` +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorHostPolicy +metadata: + name: hsp-cve-2019-14271 +spec: + tags: ["CVE-2019-14271","docker-cp","libraries","docker-tar","root-code-execution"] + 
message: "Alert! Docker Binary Has Been Executed." + nodeSelector: + matchLabels: + kubernetes.io/hostname: gke-ubuntu #change with your hostname + process: + severity: 2 + matchPaths: + - path: /usr/bin/docker + - path: /usr/sbin/chroot + - path: /usr/lib/tar + - path: /usr/lib/chmod + action: Block + file: + severity: 3 + matchDirectories: + - dir: /lib/x86_64-linux-gnu/ + - dir: /var/log/ + action: Block + `), + } + dm := NewKubeArmorDaemon() + f.Add(data.Policy) + f.Fuzz(func(t *testing.T, data []byte) { + p := &policy.PolicyServer{ + UpdateHostPolicy: dm.ParseAndUpdateHostSecurityPolicy, + HostPolicyEnabled: true, + } + policy := &pb.Policy{ + Policy: data, + } + res, err := p.HostPolicy(context.Background(), policy) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if res.Status != pb.PolicyStatus_Invalid && res.Status != pb.PolicyStatus_Applied { + t.Errorf("Unexpected status: %v, %v", res.Status, data) + } + }) +} diff --git a/KubeArmor/core/kubeArmor.go b/KubeArmor/core/kubeArmor.go index 7f2106270e..e86991de70 100644 --- a/KubeArmor/core/kubeArmor.go +++ b/KubeArmor/core/kubeArmor.go @@ -569,8 +569,6 @@ func KubeArmor() { // Un-orchestrated workloads if !dm.K8sEnabled && cfg.GlobalCfg.Policy { - dm.SetContainerNSVisibility() - // Check if cri socket set, if not then auto detect if cfg.GlobalCfg.CRISocket == "" { if kl.GetCRISocket("") == "" { @@ -579,26 +577,39 @@ func KubeArmor() { } else { cfg.GlobalCfg.CRISocket = "unix://" + kl.GetCRISocket("") } + } else { + // CRI socket supplied by user, check for existence + criSocketPath := strings.TrimPrefix(cfg.GlobalCfg.CRISocket, "unix://") + _, err := os.Stat(criSocketPath) + if err != nil { + enableContainerPolicy = false + dm.Logger.Warnf("Error while looking for CRI socket file %s", err.Error()) + } } - // monitor containers - if strings.Contains(cfg.GlobalCfg.CRISocket, "docker") { - // update already deployed containers - dm.GetAlreadyDeployedDockerContainers() - // monitor docker events - 
go dm.MonitorDockerEvents() - } else if strings.Contains(cfg.GlobalCfg.CRISocket, "containerd") { - // monitor containerd events - go dm.MonitorContainerdEvents() - } else if strings.Contains(cfg.GlobalCfg.CRISocket, "cri-o") { - // monitor crio events - go dm.MonitorCrioEvents() - } else { - dm.Logger.Warnf("Failed to monitor containers: %s is not a supported CRI socket.", cfg.GlobalCfg.CRISocket) - enableContainerPolicy = false + if enableContainerPolicy { + dm.SetContainerNSVisibility() + + // monitor containers + if strings.Contains(cfg.GlobalCfg.CRISocket, "docker") { + // update already deployed containers + dm.GetAlreadyDeployedDockerContainers() + // monitor docker events + go dm.MonitorDockerEvents() + } else if strings.Contains(cfg.GlobalCfg.CRISocket, "containerd") { + // monitor containerd events + go dm.MonitorContainerdEvents() + } else if strings.Contains(cfg.GlobalCfg.CRISocket, "cri-o") { + // monitor crio events + go dm.MonitorCrioEvents() + } else { + enableContainerPolicy = false + dm.Logger.Warnf("Failed to monitor containers: %s is not a supported CRI socket.", cfg.GlobalCfg.CRISocket) + } + + dm.Logger.Printf("Using %s for monitoring containers", cfg.GlobalCfg.CRISocket) } - dm.Logger.Printf("Using %s for monitoring containers", cfg.GlobalCfg.CRISocket) } if dm.K8sEnabled && cfg.GlobalCfg.Policy { @@ -801,6 +812,7 @@ func KubeArmor() { pb.RegisterProbeServiceServer(dm.Logger.LogServer, probe) dm.SetHealthStatus(pb.PolicyService_ServiceDesc.ServiceName, grpc_health_v1.HealthCheckResponse_SERVING) + dm.SetHealthStatus(pb.ProbeService_ServiceDesc.ServiceName, grpc_health_v1.HealthCheckResponse_SERVING) } reflection.Register(dm.Logger.LogServer) // Helps grpc clients list out what all svc/endpoints available diff --git a/KubeArmor/core/kubeUpdate.go b/KubeArmor/core/kubeUpdate.go index f5298713b0..87e4dbd457 100644 --- a/KubeArmor/core/kubeUpdate.go +++ b/KubeArmor/core/kubeUpdate.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "os" + "reflect" "sort" 
"strconv" "strings" @@ -345,6 +346,8 @@ func (dm *KubeArmorDaemon) UpdateEndPointWithPod(action string, pod tp.K8sPod) { // add the endpoint into the endpoint list dm.EndPoints = append(dm.EndPoints, endpoints...) + dm.EndPointsLock.Unlock() + if cfg.GlobalCfg.Policy { // update security policies for _, endpoint := range endpoints { @@ -360,19 +363,18 @@ func (dm *KubeArmorDaemon) UpdateEndPointWithPod(action string, pod tp.K8sPod) { } } - dm.EndPointsLock.Unlock() - } else if action == "MODIFIED" { newEndPoint := tp.EndPoint{} endpoints := []tp.EndPoint{} - dm.EndPointsLock.Lock() + dm.EndPointsLock.RLock() for _, endPoint := range dm.EndPoints { if pod.Metadata["namespaceName"] == endPoint.NamespaceName && pod.Metadata["podName"] == endPoint.EndPointName { endpoints = append(endpoints, endPoint) + break } } - dm.EndPointsLock.Unlock() + dm.EndPointsLock.RUnlock() if len(endpoints) == 0 { // No endpoints were added as containers ID have been just added // Same logic as ADDED @@ -527,7 +529,7 @@ func (dm *KubeArmorDaemon) UpdateEndPointWithPod(action string, pod tp.K8sPod) { } idx++ } - + dm.EndPointsLock.Unlock() for _, endpoint := range endpoints { if cfg.GlobalCfg.Policy { // update security policies @@ -543,8 +545,6 @@ func (dm *KubeArmorDaemon) UpdateEndPointWithPod(action string, pod tp.K8sPod) { } } } - - dm.EndPointsLock.Unlock() } } else { // DELETED @@ -731,9 +731,9 @@ func (dm *KubeArmorDaemon) WatchK8sPods() { } // exception: kubearmor - if _, ok := pod.Labels["kubearmor-app"]; ok { - pod.Annotations["kubearmor-policy"] = "audited" - } + // if _, ok := pod.Labels["kubearmor-app"]; ok { + // pod.Annotations["kubearmor-policy"] = "audited" + // } // == Visibility == // @@ -746,7 +746,7 @@ func (dm *KubeArmorDaemon) WatchK8sPods() { if event.Type == "ADDED" || event.Type == "MODIFIED" { exist := false - dm.K8sPodsLock.Lock() + dm.K8sPodsLock.RLock() for _, k8spod := range dm.K8sPods { if k8spod.Metadata["namespaceName"] == pod.Metadata["namespaceName"] && 
k8spod.Metadata["podName"] == pod.Metadata["podName"] { if k8spod.Annotations["kubearmor-policy"] == "patched" { @@ -755,7 +755,7 @@ func (dm *KubeArmorDaemon) WatchK8sPods() { } } } - dm.K8sPodsLock.Unlock() + dm.K8sPodsLock.RUnlock() if exist { continue @@ -1020,8 +1020,8 @@ func matchClusterSecurityPolicyRule(policy tp.SecurityPolicy) bool { // GetSecurityPolicies Function func (dm *KubeArmorDaemon) GetSecurityPolicies(identities []string, namespaceName string) []tp.SecurityPolicy { - dm.SecurityPoliciesLock.Lock() - defer dm.SecurityPoliciesLock.Unlock() + dm.SecurityPoliciesLock.RLock() + defer dm.SecurityPoliciesLock.RUnlock() secPolicies := []tp.SecurityPolicy{} @@ -1049,10 +1049,15 @@ func containsPolicy(endPointPolicies []tp.SecurityPolicy, secPolicy tp.SecurityP // UpdateSecurityPolicy Function func (dm *KubeArmorDaemon) UpdateSecurityPolicy(action string, secPolicyType string, secPolicy tp.SecurityPolicy) { - dm.EndPointsLock.Lock() - defer dm.EndPointsLock.Unlock() + dm.EndPointsLock.RLock() + endPointsLength := len(dm.EndPoints) + dm.EndPointsLock.RUnlock() + + for idx := 0; idx < endPointsLength; idx++ { + dm.EndPointsLock.RLock() + endPoint := dm.EndPoints[idx] + dm.EndPointsLock.RUnlock() - for idx, endPoint := range dm.EndPoints { // update a security policy if secPolicyType == KubeArmorPolicy { if kl.MatchIdentities(secPolicy.Spec.Selector.Identities, endPoint.Identities) && (len(secPolicy.Spec.Selector.Containers) == 0 || kl.ContainsElement(secPolicy.Spec.Selector.Containers, endPoint.ContainerName)) { @@ -1066,12 +1071,12 @@ func (dm *KubeArmorDaemon) UpdateSecurityPolicy(action string, secPolicyType str } } if new { - dm.EndPoints[idx].SecurityPolicies = append(dm.EndPoints[idx].SecurityPolicies, secPolicy) + endPoint.SecurityPolicies = append(endPoint.SecurityPolicies, secPolicy) } } else if action == "MODIFIED" { for idxP, policy := range endPoint.SecurityPolicies { if policy.Metadata["namespaceName"] == secPolicy.Metadata["namespaceName"] && 
policy.Metadata["policyName"] == secPolicy.Metadata["policyName"] { - dm.EndPoints[idx].SecurityPolicies[idxP] = secPolicy + endPoint.SecurityPolicies[idxP] = secPolicy break } } @@ -1079,23 +1084,27 @@ func (dm *KubeArmorDaemon) UpdateSecurityPolicy(action string, secPolicyType str // remove the given policy from the security policy list of this endpoint for idxP, policy := range endPoint.SecurityPolicies { if policy.Metadata["namespaceName"] == secPolicy.Metadata["namespaceName"] && policy.Metadata["policyName"] == secPolicy.Metadata["policyName"] { - dm.EndPoints[idx].SecurityPolicies = append(dm.EndPoints[idx].SecurityPolicies[:idxP], dm.EndPoints[idx].SecurityPolicies[idxP+1:]...) + endPoint.SecurityPolicies = append(endPoint.SecurityPolicies[:idxP], endPoint.SecurityPolicies[idxP+1:]...) break } } } + dm.EndPointsLock.Lock() + dm.EndPoints[idx] = endPoint + dm.EndPointsLock.Unlock() + if cfg.GlobalCfg.Policy { // update security policies - dm.Logger.UpdateSecurityPolicies("UPDATED", dm.EndPoints[idx]) + dm.Logger.UpdateSecurityPolicies("UPDATED", endPoint) if dm.RuntimeEnforcer != nil { - if dm.EndPoints[idx].PolicyEnabled == tp.KubeArmorPolicyEnabled { + if endPoint.PolicyEnabled == tp.KubeArmorPolicyEnabled { // enforce security policies - if !kl.ContainsElement(dm.SystemMonitor.UntrackedNamespaces, dm.EndPoints[idx].NamespaceName) { - dm.RuntimeEnforcer.UpdateSecurityPolicies(dm.EndPoints[idx]) + if !kl.ContainsElement(dm.SystemMonitor.UntrackedNamespaces, endPoint.NamespaceName) { + dm.RuntimeEnforcer.UpdateSecurityPolicies(endPoint) } else { - dm.Logger.Warnf("Policy cannot be enforced in untracked namespace %s", dm.EndPoints[idx].NamespaceName) + dm.Logger.Warnf("Policy cannot be enforced in untracked namespace %s", endPoint.NamespaceName) } } } @@ -1115,7 +1124,7 @@ func (dm *KubeArmorDaemon) UpdateSecurityPolicy(action string, secPolicyType str } } if new { - dm.EndPoints[idx].SecurityPolicies = append(dm.EndPoints[idx].SecurityPolicies, secPolicy) + 
endPoint.SecurityPolicies = append(endPoint.SecurityPolicies, secPolicy) } } else if action == "MODIFIED" { // when policy is modified and new ns is added in secPolicy.Spec.Selector.MatchExpressions[i].Values @@ -1125,39 +1134,43 @@ func (dm *KubeArmorDaemon) UpdateSecurityPolicy(action string, secPolicyType str if policy.Metadata["policyName"] == secPolicy.Metadata["policyName"] { if !kl.ContainsElement(secPolicy.Spec.Selector.NamespaceList, endPoint.NamespaceName) { // when policy is modified and this endPoint's ns is removed from secPolicy.Spec.Selector.MatchExpressions[i].Values - dm.EndPoints[idx].SecurityPolicies = append(dm.EndPoints[idx].SecurityPolicies[:idxP], dm.EndPoints[idx].SecurityPolicies[idxP+1:]...) + endPoint.SecurityPolicies = append(endPoint.SecurityPolicies[:idxP], endPoint.SecurityPolicies[idxP+1:]...) addNewPolicy = false break } - dm.EndPoints[idx].SecurityPolicies[idxP] = secPolicy + endPoint.SecurityPolicies[idxP] = secPolicy addNewPolicy = false break } } if addNewPolicy { - dm.EndPoints[idx].SecurityPolicies = append(dm.EndPoints[idx].SecurityPolicies, secPolicy) + endPoint.SecurityPolicies = append(endPoint.SecurityPolicies, secPolicy) } } else if action == "DELETED" { // remove the given policy from the security policy list of this endpoint for idxP, policy := range endPoint.SecurityPolicies { if policy.Metadata["policyName"] == secPolicy.Metadata["policyName"] { - dm.EndPoints[idx].SecurityPolicies = append(dm.EndPoints[idx].SecurityPolicies[:idxP], dm.EndPoints[idx].SecurityPolicies[idxP+1:]...) + endPoint.SecurityPolicies = append(endPoint.SecurityPolicies[:idxP], endPoint.SecurityPolicies[idxP+1:]...) 
break } } } + dm.EndPointsLock.Lock() + dm.EndPoints[idx] = endPoint + dm.EndPointsLock.Unlock() + if cfg.GlobalCfg.Policy { // update security policies - dm.Logger.UpdateSecurityPolicies("UPDATED", dm.EndPoints[idx]) + dm.Logger.UpdateSecurityPolicies("UPDATED", endPoint) if dm.RuntimeEnforcer != nil { - if dm.EndPoints[idx].PolicyEnabled == tp.KubeArmorPolicyEnabled { + if endPoint.PolicyEnabled == tp.KubeArmorPolicyEnabled { // enforce security policies - if !kl.ContainsElement(dm.SystemMonitor.UntrackedNamespaces, dm.EndPoints[idx].NamespaceName) { - dm.RuntimeEnforcer.UpdateSecurityPolicies(dm.EndPoints[idx]) + if !kl.ContainsElement(dm.SystemMonitor.UntrackedNamespaces, endPoint.NamespaceName) { + dm.RuntimeEnforcer.UpdateSecurityPolicies(endPoint) } else { - dm.Logger.Warnf("Policy cannot be enforced in untracked namespace %s", dm.EndPoints[idx].NamespaceName) + dm.Logger.Warnf("Policy cannot be enforced in untracked namespace %s", endPoint.NamespaceName) } } } @@ -1830,12 +1843,17 @@ func (dm *KubeArmorDaemon) WatchClusterSecurityPolicies(timeout time.Duration) c // UpdateHostSecurityPolicies Function func (dm *KubeArmorDaemon) UpdateHostSecurityPolicies() { - dm.HostSecurityPoliciesLock.Lock() - defer dm.HostSecurityPoliciesLock.Unlock() + dm.HostSecurityPoliciesLock.RLock() + hostSecurityPoliciesLength := len(dm.HostSecurityPolicies) + dm.HostSecurityPoliciesLock.RUnlock() secPolicies := []tp.HostSecurityPolicy{} - for _, policy := range dm.HostSecurityPolicies { + for idx := 0; idx < hostSecurityPoliciesLength; idx++ { + dm.EndPointsLock.RLock() + policy := dm.HostSecurityPolicies[idx] + dm.EndPointsLock.RUnlock() + if kl.MatchIdentities(policy.Spec.NodeSelector.Identities, dm.Node.Identities) { secPolicies = append(secPolicies, policy) } @@ -2254,6 +2272,12 @@ func (dm *KubeArmorDaemon) ParseAndUpdateHostSecurityPolicy(event tp.K8sKubeArmo new := true for idx, policy := range dm.HostSecurityPolicies { if policy.Metadata["policyName"] == 
secPolicy.Metadata["policyName"] { + if reflect.DeepEqual(policy, secPolicy) { + kg.Debugf("No updates to policy %s", policy.Metadata["policyName"]) + dm.HostSecurityPoliciesLock.Unlock() + return pb.PolicyStatus_Applied + } + dm.HostSecurityPolicies[idx] = secPolicy event.Type = "MODIFIED" new = false @@ -2266,6 +2290,12 @@ func (dm *KubeArmorDaemon) ParseAndUpdateHostSecurityPolicy(event tp.K8sKubeArmo } else if event.Type == "MODIFIED" { for idx, policy := range dm.HostSecurityPolicies { if policy.Metadata["policyName"] == secPolicy.Metadata["policyName"] { + if reflect.DeepEqual(policy, secPolicy) { + kg.Debugf("No updates to policy %s", policy.Metadata["policyName"]) + dm.HostSecurityPoliciesLock.Unlock() + return pb.PolicyStatus_Applied + } + dm.HostSecurityPolicies[idx] = secPolicy break } @@ -2465,9 +2495,6 @@ func validateDefaultPosture(key string, ns *corev1.Namespace, defaultPosture str // UpdateDefaultPosture Function func (dm *KubeArmorDaemon) UpdateDefaultPosture(action string, namespace string, defaultPosture tp.DefaultPosture, annotated bool) { - dm.EndPointsLock.Lock() - defer dm.EndPointsLock.Unlock() - dm.DefaultPosturesLock.Lock() defer dm.DefaultPosturesLock.Unlock() @@ -2485,25 +2512,36 @@ func (dm *KubeArmorDaemon) UpdateDefaultPosture(action string, namespace string, } dm.Logger.UpdateDefaultPosture(action, namespace, defaultPosture) - for idx, endPoint := range dm.EndPoints { + dm.EndPointsLock.RLock() + endPointsLen := len(dm.EndPoints) + dm.EndPointsLock.RUnlock() + + for idx := 0; idx < endPointsLen; idx++ { + dm.EndPointsLock.RLock() + endPoint := dm.EndPoints[idx] + dm.EndPointsLock.RUnlock() // update a security policy if namespace == endPoint.NamespaceName { - if dm.EndPoints[idx].DefaultPosture == defaultPosture { + if endPoint.DefaultPosture == defaultPosture { continue } - dm.Logger.Printf("Updating default posture for %s with %v namespace default %v", endPoint.EndPointName, dm.EndPoints[idx].DefaultPosture, defaultPosture) - 
dm.EndPoints[idx].DefaultPosture = defaultPosture + dm.Logger.Printf("Updating default posture for %s with %v namespace default %v", endPoint.EndPointName, endPoint.DefaultPosture, defaultPosture) + endPoint.DefaultPosture = defaultPosture + + dm.EndPointsLock.Lock() + dm.EndPoints[idx] = endPoint + dm.EndPointsLock.Unlock() if cfg.GlobalCfg.Policy { // update security policies if dm.RuntimeEnforcer != nil { - if dm.EndPoints[idx].PolicyEnabled == tp.KubeArmorPolicyEnabled { + if endPoint.PolicyEnabled == tp.KubeArmorPolicyEnabled { // enforce security policies - if !kl.ContainsElement(dm.SystemMonitor.UntrackedNamespaces, dm.EndPoints[idx].NamespaceName) { - dm.RuntimeEnforcer.UpdateSecurityPolicies(dm.EndPoints[idx]) + if !kl.ContainsElement(dm.SystemMonitor.UntrackedNamespaces, endPoint.NamespaceName) { + dm.RuntimeEnforcer.UpdateSecurityPolicies(endPoint) } else { - dm.Logger.Warnf("Policy cannot be enforced in untracked namespace %s", dm.EndPoints[idx].NamespaceName) + dm.Logger.Warnf("Policy cannot be enforced in untracked namespace %s", endPoint.NamespaceName) } } @@ -2740,16 +2778,18 @@ func (dm *KubeArmorDaemon) WatchConfigMap() cache.InformerSynced { cfg.GlobalCfg.AlertThrottling = (cm.Data[cfg.ConfigAlertThrottling] == "true") } if _, ok := cm.Data[cfg.ConfigMaxAlertPerSec]; ok { - cfg.GlobalCfg.MaxAlertPerSec, err = strconv.Atoi(cm.Data[cfg.ConfigMaxAlertPerSec]) + maxAlertPerSec, err := strconv.ParseInt(cm.Data[cfg.ConfigMaxAlertPerSec], 10, 32) if err != nil { dm.Logger.Warnf("Error: %s", err) } + cfg.GlobalCfg.MaxAlertPerSec = int32(maxAlertPerSec) } - if _, ok := cm.Data[cfg.ConfigMaxAlertPerSec]; ok { - cfg.GlobalCfg.ThrottleSec, err = strconv.Atoi(cm.Data[cfg.ConfigThrottleSec]) + if _, ok := cm.Data[cfg.ConfigThrottleSec]; ok { + throttleSec, err := strconv.ParseInt(cm.Data[cfg.ConfigThrottleSec], 10, 32) if err != nil { dm.Logger.Warnf("Error: %s", err) } + cfg.GlobalCfg.ThrottleSec = int32(throttleSec) } 
dm.SystemMonitor.UpdateThrottlingConfig() @@ -2790,14 +2830,18 @@ func (dm *KubeArmorDaemon) WatchConfigMap() cache.InformerSynced { if _, ok := cm.Data[cfg.ConfigAlertThrottling]; ok { cfg.GlobalCfg.AlertThrottling = (cm.Data[cfg.ConfigAlertThrottling] == "true") } - cfg.GlobalCfg.MaxAlertPerSec, err = strconv.Atoi(cm.Data[cfg.ConfigMaxAlertPerSec]) + + maxAlertPerSec, err := strconv.ParseInt(cm.Data[cfg.ConfigMaxAlertPerSec], 10, 32) if err != nil { dm.Logger.Warnf("Error: %s", err) } - cfg.GlobalCfg.ThrottleSec, err = strconv.Atoi(cm.Data[cfg.ConfigThrottleSec]) + cfg.GlobalCfg.MaxAlertPerSec = int32(maxAlertPerSec) + + throttleSec, err := strconv.ParseInt(cm.Data[cfg.ConfigThrottleSec], 10, 32) if err != nil { dm.Logger.Warnf("Error: %s", err) } + cfg.GlobalCfg.ThrottleSec = int32(throttleSec) dm.SystemMonitor.UpdateThrottlingConfig() } }, diff --git a/KubeArmor/core/unorchestratedUpdates.go b/KubeArmor/core/unorchestratedUpdates.go index 8ec43dfa4a..13f3a0fce5 100644 --- a/KubeArmor/core/unorchestratedUpdates.go +++ b/KubeArmor/core/unorchestratedUpdates.go @@ -225,16 +225,16 @@ func (dm *KubeArmorDaemon) handlePolicyEvent(eventType string, createEndPoint bo } } else { // DELETED // update security policies after policy deletion - dm.EndPoints[endpointIdx] = newPoint - - dm.Logger.UpdateSecurityPolicies("DELETED", newPoint) - dm.RuntimeEnforcer.UpdateSecurityPolicies(newPoint) - - // delete endpoint if no containers or policies - if len(newPoint.Containers) == 0 && len(newPoint.SecurityPolicies) == 0 { - dm.EndPoints = append(dm.EndPoints[:endpointIdx], dm.EndPoints[endpointIdx+1:]...) 
- // since the length of endpoints slice reduced - endpointIdx-- + if endpointIdx >= 0 { + dm.EndPoints[endpointIdx] = newPoint + dm.Logger.UpdateSecurityPolicies("DELETED", newPoint) + dm.RuntimeEnforcer.UpdateSecurityPolicies(newPoint) + // delete endpoint if no containers or policies + if len(newPoint.Containers) == 0 && len(newPoint.SecurityPolicies) == 0 { + dm.EndPoints = append(dm.EndPoints[:endpointIdx], dm.EndPoints[endpointIdx+1:]...) + // since the length of endpoints slice reduced + endpointIdx-- + } } } @@ -633,6 +633,7 @@ func (dm *KubeArmorDaemon) ParseAndUpdateContainerSecurityPolicy(event tp.K8sKub newPoint := tp.EndPoint{} policyStatus := pb.PolicyStatus_Applied + // consider reducing coverage for this lock dm.EndPointsLock.Lock() defer dm.EndPointsLock.Unlock() for idx, endPoint := range dm.EndPoints { diff --git a/KubeArmor/enforcer/appArmorEnforcer.go b/KubeArmor/enforcer/appArmorEnforcer.go index 29d03ecbac..74be72e4ef 100644 --- a/KubeArmor/enforcer/appArmorEnforcer.go +++ b/KubeArmor/enforcer/appArmorEnforcer.go @@ -114,11 +114,11 @@ profile apparmor-default flags=(attach_disconnected,mediate_deleted) { existingProfiles := []string{} - if pids, err := os.ReadDir(filepath.Clean("/proc")); err == nil { + if pids, err := os.ReadDir(filepath.Clean(cfg.GlobalCfg.ProcFsMount)); err == nil { for _, f := range pids { if f.IsDir() { if _, err := strconv.Atoi(f.Name()); err == nil { - if content, err := os.ReadFile(filepath.Clean("/proc/" + f.Name() + "/attr/current")); err == nil { + if content, err := os.ReadFile(filepath.Clean(cfg.GlobalCfg.ProcFsMount + "/" + f.Name() + "/attr/current")); err == nil { line := strings.Split(string(content), "\n")[0] words := strings.Split(line, " ") @@ -374,6 +374,7 @@ umount, signal, unix, ptrace, +dbus, file, network, @@ -439,32 +440,37 @@ func (ae *AppArmorEnforcer) UnregisterAppArmorHostProfile() bool { return true } + ae.Logger.Printf("Unregistering the KubeArmor host profile from %s", cfg.GlobalCfg.Host) + 
ae.AppArmorProfilesLock.Lock() defer ae.AppArmorProfilesLock.Unlock() - if err := ae.CreateAppArmorHostProfile(); err != nil { - ae.Logger.Warnf("Unable to reset the KubeArmor host profile in %s", cfg.GlobalCfg.Host) + if err := kl.RunCommandAndWaitWithErr("aa-remove-unknown", []string{}); err != nil { + ae.Logger.Warnf("Unable to cleanup the KubeArmor host profile in %s", cfg.GlobalCfg.Host) - if err := os.Remove(appArmorHostFile); err != nil { - ae.Logger.Warnf("Unable to remove the KubeArmor host profile from %s (%s)", cfg.GlobalCfg.Host, err.Error()) + if err := ae.CreateAppArmorHostProfile(); err != nil { + ae.Logger.Warnf("Unable to reset the KubeArmor host profile in %s", cfg.GlobalCfg.Host) + + if err := os.Remove(appArmorHostFile); err != nil { + ae.Logger.Warnf("Unable to remove the KubeArmor host profile from %s (%s)", cfg.GlobalCfg.Host, err.Error()) + } + + return false } - return false - } + if err := kl.RunCommandAndWaitWithErr("apparmor_parser", []string{"-r", "-W", "-C", appArmorHostFile}); err != nil { + ae.Logger.Warnf("Unable to reset the KubeArmor host profile in %s", cfg.GlobalCfg.Host) - if err := kl.RunCommandAndWaitWithErr("apparmor_parser", []string{"-r", "-W", "-C", appArmorHostFile}); err != nil { - ae.Logger.Warnf("Unable to reset the KubeArmor host profile in %s", cfg.GlobalCfg.Host) + if err := os.Remove(appArmorHostFile); err != nil { + ae.Logger.Warnf("Unable to remove the KubeArmor host profile from %s (%s)", cfg.GlobalCfg.Host, err.Error()) + } + + } if err := os.Remove(appArmorHostFile); err != nil { ae.Logger.Warnf("Unable to remove the KubeArmor host profile from %s (%s)", cfg.GlobalCfg.Host, err.Error()) + return false } - - return false - } - - if err := os.Remove(appArmorHostFile); err != nil { - ae.Logger.Warnf("Unable to remove the KubeArmor host profile from %s (%s)", cfg.GlobalCfg.Host, err.Error()) - return false } ae.Logger.Printf("Unregistered the KubeArmor host profile from %s", cfg.GlobalCfg.Host) diff --git 
a/KubeArmor/enforcer/appArmorProfile.go b/KubeArmor/enforcer/appArmorProfile.go index 6068824db0..a1e2accc5b 100644 --- a/KubeArmor/enforcer/appArmorProfile.go +++ b/KubeArmor/enforcer/appArmorProfile.go @@ -433,6 +433,12 @@ func (ae *AppArmorEnforcer) GenerateProfileBody(securityPolicies []tp.SecurityPo ae.Logger.Errf("Error while copying global rules to local profile for %s: %s", source, err.Error()) continue } + for proc, config := range profile.ProcessPaths { + add := checkIfGlobalRuleToBeAdded(proc, val.ProcessPaths) + if add { + newval.ProcessPaths[proc] = config + } + } for file, config := range profile.FilePaths { add := checkIfGlobalRuleToBeAdded(file, val.FilePaths) if add { diff --git a/KubeArmor/enforcer/appArmorTemplate.go b/KubeArmor/enforcer/appArmorTemplate.go index d85ea81e18..0913d5e39d 100644 --- a/KubeArmor/enforcer/appArmorTemplate.go +++ b/KubeArmor/enforcer/appArmorTemplate.go @@ -79,7 +79,7 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { {{template "file-section" . 
}} ## == DISPATCHER START == ## {{- range $source, $value:= $.FromSource}} - {{$source}} px -> {{$.Name}}-{{$source}}, + {{$source}} px -> {{$v := $.Name | split "."}}{{$v._0}}_{{ regexReplaceAllLiteral "[^a-z A-Z 0-9]" $source "" }}, {{- end}} {{- range $value, $data := .ProcessPaths}} {{- $suffix := ""}} @@ -146,7 +146,7 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { ## == FromSource per binary profiles START == ## {{- range $source, $value := $.FromSource}} -profile {{$.Name}}-{{$source}} { +profile {{$v := $.Name | split "."}}{{$v._0}}_{{ regexReplaceAllLiteral "[^a-z A-Z 0-9]" $source "" }} { {{$source}} rix, {{template "pre-section" $value }} {{template "file-section" $value}} @@ -213,6 +213,7 @@ profile {{$.Name}}-{{$source}} { signal, unix, ptrace, + dbus, {{end}} {{ if .File}}file,{{end}} {{ if .Network}}network,{{end}} diff --git a/KubeArmor/enforcer/bpflsm/enforcer.go b/KubeArmor/enforcer/bpflsm/enforcer.go index 1a11ee7dc5..7c3fbc1c40 100644 --- a/KubeArmor/enforcer/bpflsm/enforcer.go +++ b/KubeArmor/enforcer/bpflsm/enforcer.go @@ -23,8 +23,8 @@ import ( tp "github.com/kubearmor/KubeArmor/KubeArmor/types" ) -//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang enforcer ../../BPF/enforcer.bpf.c -- -I/usr/include/ -O2 -g -//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang enforcer_path ../../BPF/enforcer_path.bpf.c -- -I/usr/include/ -O2 -g +//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang enforcer ../../BPF/enforcer.bpf.c -- -I/usr/include/ -O2 -g -fno-stack-protector +//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang enforcer_path ../../BPF/enforcer_path.bpf.c -- -I/usr/include/ -O2 -g -fno-stack-protector // ===================== // // == BPFLSM Enforcer == // @@ -375,8 +375,8 @@ func (be *BPFEnforcer) TraceEvents() { case mon.DropAlert: log.Operation = "AlertThreshold" log.Type = "SystemEvent" - log.MaxAlertsPerSec = int32(cfg.GlobalCfg.MaxAlertPerSec) - 
log.DroppingAlertsInterval = int32(cfg.GlobalCfg.ThrottleSec) + log.MaxAlertsPerSec = cfg.GlobalCfg.MaxAlertPerSec + log.DroppingAlertsInterval = cfg.GlobalCfg.ThrottleSec } // fallback logic if we don't receive source from BuildLogBase() if log.Operation != "Process" && len(log.Source) == 0 { diff --git a/KubeArmor/enforcer/bpflsm/enforcer_bpfeb.o b/KubeArmor/enforcer/bpflsm/enforcer_bpfeb.o index a832c487a9..7d2cead750 100644 Binary files a/KubeArmor/enforcer/bpflsm/enforcer_bpfeb.o and b/KubeArmor/enforcer/bpflsm/enforcer_bpfeb.o differ diff --git a/KubeArmor/enforcer/bpflsm/enforcer_bpfel.o b/KubeArmor/enforcer/bpflsm/enforcer_bpfel.o index a058fdcd26..af225eb2d5 100644 Binary files a/KubeArmor/enforcer/bpflsm/enforcer_bpfel.o and b/KubeArmor/enforcer/bpflsm/enforcer_bpfel.o differ diff --git a/KubeArmor/enforcer/bpflsm/enforcer_path_bpfeb.o b/KubeArmor/enforcer/bpflsm/enforcer_path_bpfeb.o index 1f73cb1700..80ff9b63b1 100644 Binary files a/KubeArmor/enforcer/bpflsm/enforcer_path_bpfeb.o and b/KubeArmor/enforcer/bpflsm/enforcer_path_bpfeb.o differ diff --git a/KubeArmor/enforcer/bpflsm/enforcer_path_bpfel.o b/KubeArmor/enforcer/bpflsm/enforcer_path_bpfel.o index 9cf9923363..0045eeffd0 100644 Binary files a/KubeArmor/enforcer/bpflsm/enforcer_path_bpfel.o and b/KubeArmor/enforcer/bpflsm/enforcer_path_bpfel.o differ diff --git a/KubeArmor/feeder/feeder.go b/KubeArmor/feeder/feeder.go index 8917b3d0d3..c1c4203c1b 100644 --- a/KubeArmor/feeder/feeder.go +++ b/KubeArmor/feeder/feeder.go @@ -579,6 +579,20 @@ func (fd *Feeder) PushLog(log tp.Log) { // gRPC output if log.Type == "MatchedPolicy" || log.Type == "MatchedHostPolicy" || log.Type == "SystemEvent" { + + // checking throttling condition for "Audit" alerts when enforcer is 'eBPF Monitor' + if cfg.GlobalCfg.AlertThrottling && ((strings.Contains(log.Action, "Audit") && log.Enforcer == "eBPF Monitor") || (log.Type == "MatchedHostPolicy" && (log.Enforcer == "AppArmor" || log.Enforcer == "eBPF Monitor"))) { + nsKey 
:= fd.ContainerNsKey[log.ContainerID] + alert, throttle := fd.ShouldDropAlertsPerContainer(nsKey.PidNs, nsKey.MntNs) + if alert && throttle { + return + } else if alert && !throttle { + log.Operation = "AlertThreshold" + log.Type = "SystemEvent" + log.MaxAlertsPerSec = cfg.GlobalCfg.MaxAlertPerSec + log.DroppingAlertsInterval = cfg.GlobalCfg.ThrottleSec + } + } pbAlert := pb.Alert{} pbAlert.Timestamp = log.Timestamp diff --git a/KubeArmor/feeder/policyMatcher.go b/KubeArmor/feeder/policyMatcher.go index ad0dd7fd26..c369291e31 100644 --- a/KubeArmor/feeder/policyMatcher.go +++ b/KubeArmor/feeder/policyMatcher.go @@ -266,8 +266,9 @@ func (fd *Feeder) UpdateSecurityPolicies(action string, endPoint tp.EndPoint) { name := endPoint.NamespaceName + "_" + endPoint.EndPointName if action == "DELETED" { - delete(fd.SecurityPolicies, name) - return + if _, ok := fd.SecurityPolicies[name]; ok { + delete(fd.SecurityPolicies, name) + } } // ADDED | MODIFIED @@ -1298,7 +1299,7 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { break // break, so that once source is matched for a log it doesn't look for other cases } // match sources - if (!secPolicy.IsFromSource) || (secPolicy.IsFromSource && (secPolicy.Source == log.ParentProcessName || secPolicy.Source == log.ProcessName)) { + if (!secPolicy.IsFromSource) || (secPolicy.IsFromSource && (strings.HasPrefix(log.Source, secPolicy.Source+" ") || secPolicy.Source == log.ProcessName)) { matchedFlags := false protocol := fetchProtocol(log.Resource) @@ -1480,7 +1481,7 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { continue } // match sources - if (!secPolicy.IsFromSource) || (secPolicy.IsFromSource && (secPolicy.Source == log.ParentProcessName || secPolicy.Source == log.ProcessName)) { + if (!secPolicy.IsFromSource) || (secPolicy.IsFromSource && (strings.HasPrefix(log.Source, secPolicy.Source+" ") || secPolicy.Source == log.ProcessName)) { skip := false for _, matchCapability := range 
strings.Split(secPolicy.Resource, ",") { @@ -1740,20 +1741,6 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { return tp.Log{} } - // check for throttling for "Audit" alerts - if cfg.GlobalCfg.AlertThrottling && strings.Contains(log.Action, "Audit") { - nsKey := fd.ContainerNsKey[log.ContainerID] - alert, throttle := fd.ShouldDropAlertsPerContainer(nsKey.PidNs, nsKey.MntNs) - if alert && throttle { - return tp.Log{} - } else if alert && !throttle { - log.Operation = "AlertThreshold" - log.Type = "SystemEvent" - log.MaxAlertsPerSec = int32(cfg.GlobalCfg.MaxAlertPerSec) - log.DroppingAlertsInterval = int32(cfg.GlobalCfg.ThrottleSec) - } - } - return log } } else { // host @@ -1783,20 +1770,6 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { return tp.Log{} } - // check for throttling for "Audit" alerts - if cfg.GlobalCfg.AlertThrottling && strings.Contains(log.Action, "Audit") { - nsKey := fd.ContainerNsKey[log.ContainerID] - alert, throttle := fd.ShouldDropAlertsPerContainer(nsKey.PidNs, nsKey.MntNs) - if alert && throttle { - return tp.Log{} - } else if alert && !throttle { - log.Operation = "AlertThreshold" - log.Type = "SystemEvent" - log.MaxAlertsPerSec = int32(cfg.GlobalCfg.MaxAlertPerSec) - log.DroppingAlertsInterval = int32(cfg.GlobalCfg.ThrottleSec) - } - } - return log } } diff --git a/KubeArmor/fuzz/build.sh b/KubeArmor/fuzz/build.sh new file mode 100644 index 0000000000..071647306d --- /dev/null +++ b/KubeArmor/fuzz/build.sh @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright 2024 Authors of KubeArmor +printf "package transform\nimport _ \"github.com/AdamKorcz/go-118-fuzz-build/testing\"\n" > $SRC/KubeArmor/KubeArmor/register.go +go mod tidy +compile_native_go_fuzzer github.com/kubearmor/KubeArmor/KubeArmor/core FuzzContainerPolicy FuzzContainerPolicy +compile_native_go_fuzzer github.com/kubearmor/KubeArmor/KubeArmor/core FuzzHostPolicy FuzzHostPolicy + diff --git a/KubeArmor/main_test.go b/KubeArmor/main_test.go 
index adf21aeb33..c4f3245ac9 100644 --- a/KubeArmor/main_test.go +++ b/KubeArmor/main_test.go @@ -13,7 +13,7 @@ import ( var clusterPtr, gRPCPtr, logPathPtr *string var enableKubeArmorPolicyPtr, enableKubeArmorHostPolicyPtr, enableKubeArmorVMPtr, coverageTestPtr, enableK8sEnv, tlsEnabled *bool -var defaultFilePosturePtr, defaultCapabilitiesPosturePtr, defaultNetworkPosturePtr, hostDefaultCapabilitiesPosturePtr, hostDefaultNetworkPosturePtr, hostDefaultFilePosturePtr *string +var defaultFilePosturePtr, defaultCapabilitiesPosturePtr, defaultNetworkPosturePtr, hostDefaultCapabilitiesPosturePtr, hostDefaultNetworkPosturePtr, hostDefaultFilePosturePtr, procFsMountPtr *string func init() { // options (string) @@ -32,6 +32,8 @@ func init() { hostDefaultNetworkPosturePtr = flag.String("hostDefaultNetworkPosture", "block", "configuring default enforcement action in global network context {allow|audit|block}") hostDefaultCapabilitiesPosturePtr = flag.String("hostDefaultCapabilitiesPosture", "block", "configuring default enforcement action in global capability context {allow|audit|block}") + procFsMountPtr = flag.String("procfsMount", "/proc", "Path to the BPF filesystem to use for storing maps") + // options (boolean) enableKubeArmorPolicyPtr = flag.Bool("enableKubeArmorPolicy", true, "enabling KubeArmorPolicy") enableKubeArmorHostPolicyPtr = flag.Bool("enableKubeArmorHostPolicy", true, "enabling KubeArmorHostPolicy") @@ -42,6 +44,7 @@ func init() { // options (boolean) coverageTestPtr = flag.Bool("coverageTest", false, "enabling CoverageTest") + } // TestMain - test to drive external testing coverage @@ -64,6 +67,7 @@ func TestMain(t *testing.T) { fmt.Sprintf("-enableKubeArmorHostPolicy=%s", strconv.FormatBool(*enableKubeArmorHostPolicyPtr)), fmt.Sprintf("-coverageTest=%s", strconv.FormatBool(*coverageTestPtr)), fmt.Sprintf("-tlsEnabled=%s", strconv.FormatBool(*tlsEnabled)), + fmt.Sprintf("-procfsMount=%s", *procFsMountPtr), } t.Log("[INFO] Executed KubeArmor") diff --git 
a/KubeArmor/monitor/logUpdate.go b/KubeArmor/monitor/logUpdate.go index a1d4951fe6..1462249dcb 100644 --- a/KubeArmor/monitor/logUpdate.go +++ b/KubeArmor/monitor/logUpdate.go @@ -517,8 +517,8 @@ func (mon *SystemMonitor) UpdateLogs() { case DropAlert: // throttling alert log.Operation = "AlertThreshold" log.Type = "SystemEvent" - log.MaxAlertsPerSec = int32(cfg.GlobalCfg.MaxAlertPerSec) - log.DroppingAlertsInterval = int32(cfg.GlobalCfg.ThrottleSec) + log.MaxAlertsPerSec = cfg.GlobalCfg.MaxAlertPerSec + log.DroppingAlertsInterval = cfg.GlobalCfg.ThrottleSec default: continue diff --git a/KubeArmor/monitor/processTree.go b/KubeArmor/monitor/processTree.go index 4565340327..0c39d49158 100644 --- a/KubeArmor/monitor/processTree.go +++ b/KubeArmor/monitor/processTree.go @@ -5,6 +5,7 @@ package monitor import ( "os" + "path/filepath" "strconv" "strings" "sync" @@ -231,7 +232,7 @@ func (mon *SystemMonitor) GetParentExecPath(containerID string, ctx SyscallConte if readlink { // just in case that it couldn't still get the full path - if data, err := os.Readlink("/proc/" + strconv.FormatUint(uint64(ctx.HostPPID), 10) + "/exe"); err == nil && data != "" && data != "/" { + if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, strconv.FormatUint(uint64(ctx.HostPPID), 10), "/exe")); err == nil && data != "" && data != "/" { // // Store it in the ActiveHostPidMap so we don't need to read procfs again // // We don't call BuildPidNode Here cause that will put this into a cyclic function call loop // if pidMap, ok := ActiveHostPidMap[containerID]; ok { @@ -276,7 +277,7 @@ func (mon *SystemMonitor) GetExecPath(containerID string, ctx SyscallContext, re if readlink { // just in case that it couldn't still get the full path - if data, err := os.Readlink("/proc/" + strconv.FormatUint(uint64(ctx.HostPID), 10) + "/exe"); err == nil && data != "" && data != "/" { + if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, strconv.FormatUint(uint64(ctx.HostPID), 
10), "/exe")); err == nil && data != "" && data != "/" { // // Store it in the ActiveHostPidMap so we don't need to read procfs again // if pidMap, ok := ActiveHostPidMap[containerID]; ok { // if node, ok := pidMap[ctx.HostPID]; ok { @@ -318,7 +319,7 @@ func (mon *SystemMonitor) GetCommand(containerID string, ctx SyscallContext, rea if readlink { // just in case that it couldn't still get the full path - if data, err := os.Readlink("/proc/" + strconv.FormatUint(uint64(ctx.HostPID), 10) + "/exe"); err == nil && data != "" && data != "/" { + if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, strconv.FormatUint(uint64(ctx.HostPID), 10), "/exe")); err == nil && data != "" && data != "/" { return data } else if err != nil { mon.Logger.Debugf("Could not read path from procfs due to %s", err.Error()) diff --git a/KubeArmor/monitor/syscalls_arm64.go b/KubeArmor/monitor/syscalls_arm64.go index c190303868..4b5cc800ad 100644 --- a/KubeArmor/monitor/syscalls_arm64.go +++ b/KubeArmor/monitor/syscalls_arm64.go @@ -63,7 +63,7 @@ const ( SocketConnect = 462 SocketAccept = 463 - Capable = 464 + Capable = 464 DropAlert = 0 ) diff --git a/KubeArmor/utils/bpflsmprobe/probe_bpfeb.o b/KubeArmor/utils/bpflsmprobe/probe_bpfeb.o index 0775008831..1e17482196 100644 Binary files a/KubeArmor/utils/bpflsmprobe/probe_bpfeb.o and b/KubeArmor/utils/bpflsmprobe/probe_bpfeb.o differ diff --git a/KubeArmor/utils/bpflsmprobe/probe_bpfel.o b/KubeArmor/utils/bpflsmprobe/probe_bpfel.o index 4446f3ada9..254d96e3c8 100644 Binary files a/KubeArmor/utils/bpflsmprobe/probe_bpfel.o and b/KubeArmor/utils/bpflsmprobe/probe_bpfel.o differ diff --git a/STABLE-RELEASE b/STABLE-RELEASE index 92f76b4232..2aca8c01f5 100644 --- a/STABLE-RELEASE +++ b/STABLE-RELEASE @@ -1 +1 @@ -v1.4.3 +v1.4.6 diff --git a/deployments/get/objects.go b/deployments/get/objects.go index f8d2312a52..6e9a28550c 100644 --- a/deployments/get/objects.go +++ b/deployments/get/objects.go @@ -264,6 +264,7 @@ func 
GenerateDaemonSet(env, namespace string) *appsv1.DaemonSet { var terminationGracePeriodSeconds = int64(60) var args = []string{ "-gRPC=" + strconv.Itoa(int(port)), + "-procfsMount=/host/procfs", } var containerVolumeMounts = []corev1.VolumeMount{ @@ -381,7 +382,6 @@ func GenerateDaemonSet(env, namespace string) *appsv1.DaemonSet { Operator: "Exists", }, }, - HostPID: true, HostNetwork: true, RestartPolicy: "Always", DNSPolicy: "ClusterFirstWithHostNet", diff --git a/deployments/helm/KubeArmorOperator/crds/operator.kubearmor.com_kubearmorconfigs.yaml b/deployments/helm/KubeArmorOperator/crds/operator.kubearmor.com_kubearmorconfigs.yaml index c45e8100c6..f2d2b62512 100644 --- a/deployments/helm/KubeArmorOperator/crds/operator.kubearmor.com_kubearmorconfigs.yaml +++ b/deployments/helm/KubeArmorOperator/crds/operator.kubearmor.com_kubearmorconfigs.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: kubearmorconfigs.operator.kubearmor.com spec: group: operator.kubearmor.com @@ -25,20 +24,27 @@ spec: description: KubeArmorConfig is the Schema for the KubeArmorConfigs API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: description: KubeArmorConfigSpec defines the desired state of KubeArmorConfig properties: + alertThrottling: + type: boolean defaultCapabilitiesPosture: enum: - audit @@ -127,12 +133,37 @@ spec: - Never type: string type: object - seccompEnabled: - type: boolean - alertThrottling: - type: boolean maxAlertPerSec: type: integer + recommendedPolicies: + properties: + enable: + type: boolean + excludePolicy: + items: + type: string + type: array + matchExpressions: + items: + properties: + key: + enum: + - namespace + type: string + operator: + enum: + - In + - NotIn + type: string + values: + items: + type: string + type: array + type: object + type: array + type: object + seccompEnabled: + type: boolean throttleSec: type: integer tls: @@ -156,9 +187,9 @@ spec: message: type: string phase: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state - of cluster Important: Run "make" to regenerate code after modifying - this file' + description: |- + INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + Important: Run "make" to regenerate code after modifying this file type: string type: object type: object @@ -166,9 +197,3 @@ spec: storage: true subresources: 
status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/deployments/operator/operator.yaml b/deployments/operator/operator.yaml index 997f2376b7..93bd2bc615 100644 --- a/deployments/operator/operator.yaml +++ b/deployments/operator/operator.yaml @@ -2,8 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: kubearmorconfigs.operator.kubearmor.com spec: group: operator.kubearmor.com @@ -24,20 +23,27 @@ spec: description: KubeArmorConfig is the Schema for the KubeArmorConfigs API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: description: KubeArmorConfigSpec defines the desired state of KubeArmorConfig properties: + alertThrottling: + type: boolean defaultCapabilitiesPosture: enum: - audit @@ -126,12 +132,37 @@ spec: - Never type: string type: object - seccompEnabled: - type: boolean - alertThrottling: - type: boolean maxAlertPerSec: type: integer + recommendedPolicies: + properties: + enable: + type: boolean + excludePolicy: + items: + type: string + type: array + matchExpressions: + items: + properties: + key: + enum: + - namespace + type: string + operator: + enum: + - In + - NotIn + type: string + values: + items: + type: string + type: array + type: object + type: array + type: object + seccompEnabled: + type: boolean throttleSec: type: integer tls: @@ -155,9 +186,9 @@ spec: message: type: string phase: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state - of cluster Important: Run "make" to regenerate code after modifying - this file' + description: |- + INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + Important: Run "make" to regenerate code after modifying this file type: string type: object type: object @@ -165,12 +196,6 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: v1 kind: ServiceAccount @@ -290,6 +315,14 @@ rules: - list - watch - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch - apiGroups: - security.kubearmor.com resources: @@ -388,6 +421,10 @@ rules: - cronjobs verbs: - get + - patch + - list + - watch + - update - apiGroups: - security.kubearmor.com resources: diff --git a/examples/multiubuntu/build/Dockerfile b/examples/multiubuntu/build/Dockerfile index e0114c7466..d0897516a7 100644 --- a/examples/multiubuntu/build/Dockerfile +++ 
b/examples/multiubuntu/build/Dockerfile @@ -1,5 +1,14 @@ # SPDX-License-Identifier: Apache-2.0 -# Copyright 2021 Authors of KubeArmor +# Copyright 2024 Authors of KubeArmor + +FROM ubuntu:18.04 AS builder + +RUN apt-get update && apt install -y gcc +COPY helloworld/ /helloworld/ +COPY readwrite/ /readwrite/ + +RUN gcc -o hello /helloworld/hello.c +RUN gcc -o readwriter /readwrite/readwrite.c FROM ubuntu:18.04 @@ -30,7 +39,8 @@ RUN echo "key file" >> /credentials/keys/priv.key RUN useradd --create-home --shell /bin/bash user1 RUN echo 'user1:passwd1' | chpasswd -COPY hello /home/user1/hello +COPY --from=builder /hello /hello +COPY --from=builder /hello /home/user1/hello RUN chown user1:user1 /home/user1/hello RUN echo "secret file user1" >> /home/user1/secret_data1.txt @@ -44,8 +54,8 @@ RUN chown user1:user1 /home/user1/dir1/key1.txt RUN echo "other file" >> /home/user1/otherfile.txt RUN chown user1:user1 /home/user1/otherfile.txt -COPY readwriter /readwrite -COPY readwriter /home/user1/readwrite +COPY --from=builder /readwriter /readwrite +COPY --from=builder /readwriter /home/user1/readwrite RUN chown user1:user1 /home/user1/readwrite -CMD [ "/entrypoint.sh" ] +CMD [ "/entrypoint.sh" ] \ No newline at end of file diff --git a/examples/multiubuntu/build/hello b/examples/multiubuntu/build/hello deleted file mode 100755 index 7951192a02..0000000000 Binary files a/examples/multiubuntu/build/hello and /dev/null differ diff --git a/examples/multiubuntu/build/readwriter b/examples/multiubuntu/build/readwriter deleted file mode 100755 index cc0f1ee47d..0000000000 Binary files a/examples/multiubuntu/build/readwriter and /dev/null differ diff --git a/pkg/KubeArmorController/Dockerfile b/pkg/KubeArmorController/Dockerfile index 51a545cebc..6ddf75feb3 100644 --- a/pkg/KubeArmorController/Dockerfile +++ b/pkg/KubeArmorController/Dockerfile @@ -30,6 +30,7 @@ ARG VERSION=latest LABEL name="kubearmor-controller" \ vendor="AccuKnox" \ + maintainer="Barun Acharya, Ramakant Sharma" 
\ version=${VERSION} \ release=${VERSION} \ summary="kubearmor-controller container image based on redhat ubi" \ diff --git a/pkg/KubeArmorOperator/Dockerfile b/pkg/KubeArmorOperator/Dockerfile index 16ccaed805..a85fcdb13f 100644 --- a/pkg/KubeArmorOperator/Dockerfile +++ b/pkg/KubeArmorOperator/Dockerfile @@ -35,6 +35,7 @@ COPY $OPERATOR_DIR/enforcer enforcer COPY $OPERATOR_DIR/k8s k8s COPY $OPERATOR_DIR/runtime runtime COPY $OPERATOR_DIR/seccomp seccomp +COPY $OPERATOR_DIR/recommend recommend # Build RUN CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GO111MODULE=on go build -a -o operator cmd/operator/main.go @@ -46,6 +47,7 @@ ARG VERSION=latest LABEL name="kubearmor-operator" \ vendor="Accuknox" \ + maintainer="Barun Acharya, Ramakant Sharma" \ version=${VERSION} \ release=${VERSION} \ summary="kubearmor-operator container image based on redhat ubi" \ @@ -72,6 +74,7 @@ ARG VERSION=latest LABEL name="kubearmor-snitch" \ vendor="Accuknox" \ + maintainer="Barun Acharya, Ramakant Sharma" \ version=${VERSION} \ release=${VERSION} \ summary="kubearmor-snitch container image based on redhat ubi" \ diff --git a/pkg/KubeArmorOperator/api/operator.kubearmor.com/v1/kubearmorconfig_types.go b/pkg/KubeArmorOperator/api/operator.kubearmor.com/v1/kubearmorconfig_types.go index c6698c2286..abe45ac353 100644 --- a/pkg/KubeArmorOperator/api/operator.kubearmor.com/v1/kubearmorconfig_types.go +++ b/pkg/KubeArmorOperator/api/operator.kubearmor.com/v1/kubearmorconfig_types.go @@ -4,6 +4,7 @@ package v1 import ( + securityv1 "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/api/security.kubearmor.com/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -30,11 +31,21 @@ type Tls struct { RelayExtraIpAddresses []string `json:"extraIpAddresses,omitempty"` } +type RecommendedPolicies struct { + Enable bool `json:"enable,omitempty"` + + MatchExpressions []securityv1.MatchExpressionsType `json:"matchExpressions,omitempty"` + + ExcludePolicy []string `json:"excludePolicy,omitempty"` +} 
+ // KubeArmorConfigSpec defines the desired state of KubeArmorConfig type KubeArmorConfigSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file + // +kubebuilder:validation:optional + RecommendedPolicies RecommendedPolicies `json:"recommendedPolicies,omitempty"` // +kubebuilder:validation:optional DefaultFilePosture PostureType `json:"defaultFilePosture,omitempty"` // +kubebuilder:validation:optional diff --git a/pkg/KubeArmorOperator/api/operator.kubearmor.com/v1/zz_generated.deepcopy.go b/pkg/KubeArmorOperator/api/operator.kubearmor.com/v1/zz_generated.deepcopy.go index 6f75313572..01d594de57 100644 --- a/pkg/KubeArmorOperator/api/operator.kubearmor.com/v1/zz_generated.deepcopy.go +++ b/pkg/KubeArmorOperator/api/operator.kubearmor.com/v1/zz_generated.deepcopy.go @@ -8,6 +8,7 @@ package v1 import ( + security_kubearmor_comv1 "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/api/security.kubearmor.com/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -88,6 +89,7 @@ func (in *KubeArmorConfigList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeArmorConfigSpec) DeepCopyInto(out *KubeArmorConfigSpec) { *out = *in + in.RecommendedPolicies.DeepCopyInto(&out.RecommendedPolicies) out.KubeArmorImage = in.KubeArmorImage out.KubeArmorInitImage = in.KubeArmorInitImage out.KubeArmorRelayImage = in.KubeArmorRelayImage @@ -121,6 +123,33 @@ func (in *KubeArmorConfigStatus) DeepCopy() *KubeArmorConfigStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RecommendedPolicies) DeepCopyInto(out *RecommendedPolicies) { + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]security_kubearmor_comv1.MatchExpressionsType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExcludePolicy != nil { + in, out := &in.ExcludePolicy, &out.ExcludePolicy + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecommendedPolicies. +func (in *RecommendedPolicies) DeepCopy() *RecommendedPolicies { + if in == nil { + return nil + } + out := new(RecommendedPolicies) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Tls) DeepCopyInto(out *Tls) { *out = *in diff --git a/pkg/KubeArmorOperator/cmd/operator/main.go b/pkg/KubeArmorOperator/cmd/operator/main.go index 95f87fda88..8041557fab 100644 --- a/pkg/KubeArmorOperator/cmd/operator/main.go +++ b/pkg/KubeArmorOperator/cmd/operator/main.go @@ -8,6 +8,7 @@ import ( "errors" "path/filepath" + secv1client "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/client/clientset/versioned" opv1client "github.com/kubearmor/KubeArmor/pkg/KubeArmorOperator/client/clientset/versioned" controllers "github.com/kubearmor/KubeArmor/pkg/KubeArmorOperator/internal/controller" "github.com/kubearmor/KubeArmor/pkg/KubeArmorOperator/k8s" @@ -28,6 +29,7 @@ var PathPrefix string var DeploymentName string var ExtClient *apiextensionsclientset.Clientset var Opv1Client *opv1client.Clientset +var Secv1Client *secv1client.Clientset var InitDeploy bool var LogLevel string @@ -45,6 +47,7 @@ var Cmd = &cobra.Command{ K8sClient = k8s.NewClient(*Logger, KubeConfig) ExtClient = k8s.NewExtClient(*Logger, KubeConfig) Opv1Client = k8s.NewOpv1Client(*Logger, KubeConfig) + Secv1Client = 
k8s.NewSecv1Client(*Logger, KubeConfig) //Initialise k8sClient for all child commands to inherit if K8sClient == nil { return errors.New("couldn't create k8s client") @@ -52,7 +55,7 @@ var Cmd = &cobra.Command{ return nil }, Run: func(cmd *cobra.Command, args []string) { - nodeWatcher := controllers.NewClusterWatcher(K8sClient, Logger, ExtClient, Opv1Client, PathPrefix, DeploymentName, InitDeploy) + nodeWatcher := controllers.NewClusterWatcher(K8sClient, Logger, ExtClient, Opv1Client, Secv1Client, PathPrefix, DeploymentName, InitDeploy) go nodeWatcher.WatchConfigCrd() nodeWatcher.WatchNodes() diff --git a/pkg/KubeArmorOperator/common/defaults.go b/pkg/KubeArmorOperator/common/defaults.go index 0d315afa9f..970780653c 100644 --- a/pkg/KubeArmorOperator/common/defaults.go +++ b/pkg/KubeArmorOperator/common/defaults.go @@ -11,6 +11,7 @@ import ( "strings" deployments "github.com/kubearmor/KubeArmor/deployments/get" + securityv1 "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/api/security.kubearmor.com/v1" opv1 "github.com/kubearmor/KubeArmor/pkg/KubeArmorOperator/api/operator.kubearmor.com/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -124,6 +125,20 @@ var ( AlertThrottling bool = true DefaultMaxAlertPerSec string = "10" DefaultThrottleSec string = "30" + + // recommend policies + RecommendedPolicies opv1.RecommendedPolicies = opv1.RecommendedPolicies{ + MatchExpressions: []securityv1.MatchExpressionsType{ + { + Key: "namespace", + Operator: "NotIn", + Values: []string{ + "kube-system", + "kubearmor", + }, + }, + }, + } ) var ConfigMapData = map[string]string{ @@ -237,6 +252,15 @@ var CommonVolumes = []corev1.Volume{ }, }, }, + { + Name: "proc-fs-mount", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/proc", + Type: &HostPathDirectory, + }, + }, + }, } var CommonVolumesMount = []corev1.VolumeMount{ @@ -244,6 +268,11 @@ var CommonVolumesMount = []corev1.VolumeMount{ Name: 
"sys-kernel-debug-path", MountPath: "/sys/kernel/debug", }, + { + Name: "proc-fs-mount", + MountPath: "/host/procfs", + ReadOnly: true, + }, } var KubeArmorCaVolume = []corev1.Volume{ diff --git a/pkg/KubeArmorOperator/config/crd/bases/operator.kubearmor.com_kubearmorconfigs.yaml b/pkg/KubeArmorOperator/config/crd/bases/operator.kubearmor.com_kubearmorconfigs.yaml index 1e1c4434ec..f2d2b62512 100644 --- a/pkg/KubeArmorOperator/config/crd/bases/operator.kubearmor.com_kubearmorconfigs.yaml +++ b/pkg/KubeArmorOperator/config/crd/bases/operator.kubearmor.com_kubearmorconfigs.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: kubearmorconfigs.operator.kubearmor.com spec: group: operator.kubearmor.com @@ -26,20 +24,27 @@ spec: description: KubeArmorConfig is the Schema for the KubeArmorConfigs API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: description: KubeArmorConfigSpec defines the desired state of KubeArmorConfig properties: + alertThrottling: + type: boolean defaultCapabilitiesPosture: enum: - audit @@ -128,12 +133,37 @@ spec: - Never type: string type: object - seccompEnabled: - type: boolean - alertThrottling: - type: boolean maxAlertPerSec: type: integer + recommendedPolicies: + properties: + enable: + type: boolean + excludePolicy: + items: + type: string + type: array + matchExpressions: + items: + properties: + key: + enum: + - namespace + type: string + operator: + enum: + - In + - NotIn + type: string + values: + items: + type: string + type: array + type: object + type: array + type: object + seccompEnabled: + type: boolean throttleSec: type: integer tls: @@ -157,9 +187,9 @@ spec: message: type: string phase: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state - of cluster Important: Run "make" to regenerate code after modifying - this file' + description: |- + INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + Important: Run "make" to regenerate code after modifying this file type: string type: object type: object @@ -167,9 +197,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/pkg/KubeArmorOperator/internal/controller/cluster.go b/pkg/KubeArmorOperator/internal/controller/cluster.go index fd1e5af0af..9725d2f9fc 100644 --- a/pkg/KubeArmorOperator/internal/controller/cluster.go +++ 
b/pkg/KubeArmorOperator/internal/controller/cluster.go @@ -7,6 +7,8 @@ import ( "bytes" "context" "fmt" + "reflect" + "slices" "strconv" "strings" "sync" @@ -14,17 +16,22 @@ import ( certutil "github.com/kubearmor/KubeArmor/KubeArmor/cert" deployments "github.com/kubearmor/KubeArmor/deployments/get" + secv1 "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/api/security.kubearmor.com/v1" + secv1client "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/client/clientset/versioned" opv1 "github.com/kubearmor/KubeArmor/pkg/KubeArmorOperator/api/operator.kubearmor.com/v1" "github.com/kubearmor/KubeArmor/pkg/KubeArmorOperator/cert" opv1client "github.com/kubearmor/KubeArmor/pkg/KubeArmorOperator/client/clientset/versioned" + "github.com/kubearmor/KubeArmor/pkg/KubeArmorOperator/client/clientset/versioned/scheme" opv1Informer "github.com/kubearmor/KubeArmor/pkg/KubeArmorOperator/client/informers/externalversions" "github.com/kubearmor/KubeArmor/pkg/KubeArmorOperator/common" + "github.com/kubearmor/KubeArmor/pkg/KubeArmorOperator/recommend" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" metav1errors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" @@ -46,6 +53,7 @@ type ClusterWatcher struct { Client *kubernetes.Clientset ExtClient *apiextensionsclientset.Clientset Opv1Client *opv1client.Clientset + Secv1Client *secv1client.Clientset Daemonsets map[string]int DaemonsetsLock *sync.Mutex } @@ -60,7 +68,7 @@ type Node struct { Seccomp string } -func NewClusterWatcher(client *kubernetes.Clientset, log *zap.SugaredLogger, extClient *apiextensionsclientset.Clientset, opv1Client *opv1client.Clientset, pathPrefix, deploy_name string, initdeploy bool) *ClusterWatcher { +func 
NewClusterWatcher(client *kubernetes.Clientset, log *zap.SugaredLogger, extClient *apiextensionsclientset.Clientset, opv1Client *opv1client.Clientset, secv1Client *secv1client.Clientset, pathPrefix, deploy_name string, initdeploy bool) *ClusterWatcher { if informer == nil { informer = informers.NewSharedInformerFactory(client, 0) } @@ -86,6 +94,7 @@ func NewClusterWatcher(client *kubernetes.Clientset, log *zap.SugaredLogger, ext Client: client, ExtClient: extClient, Opv1Client: opv1Client, + Secv1Client: secv1Client, } } @@ -299,6 +308,7 @@ func (clusterWatcher *ClusterWatcher) WatchConfigCrd() { UpdateImages(&cfg.Spec) UpdatedKubearmorRelayEnv(&cfg.Spec) UpdatedSeccomp(&cfg.Spec) + UpdateRecommendedPolicyConfig(&cfg.Spec) // update status to (Installation) Created go clusterWatcher.UpdateCrdStatus(cfg.Name, common.CREATED, common.CREATED_MSG) go clusterWatcher.WatchRequiredResources() @@ -322,6 +332,7 @@ func (clusterWatcher *ClusterWatcher) WatchConfigCrd() { relayEnvUpdated := UpdatedKubearmorRelayEnv(&cfg.Spec) seccompEnabledUpdated := UpdatedSeccomp(&cfg.Spec) tlsUpdated := UpdateTlsData(&cfg.Spec) + UpdateRecommendedPolicyConfig(&cfg.Spec) // return if only status has been updated if !tlsUpdated && !relayEnvUpdated && !configChanged && cfg.Status != oldObj.(*opv1.KubeArmorConfig).Status && len(imageUpdated) < 1 { return @@ -779,6 +790,95 @@ func (clusterWatcher *ClusterWatcher) UpdateTlsConfigurations(tlsEnabled bool) e return nil } +func (clusterWatcher *ClusterWatcher) WatchRecommendedPolicies() error { + var yamlBytes []byte + policies, err := recommend.CRDFs.ReadDir(".") + if err != nil { + clusterWatcher.Log.Warnf("error reading policies FS", err) + return err + } + for _, policy := range policies { + csp := &secv1.KubeArmorClusterPolicy{} + if !policy.IsDir() { + yamlBytes, err = recommend.CRDFs.ReadFile(policy.Name()) + if err != nil { + clusterWatcher.Log.Warnf("error reading csp", policy.Name()) + continue + } + if err := 
runtime.DecodeInto(scheme.Codecs.UniversalDeserializer(), yamlBytes, csp); err != nil { + clusterWatcher.Log.Warnf("error decoding csp", policy.Name()) + continue + } + } + switch common.RecommendedPolicies.Enable { + case true: + if slices.Contains(common.RecommendedPolicies.ExcludePolicy, csp.Name) { + clusterWatcher.Log.Infof("excluding csp ", csp.Name) + err = clusterWatcher.Secv1Client.SecurityV1().KubeArmorClusterPolicies().Delete(context.Background(), csp.GetName(), metav1.DeleteOptions{}) + if err != nil && !metav1errors.IsNotFound(err) { + clusterWatcher.Log.Warnf("error deleting csp", csp.GetName()) + } else if err == nil { + clusterWatcher.Log.Infof("deleted csp", csp.GetName()) + } + continue + } + csp.Spec.Selector.MatchExpressions = common.RecommendedPolicies.MatchExpressions + _, err = clusterWatcher.Secv1Client.SecurityV1().KubeArmorClusterPolicies().Create(context.Background(), csp, metav1.CreateOptions{}) + if err != nil && !metav1errors.IsAlreadyExists(err) { + clusterWatcher.Log.Warnf("error creating csp", csp.GetName()) + continue + } else if metav1errors.IsAlreadyExists(err) { + pol, err := clusterWatcher.Secv1Client.SecurityV1().KubeArmorClusterPolicies().Get(context.Background(), csp.GetName(), metav1.GetOptions{}) + if err != nil { + clusterWatcher.Log.Warnf("error getting csp", csp.GetName()) + continue + } + if !reflect.DeepEqual(pol.Spec.Selector.MatchExpressions, common.RecommendedPolicies.MatchExpressions) { + pol.Spec.Selector.MatchExpressions = common.RecommendedPolicies.MatchExpressions + _, err := clusterWatcher.Secv1Client.SecurityV1().KubeArmorClusterPolicies().Update(context.Background(), pol, metav1.UpdateOptions{}) + if err != nil { + clusterWatcher.Log.Warnf("error updating csp", csp.GetName()) + continue + } else { + clusterWatcher.Log.Info("updated csp", csp.GetName()) + } + } + } else { + clusterWatcher.Log.Info("created csp", csp.GetName()) + } + case false: + if !policy.IsDir() { + err = 
clusterWatcher.Secv1Client.SecurityV1().KubeArmorClusterPolicies().Delete(context.Background(), csp.GetName(), metav1.DeleteOptions{}) + if err != nil && !metav1errors.IsNotFound(err) { + clusterWatcher.Log.Warnf("error deleting csp", csp.GetName()) + continue + } else { + clusterWatcher.Log.Info("deleted csp", csp.GetName()) + } + } + } + } + + return nil +} + +func UpdateRecommendedPolicyConfig(config *opv1.KubeArmorConfigSpec) bool { + updated := false + if config.RecommendedPolicies.Enable != common.RecommendedPolicies.Enable { + common.RecommendedPolicies.Enable = config.RecommendedPolicies.Enable + updated = true + } + if !reflect.DeepEqual(config.RecommendedPolicies.MatchExpressions, common.RecommendedPolicies.MatchExpressions) { + common.RecommendedPolicies.MatchExpressions = slices.Clone(config.RecommendedPolicies.MatchExpressions) + updated = true + } + if !reflect.DeepEqual(config.RecommendedPolicies.ExcludePolicy, common.RecommendedPolicies.ExcludePolicy) { + common.RecommendedPolicies.ExcludePolicy = slices.Clone(config.RecommendedPolicies.ExcludePolicy) + updated = true + } + return updated +} + func UpdateConfigMapData(config *opv1.KubeArmorConfigSpec) bool { updated := false if config.DefaultFilePosture != "" { diff --git a/pkg/KubeArmorOperator/internal/controller/resources.go b/pkg/KubeArmorOperator/internal/controller/resources.go index e848be8333..b4b8f2208c 100644 --- a/pkg/KubeArmorOperator/internal/controller/resources.go +++ b/pkg/KubeArmorOperator/internal/controller/resources.go @@ -534,6 +534,22 @@ func (clusterWatcher *ClusterWatcher) WatchRequiredResources() { controller := deployments.GetKubeArmorControllerDeployment(common.Namespace) relayServer := deployments.GetRelayDeployment(common.Namespace) + // update relay env vars + relayServer.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{ + { + Name: "ENABLE_STDOUT_LOGS", + Value: common.KubearmorRelayEnvMap[common.EnableStdOutLogs], + }, + { + Name: "ENABLE_STDOUT_ALERTS", + 
Value: common.KubearmorRelayEnvMap[common.EnableStdOutAlerts], + }, + { + Name: "ENABLE_STDOUT_MSGS", + Value: common.KubearmorRelayEnvMap[common.EnableStdOutMsgs], + }, + } + if common.EnableTls { relayServer.Spec.Template.Spec.Containers[0].VolumeMounts = append(relayServer.Spec.Template.Spec.Containers[0].VolumeMounts, common.KubeArmorRelayTlsVolumeMount...) @@ -732,6 +748,10 @@ func (clusterWatcher *ClusterWatcher) WatchRequiredResources() { clusterWatcher.Log.Error(err.Error()) } + if err := clusterWatcher.WatchRecommendedPolicies(); err != nil { + installErr = err + } + // update operatingConfigCrd status to Running if common.OperatorConfigCrd != nil { if installErr != nil { diff --git a/pkg/KubeArmorOperator/k8s/client.go b/pkg/KubeArmorOperator/k8s/client.go index 8de18717bc..de87ed2f9c 100644 --- a/pkg/KubeArmorOperator/k8s/client.go +++ b/pkg/KubeArmorOperator/k8s/client.go @@ -6,6 +6,7 @@ package k8s import ( "os" + secv1client "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/client/clientset/versioned" opv1client "github.com/kubearmor/KubeArmor/pkg/KubeArmorOperator/client/clientset/versioned" "go.uber.org/zap" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -117,3 +118,40 @@ func NewOpv1Client(log zap.SugaredLogger, kubeconfig string) *opv1client.Clients return client } + +func NewSecv1Client(log zap.SugaredLogger, kubeconfig string) *secv1client.Clientset { + var cfg *rest.Config + log.Info("Trying to load InCluster configuration") + inClusterConfig, err := rest.InClusterConfig() + if err == rest.ErrNotInCluster { + log.Info("Not inside a k8s Cluster, Loading kubeconfig") + kubeConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, + &clientcmd.ConfigOverrides{}).ClientConfig() + if err != nil { + log.Errorf("Couldn't load configuration from kubeconfig Error=%s", err.Error()) + os.Exit(1) + } + log.Info("Loaded 
configuration from kubeconfig") + cfg = kubeConfig + } else if err != nil { + log.Errorf("Couldn't load inCluster configuration Error=%s", err.Error()) + os.Exit(1) + + } else { + log.Info("Loaded InCluster configuration") + cfg = inClusterConfig + } + + client, err := secv1client.NewForConfig(cfg) + if err != nil { + log.Errorf("Couldn't create secv1 clientset Error=%s", err.Error()) + os.Exit(1) + } + + if client == nil { + log.Warn("secv1client is nil") + } + + return client +} diff --git a/pkg/KubeArmorOperator/recommend/harden-cronjob-cfg.yaml b/pkg/KubeArmorOperator/recommend/harden-cronjob-cfg.yaml new file mode 100644 index 0000000000..92203858aa --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/harden-cronjob-cfg.yaml @@ -0,0 +1,39 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorClusterPolicy +metadata: + annotations: + app.accuknox.com/source: KubeArmor Operator + app.accuknox.com/type: harden + name: harden-cronjob-cfg +spec: + action: Audit + file: + matchDirectories: + - dir: /etc/cron.d/ + recursive: true + - dir: /etc/cron.daily/ + recursive: true + - dir: /etc/cron.hourly/ + recursive: true + - dir: /etc/cron.monthly/ + recursive: true + - dir: /etc/cron.weekly/ + recursive: true + - dir: /var/cron/ + recursive: true + - dir: /var/spool/cron/ + recursive: true + matchPaths: + - path: /etc/crontab + message: Alert! Access to cron job files/directories detected. 
+ selector: + matchExpressions: + severity: 5 + tags: + - CIS + - CIS_5.1_Configure_Cron + - CIS_Linux + - NIST + - NIST_800-53_SI-4 + - SI-4 + diff --git a/pkg/KubeArmorOperator/recommend/harden-crypto-miners.yaml b/pkg/KubeArmorOperator/recommend/harden-crypto-miners.yaml new file mode 100644 index 0000000000..6aaa329f2d --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/harden-crypto-miners.yaml @@ -0,0 +1,42 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorClusterPolicy +metadata: + annotations: + app.accuknox.com/source: KubeArmor Operator + app.accuknox.com/type: harden + name: harden-crypto-miners +spec: + action: Block + file: + matchDirectories: + - dir: /bin/ + readOnly: true + recursive: true + - dir: /boot/ + readOnly: true + recursive: true + - dir: /sbin/ + readOnly: true + recursive: true + - dir: /usr/bin/ + readOnly: true + recursive: true + - dir: /usr/local/bin/ + readOnly: true + recursive: true + - dir: /var/local/bin/ + readOnly: true + recursive: true + message: cryptominer detected and blocked + process: + matchDirectories: + - dir: /tmp/ + recursive: true + selector: + matchExpressions: + severity: 10 + tags: + - MITRE + - MITRE_T1496_resource_hijacking + - cryptominer + diff --git a/pkg/KubeArmorOperator/recommend/harden-file-integrity-monitoring.yaml b/pkg/KubeArmorOperator/recommend/harden-file-integrity-monitoring.yaml new file mode 100644 index 0000000000..c74046fe3c --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/harden-file-integrity-monitoring.yaml @@ -0,0 +1,41 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorClusterPolicy +metadata: + annotations: + app.accuknox.com/source: KubeArmor Operator + app.accuknox.com/type: harden + name: harden-file-integrity-monitoring +spec: + action: Block + file: + matchDirectories: + - dir: /bin/ + readOnly: true + recursive: true + - dir: /boot/ + readOnly: true + recursive: true + - dir: /sbin/ + readOnly: true + recursive: true + - dir: /usr/bin/ + readOnly: true + 
recursive: true + - dir: /usr/lib/ + readOnly: true + recursive: true + - dir: /usr/sbin/ + readOnly: true + recursive: true + message: Detected and prevented compromise to File integrity + selector: + matchExpressions: + severity: 1 + tags: + - MITRE + - MITRE_T1036_masquerading + - MITRE_T1565_data_manipulation + - NIST + - NIST_800-53_AU-2 + - NIST_800-53_SI-4 + diff --git a/pkg/KubeArmorOperator/recommend/harden-impair-defense.yaml b/pkg/KubeArmorOperator/recommend/harden-impair-defense.yaml new file mode 100644 index 0000000000..7d555b384d --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/harden-impair-defense.yaml @@ -0,0 +1,28 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorClusterPolicy +metadata: + annotations: + app.accuknox.com/source: KubeArmor Operator + app.accuknox.com/type: harden + name: harden-impair-defense +spec: + action: Audit + file: + matchDirectories: + - dir: /etc/apparmor.d/ + recursive: true + - dir: /etc/sysconfig/selinux/ + recursive: true + matchPaths: + - path: /etc/selinux/semanage.conf + message: Selinux Files Accessed by Unknown Process + selector: + matchExpressions: + severity: 6 + tags: + - 5G + - FGT1562 + - FIGHT + - MITRE + - MITRE_T1562_Impair_Defenses + diff --git a/pkg/KubeArmorOperator/recommend/harden-k8s-client-tool-exec.yaml b/pkg/KubeArmorOperator/recommend/harden-k8s-client-tool-exec.yaml new file mode 100644 index 0000000000..a2c17d11bb --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/harden-k8s-client-tool-exec.yaml @@ -0,0 +1,38 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorClusterPolicy +metadata: + annotations: + app.accuknox.com/source: KubeArmor Operator + app.accuknox.com/type: harden + name: harden-k8s-client-tool-exec +spec: + action: Block + message: Alert! k8s client tool executed inside the container. 
+ process: + matchPaths: + - path: /usr/bin/docker + - path: /usr/bin/cri-ctl + - path: /usr/bin/kubectl + matchPatterns: + - pattern: /*/*/*/kubectl + - pattern: /*/*/kubectl + - pattern: /*/kubectl + - pattern: /*/*/*/cri-ctl + - pattern: /*/*/cri-ctl + - pattern: /*/cri-ctl + - pattern: /*/*/*/docker + - pattern: /*/*/docker + - pattern: /*/docker + selector: + matchExpressions: + severity: 5 + tags: + - MITRE + - MITRE_T1609_container_administration_command + - MITRE_T1610_deploy_container + - MITRE_TA0002_execution + - NIST + - NIST_800-53 + - NIST_800-53_AU-2 + - NIST_800-53_SI-4 + diff --git a/pkg/KubeArmorOperator/recommend/harden-maint-tools-access.yaml b/pkg/KubeArmorOperator/recommend/harden-maint-tools-access.yaml new file mode 100644 index 0000000000..0e45d0344f --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/harden-maint-tools-access.yaml @@ -0,0 +1,22 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorClusterPolicy +metadata: + annotations: + app.accuknox.com/source: KubeArmor Operator + app.accuknox.com/type: harden + name: harden-maint-tools-access +spec: + action: Audit + message: restricted maintenance tool access attempt detected + process: + matchDirectories: + - dir: /sbin/ + recursive: true + selector: + matchExpressions: + severity: 1 + tags: + - MITRE + - MITRE_T1553_Subvert_Trust_Controls + - PCI_DSS + diff --git a/pkg/KubeArmorOperator/recommend/harden-network-service-scanning.yaml b/pkg/KubeArmorOperator/recommend/harden-network-service-scanning.yaml new file mode 100644 index 0000000000..48f2934cc4 --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/harden-network-service-scanning.yaml @@ -0,0 +1,38 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorClusterPolicy +metadata: + annotations: + app.accuknox.com/source: KubeArmor Operator + app.accuknox.com/type: harden + name: harden-network-service-scanning +spec: + action: Audit + message: Network service has been scanned! 
+ process: + matchPaths: + - path: /usr/bin/netstat + - path: /bin/netstat + - path: /usr/sbin/ip + - path: /usr/bin/ip + - path: /sbin/ip + - path: /bin/ip + - path: /usr/sbin/iw + - path: /sbin/iw + - path: /usr/sbin/ethtool + - path: /sbin/ethtool + - path: /usr/sbin/ifconfig + - path: /sbin/ifconfig + - path: /usr/sbin/arp + - path: /sbin/arp + - path: /usr/sbin/iwconfig + - path: /sbin/iwconfig + selector: + matchExpressions: + severity: 5 + tags: + - 5G + - FGT1046 + - FIGHT + - MITRE + - MITRE_T1046_Network_Service_Discovery + diff --git a/pkg/KubeArmorOperator/recommend/harden-pkg-mngr-exec.yaml b/pkg/KubeArmorOperator/recommend/harden-pkg-mngr-exec.yaml new file mode 100644 index 0000000000..cf1cdf64b3 --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/harden-pkg-mngr-exec.yaml @@ -0,0 +1,53 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorClusterPolicy +metadata: + annotations: + app.accuknox.com/source: KubeArmor Operator + app.accuknox.com/type: harden + name: harden-pkg-mngr-exec +spec: + action: Block + message: Alert! 
Execution of package management process inside container is denied + process: + matchPaths: + - path: /usr/bin/apt + - path: /usr/bin/apt-get + - path: /bin/apt-get + - path: /bin/apt + - path: /sbin/apk + - path: /usr/bin/dpkg + - path: /bin/dpkg + - path: /usr/bin/gdebi + - path: /bin/gdebi + - path: /usr/bin/make + - path: /bin/make + - path: /usr/bin/yum + - path: /bin/yum + - path: /usr/bin/rpm + - path: /bin/rpm + - path: /usr/bin/dnf + - path: /bin/dnf + - path: /usr/bin/pacman + - path: /usr/sbin/pacman + - path: /bin/pacman + - path: /sbin/pacman + - path: /usr/bin/makepkg + - path: /usr/sbin/makepkg + - path: /bin/makepkg + - path: /sbin/makepkg + - path: /usr/bin/yaourt + - path: /usr/sbin/yaourt + - path: /bin/yaourt + - path: /sbin/yaourt + - path: /usr/bin/zypper + - path: /bin/zypper + selector: + matchExpressions: + severity: 5 + tags: + - NIST + - NIST_800-53_CM-7(4) + - NIST_800-53_SI-4 + - SI-4 + - process + diff --git a/pkg/KubeArmorOperator/recommend/harden-remote-services.yaml b/pkg/KubeArmorOperator/recommend/harden-remote-services.yaml new file mode 100644 index 0000000000..5d1e31e363 --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/harden-remote-services.yaml @@ -0,0 +1,29 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorClusterPolicy +metadata: + annotations: + app.accuknox.com/source: KubeArmor Operator + app.accuknox.com/type: harden + name: harden-remote-services +spec: + action: Audit + file: + matchDirectories: + - dir: /etc/ssh/ + matchPaths: + - path: /etc/passwd + - path: /etc/shadow + - path: /var/log/auth.log + - path: /var/log/wtmp + - path: /var/run/utmp + message: Warning! 
access sensitive files detected + selector: + matchExpressions: + severity: 3 + tags: + - 5G + - FGT1021 + - FIGHT + - MITRE + - MITRE_T1021_Remote_Services + diff --git a/pkg/KubeArmorOperator/recommend/harden-system-owner-discovery.yaml b/pkg/KubeArmorOperator/recommend/harden-system-owner-discovery.yaml new file mode 100644 index 0000000000..dae60337ea --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/harden-system-owner-discovery.yaml @@ -0,0 +1,23 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorClusterPolicy +metadata: + annotations: + app.accuknox.com/source: KubeArmor Operator + app.accuknox.com/type: harden + name: harden-system-owner-discovery +spec: + action: Block + message: System owner discovery command execution denied + process: + matchPaths: + - path: /usr/bin/who + - path: /usr/bin/w + - path: /usr/bin/id + - path: /usr/bin/whoami + selector: + matchExpressions: + severity: 3 + tags: + - MITRE + - MITRE_T1082_system_information_discovery + diff --git a/pkg/KubeArmorOperator/recommend/harden-trusted-cert-mod.yaml b/pkg/KubeArmorOperator/recommend/harden-trusted-cert-mod.yaml new file mode 100644 index 0000000000..b7e97d20b9 --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/harden-trusted-cert-mod.yaml @@ -0,0 +1,30 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorClusterPolicy +metadata: + annotations: + app.accuknox.com/source: KubeArmor Operator + app.accuknox.com/type: harden + name: harden-trusted-cert-mod +spec: + action: Block + file: + matchDirectories: + - dir: /etc/pki/ + readOnly: true + recursive: true + - dir: /etc/ssl/ + readOnly: true + recursive: true + - dir: /usr/local/share/ca-certificates/ + readOnly: true + recursive: true + message: Credentials modification denied + selector: + matchExpressions: + severity: 1 + tags: + - FGT1555 + - FIGHT + - MITRE + - MITRE_T1552_unsecured_credentials + diff --git a/pkg/KubeArmorOperator/recommend/harden-write-etc-dir.yaml 
b/pkg/KubeArmorOperator/recommend/harden-write-etc-dir.yaml new file mode 100644 index 0000000000..2bb6a14548 --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/harden-write-etc-dir.yaml @@ -0,0 +1,30 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorClusterPolicy +metadata: + annotations: + app.accuknox.com/source: KubeArmor Operator + app.accuknox.com/type: harden + name: harden-write-etc-dir +spec: + action: Block + file: + matchDirectories: + - dir: /etc/ + readOnly: true + recursive: true + message: Alert! File creation under /etc/ directory detected. + selector: + matchExpressions: + severity: 5 + tags: + - MITRE + - MITRE_T1036.005_match_legitimate_name_or_location + - MITRE_T1036_masquerading + - MITRE_T1562.001_disable_or_modify_tools + - MITRE_TA0003_persistence + - MITRE_TA0005_defense_evasion + - NIST + - NIST_800-53 + - NIST_800-53_SI-4 + - NIST_800-53_SI-7 + diff --git a/pkg/KubeArmorOperator/recommend/harden-write-in-shm-dir.yaml b/pkg/KubeArmorOperator/recommend/harden-write-in-shm-dir.yaml new file mode 100644 index 0000000000..e903d9e8a5 --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/harden-write-in-shm-dir.yaml @@ -0,0 +1,22 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorClusterPolicy +metadata: + annotations: + app.accuknox.com/source: KubeArmor Operator + app.accuknox.com/type: harden + name: harden-write-in-shm-dir +spec: + action: Block + file: + matchDirectories: + - dir: /dev/shm/ + readOnly: true + recursive: true + message: Alert! write to /dev/shm folder prevented. 
+ selector: + matchExpressions: + severity: 5 + tags: + - MITRE + - MITRE_TA0002_Execution + diff --git a/pkg/KubeArmorOperator/recommend/harden-write-under-dev-dir.yaml b/pkg/KubeArmorOperator/recommend/harden-write-under-dev-dir.yaml new file mode 100644 index 0000000000..b2b7c25cf2 --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/harden-write-under-dev-dir.yaml @@ -0,0 +1,25 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorClusterPolicy +metadata: + annotations: + app.accuknox.com/source: KubeArmor Operator + app.accuknox.com/type: harden + name: harden-write-under-dev-dir +spec: + action: Audit + file: + matchDirectories: + - dir: /dev/ + readOnly: true + recursive: true + message: Alert! File creation under /dev/ directory detected. + selector: + matchExpressions: + severity: 5 + tags: + - MITRE + - MITRE_T1036_masquerading + - NIST + - NIST_800-53_AU-2 + - NIST_800-53_SI-4 + diff --git a/pkg/KubeArmorOperator/recommend/recommend.go b/pkg/KubeArmorOperator/recommend/recommend.go new file mode 100644 index 0000000000..eaad25b1b8 --- /dev/null +++ b/pkg/KubeArmorOperator/recommend/recommend.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 Authors of KubeArmor + +package recommend + +import ( + "embed" +) + +//go:embed *.yaml +var CRDFs embed.FS diff --git a/tests/go.mod b/tests/go.mod index 2f93de57f1..11c2738d47 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -54,7 +54,6 @@ require ( github.com/go-openapi/strfmt v0.23.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-openapi/validate v0.24.0 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect diff --git a/tests/go.sum b/tests/go.sum index f70f29ae8f..a966a456f8 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -48,8 +48,6 @@ 
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyT github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -79,8 +77,6 @@ github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3Bum github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -101,8 +97,6 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
-github.com/google/pprof v0.0.0-20240319011627-a57c5dfe54fd h1:LjW4RcTwfcqOYGmD7UpFrn1gfBZ9mgu7QN5mSeFkCog= -github.com/google/pprof v0.0.0-20240319011627-a57c5dfe54fd/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= @@ -176,13 +170,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= -github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0= github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= @@ -251,7 +240,6 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= 
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=