Skip to content

Commit

Permalink
Merge pull request #10 from build-security/feature/file_config
Browse files Browse the repository at this point in the history
Feature/file config
  • Loading branch information
amirbenun authored Nov 18, 2021
2 parents 3516913 + a5847f2 commit 440c227
Show file tree
Hide file tree
Showing 8 changed files with 245 additions and 61 deletions.
1 change: 1 addition & 0 deletions kubebeat/beater/data.go
Original file line number Diff line number Diff line change
Expand Up @@ -141,4 +141,5 @@ func copy(m map[string]interface{}) (map[string]interface{}, error) {
// init registers the concrete types that may travel through the beat's
// gob-encoded data channel, so they can be decoded on the receiving side.
func init() {
	for _, v := range []interface{}{
		[]interface{}{},
		map[string]Process{},
		[]FileSystemResourceData{},
	} {
		gob.Register(v)
	}
}
86 changes: 86 additions & 0 deletions kubebeat/beater/file_system_fetcher.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
package beater

import (
"github.com/elastic/beats/v7/libbeat/logp"
"os"
"os/user"
"strconv"
"syscall"
)

// FileSystemFetcher implements the Fetcher interface.
// It collects metadata about files and directories from the local file
// system and ships it to the Kubebeat pipeline.
type FileSystemFetcher struct {
	filesPaths []string // file and directory paths for the fetcher to extract info from
}

const (
	// FileSystemInputType tags every result produced by the FileSystemFetcher.
	FileSystemInputType = "file-system"
)

// FileSystemResourceData represents the metadata collected for a single
// file-system resource (file or directory). It is produced by the
// FileSystemFetcher for each configured path.
type FileSystemResourceData struct {
	FileName  string `json:"fileName"`  // base name of the file
	FileMode  string `json:"fileMode"`  // permission bits as an octal string, e.g. "600"
	Gid       string `json:"gid"`       // owning group (resolved name; see FromFileInfo)
	Uid       string `json:"uid"`       // owning user (resolved name; see FromFileInfo)
	InputType string `json:"inputType"` // always FileSystemInputType
	Path      string `json:"path"`      // the path exactly as configured
}

// NewFileFetcher constructs a Fetcher that reads file-system metadata for
// the given file and directory paths.
func NewFileFetcher(filesPaths []string) Fetcher {
	fetcher := &FileSystemFetcher{filesPaths: filesPaths}
	return fetcher
}

// Fetch stats every configured path and returns a slice of
// FileSystemResourceData, one entry per path that could be read.
// Paths that fail to stat are logged and skipped rather than aborting the
// whole fetch, so the returned error is always nil.
func (f *FileSystemFetcher) Fetch() (interface{}, error) {
	// Pre-size to the number of configured paths to avoid repeated growth.
	results := make([]FileSystemResourceData, 0, len(f.filesPaths))

	for _, filePath := range f.filesPaths {
		info, err := os.Stat(filePath)
		if err != nil {
			// Best-effort: log the failure and continue with the remaining paths.
			logp.Err("Failed to fetch %s, error - %+v", filePath, err)
			continue
		}

		results = append(results, FromFileInfo(info, filePath))
	}

	return results, nil
}

// Stop implements the Fetcher interface. The file-system fetcher holds no
// background resources, so there is nothing to release.
func (f *FileSystemFetcher) Stop() {
}

// FromFileInfo converts an os.FileInfo into a FileSystemResourceData,
// resolving the owning user and group names when possible. A nil info
// yields a zero-value result.
func FromFileInfo(info os.FileInfo, path string) FileSystemResourceData {
	if info == nil {
		return FileSystemResourceData{}
	}

	data := FileSystemResourceData{
		FileName:  info.Name(),
		FileMode:  strconv.FormatUint(uint64(info.Mode().Perm()), 8),
		Path:      path,
		InputType: FileSystemInputType,
	}

	// info.Sys() is *syscall.Stat_t on Unix; guard the assertion so an
	// unexpected platform does not panic.
	stat, ok := info.Sys().(*syscall.Stat_t)
	if !ok {
		return data
	}

	uid := strconv.FormatUint(uint64(stat.Uid), 10)
	gid := strconv.FormatUint(uint64(stat.Gid), 10)

	// Fall back to the numeric id when the lookup fails (e.g. the id has no
	// passwd/group entry); the previous code ignored the lookup error and
	// dereferenced a nil *user.User / *user.Group, panicking.
	data.Uid = uid
	if usr, err := user.LookupId(uid); err == nil {
		data.Uid = usr.Name
	}
	data.Gid = gid
	if group, err := user.LookupGroupId(gid); err == nil {
		data.Gid = group.Name
	}

	return data
}
35 changes: 35 additions & 0 deletions kubebeat/beater/file_system_fetcher_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
package beater

import (
"github.com/stretchr/testify/assert"
"io/ioutil"
"os"
"path/filepath"
"testing"
)

// TestFileFetcherFetchFilesFromFileSystem verifies that the file fetcher
// reports the path and permission mode of an existing file.
func TestFileFetcherFetchFilesFromFileSystem(t *testing.T) {
	dir, err := ioutil.TempDir("", "file-fetcher-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	file := filepath.Join(dir, "file.txt")
	if err = ioutil.WriteFile(file, []byte("test txt\n"), 0600); err != nil {
		t.Fatal(err)
	}

	fileFetcher := NewFileFetcher([]string{file})
	results, err := fileFetcher.Fetch()
	if err != nil {
		// Fatal, not assert.Fail: assert.Fail does not stop execution, and
		// the cast below would panic on a nil result.
		t.Fatalf("Fetcher did not work: %v", err)
	}

	data, ok := results.([]FileSystemResourceData)
	if !ok || len(data) == 0 {
		t.Fatalf("unexpected fetch result: %+v", results)
	}

	result := data[0]
	assert.Equal(t, file, result.Path)
	assert.Equal(t, "600", result.FileMode)
}
3 changes: 2 additions & 1 deletion kubebeat/beater/kubebeat.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,14 +40,15 @@ func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) {

data := NewData(ctx, c.Period)

data.RegisterFetcher("processes", NewProcessesFetcher(procfsdir))

kubef, err := NewKubeFetcher(c.KubeConfig, c.Period)
if err != nil {
return nil, err
}

data.RegisterFetcher("kube_api", kubef)
data.RegisterFetcher("processes", NewProcessesFetcher(procfsdir))
data.RegisterFetcher("file_system", NewFileFetcher(c.Files))

// create a mock HTTP bundle bundleServer
bundleServer, err := sdktest.NewServer(sdktest.MockBundle("/bundles/bundle.tar.gz", bundle.Policies))
Expand Down
1 change: 1 addition & 0 deletions kubebeat/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import "time"
// Config holds the kubebeat settings read from the beat's configuration file.
type Config struct {
	KubeConfig string        `config:"kube_config"` // path to the kubeconfig file; presumably empty means in-cluster — TODO confirm
	Period     time.Duration `config:"period"`      // how often an event is sent to the output
	Files      []string      `config:"files"`       // file and directory paths for the file-system fetcher
}

var DefaultConfig = Config{
Expand Down
95 changes: 54 additions & 41 deletions kubebeat/kubebeat.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,20 @@
kubebeat:
# Defines how often an event is sent to the output
period: 1s
files: [
"/hostfs/etc/kubernetes/scheduler.conf",
"/hostfs/etc/kubernetes/controller-manager.conf",
"/hostfs/etc/kubernetes/admin.conf",
"/hostfs/etc/kubernetes/kubelet.conf",
"/hostfs/etc/kubernetes/manifests/etcd.yaml",
"/hostfs/etc/kubernetes/manifests/kube-apiserver.yaml",
"/hostfs/etc/kubernetes/manifests/kube-controller-manager.yaml",
"/hostfs/etc/kubernetes/manifests/kube-scheduler.yaml",
"/hostfs/etc/systemd/system/kubelet.service.d/10-kubeadm.conf",
"/hostfs/var/lib/kubelet/config.yaml",
"/hostfs/var/lib/etcd",
"/hostfs/etc/kubernetes/pki"
]

# ================================== General ===================================

Expand Down Expand Up @@ -39,16 +53,16 @@ kubebeat:
# This requires a Kibana endpoint configuration.
setup.kibana:

# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify an additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"
# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify an additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"

# Kibana Space ID
# ID of the Kibana Space into which the dashboards should be loaded. By default,
# the Default Space will be used.
#space.id:
# Kibana Space ID
# ID of the Kibana Space into which the dashboards should be loaded. By default,
# the Default Space will be used.
#space.id:

# =============================== Elastic Cloud ================================

Expand Down Expand Up @@ -80,8 +94,8 @@ output.elasticsearch:
#username: "elastic"
#password: "changeme"

# ------------------------------ Logstash Output -------------------------------
#output.logstash:
# ------------------------------ Logstash Output -------------------------------
#output.logstash:
# The Logstash hosts
#hosts: ["localhost:5044"]

Expand All @@ -105,43 +119,43 @@ processors:
- add_docker_metadata: ~


# ================================== Logging ===================================
# ================================== Logging ===================================

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publisher", "service".
#logging.selectors: ["*"]
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publisher", "service".
#logging.selectors: ["*"]

# ============================= X-Pack Monitoring ==============================
# Kubebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# ============================= X-Pack Monitoring ==============================
# Kubebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#monitoring.enabled: false
# Set to true to enable the monitoring reporter.
#monitoring.enabled: false

# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Kubebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:
# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Kubebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:
# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:

# ============================== Instrumentation ===============================
# ============================== Instrumentation ===============================

# Instrumentation support for the kubebeat.
#instrumentation:
# Instrumentation support for the kubebeat.
#instrumentation:
# Set to true to enable instrumentation of kubebeat.
#enabled: false

Expand All @@ -164,4 +178,3 @@ processors:

# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true

32 changes: 16 additions & 16 deletions kubebeat/kubebeat_minikube.yml.patch
Original file line number Diff line number Diff line change
@@ -1,24 +1,24 @@
--- kubebeat.yml 2021-10-05 18:16:52.000000000 +0300
+++ kubebeat_minikube.yml 2021-10-05 19:01:16.000000000 +0300
--- kubebeat.yml 2021-11-15 18:48:10.000000000 +0200
+++ kubebeat_new.yml 2021-11-15 18:52:13.000000000 +0200
@@ -4,7 +4,7 @@

kubebeat:
# Defines how often an event is sent to the output
- period: 1s
+ period: 5s
files: [
"/hostfs/etc/kubernetes/scheduler.conf",
"/hostfs/etc/kubernetes/controller-manager.conf",
@@ -57,7 +57,7 @@
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify and additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
-#host: "localhost:5601"
+host: "http://host.minikube.internal:5601"

# ================================== General ===================================

@@ -43,7 +43,7 @@
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify and additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
- #host: "localhost:5601"
+ host: "http://host.minikube.internal:5601"

# Kibana Space ID
# ID of the Kibana Space into which the dashboards should be loaded. By default,
@@ -70,15 +70,15 @@
# Kibana Space ID
# ID of the Kibana Space into which the dashboards should be loaded. By default,
@@ -84,15 +84,15 @@
# ---------------------------- Elasticsearch Output ----------------------------
output.elasticsearch:
# Array of hosts to connect to.
Expand All @@ -35,5 +35,5 @@
+ username: "elastic"
+ password: "changeme"

# ------------------------------ Logstash Output -------------------------------
#output.logstash:
# ------------------------------ Logstash Output -------------------------------
#output.logstash:
53 changes: 50 additions & 3 deletions kubebeat/pod.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,55 @@ metadata:
run: kubebeat-demo
name: kubebeat-demo
spec:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
containers:
- image: kubebeat
name: kubebeat-demo
imagePullPolicy: IfNotPresent
- image: kubebeat
name: kubebeat-demo
imagePullPolicy: IfNotPresent
volumeMounts:
- name: proc
mountPath: /hostfs/proc
readOnly: true
- name: cgroup
mountPath: /hostfs/sys/fs/cgroup
readOnly: true
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: varlog
mountPath: /var/log
readOnly: true
- name: etckubernetes
mountPath: /hostfs/etc/kubernetes
readOnly: true
- name: varlib
mountPath: /hostfs/var/lib
readOnly: true
- name: etcsysmd
mountPath: /hostfs/etc/systemd
readOnly: true
volumes:
- name: proc
hostPath:
path: /proc
- name: cgroup
hostPath:
path: /sys/fs/cgroup
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: varlog
hostPath:
path: /var/log
- name: etckubernetes
hostPath:
path: /etc/kubernetes
- name: varlib
hostPath:
path: /var/lib
- name: etcsysmd
hostPath:
path: /etc/systemd
restartPolicy: Always

0 comments on commit 440c227

Please sign in to comment.