From 5eb12a8afb4554c08244f873491876bcd9dcc3c2 Mon Sep 17 00:00:00 2001
From: Forest
Date: Wed, 10 Jan 2024 17:42:02 +0800
Subject: [PATCH] refactor: monitoring generator with workspace configuration
 (#741)

---
 pkg/apis/core/v1/types.go                     |  12 -
 .../app_configurations_generator.go           |   2 +-
 .../monitoring/monitoring_generator.go        | 241 ++++++++++++------
 .../monitoring/monitoring_generator_test.go   |  87 ++++---
 .../workload/workload_generator_test.go       |   4 -
 pkg/modules/inputs/monitoring/monitoring.go   |  34 ++-
 pkg/modules/interfaces.go                     |   6 +-
 .../patchers/monitoring/monitoring_patcher.go | 107 ++++++--
 .../monitoring/monitoring_patcher_test.go     |  78 ++++--
 9 files changed, 398 insertions(+), 173 deletions(-)

diff --git a/pkg/apis/core/v1/types.go b/pkg/apis/core/v1/types.go
index 7af93c6c..9c69dfdc 100644
--- a/pkg/apis/core/v1/types.go
+++ b/pkg/apis/core/v1/types.go
@@ -2,14 +2,11 @@ package v1
 
 type (
 	BuilderType string
-	MonitorType string
 )
 
 const (
 	KCLBuilder              BuilderType = "KCL"
 	AppConfigurationBuilder BuilderType = "AppConfiguration"
-	PodMonitorType          MonitorType = "Pod"
-	ServiceMonitorType      MonitorType = "Service"
 )
 
 // Project is a definition of Kusion Project resource.
@@ -31,9 +28,6 @@ type Project struct {
 	// Generator controls how to generate the Intent.
 	Generator *GeneratorConfig `json:"generator,omitempty" yaml:"generator,omitempty"`
 
-	// Prometheus configs
-	Prometheus *PrometheusConfig `json:"prometheus,omitempty" yaml:"prometheus,omitempty"`
-
 	// The set of stacks that are known about this project.
 	Stacks []*Stack `json:"stacks,omitempty" yaml:"stacks,omitempty"`
 }
@@ -46,12 +40,6 @@ type GeneratorConfig struct {
 	Configs map[string]interface{} `json:"configs,omitempty" yaml:"configs,omitempty"`
 }
 
-// PrometheusConfig represent Prometheus configs saved in project.yaml
-type PrometheusConfig struct {
-	OperatorMode bool        `yaml:"operatorMode,omitempty" json:"operatorMode,omitempty"`
-	MonitorType  MonitorType `yaml:"monitorType,omitempty" json:"monitorType,omitempty"`
-}
-
 // Stack is a definition of Kusion Stack resource.
 //
 // Stack provides a mechanism to isolate multiple deploys of same application,
diff --git a/pkg/modules/generators/app_configurations_generator.go b/pkg/modules/generators/app_configurations_generator.go
index a53fc9cb..5afb87f4 100644
--- a/pkg/modules/generators/app_configurations_generator.go
+++ b/pkg/modules/generators/app_configurations_generator.go
@@ -115,7 +115,7 @@ func (g *appConfigurationGenerator) Generate(i *apiv1.Intent) error {
 	// Patcher logic patches generated resources
 	pfs := []modules.NewPatcherFunc{
 		pattrait.NewOpsRulePatcherFunc(g.app, modulesConfig),
-		patmonitoring.NewMonitoringPatcherFunc(g.appName, g.app, g.project),
+		patmonitoring.NewMonitoringPatcherFunc(g.app, modulesConfig),
 	}
 	if err := modules.CallPatchers(i.Resources.GVKIndex(), pfs...); err != nil {
 		return err
diff --git a/pkg/modules/generators/monitoring/monitoring_generator.go b/pkg/modules/generators/monitoring/monitoring_generator.go
index 9fcf1a23..7c47ad49 100644
--- a/pkg/modules/generators/monitoring/monitoring_generator.go
+++ b/pkg/modules/generators/monitoring/monitoring_generator.go
@@ -2,20 +2,26 @@ package monitoring
 
 import (
 	"fmt"
+	"time"
 
 	prometheusv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"kusionstack.io/kusion/pkg/modules/inputs"
 
 	apiv1 "kusionstack.io/kusion/pkg/apis/core/v1"
 	"kusionstack.io/kusion/pkg/modules"
 	"kusionstack.io/kusion/pkg/modules/inputs/monitoring"
+	"kusionstack.io/kusion/pkg/workspace"
 )
 
 type monitoringGenerator struct {
-	project   *apiv1.Project
-	monitor   *monitoring.Monitor
-	appName   string
-	namespace string
+	project       *apiv1.Project
+	stack         *apiv1.Stack
+	appName       string
+	app           *inputs.AppConfiguration
+	modulesConfig map[string]apiv1.GenericConfig
+	namespace     string
 }
 
 func NewMonitoringGenerator(ctx modules.GeneratorContext) (modules.Generator, error) {
@@ -27,10 +33,12 @@ func NewMonitoringGenerator(ctx modules.GeneratorContext) (modules.Generator, er
 		return nil, fmt.Errorf("app name must not be empty")
 	}
 	return &monitoringGenerator{
-		project:   ctx.Project,
-		monitor:   ctx.Application.Monitoring,
-		appName:   ctx.Application.Name,
-		namespace: ctx.Namespace,
+		project:       ctx.Project,
+		stack:         ctx.Stack,
+		app:           ctx.Application,
+		appName:       ctx.Application.Name,
+		modulesConfig: ctx.ModuleInputs,
+		namespace:     ctx.Namespace,
 	}, nil
 }
 
@@ -44,81 +52,45 @@ func (g *monitoringGenerator) Generate(spec *apiv1.Intent) error {
 	if spec.Resources == nil {
 		spec.Resources = make(apiv1.Resources, 0)
 	}
+	// If AppConfiguration does not contain monitoring config, return
+	if g.app.Monitoring == nil {
+		return nil
+	}
 
-	// If Prometheus runs as an operator, it relies on Custom Resources to
-	// manage the scrape configs. CRs (ServiceMonitors and PodMonitors) rely on
-	// corresponding resources (Services and Pods) to have labels that can be
-	// used as part of the label selector for the CR to determine which
-	// service/pods to scrape from.
-	// Here we choose the label name kusion_monitoring_appname for two reasons:
-	// 1. Unlike the label validation in Kubernetes, the label name accepted by
-	// Prometheus cannot contain non-alphanumeric characters except underscore:
-	// https://github.com/prometheus/common/blob/main/model/labels.go#L94
-	// 2. The name should be unique enough that is only created by Kusion and
-	// used to identify a certain application
-	monitoringLabels := map[string]string{
-		"kusion_monitoring_appname": g.appName,
+	// Parse workspace configurations for the monitoring generator.
+	if err := g.parseWorkspaceConfig(); err != nil {
+		return err
 	}
 
-	if g.project.Prometheus != nil && g.project.Prometheus.OperatorMode && g.monitor != nil {
-		if g.project.Prometheus.MonitorType == apiv1.ServiceMonitorType {
-			serviceEndpoint := prometheusv1.Endpoint{
-				Interval:      g.monitor.Interval,
-				ScrapeTimeout: g.monitor.Timeout,
-				Port:          g.monitor.Port,
-				Path:          g.monitor.Path,
-				Scheme:        g.monitor.Scheme,
-			}
-			serviceEndpointList := []prometheusv1.Endpoint{serviceEndpoint}
-			serviceMonitor := &prometheusv1.ServiceMonitor{
-				TypeMeta: metav1.TypeMeta{
-					Kind:       "ServiceMonitor",
-					APIVersion: prometheusv1.SchemeGroupVersion.String(),
-				},
-				ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-service-monitor", g.appName), Namespace: g.namespace},
-				Spec: prometheusv1.ServiceMonitorSpec{
-					Selector: metav1.LabelSelector{
-						MatchLabels: monitoringLabels,
-					},
-					Endpoints: serviceEndpointList,
-				},
+	if g.app.Monitoring != nil && g.app.Monitoring.OperatorMode {
+		if g.app.Monitoring.MonitorType == monitoring.ServiceMonitorType {
+			serviceMonitor, err := g.buildMonitorObject(g.app.Monitoring.MonitorType)
+			if err != nil {
+				return err
 			}
-			err := modules.AppendToIntent(
+			err = modules.AppendToIntent(
 				apiv1.Kubernetes,
-				modules.KubernetesResourceID(serviceMonitor.TypeMeta, serviceMonitor.ObjectMeta),
+				modules.KubernetesResourceID(
+					serviceMonitor.(*prometheusv1.ServiceMonitor).TypeMeta,
+					serviceMonitor.(*prometheusv1.ServiceMonitor).ObjectMeta,
+				),
 				spec,
 				serviceMonitor,
 			)
 			if err != nil {
 				return err
 			}
-		} else if g.project.Prometheus.MonitorType == apiv1.PodMonitorType {
-			podMetricsEndpoint := prometheusv1.PodMetricsEndpoint{
-				Interval:      g.monitor.Interval,
-				ScrapeTimeout: g.monitor.Timeout,
-				Port:          g.monitor.Port,
-				Path:          g.monitor.Path,
-				Scheme:        g.monitor.Scheme,
-			}
-			podMetricsEndpointList := []prometheusv1.PodMetricsEndpoint{podMetricsEndpoint}
-
-			podMonitor := &prometheusv1.PodMonitor{
-				TypeMeta: metav1.TypeMeta{
-					Kind:       "PodMonitor",
-					APIVersion: prometheusv1.SchemeGroupVersion.String(),
-				},
-				ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-pod-monitor", g.appName), Namespace: g.namespace},
-				Spec: prometheusv1.PodMonitorSpec{
-					Selector: metav1.LabelSelector{
-						MatchLabels: monitoringLabels,
-					},
-					PodMetricsEndpoints: podMetricsEndpointList,
-				},
+		} else if g.app.Monitoring.MonitorType == monitoring.PodMonitorType {
+			podMonitor, err := g.buildMonitorObject(g.app.Monitoring.MonitorType)
+			if err != nil {
+				return err
 			}
-
-			err := modules.AppendToIntent(
+			err = modules.AppendToIntent(
 				apiv1.Kubernetes,
-				modules.KubernetesResourceID(podMonitor.TypeMeta, podMonitor.ObjectMeta),
+				modules.KubernetesResourceID(
+					podMonitor.(*prometheusv1.PodMonitor).TypeMeta,
+					podMonitor.(*prometheusv1.PodMonitor).ObjectMeta,
+				),
 				spec,
 				podMonitor,
 			)
@@ -126,9 +98,136 @@ func (g *monitoringGenerator) Generate(spec *apiv1.Intent) error {
 			if err != nil {
 				return err
 			}
 		} else {
-			return fmt.Errorf("MonitorType should either be service or pod %s", g.project.Prometheus.MonitorType)
+			return fmt.Errorf("monitor type should be either Service or Pod, got %s", g.app.Monitoring.MonitorType)
 		}
 	}
 
 	return nil
 }
+
+// parseWorkspaceConfig parses the config items for the monitoring generator in workspace configurations.
+func (g *monitoringGenerator) parseWorkspaceConfig() error {
+	wsConfig, ok := g.modulesConfig[monitoring.ModuleName]
+	// If AppConfiguration contains monitoring config but the workspace does not,
+	// return the error ErrEmptyModuleConfigBlock.
+	if g.app.Monitoring != nil && !ok {
+		return workspace.ErrEmptyModuleConfigBlock
+	}
+
+	if operatorMode, ok := wsConfig[monitoring.OperatorModeKey]; ok {
+		g.app.Monitoring.OperatorMode = operatorMode.(bool)
+	}
+
+	if monitorType, ok := wsConfig[monitoring.MonitorTypeKey]; ok {
+		g.app.Monitoring.MonitorType = monitoring.MonitorType(monitorType.(string))
+	} else {
+		g.app.Monitoring.MonitorType = monitoring.DefaultMonitorType
+	}
+
+	if interval, ok := wsConfig[monitoring.IntervalKey]; ok {
+		g.app.Monitoring.Interval = prometheusv1.Duration(interval.(string))
+	} else {
+		g.app.Monitoring.Interval = monitoring.DefaultInterval
+	}
+
+	if timeout, ok := wsConfig[monitoring.TimeoutKey]; ok {
+		g.app.Monitoring.Timeout = prometheusv1.Duration(timeout.(string))
+	} else {
+		g.app.Monitoring.Timeout = monitoring.DefaultTimeout
+	}
+
+	if scheme, ok := wsConfig[monitoring.SchemeKey]; ok {
+		g.app.Monitoring.Scheme = scheme.(string)
+	} else {
+		g.app.Monitoring.Scheme = monitoring.DefaultScheme
+	}
+
+	parsedTimeout, err := time.ParseDuration(string(g.app.Monitoring.Timeout))
+	if err != nil {
+		return err
+	}
+	parsedInterval, err := time.ParseDuration(string(g.app.Monitoring.Interval))
+	if err != nil {
+		return err
+	}
+
+	if parsedTimeout > parsedInterval {
+		return monitoring.ErrTimeoutGreaterThanInterval
+	}
+
+	return nil
+}
+
+func (g *monitoringGenerator) buildMonitorObject(monitorType monitoring.MonitorType) (runtime.Object, error) {
+	// If Prometheus runs as an operator, it relies on Custom Resources to
+	// manage the scrape configs. CRs (ServiceMonitors and PodMonitors) rely on
+	// corresponding resources (Services and Pods) to have labels that can be
+	// used as part of the label selector for the CR to determine which
+	// service/pods to scrape from.
+	// Here we choose the label name kusion_monitoring_appname for two reasons:
+	// 1. Unlike the label validation in Kubernetes, the label name accepted by
+	// Prometheus cannot contain non-alphanumeric characters except underscore:
+	// https://github.com/prometheus/common/blob/main/model/labels.go#L94
+	// 2. The name should be unique enough that it is only created by Kusion and
+	// used to identify a certain application.
+	monitoringLabels := map[string]string{
+		"kusion_monitoring_appname": g.appName,
+	}
+
+	if monitorType == monitoring.ServiceMonitorType {
+		serviceEndpoint := prometheusv1.Endpoint{
+			Interval:      g.app.Monitoring.Interval,
+			ScrapeTimeout: g.app.Monitoring.Timeout,
+			Port:          g.app.Monitoring.Port,
+			Path:          g.app.Monitoring.Path,
+			Scheme:        g.app.Monitoring.Scheme,
+		}
+		serviceEndpointList := []prometheusv1.Endpoint{serviceEndpoint}
+		serviceMonitor := &prometheusv1.ServiceMonitor{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       "ServiceMonitor",
+				APIVersion: prometheusv1.SchemeGroupVersion.String(),
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      fmt.Sprintf("%s-service-monitor", modules.UniqueAppName(g.project.Name, g.stack.Name, g.appName)),
+				Namespace: g.namespace,
+			},
+			Spec: prometheusv1.ServiceMonitorSpec{
+				Selector: metav1.LabelSelector{
+					MatchLabels: monitoringLabels,
+				},
+				Endpoints: serviceEndpointList,
+			},
+		}
+		return serviceMonitor, nil
+	} else if monitorType == monitoring.PodMonitorType {
+		podMetricsEndpoint := prometheusv1.PodMetricsEndpoint{
+			Interval:      g.app.Monitoring.Interval,
+			ScrapeTimeout: g.app.Monitoring.Timeout,
+			Port:          g.app.Monitoring.Port,
+			Path:          g.app.Monitoring.Path,
+			Scheme:        g.app.Monitoring.Scheme,
+		}
+		podMetricsEndpointList := []prometheusv1.PodMetricsEndpoint{podMetricsEndpoint}
+
+		podMonitor := &prometheusv1.PodMonitor{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       "PodMonitor",
+				APIVersion: prometheusv1.SchemeGroupVersion.String(),
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      fmt.Sprintf("%s-pod-monitor", modules.UniqueAppName(g.project.Name, g.stack.Name, g.appName)),
+				Namespace: g.namespace,
+			},
+			Spec: prometheusv1.PodMonitorSpec{
+				Selector: metav1.LabelSelector{
+					MatchLabels: monitoringLabels,
+				},
+				PodMetricsEndpoints: podMetricsEndpointList,
+			},
+		}
+		return podMonitor, nil
+	}
+
+	return nil, fmt.Errorf("monitor type should be either Service or Pod, got %s", monitorType)
+}
diff --git a/pkg/modules/generators/monitoring/monitoring_generator_test.go b/pkg/modules/generators/monitoring/monitoring_generator_test.go
index f7cd82de..2bbdf9b0 100644
--- a/pkg/modules/generators/monitoring/monitoring_generator_test.go
+++ b/pkg/modules/generators/monitoring/monitoring_generator_test.go
@@ -5,17 +5,19 @@ import (
 	"strings"
 	"testing"
 
-	prometheusv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	"github.com/stretchr/testify/require"
 
 	apiv1 "kusionstack.io/kusion/pkg/apis/core/v1"
+	"kusionstack.io/kusion/pkg/modules"
+	"kusionstack.io/kusion/pkg/modules/inputs"
 	"kusionstack.io/kusion/pkg/modules/inputs/monitoring"
 )
 
 type Fields struct {
 	project *apiv1.Project
-	monitor *monitoring.Monitor
-	appName string
+	stack   *apiv1.Stack
+	app     *inputs.AppConfiguration
+	ws      map[string]apiv1.GenericConfig
 }
 
 type Args struct {
@@ -31,33 +33,32 @@ type TestCase struct {
 }
 
 func BuildMonitoringTestCase(
-	projectName, appName string,
-	interval, timeout prometheusv1.Duration,
-	path, port, scheme string,
-	monitorType apiv1.MonitorType,
-	operatorMode bool,
+	testName, projectName, stackName, appName string,
+	interval, timeout, path, port, scheme, monitorType string,
+	operatorMode, wantErr bool,
 ) *TestCase {
 	var endpointType string
-	var monitorKind apiv1.MonitorType
-	if monitorType == "Service" {
+	var monitorKind monitoring.MonitorType
+	if monitorType == string(monitoring.ServiceMonitorType) {
 		monitorKind = "ServiceMonitor"
 		endpointType = "endpoints"
-	} else if monitorType == "Pod" {
+	} else if monitorType == string(monitoring.PodMonitorType) {
 		monitorKind = "PodMonitor"
 		endpointType = "podMetricsEndpoints"
 	}
 	expectedResources := make([]apiv1.Resource, 0)
+	uniqueName := modules.UniqueAppName(projectName, stackName, appName)
 	if operatorMode {
 		expectedResources = []apiv1.Resource{
 			{
-				ID:   fmt.Sprintf("monitoring.coreos.com/v1:%s:%s:%s-%s-monitor", monitorKind, projectName, appName, strings.ToLower(string(monitorType))),
+				ID:   fmt.Sprintf("monitoring.coreos.com/v1:%s:%s:%s-%s-monitor", monitorKind, projectName, uniqueName, strings.ToLower(monitorType)),
 				Type: "Kubernetes",
 				Attributes: map[string]interface{}{
 					"apiVersion": "monitoring.coreos.com/v1",
 					"kind":       string(monitorKind),
 					"metadata": map[string]interface{}{
 						"creationTimestamp": nil,
-						"name":              fmt.Sprintf("%s-%s-monitor", appName, strings.ToLower(string(monitorType))),
+						"name":              fmt.Sprintf("%s-%s-monitor", uniqueName, strings.ToLower(monitorType)),
 						"namespace":         projectName,
 					},
 					"spec": map[string]interface{}{
@@ -66,8 +67,8 @@ func BuildMonitoringTestCase(
 								"bearerTokenSecret": map[string]interface{}{
 									"key": "",
 								},
-								"interval":      string(interval),
-								"scrapeTimeout": string(timeout),
+								"interval":      interval,
+								"scrapeTimeout": timeout,
 								"path":          path,
 								"port":          port,
 								"scheme":        scheme,
@@ -89,24 +90,30 @@
 			}
 		}
 	}
 	testCase := &TestCase{
-		name: fmt.Sprintf("%s-%s", projectName, appName),
+		name: testName,
 		fields: Fields{
 			project: &apiv1.Project{
 				Name: projectName,
-				Prometheus: &apiv1.PrometheusConfig{
-					OperatorMode: operatorMode,
-					MonitorType:  monitorType,
+			},
+			stack: &apiv1.Stack{
+				Name: stackName,
+			},
+			app: &inputs.AppConfiguration{
+				Name: appName,
+				Monitoring: &monitoring.Monitor{
+					Path: path,
+					Port: port,
 				},
-				Path: "/test-project",
 			},
-			monitor: &monitoring.Monitor{
-				Interval: interval,
-				Timeout:  timeout,
-				Path:     path,
-				Port:     port,
-				Scheme:   scheme,
+			ws: map[string]apiv1.GenericConfig{
+				"monitoring": {
+					"operatorMode": operatorMode,
+					"monitorType":  monitorType,
+					"scheme":       scheme,
+					"interval":     interval,
+					"timeout":      timeout,
+				},
 			},
-			appName: appName,
 		},
 		args: Args{
 			spec: &apiv1.Intent{},
@@ -114,30 +121,36 @@
 		want: &apiv1.Intent{
 			Resources: expectedResources,
 		},
-		wantErr: false,
+		wantErr: wantErr,
 	}
 	return testCase
 }
 
 func TestMonitoringGenerator_Generate(t *testing.T) {
 	tests := []TestCase{
-		*BuildMonitoringTestCase("test-project", "test-app", "15s", "5s", "/metrics", "web", "http", "Service", true),
-		*BuildMonitoringTestCase("test-project", "test-app", "15s", "5s", "/metrics", "web", "http", "Pod", true),
-		*BuildMonitoringTestCase("test-project", "test-app", "30s", "15s", "/metrics", "8080", "http", "Service", false),
-		*BuildMonitoringTestCase("test-project", "test-app", "30s", "15s", "/metrics", "8080", "http", "Pod", false),
+		*BuildMonitoringTestCase("ServiceMonitorTest", "test-project", "test-stack", "test-app", "15s", "5s", "/metrics", "web", "http", "Service", true, false),
+		*BuildMonitoringTestCase("PodMonitorTest", "test-project", "test-stack", "test-app", "15s", "5s", "/metrics", "web", "http", "Pod", true, false),
+		*BuildMonitoringTestCase("ServiceAnnotationTest", "test-project", "test-stack", "test-app", "30s", "15s", "/metrics", "8080", "http", "Service", false, false),
+		*BuildMonitoringTestCase("PodAnnotationTest", "test-project", "test-stack", "test-app", "30s", "15s", "/metrics", "8080", "http", "Pod", false, false),
+		*BuildMonitoringTestCase("InvalidDurationTest", "test-project", "test-stack", "test-app", "15s", "5ssss", "/metrics", "8080", "http", "Pod", false, true),
+		*BuildMonitoringTestCase("InvalidTimeoutTest", "test-project", "test-stack", "test-app", "15s", "30s", "/metrics", "8080", "http", "Pod", false, true),
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			g := &monitoringGenerator{
-				project:   tt.fields.project,
-				monitor:   tt.fields.monitor,
-				appName:   tt.fields.appName,
-				namespace: tt.fields.project.Name,
+				project:       tt.fields.project,
+				stack:         tt.fields.stack,
+				appName:       tt.fields.app.Name,
+				app:           tt.fields.app,
+				modulesConfig: tt.fields.ws,
+				namespace:     tt.fields.project.Name,
 			}
 			if err := g.Generate(tt.args.spec); (err != nil) != tt.wantErr {
 				t.Errorf("Generate() error = %v, wantErr %v", err, tt.wantErr)
 			}
-			require.Equal(t, tt.want, tt.args.spec)
+			if !tt.wantErr {
+				require.Equal(t, tt.want, tt.args.spec)
+			}
 		})
 	}
 }
diff --git a/pkg/modules/generators/workload/workload_generator_test.go b/pkg/modules/generators/workload/workload_generator_test.go
index a0d58132..f6e85950 100644
--- a/pkg/modules/generators/workload/workload_generator_test.go
+++ b/pkg/modules/generators/workload/workload_generator_test.go
@@ -137,10 +137,6 @@ func TestWorkloadGenerator_Generate(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			expectedProject := &apiv1.Project{
 				Name: "test",
-				Prometheus: &apiv1.PrometheusConfig{
-					OperatorMode: false,
-					MonitorType:  "Pod",
-				},
 			}
 			expectedStack := &apiv1.Stack{
 				Name: "teststack",
diff --git a/pkg/modules/inputs/monitoring/monitoring.go b/pkg/modules/inputs/monitoring/monitoring.go
index b4404d57..8891a60a 100644
--- a/pkg/modules/inputs/monitoring/monitoring.go
+++ b/pkg/modules/inputs/monitoring/monitoring.go
@@ -1,13 +1,41 @@
 package monitoring
 
 import (
+	"errors"
+
 	prometheusv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 )
 
+const (
+	ModuleName         = "monitoring"
+	OperatorModeKey    = "operatorMode"
+	MonitorTypeKey     = "monitorType"
+	IntervalKey        = "interval"
+	TimeoutKey         = "timeout"
+	SchemeKey          = "scheme"
+	DefaultMonitorType = "Service"
+	DefaultInterval    = "30s"
+	DefaultTimeout     = "15s"
+	DefaultScheme      = "http"
+
+	PodMonitorType     MonitorType = "Pod"
+	ServiceMonitorType MonitorType = "Service"
+)
+
+var (
+	ErrTimeoutGreaterThanInterval = errors.New("timeout cannot be greater than interval")
+	ErrPathAndPortEmpty           = errors.New("path and port must be present in monitoring configuration")
+)
+
+type (
+	MonitorType string
+)
+
 type Monitor struct {
-	Interval prometheusv1.Duration `yaml:"interval,omitempty" json:"interval,omitempty"`
-	Timeout  prometheusv1.Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"`
-	Path     string                `yaml:"path,omitempty" json:"path,omitempty"`
+	OperatorMode bool                  `yaml:"operatorMode,omitempty" json:"operatorMode,omitempty"`
+	Interval     prometheusv1.Duration `yaml:"interval,omitempty" json:"interval,omitempty"`
+	Timeout      prometheusv1.Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"`
+	MonitorType  MonitorType           `yaml:"monitorType,omitempty" json:"monitorType,omitempty"`
+	Path         string                `yaml:"path,omitempty" json:"path,omitempty"`
 	// Despite what the name suggests, PodMonitor and ServiceMonitor actually
 	// only accept port names as the input. So in operator mode, this port field
 	// need to be the user-provided port name.
diff --git a/pkg/modules/interfaces.go b/pkg/modules/interfaces.go
index 739e9c89..20bb3767 100644
--- a/pkg/modules/interfaces.go
+++ b/pkg/modules/interfaces.go
@@ -2,13 +2,17 @@ package modules
 
 import (
 	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
 
 	v1 "kusionstack.io/kusion/pkg/apis/core/v1"
 	"kusionstack.io/kusion/pkg/modules/inputs"
 )
 
 // GVKDeployment is the GroupVersionKind of Deployment
-var GVKDeployment = appsv1.SchemeGroupVersion.WithKind("Deployment").String()
+var (
+	GVKDeployment = appsv1.SchemeGroupVersion.WithKind("Deployment").String()
+	GVKService    = corev1.SchemeGroupVersion.WithKind("Service").String()
+)
 
 // Generator is an interface for things that can generate Intent from input
 // configurations.
diff --git a/pkg/modules/patchers/monitoring/monitoring_patcher.go b/pkg/modules/patchers/monitoring/monitoring_patcher.go
index 23f75220..d9b958c9 100644
--- a/pkg/modules/patchers/monitoring/monitoring_patcher.go
+++ b/pkg/modules/patchers/monitoring/monitoring_patcher.go
@@ -1,43 +1,53 @@
 package monitoring
 
 import (
+	"time"
+
 	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+
+	prometheusv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	"kusionstack.io/kube-api/apps/v1alpha1"
 
 	apiv1 "kusionstack.io/kusion/pkg/apis/core/v1"
 	"kusionstack.io/kusion/pkg/modules"
-	modelsapp "kusionstack.io/kusion/pkg/modules/inputs"
+	"kusionstack.io/kusion/pkg/modules/inputs"
+	"kusionstack.io/kusion/pkg/modules/inputs/monitoring"
+	"kusionstack.io/kusion/pkg/workspace"
 )
 
 type monitoringPatcher struct {
-	appName string
-	app     *modelsapp.AppConfiguration
-	project *apiv1.Project
+	app           *inputs.AppConfiguration
+	modulesConfig map[string]apiv1.GenericConfig
 }
 
 // NewMonitoringPatcher returns a Patcher.
-func NewMonitoringPatcher(appName string, app *modelsapp.AppConfiguration, project *apiv1.Project) (modules.Patcher, error) {
+func NewMonitoringPatcher(app *inputs.AppConfiguration, modulesConfig map[string]apiv1.GenericConfig) (modules.Patcher, error) {
 	return &monitoringPatcher{
-		appName: appName,
-		app:     app,
-		project: project,
+		app:           app,
+		modulesConfig: modulesConfig,
 	}, nil
 }
 
 // NewMonitoringPatcherFunc returns a NewPatcherFunc.
-func NewMonitoringPatcherFunc(appName string, app *modelsapp.AppConfiguration, project *apiv1.Project) modules.NewPatcherFunc {
+func NewMonitoringPatcherFunc(app *inputs.AppConfiguration, modulesConfig map[string]apiv1.GenericConfig) modules.NewPatcherFunc {
 	return func() (modules.Patcher, error) {
-		return NewMonitoringPatcher(appName, app, project)
+		return NewMonitoringPatcher(app, modulesConfig)
 	}
 }
 
 // Patch implements Patcher interface.
 func (p *monitoringPatcher) Patch(resources map[string][]*apiv1.Resource) error {
-	if p.app.Monitoring == nil || p.project.Prometheus == nil {
+	// If AppConfiguration does not contain monitoring config, return
+	if p.app.Monitoring == nil {
 		return nil
 	}
 
+	// Parse workspace configurations for the monitoring patcher.
+	if err := p.parseWorkspaceConfig(); err != nil {
+		return err
+	}
+
 	// If Prometheus runs as an operator, it relies on Custom Resources to
 	// manage the scrape configs. CRs (ServiceMonitors and PodMonitors) rely on
 	// corresponding resources (Services and Pods) to have labels that can be
@@ -52,20 +62,24 @@ func (p *monitoringPatcher) Patch(resources map[string][]*apiv1.Resource) error
 	monitoringLabels := make(map[string]string)
 	monitoringAnnotations := make(map[string]string)
 
-	if p.project.Prometheus.OperatorMode {
-		monitoringLabels["kusion_monitoring_appname"] = p.appName
+	if p.app.Monitoring.OperatorMode {
+		monitoringLabels["kusion_monitoring_appname"] = p.app.Name
 	} else {
 		// If Prometheus doesn't run as an operator, kusion will generate the
 		// most widely-known annotation for workloads that can be consumed by
 		// the out-of-the-box community version of Prometheus server
-		// installation shown as below:
+		// installation shown as below. In this case, path and port cannot be
+		// omitted.
+		if p.app.Monitoring.Path == "" || p.app.Monitoring.Port == "" {
+			return monitoring.ErrPathAndPortEmpty
+		}
 		monitoringAnnotations["prometheus.io/scrape"] = "true"
 		monitoringAnnotations["prometheus.io/scheme"] = p.app.Monitoring.Scheme
 		monitoringAnnotations["prometheus.io/path"] = p.app.Monitoring.Path
 		monitoringAnnotations["prometheus.io/port"] = p.app.Monitoring.Port
 	}
 
-	if err := modules.PatchResource[appsv1.Deployment](resources, modules.GVKDeployment, func(obj *appsv1.Deployment) error {
+	if err := modules.PatchResource(resources, modules.GVKDeployment, func(obj *appsv1.Deployment) error {
 		obj.Labels = modules.MergeMaps(obj.Labels, monitoringLabels)
 		obj.Annotations = modules.MergeMaps(obj.Annotations, monitoringAnnotations)
 		obj.Spec.Template.Labels = modules.MergeMaps(obj.Spec.Template.Labels, monitoringLabels)
@@ -75,7 +89,7 @@ func (p *monitoringPatcher) Patch(resources map[string][]*apiv1.Resource) error
 		return err
 	}
 
-	if err := modules.PatchResource[v1alpha1.CollaSet](resources, modules.GVKDeployment, func(obj *v1alpha1.CollaSet) error {
+	if err := modules.PatchResource(resources, modules.GVKDeployment, func(obj *v1alpha1.CollaSet) error {
 		obj.Labels = modules.MergeMaps(obj.Labels, monitoringLabels)
 		obj.Annotations = modules.MergeMaps(obj.Annotations, monitoringAnnotations)
 		obj.Spec.Template.Labels = modules.MergeMaps(obj.Spec.Template.Labels, monitoringLabels)
@@ -84,5 +98,66 @@ func (p *monitoringPatcher) Patch(resources map[string][]*apiv1.Resource) error
 	}); err != nil {
 		return err
 	}
+
+	if err := modules.PatchResource(resources, modules.GVKService, func(obj *corev1.Service) error {
+		obj.Labels = modules.MergeMaps(obj.Labels, monitoringLabels)
+		obj.Annotations = modules.MergeMaps(obj.Annotations, monitoringAnnotations)
+		return nil
+	}); err != nil {
+		return err
+	}
+
 	return nil
 }
+
+// parseWorkspaceConfig parses the config items for the monitoring patcher in workspace configurations.
+func (p *monitoringPatcher) parseWorkspaceConfig() error {
+	wsConfig, ok := p.modulesConfig[monitoring.ModuleName]
+	// If AppConfiguration contains monitoring config but the workspace does not,
+	// return the error ErrEmptyModuleConfigBlock.
+	if p.app.Monitoring != nil && !ok {
+		return workspace.ErrEmptyModuleConfigBlock
+	}
+
+	if operatorMode, ok := wsConfig[monitoring.OperatorModeKey]; ok {
+		p.app.Monitoring.OperatorMode = operatorMode.(bool)
+	}
+
+	if monitorType, ok := wsConfig[monitoring.MonitorTypeKey]; ok {
+		p.app.Monitoring.MonitorType = monitoring.MonitorType(monitorType.(string))
+	} else {
+		p.app.Monitoring.MonitorType = monitoring.DefaultMonitorType
+	}
+
+	if interval, ok := wsConfig[monitoring.IntervalKey]; ok {
+		p.app.Monitoring.Interval = prometheusv1.Duration(interval.(string))
+	} else {
+		p.app.Monitoring.Interval = monitoring.DefaultInterval
+	}
+
+	if timeout, ok := wsConfig[monitoring.TimeoutKey]; ok {
+		p.app.Monitoring.Timeout = prometheusv1.Duration(timeout.(string))
+	} else {
+		p.app.Monitoring.Timeout = monitoring.DefaultTimeout
+	}
+
+	if scheme, ok := wsConfig[monitoring.SchemeKey]; ok {
+		p.app.Monitoring.Scheme = scheme.(string)
+	} else {
+		p.app.Monitoring.Scheme = monitoring.DefaultScheme
+	}
+
+	parsedTimeout, err := time.ParseDuration(string(p.app.Monitoring.Timeout))
+	if err != nil {
+		return err
+	}
+	parsedInterval, err := time.ParseDuration(string(p.app.Monitoring.Interval))
+	if err != nil {
+		return err
+	}
+
+	if parsedTimeout > parsedInterval {
+		return monitoring.ErrTimeoutGreaterThanInterval
+	}
+	return nil
+}
diff --git a/pkg/modules/patchers/monitoring/monitoring_patcher_test.go b/pkg/modules/patchers/monitoring/monitoring_patcher_test.go
index f73852a6..7b61869b 100644
--- a/pkg/modules/patchers/monitoring/monitoring_patcher_test.go
+++ b/pkg/modules/patchers/monitoring/monitoring_patcher_test.go
@@ -23,9 +23,8 @@ func Test_monitoringPatcher_Patch(t *testing.T) {
 	}
 
 	type fields struct {
-		appName string
-		app     *modelsapp.AppConfiguration
-		project *apiv1.Project
+		app       *modelsapp.AppConfiguration
+		workspace map[string]apiv1.GenericConfig
 	}
 	type args struct {
 		resources map[string][]*apiv1.Resource
@@ -39,13 +38,20 @@ func Test_monitoringPatcher_Patch(t *testing.T) {
 		{
 			name: "operatorModeTrue",
 			fields: fields{
-				appName: "test",
 				app: &modelsapp.AppConfiguration{
-					Monitoring: &monitoring.Monitor{},
+					Name: "test-app",
+					Monitoring: &monitoring.Monitor{
+						Path: "/metrics",
+						Port: "web",
+					},
 				},
-				project: &apiv1.Project{
-					Prometheus: &apiv1.PrometheusConfig{
-						OperatorMode: true,
+				workspace: map[string]apiv1.GenericConfig{
+					"monitoring": {
+						"operatorMode": true,
+						"monitorType":  "Pod",
+						"scheme":       "http",
+						"interval":     "30s",
+						"timeout":      "15s",
 					},
 				},
 			},
@@ -57,13 +63,19 @@ func Test_monitoringPatcher_Patch(t *testing.T) {
 		{
 			name: "operatorModeFalse",
 			fields: fields{
-				appName: "test",
 				app: &modelsapp.AppConfiguration{
-					Monitoring: &monitoring.Monitor{},
+					Name: "test-app",
+					Monitoring: &monitoring.Monitor{
+						Path: "/metrics",
+						Port: "8080",
+					},
 				},
-				project: &apiv1.Project{
-					Prometheus: &apiv1.PrometheusConfig{
-						OperatorMode: false,
+				workspace: map[string]apiv1.GenericConfig{
+					"monitoring": {
+						"operatorMode": false,
+						"scheme":       "http",
+						"interval":     "30s",
+						"timeout":      "15s",
 					},
 				},
 			},
@@ -76,9 +88,8 @@ func Test_monitoringPatcher_Patch(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			p := &monitoringPatcher{
-				appName: tt.fields.appName,
-				app:     tt.fields.app,
-				project: tt.fields.project,
+				app:           tt.fields.app,
+				modulesConfig: tt.fields.workspace,
 			}
 			tt.wantErr(t, p.Patch(tt.args.resources), fmt.Sprintf("Patch(%v)", tt.args.resources))
 			// check if the deployment is patched
@@ -86,11 +97,11 @@ func Test_monitoringPatcher_Patch(t *testing.T) {
 			if err := runtime.DefaultUnstructuredConverter.FromUnstructured(i.Resources[0].Attributes, &deployment); err != nil {
 				t.Fatal(err)
 			}
-			if tt.fields.project.Prometheus.OperatorMode {
+			if tt.fields.app.Monitoring.OperatorMode {
 				assert.NotNil(t, deployment.Labels)
 				assert.NotNil(t, deployment.Spec.Template.Labels)
-				assert.Equal(t, deployment.Labels["kusion_monitoring_appname"], tt.fields.appName)
-				assert.Equal(t, deployment.Spec.Template.Labels["kusion_monitoring_appname"], tt.fields.appName)
+				assert.Equal(t, deployment.Labels["kusion_monitoring_appname"], tt.fields.app.Name)
+				assert.Equal(t, deployment.Spec.Template.Labels["kusion_monitoring_appname"], tt.fields.app.Name)
 			} else {
 				assert.NotNil(t, deployment.Annotations)
 				assert.NotNil(t, deployment.Spec.Template.Annotations)
@@ -123,31 +134,42 @@ func buildMockDeployment() *appsv1.Deployment {
 
 func TestNewMonitoringPatcherFunc(t *testing.T) {
 	type args struct {
-		appName string
-		app     *modelsapp.AppConfiguration
-		project *apiv1.Project
+		app       *modelsapp.AppConfiguration
+		workspace map[string]apiv1.GenericConfig
 	}
 	tests := []struct {
 		name string
 		args args
-		want modules.NewPatcherFunc
 	}{
 		{
 			name: "NewMonitoringPatcherFunc",
 			args: args{
-				appName: "test",
-				app:     &modelsapp.AppConfiguration{},
-				project: &apiv1.Project{},
+				app: &modelsapp.AppConfiguration{
+					Name: "test-app",
+					Monitoring: &monitoring.Monitor{
+						Path: "/metrics",
+						Port: "web",
+					},
+				},
+				workspace: map[string]apiv1.GenericConfig{
+					"monitoring": {
+						"operatorMode": true,
+						"monitorType":  "Pod",
+						"scheme":       "http",
+						"interval":     "15s",
+						"timeout":      "30s",
+					},
+				},
 			},
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			patcherFunc := NewMonitoringPatcherFunc(tt.args.appName, tt.args.app, tt.args.project)
+			patcherFunc := NewMonitoringPatcherFunc(tt.args.app, tt.args.workspace)
 			assert.NotNil(t, patcherFunc)
 			patcher, err := patcherFunc()
 			assert.NoError(t, err)
-			assert.Equal(t, tt.args.appName, patcher.(*monitoringPatcher).appName)
+			assert.Equal(t, tt.args.app.Name, patcher.(*monitoringPatcher).app.Name)
 		})
 	}
 }
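
A rough usage sketch of the refactored entry point, assembled from the constructor and test code above (the context values and the monitoringgen import alias are illustrative assumptions, not part of this patch):

    package main

    import (
        "fmt"

        apiv1 "kusionstack.io/kusion/pkg/apis/core/v1"
        "kusionstack.io/kusion/pkg/modules"
        monitoringgen "kusionstack.io/kusion/pkg/modules/generators/monitoring"
        "kusionstack.io/kusion/pkg/modules/inputs"
        "kusionstack.io/kusion/pkg/modules/inputs/monitoring"
    )

    func main() {
        // The app declares what to scrape (path and port name); the workspace
        // decides how: operator mode, monitor type, interval/timeout/scheme.
        ctx := modules.GeneratorContext{
            Project: &apiv1.Project{Name: "test-project"}, // illustrative values
            Stack:   &apiv1.Stack{Name: "test-stack"},
            Application: &inputs.AppConfiguration{
                Name:       "test-app",
                Monitoring: &monitoring.Monitor{Path: "/metrics", Port: "web"},
            },
            Namespace: "test-project",
            ModuleInputs: map[string]apiv1.GenericConfig{
                "monitoring": {"operatorMode": true, "monitorType": "Service"},
            },
        }

        g, err := monitoringgen.NewMonitoringGenerator(ctx)
        if err != nil {
            panic(err)
        }

        // With operator mode on, Generate appends a ServiceMonitor whose name
        // is derived from modules.UniqueAppName(project, stack, app).
        spec := &apiv1.Intent{}
        if err := g.Generate(spec); err != nil {
            panic(err)
        }
        fmt.Println(spec.Resources[0].ID)
    }

Splitting the app-level Monitor (what to scrape) from the workspace module block (how to scrape it) is what lets platform teams change interval, timeout, or operator mode without touching application configurations.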