diff --git a/README.md b/README.md
index b1fe41f33..f871d0924 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,7 @@ Following parameters are available to customize the elastic cluster:
 - data-node-replicas: Number of data node replicas
 - zones: Define which zones to deploy data nodes to for high availability (_Note: Zones are evenly distributed based upon number of data-node-replicas defined_)
 - data-volume-size: Size of persistent volume to attach to data nodes
-- elastic-search-image: Override the elasticsearch image (e.g. `upmcenterprises/docker-elasticsearch-kubernetes:5.3.1`)
+- elastic-search-image: Override the elasticsearch image (e.g. `upmcenterprises/docker-elasticsearch-kubernetes:5.3.1_3`)
 - [snapshot](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html)
   - scheduler-enabled: If the cron scheduler should be running to enable snapshotting
   - bucket-name: Name of S3 bucket to dump snaptshots
@@ -66,7 +66,7 @@ If supplying your own certs, first generate them and add to a secret. Secret sho
 
 ## Base image
 
-The base image used is `upmcenterprises/docker-elasticsearch-kubernetes:5.3.1` which can be overriden by addeding to the custom cluster you create _(See: [CustomResourceDefinition](#customdesourcedefinition) above)_.
+The base image used is `upmcenterprises/docker-elasticsearch-kubernetes:5.3.1_3` which can be overriden by addeding to the custom cluster you create _(See: [CustomResourceDefinition](#customdesourcedefinition) above)_.
 
 _NOTE: If no image is specified, the default noted previously is used._
 
diff --git a/cmd/operator/main.go b/cmd/operator/main.go
index 381c9b913..1c4e778e9 100644
--- a/cmd/operator/main.go
+++ b/cmd/operator/main.go
@@ -55,7 +55,7 @@ var (
 
 func init() {
 	flag.BoolVar(&printVersion, "version", false, "Show version and quit")
-	flag.StringVar(&baseImage, "baseImage", "upmcenterprises/docker-elasticsearch-kubernetes:5.3.1", "Base image to use when spinning up the elasticsearch components.")
+	flag.StringVar(&baseImage, "baseImage", "upmcenterprises/docker-elasticsearch-kubernetes:5.3.1_3", "Base image to use when spinning up the elasticsearch components.")
 	flag.StringVar(&kubeCfgFile, "kubecfg-file", "", "Location of kubecfg file for access to kubernetes master service; --kube_master_url overrides the URL part of this; if neither this nor --kube_master_url are provided, defaults to service account tokens")
 	flag.StringVar(&masterHost, "masterhost", "http://127.0.0.1:8001", "Full url to k8s api server")
 	flag.Parse()
diff --git a/example/controller.yaml b/example/controller.yaml
index 0121488da..c5d6d9f84 100644
--- a/example/controller.yaml
+++ b/example/controller.yaml
@@ -71,16 +71,16 @@ spec:
         ports:
         - containerPort: 8000
           name: http
-          livenessProbe:
-            httpGet:
-              path: /live
-              port: "8000"
-            initialDelaySeconds: 10
-            timeoutSeconds: 10
-          readinessProbe:
-            httpGet:
-              path: /ready
-              port: "8000"
-            initialDelaySeconds: 10
-            timeoutSeconds: 5
+        livenessProbe:
+          httpGet:
+            path: /live
+            port: "8000"
+          initialDelaySeconds: 10
+          timeoutSeconds: 10
+        readinessProbe:
+          httpGet:
+            path: /ready
+            port: "8000"
+          initialDelaySeconds: 10
+          timeoutSeconds: 5
       serviceAccount: elasticsearch-operator
diff --git a/example/example-es-cluster-minikube.yaml b/example/example-es-cluster-minikube.yaml
index 29e2e40d1..e6940fc16 100644
--- a/example/example-es-cluster-minikube.yaml
+++ b/example/example-es-cluster-minikube.yaml
@@ -5,13 +5,14 @@ metadata:
 spec:
   kibana:
     image: upmcenterprises/kibana:5.3.1
-  #cerebro:
-  #  image: upmcenterprises/cerebro:0.6.8
-  elastic-search-image: upmcenterprises/docker-elasticsearch-kubernetes:5.3.1
+  cerebro:
+    image: upmcenterprises/cerebro:0.6.8
+  elastic-search-image: upmcenterprises/docker-elasticsearch-kubernetes:5.3.1_3
   client-node-replicas: 1
   master-node-replicas: 1
   data-node-replicas: 3
   network-host: 0.0.0.0
+  enable-ssl: true
   zones: []
   data-volume-size: 10Gi
   java-options: "-Xms256m -Xmx256m"
diff --git a/example/example-es-cluster.yaml b/example/example-es-cluster.yaml
index 7551057db..48ea94f21 100644
--- a/example/example-es-cluster.yaml
+++ b/example/example-es-cluster.yaml
@@ -11,6 +11,7 @@ spec:
   master-node-replicas: 2
   data-node-replicas: 3
   network-host: 0.0.0.0
+  enable-ssl: true
   zones:
   - us-east-1c
   - us-east-1d
diff --git a/pkg/k8sutil/deployments.go b/pkg/k8sutil/deployments.go
index 8eaec26e7..4aee8991d 100644
--- a/pkg/k8sutil/deployments.go
+++ b/pkg/k8sutil/deployments.go
@@ -26,6 +26,7 @@ package k8sutil
 
 import (
 	"fmt"
+	"strconv"
 
 	"github.com/Sirupsen/logrus"
 	myspec "github.com/upmc-enterprises/elasticsearch-operator/pkg/spec"
@@ -93,7 +94,7 @@ func (k *K8sutil) DeleteDeployment(clusterName, namespace, deploymentType string
 }
 
 // CreateClientDeployment creates the client deployment
-func (k *K8sutil) CreateClientDeployment(baseImage string, replicas *int32, javaOptions string,
+func (k *K8sutil) CreateClientDeployment(baseImage string, replicas *int32, javaOptions string, enableSSL bool,
 	resources myspec.Resources, imagePullSecrets []myspec.ImagePullSecrets, clusterName, statsdEndpoint, networkHost, namespace string) error {
 
 	component := fmt.Sprintf("elasticsearch-%s", clusterName)
@@ -178,6 +179,14 @@ func (k *K8sutil) CreateClientDeployment(baseImage string, replicas *int32, java
 							Name:  "HTTP_ENABLE",
 							Value: "true",
 						},
+						v1.EnvVar{
+							Name:  "SEARCHGUARD_SSL_TRANSPORT_ENABLED",
+							Value: strconv.FormatBool(enableSSL),
+						},
+						v1.EnvVar{
+							Name:  "SEARCHGUARD_SSL_HTTP_ENABLED",
+							Value: strconv.FormatBool(enableSSL),
+						},
 						v1.EnvVar{
 							Name:  "ES_JAVA_OPTS",
 							Value: javaOptions,
@@ -278,12 +287,12 @@ func (k *K8sutil) CreateClientDeployment(baseImage string, replicas *int32, java
 }
 
 // CreateKibanaDeployment creates a deployment of Kibana
-func (k *K8sutil) CreateKibanaDeployment(baseImage, clusterName, namespace string, imagePullSecrets []myspec.ImagePullSecrets) error {
+func (k *K8sutil) CreateKibanaDeployment(baseImage, clusterName, namespace string, enableSSL bool, imagePullSecrets []myspec.ImagePullSecrets) error {
 
 	replicaCount := int32(1)
 
 	component := fmt.Sprintf("elasticsearch-%s", clusterName)
-	elasticHTTPEndpoint := fmt.Sprintf("https://%s:9200", component)
+
 	deploymentName := fmt.Sprintf("%s-%s", kibanaDeploymentName, clusterName)
 
 	// Check if deployment exists
@@ -320,7 +329,7 @@ func (k *K8sutil) CreateKibanaDeployment(baseImage, clusterName, namespace strin
 					Env: []v1.EnvVar{
 						v1.EnvVar{
 							Name:  "ELASTICSEARCH_URL",
-							Value: elasticHTTPEndpoint,
+							Value: GetESURL(component, enableSSL),
 						},
 						v1.EnvVar{
 							Name:  "ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES",
@@ -328,7 +337,7 @@ func (k *K8sutil) CreateKibanaDeployment(baseImage, clusterName, namespace strin
 						},
 						v1.EnvVar{
 							Name:  "SERVER_SSL_ENABLED",
-							Value: "true",
+							Value: strconv.FormatBool(enableSSL),
 						},
 						v1.EnvVar{
 							Name:  "SERVER_SSL_KEY",
@@ -391,7 +400,7 @@ func (k *K8sutil) CreateKibanaDeployment(baseImage, clusterName, namespace strin
 }
 
 // CreateCerebroDeployment creates a deployment of Cerebro
-func (k *K8sutil) CreateCerebroDeployment(baseImage, clusterName, namespace, cert string, imagePullSecrets []myspec.ImagePullSecrets) error {
+func (k *K8sutil) CreateCerebroDeployment(baseImage, clusterName, namespace, cert string, enableSSL bool, imagePullSecrets []myspec.ImagePullSecrets) error {
 	replicaCount := int32(1)
 	component := fmt.Sprintf("elasticsearch-%s", clusterName)
 	deploymentName := fmt.Sprintf("%s-%s", cerebroDeploymentName, clusterName)
diff --git a/pkg/k8sutil/k8sutil.go b/pkg/k8sutil/k8sutil.go
index efdaf0a20..e074be639 100644
--- a/pkg/k8sutil/k8sutil.go
+++ b/pkg/k8sutil/k8sutil.go
@@ -26,6 +26,7 @@ package k8sutil
 
 import (
 	"fmt"
+	"strconv"
 	"time"
 
 	"github.com/Sirupsen/logrus"
@@ -353,9 +354,20 @@ func TemplateImagePullSecrets(ips []myspec.ImagePullSecrets) []v1.LocalObjectRef
 	return outSecrets
 }
 
+// GetESURL Return elasticsearch url
+func GetESURL(esHost string, enableSSL bool) string {
+
+	if !enableSSL {
+		return fmt.Sprintf("http://%s:9200", esHost)
+	}
+
+	return fmt.Sprintf("https://%s:9200", esHost)
+
+}
+
 // CreateDataNodeDeployment creates the data node deployment
 func (k *K8sutil) CreateDataNodeDeployment(deploymentType string, replicas *int32, baseImage, storageClass string, dataDiskSize string, resources myspec.Resources,
-	imagePullSecrets []myspec.ImagePullSecrets, clusterName, statsdEndpoint, networkHost, namespace, javaOptions string) error {
+	enableSSL bool, imagePullSecrets []myspec.ImagePullSecrets, clusterName, statsdEndpoint, networkHost, namespace, javaOptions string) error {
 
 	var deploymentName, role, isNodeMaster, isNodeData string
 
@@ -473,6 +485,14 @@ func (k *K8sutil) CreateDataNodeDeployment(deploymentType string, replicas *int3
 							Name:  "HTTP_ENABLE",
 							Value: "true",
 						},
+						v1.EnvVar{
+							Name:  "SEARCHGUARD_SSL_TRANSPORT_ENABLED",
+							Value: strconv.FormatBool(enableSSL),
+						},
+						v1.EnvVar{
+							Name:  "SEARCHGUARD_SSL_HTTP_ENABLED",
+							Value: strconv.FormatBool(enableSSL),
+						},
 						v1.EnvVar{
 							Name:  "ES_JAVA_OPTS",
 							Value: javaOptions,
@@ -568,10 +588,7 @@ func (k *K8sutil) CreateDataNodeDeployment(deploymentType string, replicas *int3
 				"volume.beta.kubernetes.io/storage-class": storageClass,
 			}
 		}
-
-		_, err := k.Kclient.AppsV1beta1().StatefulSets(namespace).Create(statefulSet)
-
-		if err != nil {
+		if _, err := k.Kclient.AppsV1beta1().StatefulSets(namespace).Create(statefulSet); err != nil {
 			logrus.Error("Could not create stateful set: ", err)
 			return err
 		}
@@ -589,6 +606,7 @@ func (k *K8sutil) CreateDataNodeDeployment(deploymentType string, replicas *int3
 
 			if err != nil {
 				logrus.Error("Could not scale statefulSet: ", err)
+				return err
 			}
 		}
 	}
@@ -596,7 +614,7 @@ func (k *K8sutil) CreateDataNodeDeployment(deploymentType string, replicas *int3
 	return nil
 }
 
-func (k *K8sutil) CreateCerebroConfiguration(clusterName string) map[string]string {
+func (k *K8sutil) CreateCerebroConfiguration(esHost string, enableSSL bool) map[string]string {
 
 	x := map[string]string{}
 	x["application.conf"] = fmt.Sprintf(`
@@ -608,8 +626,6 @@ play.ws.ssl {
     ]
   }
 }
-//play.crypto.secret = "ki:s:[[@=Ag?QIW2jMwkY:eqvrJ]JqoJyi2axj3ZvOv^/KavOT4ViJSv?6YY4[N"
-//play.http.secret.key = "ki:s:[[@=Ag?QIW2jMwkY:eqvrJ]JqoJyi2axj3ZvOv^/KavOT4ViJSv?6YY4[N"
 secret = "ki:s:[[@=Ag?QIW2jMwkY:eqvrJ]JqoJyi2axj3ZvOv^/KavOT4ViJSv?6YY4[N"
 # Application base path
 basePath = "/"
@@ -623,15 +639,13 @@ pidfile.path=/dev/null
 rest.history.size = 50 // defaults to 50 if not specified
 
 # Path of local database file
-#data.path: "/var/lib/cerebro/cerebro.db"
 data.path = "./cerebro.db"
 hosts = [
   {
     host = "%s"
-    name = "es-servers"
+    name = "%s"
   }
-]
fmt.Sprintf("https://%s:9200", - fmt.Sprintf(fmt.Sprintf("elasticsearch-%s", clusterName)))) +]`, elasticsearchCertspath, elasticsearchCertspath, GetESURL(esHost, enableSSL), esHost) + return x } diff --git a/pkg/k8sutil/k8sutil_test.go b/pkg/k8sutil/k8sutil_test.go new file mode 100644 index 000000000..07d8f7bf1 --- /dev/null +++ b/pkg/k8sutil/k8sutil_test.go @@ -0,0 +1,26 @@ +package k8sutil + +import ( + "fmt" + "testing" +) + +func TestGetESURL(t *testing.T) { + + for _, v := range []struct { + host string + expected string + enableSSL bool + }{ + {"es-ssl", "https://es-ssl:9200", true}, + {"es-bla", "http://es-bla:9200", false}, + } { + + esURL := GetESURL(v.host, v.enableSSL) + + if esURL != v.expected { + t.Errorf(fmt.Sprintf("Expected %s, got %s", v.expected, esURL)) + } + + } +} \ No newline at end of file diff --git a/pkg/processor/processor.go b/pkg/processor/processor.go index ea8eb8c94..2a2b8e419 100644 --- a/pkg/processor/processor.go +++ b/pkg/processor/processor.go @@ -141,6 +141,7 @@ func (p *Processor) refreshClusters() error { DataNodeReplicas: cluster.Spec.DataNodeReplicas, Zones: cluster.Spec.Zones, DataDiskSize: cluster.Spec.DataDiskSize, + EnableSSL: cluster.Spec.EnableSSL, ElasticSearchImage: cluster.Spec.ElasticSearchImage, JavaOptions: cluster.Spec.JavaOptions, NetworkHost: cluster.Spec.NetworkHost, @@ -160,7 +161,7 @@ func (p *Processor) refreshClusters() error { cluster.Spec.Snapshot.SchedulerEnabled, cluster.Spec.Snapshot.Authentication.UserName, cluster.Spec.Snapshot.Authentication.Password, - p.k8sclient.GetClientServiceNameFullDNS(cluster.ObjectMeta.Name, cluster.ObjectMeta.Namespace), + k8sutil.GetESURL(p.k8sclient.GetClientServiceNameFullDNS(cluster.ObjectMeta.Name, cluster.ObjectMeta.Namespace), cluster.Spec.EnableSSL), cluster.ObjectMeta.Name, cluster.ObjectMeta.Namespace, p.k8sclient.Kclient, @@ -260,7 +261,7 @@ func (p *Processor) processElasticSearchCluster(c *myspec.ElasticsearchCluster) return err } - if err := p.k8sclient.CreateClientDeployment(baseImage, &c.Spec.ClientNodeReplicas, c.Spec.JavaOptions, + if err := p.k8sclient.CreateClientDeployment(baseImage, &c.Spec.ClientNodeReplicas, c.Spec.JavaOptions, c.Spec.EnableSSL, c.Spec.Resources, c.Spec.ImagePullSecrets, c.ObjectMeta.Name, c.Spec.Instrumentation.StatsdHost, c.Spec.NetworkHost, c.ObjectMeta.Namespace); err != nil { logrus.Error("Error creating client deployment ", err) return err @@ -284,22 +285,25 @@ func (p *Processor) processElasticSearchCluster(c *myspec.ElasticsearchCluster) // Create Master Nodes for index, count := range zoneDistributionMaster { if err := p.k8sclient.CreateDataNodeDeployment("master", &count, baseImage, c.Spec.Zones[index], c.Spec.DataDiskSize, c.Spec.Resources, - c.Spec.ImagePullSecrets, c.ObjectMeta.Name, c.Spec.Instrumentation.StatsdHost, c.Spec.NetworkHost, + c.Spec.EnableSSL, c.Spec.ImagePullSecrets, c.ObjectMeta.Name, c.Spec.Instrumentation.StatsdHost, c.Spec.NetworkHost, c.ObjectMeta.Namespace, c.Spec.JavaOptions); err != nil { logrus.Error("Error creating master node deployment ", err) return err } + } // Create Data Nodes for index, count := range zoneDistributionData { + if err := p.k8sclient.CreateDataNodeDeployment("data", &count, baseImage, c.Spec.Zones[index], c.Spec.DataDiskSize, c.Spec.Resources, - c.Spec.ImagePullSecrets, c.ObjectMeta.Name, c.Spec.Instrumentation.StatsdHost, c.Spec.NetworkHost, + c.Spec.EnableSSL, c.Spec.ImagePullSecrets, c.ObjectMeta.Name, c.Spec.Instrumentation.StatsdHost, c.Spec.NetworkHost, c.ObjectMeta.Namespace, 
 				c.ObjectMeta.Namespace, c.Spec.JavaOptions); err != nil {
 				logrus.Error("Error creating data node deployment ", err)
 				return err
 			}
+
 		}
 	} else {
 		// No zones defined, rely on current provisioning logic which may break. Other strategy is to use emptyDir?
@@ -310,7 +314,7 @@ func (p *Processor) processElasticSearchCluster(c *myspec.ElasticsearchCluster)
 		// Create Master Nodes
 		if err := p.k8sclient.CreateDataNodeDeployment("master", func() *int32 { i := int32(c.Spec.MasterNodeReplicas); return &i }(), baseImage, c.Spec.Storage.StorageClass,
-			c.Spec.DataDiskSize, c.Spec.Resources, c.Spec.ImagePullSecrets, c.ObjectMeta.Name,
+			c.Spec.DataDiskSize, c.Spec.Resources, c.Spec.EnableSSL, c.Spec.ImagePullSecrets, c.ObjectMeta.Name,
 			c.Spec.Instrumentation.StatsdHost, c.Spec.NetworkHost, c.ObjectMeta.Namespace,
 			c.Spec.JavaOptions); err != nil {
 			logrus.Error("Error creating master node deployment ", err)
@@ -319,7 +323,7 @@ func (p *Processor) processElasticSearchCluster(c *myspec.ElasticsearchCluster)
 
 		// Create Data Nodes
 		if err := p.k8sclient.CreateDataNodeDeployment("data", func() *int32 { i := int32(c.Spec.DataNodeReplicas); return &i }(), baseImage, c.Spec.Storage.StorageClass,
-			c.Spec.DataDiskSize, c.Spec.Resources, c.Spec.ImagePullSecrets, c.ObjectMeta.Name,
+			c.Spec.DataDiskSize, c.Spec.Resources, c.Spec.EnableSSL, c.Spec.ImagePullSecrets, c.ObjectMeta.Name,
 			c.Spec.Instrumentation.StatsdHost, c.Spec.NetworkHost, c.ObjectMeta.Namespace, c.Spec.JavaOptions); err != nil {
 			logrus.Error("Error creating data node deployment ", err)
 			return err
@@ -329,7 +333,8 @@ func (p *Processor) processElasticSearchCluster(c *myspec.ElasticsearchCluster)
 
 	// Deploy Kibana
 	if c.Spec.Kibana.Image != "" {
-		if err := p.k8sclient.CreateKibanaDeployment(c.Spec.Kibana.Image, c.ObjectMeta.Name, c.ObjectMeta.Namespace, c.Spec.ImagePullSecrets); err != nil {
+		if err := p.k8sclient.CreateKibanaDeployment(c.Spec.Kibana.Image, c.ObjectMeta.Name, c.ObjectMeta.Namespace,
+			c.Spec.EnableSSL, c.Spec.ImagePullSecrets); err != nil {
 			logrus.Error("Error creating kibana deployment ", err)
 			return err
 		}
@@ -338,14 +343,8 @@ func (p *Processor) processElasticSearchCluster(c *myspec.ElasticsearchCluster)
 
 	// Deploy Cerebro
 	if c.Spec.Cerebro.Image != "" {
-		name := fmt.Sprintf("%s-%s", c.ObjectMeta.Name, "cerebro")
-		if err := p.k8sclient.CreateCerebroDeployment(c.Spec.Cerebro.Image, c.ObjectMeta.Name, c.ObjectMeta.Namespace, name, c.Spec.ImagePullSecrets); err != nil {
-			logrus.Error("Error creating cerebro deployment ", err)
-			return err
-		}
-		// TODO create service
-
-		cerebroConf := p.k8sclient.CreateCerebroConfiguration(c.ObjectMeta.Name)
+		host := fmt.Sprintf("elasticsearch-%s", c.ObjectMeta.Name)
+		cerebroConf := p.k8sclient.CreateCerebroConfiguration(host, c.Spec.EnableSSL)
 
 		// create/update cerebro configMap
 		if p.k8sclient.ConfigmapExists(c.ObjectMeta.Namespace, fmt.Sprintf("%s-%s", c.ObjectMeta.Name, "cerebro")) {
@@ -359,6 +358,12 @@ func (p *Processor) processElasticSearchCluster(c *myspec.ElasticsearchCluster)
 				return err
 			}
 		}
+
+		if err := p.k8sclient.CreateCerebroDeployment(c.Spec.Cerebro.Image, c.ObjectMeta.Name, c.ObjectMeta.Namespace,
+			fmt.Sprintf("%s-%s", c.ObjectMeta.Name, "cerebro"), c.Spec.EnableSSL, c.Spec.ImagePullSecrets); err != nil {
+			logrus.Error("Error creating cerebro deployment ", err)
+			return err
+		}
 	}
 
 	// Setup CronSchedule
diff --git a/pkg/snapshot/scheduler.go b/pkg/snapshot/scheduler.go
index 4bef64ba3..1076cb559 100644
--- a/pkg/snapshot/scheduler.go
+++ b/pkg/snapshot/scheduler.go
@@ -27,11 +27,10 @@ package snapshot
 import (
"fmt" - "k8s.io/client-go/kubernetes" - "github.com/Sirupsen/logrus" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" apiv1 "k8s.io/client-go/pkg/api/v1" batchv1 "k8s.io/client-go/pkg/apis/batch/v1" batch "k8s.io/client-go/pkg/apis/batch/v2alpha1" @@ -62,8 +61,7 @@ type Authentication struct { } // New creates an instance of Scheduler -func New(bucketName, cronSchedule string, enabled bool, userName, password, svcURL, clusterName, namespace string, kc kubernetes.Interface) *Scheduler { - elasticURL := fmt.Sprintf("https://%s:9200", svcURL) // Internal service name of cluster +func New(bucketName, cronSchedule string, enabled bool, userName, password, elasticURL, clusterName, namespace string, kc kubernetes.Interface) *Scheduler { return &Scheduler{ s3bucketName: bucketName, diff --git a/pkg/spec/cluster.go b/pkg/spec/cluster.go index c88e3b3fb..3c0876141 100644 --- a/pkg/spec/cluster.go +++ b/pkg/spec/cluster.go @@ -84,6 +84,9 @@ type ClusterSpec struct { // ImagePullSecrets defines credentials to pull image from private repository (optional) ImagePullSecrets []ImagePullSecrets `json:"image-pull-secrets"` + // Flag to enable/disable ssl + EnableSSL bool `json:"enable-ssl"` + // Resources defines memory / cpu constraints Resources Resources `json:"resources"`