Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add cluster spec to enable/disable ssl #64

Closed
wants to merge 4 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ Following parameters are available to customize the elastic cluster:
- data-node-replicas: Number of data node replicas
- zones: Define which zones to deploy data nodes to for high availability (_Note: Zones are evenly distributed based upon number of data-node-replicas defined_)
- data-volume-size: Size of persistent volume to attach to data nodes
- elastic-search-image: Override the elasticsearch image (e.g. `upmcenterprises/docker-elasticsearch-kubernetes:5.3.1`)
- elastic-search-image: Override the elasticsearch image (e.g. `upmcenterprises/docker-elasticsearch-kubernetes:5.3.1_3`)
- [snapshot](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html)
- scheduler-enabled: If the cron scheduler should be running to enable snapshotting
- bucket-name: Name of S3 bucket to dump snapshots
Expand Down Expand Up @@ -66,7 +66,7 @@ If supplying your own certs, first generate them and add to a secret. Secret sho

## Base image

The base image used is `upmcenterprises/docker-elasticsearch-kubernetes:5.3.1` which can be overridden by adding to the custom cluster you create _(See: [CustomResourceDefinition](#customdesourcedefinition) above)_.
The base image used is `upmcenterprises/docker-elasticsearch-kubernetes:5.3.1_3` which can be overridden by adding to the custom cluster you create _(See: [CustomResourceDefinition](#customdesourcedefinition) above)_.

_NOTE: If no image is specified, the default noted previously is used._

Expand Down
2 changes: 1 addition & 1 deletion cmd/operator/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ var (

func init() {
flag.BoolVar(&printVersion, "version", false, "Show version and quit")
flag.StringVar(&baseImage, "baseImage", "upmcenterprises/docker-elasticsearch-kubernetes:5.3.1", "Base image to use when spinning up the elasticsearch components.")
flag.StringVar(&baseImage, "baseImage", "upmcenterprises/docker-elasticsearch-kubernetes:5.3.1_3", "Base image to use when spinning up the elasticsearch components.")
flag.StringVar(&kubeCfgFile, "kubecfg-file", "", "Location of kubecfg file for access to kubernetes master service; --kube_master_url overrides the URL part of this; if neither this nor --kube_master_url are provided, defaults to service account tokens")
flag.StringVar(&masterHost, "masterhost", "http://127.0.0.1:8001", "Full url to k8s api server")
flag.Parse()
Expand Down
24 changes: 12 additions & 12 deletions example/controller.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -71,16 +71,16 @@ spec:
ports:
- containerPort: 8000
name: http
livenessProbe:
httpGet:
path: /live
port: "8000"
initialDelaySeconds: 10
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /ready
port: "8000"
initialDelaySeconds: 10
timeoutSeconds: 5
livenessProbe:
httpGet:
path: /live
port: "8000"
initialDelaySeconds: 10
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /ready
port: "8000"
initialDelaySeconds: 10
timeoutSeconds: 5
serviceAccount: elasticsearch-operator
7 changes: 4 additions & 3 deletions example/example-es-cluster-minikube.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,14 @@ metadata:
spec:
kibana:
image: upmcenterprises/kibana:5.3.1
#cerebro:
# image: upmcenterprises/cerebro:0.6.8
elastic-search-image: upmcenterprises/docker-elasticsearch-kubernetes:5.3.1
cerebro:
image: upmcenterprises/cerebro:0.6.8
elastic-search-image: upmcenterprises/docker-elasticsearch-kubernetes:5.3.1_3
client-node-replicas: 1
master-node-replicas: 1
data-node-replicas: 3
network-host: 0.0.0.0
enable-ssl: true
zones: []
data-volume-size: 10Gi
java-options: "-Xms256m -Xmx256m"
Expand Down
1 change: 1 addition & 0 deletions example/example-es-cluster.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ spec:
master-node-replicas: 2
data-node-replicas: 3
network-host: 0.0.0.0
enable-ssl: true
zones:
- us-east-1c
- us-east-1d
Expand Down
21 changes: 15 additions & 6 deletions pkg/k8sutil/deployments.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ package k8sutil

import (
"fmt"
"strconv"

"github.com/Sirupsen/logrus"
myspec "github.com/upmc-enterprises/elasticsearch-operator/pkg/spec"
Expand Down Expand Up @@ -93,7 +94,7 @@ func (k *K8sutil) DeleteDeployment(clusterName, namespace, deploymentType string
}

// CreateClientDeployment creates the client deployment
func (k *K8sutil) CreateClientDeployment(baseImage string, replicas *int32, javaOptions string,
func (k *K8sutil) CreateClientDeployment(baseImage string, replicas *int32, javaOptions string, enableSSL bool,
resources myspec.Resources, imagePullSecrets []myspec.ImagePullSecrets, clusterName, statsdEndpoint, networkHost, namespace string) error {

component := fmt.Sprintf("elasticsearch-%s", clusterName)
Expand Down Expand Up @@ -178,6 +179,14 @@ func (k *K8sutil) CreateClientDeployment(baseImage string, replicas *int32, java
Name: "HTTP_ENABLE",
Value: "true",
},
v1.EnvVar{
Name: "SEARCHGUARD_SSL_TRANSPORT_ENABLED",
Value: strconv.FormatBool(enableSSL),
},
v1.EnvVar{
Name: "SEARCHGUARD_SSL_HTTP_ENABLED",
Value: strconv.FormatBool(enableSSL),
},
v1.EnvVar{
Name: "ES_JAVA_OPTS",
Value: javaOptions,
Expand Down Expand Up @@ -278,12 +287,12 @@ func (k *K8sutil) CreateClientDeployment(baseImage string, replicas *int32, java
}

// CreateKibanaDeployment creates a deployment of Kibana
func (k *K8sutil) CreateKibanaDeployment(baseImage, clusterName, namespace string, imagePullSecrets []myspec.ImagePullSecrets) error {
func (k *K8sutil) CreateKibanaDeployment(baseImage, clusterName, namespace string, enableSSL bool, imagePullSecrets []myspec.ImagePullSecrets) error {

replicaCount := int32(1)

component := fmt.Sprintf("elasticsearch-%s", clusterName)
elasticHTTPEndpoint := fmt.Sprintf("https://%s:9200", component)

deploymentName := fmt.Sprintf("%s-%s", kibanaDeploymentName, clusterName)

// Check if deployment exists
Expand Down Expand Up @@ -320,15 +329,15 @@ func (k *K8sutil) CreateKibanaDeployment(baseImage, clusterName, namespace strin
Env: []v1.EnvVar{
v1.EnvVar{
Name: "ELASTICSEARCH_URL",
Value: elasticHTTPEndpoint,
Value: GetESURL(component, enableSSL),
},
v1.EnvVar{
Name: "ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES",
Value: fmt.Sprintf("%s/ca.pem", elasticsearchCertspath),
},
v1.EnvVar{
Name: "SERVER_SSL_ENABLED",
Value: "true",
Value: strconv.FormatBool(enableSSL),
},
v1.EnvVar{
Name: "SERVER_SSL_KEY",
Expand Down Expand Up @@ -391,7 +400,7 @@ func (k *K8sutil) CreateKibanaDeployment(baseImage, clusterName, namespace strin
}

// CreateCerebroDeployment creates a deployment of Cerebro
func (k *K8sutil) CreateCerebroDeployment(baseImage, clusterName, namespace, cert string, imagePullSecrets []myspec.ImagePullSecrets) error {
func (k *K8sutil) CreateCerebroDeployment(baseImage, clusterName, namespace, cert string, enableSSL bool, imagePullSecrets []myspec.ImagePullSecrets) error {
replicaCount := int32(1)
component := fmt.Sprintf("elasticsearch-%s", clusterName)
deploymentName := fmt.Sprintf("%s-%s", cerebroDeploymentName, clusterName)
Expand Down
40 changes: 27 additions & 13 deletions pkg/k8sutil/k8sutil.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ package k8sutil

import (
"fmt"
"strconv"
"time"

"github.com/Sirupsen/logrus"
Expand Down Expand Up @@ -353,9 +354,20 @@ func TemplateImagePullSecrets(ips []myspec.ImagePullSecrets) []v1.LocalObjectRef
return outSecrets
}

// GetESURL returns the Elasticsearch endpoint URL for the given host on
// port 9200, using https when SSL is enabled and plain http otherwise.
func GetESURL(esHost string, enableSSL bool) string {
	scheme := "http"
	if enableSSL {
		scheme = "https"
	}
	return fmt.Sprintf("%s://%s:9200", scheme, esHost)
}

// CreateDataNodeDeployment creates the data node deployment
func (k *K8sutil) CreateDataNodeDeployment(deploymentType string, replicas *int32, baseImage, storageClass string, dataDiskSize string, resources myspec.Resources,
imagePullSecrets []myspec.ImagePullSecrets, clusterName, statsdEndpoint, networkHost, namespace, javaOptions string) error {
enableSSL bool, imagePullSecrets []myspec.ImagePullSecrets, clusterName, statsdEndpoint, networkHost, namespace, javaOptions string) error {

var deploymentName, role, isNodeMaster, isNodeData string

Expand Down Expand Up @@ -473,6 +485,14 @@ func (k *K8sutil) CreateDataNodeDeployment(deploymentType string, replicas *int3
Name: "HTTP_ENABLE",
Value: "true",
},
v1.EnvVar{
Name: "SEARCHGUARD_SSL_TRANSPORT_ENABLED",
Value: strconv.FormatBool(enableSSL),
},
v1.EnvVar{
Name: "SEARCHGUARD_SSL_HTTP_ENABLED",
Value: strconv.FormatBool(enableSSL),
},
v1.EnvVar{
Name: "ES_JAVA_OPTS",
Value: javaOptions,
Expand Down Expand Up @@ -568,10 +588,7 @@ func (k *K8sutil) CreateDataNodeDeployment(deploymentType string, replicas *int3
"volume.beta.kubernetes.io/storage-class": storageClass,
}
}

_, err := k.Kclient.AppsV1beta1().StatefulSets(namespace).Create(statefulSet)

if err != nil {
if _, err := k.Kclient.AppsV1beta1().StatefulSets(namespace).Create(statefulSet); err != nil {
logrus.Error("Could not create stateful set: ", err)
return err
}
Expand All @@ -589,14 +606,15 @@ func (k *K8sutil) CreateDataNodeDeployment(deploymentType string, replicas *int3

if err != nil {
logrus.Error("Could not scale statefulSet: ", err)
return err
}
}
}

return nil
}

func (k *K8sutil) CreateCerebroConfiguration(clusterName string) map[string]string {
func (k *K8sutil) CreateCerebroConfiguration(esHost string, enableSSL bool) map[string]string {

x := map[string]string{}
x["application.conf"] = fmt.Sprintf(`
Expand All @@ -608,8 +626,6 @@ play.ws.ssl {
]
}
}
//play.crypto.secret = "ki:s:[[@=Ag?QIW2jMwkY:eqvrJ]JqoJyi2axj3ZvOv^/KavOT4ViJSv?6YY4[N"
//play.http.secret.key = "ki:s:[[@=Ag?QIW2jMwkY:eqvrJ]JqoJyi2axj3ZvOv^/KavOT4ViJSv?6YY4[N"
secret = "ki:s:[[@=Ag?QIW2jMwkY:eqvrJ]JqoJyi2axj3ZvOv^/KavOT4ViJSv?6YY4[N"
# Application base path
basePath = "/"
Expand All @@ -623,15 +639,13 @@ pidfile.path=/dev/null
rest.history.size = 50 // defaults to 50 if not specified

# Path of local database file
#data.path: "/var/lib/cerebro/cerebro.db"
data.path = "./cerebro.db"
hosts = [
{
host = "%s"
name = "es-servers"
name = "%s"
}
]
`, elasticsearchCertspath, elasticsearchCertspath, fmt.Sprintf("https://%s:9200",
fmt.Sprintf(fmt.Sprintf("elasticsearch-%s", clusterName))))
]`, elasticsearchCertspath, elasticsearchCertspath, GetESURL(esHost, enableSSL), esHost)

return x
}
26 changes: 26 additions & 0 deletions pkg/k8sutil/k8sutil_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
package k8sutil

import "testing"

// TestGetESURL verifies that GetESURL builds the endpoint URL with the
// https scheme when SSL is enabled and the http scheme when it is not.
func TestGetESURL(t *testing.T) {
	for _, v := range []struct {
		host      string
		expected  string
		enableSSL bool
	}{
		{"es-ssl", "https://es-ssl:9200", true},
		{"es-bla", "http://es-bla:9200", false},
	} {
		// Pass the format string and arguments directly to Errorf instead of
		// pre-formatting with fmt.Sprintf: handing a non-constant string to a
		// printf-style function trips `go vet` and would misformat if the
		// data ever contained a '%' verb.
		if esURL := GetESURL(v.host, v.enableSSL); esURL != v.expected {
			t.Errorf("Expected %s, got %s", v.expected, esURL)
		}
	}
}
35 changes: 20 additions & 15 deletions pkg/processor/processor.go
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,7 @@ func (p *Processor) refreshClusters() error {
DataNodeReplicas: cluster.Spec.DataNodeReplicas,
Zones: cluster.Spec.Zones,
DataDiskSize: cluster.Spec.DataDiskSize,
EnableSSL: cluster.Spec.EnableSSL,
ElasticSearchImage: cluster.Spec.ElasticSearchImage,
JavaOptions: cluster.Spec.JavaOptions,
NetworkHost: cluster.Spec.NetworkHost,
Expand All @@ -160,7 +161,7 @@ func (p *Processor) refreshClusters() error {
cluster.Spec.Snapshot.SchedulerEnabled,
cluster.Spec.Snapshot.Authentication.UserName,
cluster.Spec.Snapshot.Authentication.Password,
p.k8sclient.GetClientServiceNameFullDNS(cluster.ObjectMeta.Name, cluster.ObjectMeta.Namespace),
k8sutil.GetESURL(p.k8sclient.GetClientServiceNameFullDNS(cluster.ObjectMeta.Name, cluster.ObjectMeta.Namespace), cluster.Spec.EnableSSL),
cluster.ObjectMeta.Name,
cluster.ObjectMeta.Namespace,
p.k8sclient.Kclient,
Expand Down Expand Up @@ -260,7 +261,7 @@ func (p *Processor) processElasticSearchCluster(c *myspec.ElasticsearchCluster)
return err
}

if err := p.k8sclient.CreateClientDeployment(baseImage, &c.Spec.ClientNodeReplicas, c.Spec.JavaOptions,
if err := p.k8sclient.CreateClientDeployment(baseImage, &c.Spec.ClientNodeReplicas, c.Spec.JavaOptions, c.Spec.EnableSSL,
c.Spec.Resources, c.Spec.ImagePullSecrets, c.ObjectMeta.Name, c.Spec.Instrumentation.StatsdHost, c.Spec.NetworkHost, c.ObjectMeta.Namespace); err != nil {
logrus.Error("Error creating client deployment ", err)
return err
Expand All @@ -284,22 +285,25 @@ func (p *Processor) processElasticSearchCluster(c *myspec.ElasticsearchCluster)
// Create Master Nodes
for index, count := range zoneDistributionMaster {
if err := p.k8sclient.CreateDataNodeDeployment("master", &count, baseImage, c.Spec.Zones[index], c.Spec.DataDiskSize, c.Spec.Resources,
c.Spec.ImagePullSecrets, c.ObjectMeta.Name, c.Spec.Instrumentation.StatsdHost, c.Spec.NetworkHost,
c.Spec.EnableSSL, c.Spec.ImagePullSecrets, c.ObjectMeta.Name, c.Spec.Instrumentation.StatsdHost, c.Spec.NetworkHost,
c.ObjectMeta.Namespace, c.Spec.JavaOptions); err != nil {
logrus.Error("Error creating master node deployment ", err)
return err
}

}

// Create Data Nodes
for index, count := range zoneDistributionData {

if err := p.k8sclient.CreateDataNodeDeployment("data", &count, baseImage, c.Spec.Zones[index], c.Spec.DataDiskSize, c.Spec.Resources,
c.Spec.ImagePullSecrets, c.ObjectMeta.Name, c.Spec.Instrumentation.StatsdHost, c.Spec.NetworkHost,
c.Spec.EnableSSL, c.Spec.ImagePullSecrets, c.ObjectMeta.Name, c.Spec.Instrumentation.StatsdHost, c.Spec.NetworkHost,
c.ObjectMeta.Namespace, c.Spec.JavaOptions); err != nil {
logrus.Error("Error creating data node deployment ", err)

return err
}

}
} else {
// No zones defined, rely on current provisioning logic which may break. Other strategy is to use emptyDir?
Expand All @@ -310,7 +314,7 @@ func (p *Processor) processElasticSearchCluster(c *myspec.ElasticsearchCluster)

// Create Master Nodes
if err := p.k8sclient.CreateDataNodeDeployment("master", func() *int32 { i := int32(c.Spec.MasterNodeReplicas); return &i }(), baseImage, c.Spec.Storage.StorageClass,
c.Spec.DataDiskSize, c.Spec.Resources, c.Spec.ImagePullSecrets, c.ObjectMeta.Name,
c.Spec.DataDiskSize, c.Spec.Resources, c.Spec.EnableSSL, c.Spec.ImagePullSecrets, c.ObjectMeta.Name,
c.Spec.Instrumentation.StatsdHost, c.Spec.NetworkHost, c.ObjectMeta.Namespace, c.Spec.JavaOptions); err != nil {
logrus.Error("Error creating master node deployment ", err)

Expand All @@ -319,7 +323,7 @@ func (p *Processor) processElasticSearchCluster(c *myspec.ElasticsearchCluster)

// Create Data Nodes
if err := p.k8sclient.CreateDataNodeDeployment("data", func() *int32 { i := int32(c.Spec.DataNodeReplicas); return &i }(), baseImage, c.Spec.Storage.StorageClass,
c.Spec.DataDiskSize, c.Spec.Resources, c.Spec.ImagePullSecrets, c.ObjectMeta.Name,
c.Spec.DataDiskSize, c.Spec.Resources, c.Spec.EnableSSL, c.Spec.ImagePullSecrets, c.ObjectMeta.Name,
c.Spec.Instrumentation.StatsdHost, c.Spec.NetworkHost, c.ObjectMeta.Namespace, c.Spec.JavaOptions); err != nil {
logrus.Error("Error creating data node deployment ", err)
return err
Expand All @@ -329,7 +333,8 @@ func (p *Processor) processElasticSearchCluster(c *myspec.ElasticsearchCluster)
// Deploy Kibana
if c.Spec.Kibana.Image != "" {

if err := p.k8sclient.CreateKibanaDeployment(c.Spec.Kibana.Image, c.ObjectMeta.Name, c.ObjectMeta.Namespace, c.Spec.ImagePullSecrets); err != nil {
if err := p.k8sclient.CreateKibanaDeployment(c.Spec.Kibana.Image, c.ObjectMeta.Name, c.ObjectMeta.Namespace,
c.Spec.EnableSSL, c.Spec.ImagePullSecrets); err != nil {
logrus.Error("Error creating kibana deployment ", err)
return err
}
Expand All @@ -338,14 +343,8 @@ func (p *Processor) processElasticSearchCluster(c *myspec.ElasticsearchCluster)

// Deploy Cerebro
if c.Spec.Cerebro.Image != "" {
name := fmt.Sprintf("%s-%s", c.ObjectMeta.Name, "cerebro")
if err := p.k8sclient.CreateCerebroDeployment(c.Spec.Cerebro.Image, c.ObjectMeta.Name, c.ObjectMeta.Namespace, name, c.Spec.ImagePullSecrets); err != nil {
logrus.Error("Error creating cerebro deployment ", err)
return err
}
// TODO create service

cerebroConf := p.k8sclient.CreateCerebroConfiguration(c.ObjectMeta.Name)
host := fmt.Sprintf("elasticsearch-%s", c.ObjectMeta.Name)
cerebroConf := p.k8sclient.CreateCerebroConfiguration(host, c.Spec.EnableSSL)

// create/update cerebro configMap
if p.k8sclient.ConfigmapExists(c.ObjectMeta.Namespace, fmt.Sprintf("%s-%s", c.ObjectMeta.Name, "cerebro")) {
Expand All @@ -359,6 +358,12 @@ func (p *Processor) processElasticSearchCluster(c *myspec.ElasticsearchCluster)
return err
}
}

if err := p.k8sclient.CreateCerebroDeployment(c.Spec.Cerebro.Image, c.ObjectMeta.Name, c.ObjectMeta.Namespace,
fmt.Sprintf("%s-%s", c.ObjectMeta.Name, "cerebro"), c.Spec.EnableSSL, c.Spec.ImagePullSecrets); err != nil {
logrus.Error("Error creating cerebro deployment ", err)
return err
}
}

// Setup CronSchedule
Expand Down
Loading