Update module sigs.k8s.io/controller-runtime to v0.15.0 (#6847)
This PR updates sigs.k8s.io/controller-runtime from v0.14.6 to v0.15.0 and adapts ECK to the breaking changes introduced by this new version:

- refactor watches to the new builder API (see the first sketch below)
- refactor tests to use client.Object instead of runtime.Object
- refactor webhook validate func signatures (see the second sketch below)
- remove webhook injections
- replace deprecated opts.Namespace/cache.MultiNamespacedCacheBuilder with opts.Cache.Namespaces
- replace deprecated opts.Port with opts.WebhookServer
- replace deprecated opts.CertDir with opts.WebhookServer.CertDir
- replace deprecated PollImmediate
- add finalizers for deletion
- lint: fix unused arguments
- update TestReconcileElasticsearch_Reconcile because there are no more conflicts
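
The watches refactoring itself is not visible in the files rendered below. As a rough before/after sketch (hypothetical reconciler and watched types, not code from this PR), the controller-runtime v0.15 builder change looks like this:

package main

import (
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// setupController reconciles Secrets and also enqueues requests for ConfigMap events.
// v0.14: Watches(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForObject{})
// v0.15: the builder accepts the client.Object directly; the source.Kind wrapper
// struct was replaced by a function taking the manager's cache
// (source.Kind(mgr.GetCache(), &corev1.ConfigMap{})) for the low-level controller API.
func setupController(mgr manager.Manager, r reconcile.Reconciler) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.Secret{}).
		Watches(&corev1.ConfigMap{}, &handler.EnqueueRequestForObject{}).
		Complete(r)
}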
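
Likewise, a minimal sketch of the new webhook validate func signatures, using a hypothetical Foo type: in v0.15 each func also returns admission.Warnings, which is why the diff below adds the webhook/admission import.

package v1

import (
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// Foo is a stand-in for an ECK resource type implementing webhook.Validator.
type Foo struct{}

// v0.14 signature was ValidateCreate() error; v0.15 adds admission.Warnings
// (a []string of warnings returned to the API client) to all three funcs.
func (f *Foo) ValidateCreate() (admission.Warnings, error) { return nil, nil }

func (f *Foo) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { return nil, nil }

func (f *Foo) ValidateDelete() (admission.Warnings, error) { return nil, nil }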
renovate[bot] committed Jun 1, 2023
1 parent ee17f58 commit 9959151
Showing 136 changed files with 2,831 additions and 1,592 deletions.
398 changes: 301 additions & 97 deletions NOTICE.txt

Large diffs are not rendered by default.

28 changes: 18 additions & 10 deletions cmd/manager/main.go
@@ -35,11 +35,11 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection/resourcelock"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
crwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

"github.com/elastic/cloud-on-k8s/v2/pkg/about"
@@ -545,7 +545,6 @@ func startOperator(ctx context.Context) error {
// Create a new Cmd to provide shared dependencies and start components
opts := ctrl.Options{
Scheme: clientgoscheme.Scheme,
CertDir: viper.GetString(operator.WebhookCertDirFlag),
LeaderElection: viper.GetBool(operator.EnableLeaderElection),
LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
LeaderElectionID: LeaderElectionLeaseName,
@@ -560,24 +559,30 @@ func startOperator(ctx context.Context) error {
log.Info("Operator configured to manage all namespaces")
case len(managedNamespaces) == 1 && managedNamespaces[0] == operatorNamespace:
log.Info("Operator configured to manage a single namespace", "namespace", managedNamespaces[0], "operator_namespace", operatorNamespace)
// opts.Namespace implicitly allows watching cluster-scoped resources (e.g. storage classes)
opts.Namespace = managedNamespaces[0]

default:
log.Info("Operator configured to manage multiple namespaces", "namespaces", managedNamespaces, "operator_namespace", operatorNamespace)
// The managed cache should always include the operator namespace so that we can work with operator-internal resources.
managedNamespaces = append(managedNamespaces, operatorNamespace)

opts.NewCache = cache.MultiNamespacedCacheBuilder(managedNamespaces)
}

// implicitly allows watching cluster-scoped resources (e.g. storage classes)
opts.Cache.Namespaces = managedNamespaces

// only expose prometheus metrics if provided a non-zero port
metricsPort := viper.GetInt(operator.MetricsPortFlag)
if metricsPort != 0 {
log.Info("Exposing Prometheus metrics on /metrics", "port", metricsPort)
}
opts.MetricsBindAddress = fmt.Sprintf(":%d", metricsPort) // 0 to disable

opts.Port = viper.GetInt(operator.WebhookPortFlag)
webhookPort := viper.GetInt(operator.WebhookPortFlag)
webhookCertDir := viper.GetString(operator.WebhookCertDirFlag)
opts.WebhookServer = crwebhook.NewServer(crwebhook.Options{
Port: webhookPort,
CertDir: webhookCertDir,
})

mgr, err := ctrl.NewManager(cfg, opts)
if err != nil {
log.Error(err, "Failed to create controller manager")
@@ -677,7 +682,7 @@ func startOperator(ctx context.Context) error {
}

if viper.GetBool(operator.EnableWebhookFlag) {
setupWebhook(ctx, mgr, params, clientset, exposedNodeLabels, managedNamespaces, tracer)
setupWebhook(ctx, mgr, params, webhookCertDir, clientset, exposedNodeLabels, managedNamespaces, tracer)
}

enforceRbacOnRefs := viper.GetBool(operator.EnforceRBACOnRefsFlag)
@@ -969,6 +974,7 @@ func setupWebhook(
ctx context.Context,
mgr manager.Manager,
params operator.Parameters,
webhookCertDir string,
clientset kubernetes.Interface,
exposedNodeLabels esvalidation.NodeLabels,
managedNamespaces []string,
@@ -1020,13 +1026,15 @@
// wait for the secret to be populated in the local filesystem before returning
interval := time.Second * 1
timeout := time.Second * 30
keyPath := filepath.Join(mgr.GetWebhookServer().CertDir, certificates.CertFileName)
keyPath := filepath.Join(webhookCertDir, certificates.CertFileName)
log.Info("Polling for the webhook certificate to be available", "path", keyPath)
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
//nolint:staticcheck
err := wait.PollImmediateWithContext(ctx, interval, timeout, func(_ context.Context) (bool, error) {
_, err := os.Stat(keyPath)
// err could be that the file does not exist, but also that permission was denied or something else
if os.IsNotExist(err) {
log.V(1).Info("Webhook certificate file not present on filesystem yet", "path", keyPath)

return false, nil
} else if err != nil {
log.Error(err, "Error checking if webhook secret path exists", "path", keyPath)
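
Note that the polling change above still calls the deprecated wait.PollImmediateWithContext (hence the //nolint:staticcheck). A fully non-deprecated sketch, assuming apimachinery v0.27+ where wait.PollUntilContextTimeout is available (waitForFile is a hypothetical helper, not part of this PR):

package main

import (
	"context"
	"os"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForFile polls until the file at path exists, mirroring the webhook
// certificate wait above. The fourth argument (immediate=true) preserves
// PollImmediate's behavior of checking once before the first interval.
func waitForFile(ctx context.Context, path string) error {
	return wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, true,
		func(_ context.Context) (bool, error) {
			_, err := os.Stat(path)
			if os.IsNotExist(err) {
				return false, nil // not there yet, keep polling
			}
			// nil err means the file exists and we are done;
			// any other err aborts the poll
			return err == nil, err
		})
}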
26 changes: 13 additions & 13 deletions cmd/manager/main_test.go
@@ -13,11 +13,11 @@ import (
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"

apmv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/apm/v1"
@@ -43,12 +43,12 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
log = logf.Log.WithName("test")
tests := []struct {
name string
runtimeObjs []runtime.Object
runtimeObjs []client.Object
assert func(c k8s.Client, t *testing.T)
}{
{
name: "don't gc secrets owned by a different Kind of resource",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
// secret referencing another resource (a Secret) that does not exist anymore
ownedSecret("ns", "secret-1", "ns", "a-secret", "Secret"),
},
@@ -59,7 +59,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "no Elasticsearch soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
&esv1.Elasticsearch{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "es"},
TypeMeta: metav1.TypeMeta{Kind: "Elasticsearch"},
@@ -74,7 +74,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "some Elasticsearch soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
// secret referencing ES that does not exist anymore
ownedSecret("ns", "secret-1", "ns", "es", "Elasticsearch"),
},
@@ -85,7 +85,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "no Kibana soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
&kbv1.Kibana{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "es"},
TypeMeta: metav1.TypeMeta{Kind: "Kibana"},
@@ -100,7 +100,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "some Kibana soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
// secret referencing Kibana that does not exist anymore
ownedSecret("ns", "secret-1", "ns", "es", "Kibana"),
},
@@ -111,7 +111,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "no ApmServer soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
&apmv1.ApmServer{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "es"},
TypeMeta: metav1.TypeMeta{Kind: "ApmServer"},
@@ -126,7 +126,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "some ApmServer soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
// secret referencing ApmServer that does not exist anymore
ownedSecret("ns", "secret-1", "ns", "es", "ApmServer"),
},
@@ -137,7 +137,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "no EnterpriseSearch soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
&entv1.EnterpriseSearch{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "es"},
TypeMeta: metav1.TypeMeta{Kind: "EnterpriseSearch"},
@@ -152,7 +152,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "some EnterpriseSearch soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
// secret referencing EnterpriseSearch that does not exist anymore
ownedSecret("ns", "secret-1", "ns", "es", "EnterpriseSearch"),
},
@@ -163,7 +163,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "no Beat soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
&beatv1beta1.Beat{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "es"},
TypeMeta: metav1.TypeMeta{Kind: "Beat"},
@@ -178,7 +178,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "some Beat soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
// secret referencing Beat that does not exist anymore
ownedSecret("ns", "secret-1", "ns", "es", "Beat"),
},
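
The []client.Object change in these fixtures lines up with controller-runtime's fake client builder, whose WithObjects option takes client.Object values. A minimal sketch using the fake builder directly (ECK's own k8s.NewFakeClient wrapper is not shown here):

package main

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// newFakeClient seeds a fake client with client.Object fixtures, the type
// the test slices above now use instead of []runtime.Object.
func newFakeClient(t *testing.T, objs []client.Object) client.Client {
	t.Helper()
	return fake.NewClientBuilder().WithObjects(objs...).Build()
}

func TestOwnedSecretFixture(t *testing.T) {
	objs := []client.Object{
		&corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "secret-1"}},
	}
	c := newFakeClient(t, objs)
	key := types.NamespacedName{Namespace: "ns", Name: "secret-1"}
	if err := c.Get(context.Background(), key, &corev1.Secret{}); err != nil {
		t.Fatal(err)
	}
}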
20 changes: 10 additions & 10 deletions config/crds/v1/all-crds.yaml
@@ -4396,8 +4396,8 @@ spec:
of compute resources required. If Requests is
omitted for a container, it defaults to Limits
if that is explicitly specified, otherwise to
an implementation-defined value. More info:
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
an implementation-defined value. Requests cannot
exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
selector:
@@ -4513,7 +4513,7 @@ spec:
volume is being resized then the Condition will
be set to 'ResizeStarted'.
items:
description: PersistentVolumeClaimCondition contails
description: PersistentVolumeClaimCondition contains
details about state of pvc
properties:
lastProbeTime:
@@ -4690,9 +4690,9 @@ spec:
the PDB for eviction. \n Additional policies may be added
in the future. Clients making eviction decisions should
disallow eviction of unhealthy pods if they encounter an
unrecognized policy in this field. \n This field is alpha-level.
unrecognized policy in this field. \n This field is beta-level.
The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy
is enabled (disabled by default)."
is enabled (enabled by default)."
type: string
type: object
type: object
@@ -6145,8 +6145,8 @@ spec:
of compute resources required. If Requests is
omitted for a container, it defaults to Limits
if that is explicitly specified, otherwise to
an implementation-defined value. More info:
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
an implementation-defined value. Requests cannot
exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
selector:
@@ -6262,7 +6262,7 @@ spec:
volume is being resized then the Condition will
be set to 'ResizeStarted'.
items:
description: PersistentVolumeClaimCondition contails
description: PersistentVolumeClaimCondition contains
details about state of pvc
properties:
lastProbeTime:
@@ -6441,9 +6441,9 @@ spec:
the PDB for eviction. \n Additional policies may be added
in the future. Clients making eviction decisions should
disallow eviction of unhealthy pods if they encounter an
unrecognized policy in this field. \n This field is alpha-level.
unrecognized policy in this field. \n This field is beta-level.
The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy
is enabled (disabled by default)."
is enabled (enabled by default)."
type: string
type: object
type: object
