Update module sigs.k8s.io/controller-runtime to v0.15.0 #6847

Merged · 15 commits · Jun 1, 2023
398 changes: 301 additions & 97 deletions NOTICE.txt

Large diffs are not rendered by default.

28 changes: 18 additions & 10 deletions cmd/manager/main.go
@@ -35,11 +35,11 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection/resourcelock"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
crwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

"github.com/elastic/cloud-on-k8s/v2/pkg/about"
@@ -545,7 +545,6 @@ func startOperator(ctx context.Context) error {
// Create a new Cmd to provide shared dependencies and start components
opts := ctrl.Options{
Scheme: clientgoscheme.Scheme,
CertDir: viper.GetString(operator.WebhookCertDirFlag),
LeaderElection: viper.GetBool(operator.EnableLeaderElection),
LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
LeaderElectionID: LeaderElectionLeaseName,
@@ -560,24 +559,30 @@ func startOperator(ctx context.Context) error {
log.Info("Operator configured to manage all namespaces")
case len(managedNamespaces) == 1 && managedNamespaces[0] == operatorNamespace:
log.Info("Operator configured to manage a single namespace", "namespace", managedNamespaces[0], "operator_namespace", operatorNamespace)
// opts.Namespace implicitly allows watching cluster-scoped resources (e.g. storage classes)
opts.Namespace = managedNamespaces[0]

default:
log.Info("Operator configured to manage multiple namespaces", "namespaces", managedNamespaces, "operator_namespace", operatorNamespace)
// The managed cache should always include the operator namespace so that we can work with operator-internal resources.
managedNamespaces = append(managedNamespaces, operatorNamespace)

opts.NewCache = cache.MultiNamespacedCacheBuilder(managedNamespaces)
}

// implicitly allows watching cluster-scoped resources (e.g. storage classes)
opts.Cache.Namespaces = managedNamespaces

// only expose prometheus metrics if provided a non-zero port
metricsPort := viper.GetInt(operator.MetricsPortFlag)
if metricsPort != 0 {
log.Info("Exposing Prometheus metrics on /metrics", "port", metricsPort)
}
opts.MetricsBindAddress = fmt.Sprintf(":%d", metricsPort) // 0 to disable

opts.Port = viper.GetInt(operator.WebhookPortFlag)
webhookPort := viper.GetInt(operator.WebhookPortFlag)
webhookCertDir := viper.GetString(operator.WebhookCertDirFlag)
opts.WebhookServer = crwebhook.NewServer(crwebhook.Options{
Port: webhookPort,
CertDir: webhookCertDir,
})

mgr, err := ctrl.NewManager(cfg, opts)
if err != nil {
log.Error(err, "Failed to create controller manager")
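
Note: the hunk above shows the two main controller-runtime v0.15 API moves. Namespace scoping moved from ctrl.Options.Namespace / cache.MultiNamespacedCacheBuilder onto the embedded cache options, and the webhook server is now constructed explicitly instead of through the Port and CertDir fields on ctrl.Options. A minimal sketch of the new shape, with illustrative values rather than the operator's real flags:

	package example

	import (
		ctrl "sigs.k8s.io/controller-runtime"
		crwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
	)

	func buildOptions(namespaces []string) ctrl.Options {
		opts := ctrl.Options{}
		// v0.14: opts.Namespace = ns, or opts.NewCache = cache.MultiNamespacedCacheBuilder(namespaces).
		// v0.15: the namespace list lives on the embedded cache.Options;
		// an empty list means all namespaces.
		opts.Cache.Namespaces = namespaces
		// v0.14: opts.Port and opts.CertDir on ctrl.Options.
		// v0.15: the webhook server is built up front.
		opts.WebhookServer = crwebhook.NewServer(crwebhook.Options{
			Port:    9443,         // illustrative
			CertDir: "/tmp/certs", // illustrative
		})
		return opts
	}
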
@@ -677,7 +682,7 @@ func startOperator(ctx context.Context) error {
}

if viper.GetBool(operator.EnableWebhookFlag) {
setupWebhook(ctx, mgr, params, clientset, exposedNodeLabels, managedNamespaces, tracer)
setupWebhook(ctx, mgr, params, webhookCertDir, clientset, exposedNodeLabels, managedNamespaces, tracer)
}

enforceRbacOnRefs := viper.GetBool(operator.EnforceRBACOnRefsFlag)
@@ -969,6 +974,7 @@ func setupWebhook(
ctx context.Context,
mgr manager.Manager,
params operator.Parameters,
webhookCertDir string,
clientset kubernetes.Interface,
exposedNodeLabels esvalidation.NodeLabels,
managedNamespaces []string,
@@ -1020,13 +1026,15 @@
// wait for the secret to be populated in the local filesystem before returning
interval := time.Second * 1
timeout := time.Second * 30
keyPath := filepath.Join(mgr.GetWebhookServer().CertDir, certificates.CertFileName)
keyPath := filepath.Join(webhookCertDir, certificates.CertFileName)
log.Info("Polling for the webhook certificate to be available", "path", keyPath)
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
//nolint:staticcheck
err := wait.PollImmediateWithContext(ctx, interval, timeout, func(_ context.Context) (bool, error) {
Review comment (Contributor): PollImmediate is deprecated, but due to an upstream mistake the replacement functions are also marked deprecated, hence the nolint:staticcheck to keep the Go linter from complaining.
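
For reference, the non-deprecated helper in apimachinery v0.27+ is wait.PollUntilContextTimeout. A sketch of what the same wait could look like (not what this PR does), reusing ctx, interval, timeout, and keyPath from the surrounding function:

	// Hedged sketch: `true` requests an immediate first tick,
	// matching the PollImmediate semantics used above.
	err := wait.PollUntilContextTimeout(ctx, interval, timeout, true,
		func(_ context.Context) (bool, error) {
			_, statErr := os.Stat(keyPath)
			if os.IsNotExist(statErr) {
				return false, nil // not there yet, keep polling
			}
			// done on success, abort on any other error
			return statErr == nil, statErr
		})
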

_, err := os.Stat(keyPath)
// err could be that the file does not exist, but also that permission was denied or something else
if os.IsNotExist(err) {
log.V(1).Info("Webhook certificate file not present on filesystem yet", "path", keyPath)

return false, nil
} else if err != nil {
log.Error(err, "Error checking if webhook secret path exists", "path", keyPath)
26 changes: 13 additions & 13 deletions cmd/manager/main_test.go
@@ -13,11 +13,11 @@ import (
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"

apmv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/apm/v1"
@@ -43,12 +43,12 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
log = logf.Log.WithName("test")
tests := []struct {
name string
runtimeObjs []runtime.Object
runtimeObjs []client.Object
assert func(c k8s.Client, t *testing.T)
}{
{
name: "don't gc secrets owned by a different Kind of resource",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
// secret referencing another resource (a Secret) that does not exist anymore
ownedSecret("ns", "secret-1", "ns", "a-secret", "Secret"),
},
@@ -59,7 +59,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "no Elasticsearch soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
&esv1.Elasticsearch{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "es"},
TypeMeta: metav1.TypeMeta{Kind: "Elasticsearch"},
@@ -74,7 +74,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "some Elasticsearch soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
// secret referencing ES that does not exist anymore
ownedSecret("ns", "secret-1", "ns", "es", "Elasticsearch"),
},
@@ -85,7 +85,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "no Kibana soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
&kbv1.Kibana{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "es"},
TypeMeta: metav1.TypeMeta{Kind: "Kibana"},
@@ -100,7 +100,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "some Kibana soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
// secret referencing Kibana that does not exist anymore
ownedSecret("ns", "secret-1", "ns", "es", "Kibana"),
},
@@ -111,7 +111,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "no ApmServer soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
&apmv1.ApmServer{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "es"},
TypeMeta: metav1.TypeMeta{Kind: "ApmServer"},
@@ -126,7 +126,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "some ApmServer soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
// secret referencing ApmServer that does not exist anymore
ownedSecret("ns", "secret-1", "ns", "es", "ApmServer"),
},
@@ -137,7 +137,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "no EnterpriseSearch soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
&entv1.EnterpriseSearch{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "es"},
TypeMeta: metav1.TypeMeta{Kind: "EnterpriseSearch"},
@@ -152,7 +152,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "some EnterpriseSearch soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
// secret referencing EnterpriseSearch that does not exist anymore
ownedSecret("ns", "secret-1", "ns", "es", "EnterpriseSearch"),
},
@@ -163,7 +163,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "no Beat soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
&beatv1beta1.Beat{
ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "es"},
TypeMeta: metav1.TypeMeta{Kind: "Beat"},
@@ -178,7 +178,7 @@ func Test_garbageCollectSoftOwnedSecrets(t *testing.T) {
},
{
name: "some Beat soft-owned secrets to gc",
runtimeObjs: []runtime.Object{
runtimeObjs: []client.Object{
// secret referencing Beat that does not exist anymore
ownedSecret("ns", "secret-1", "ns", "es", "Beat"),
},
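
Context for the type change in this file: controller-runtime's fake client builder takes client.Object (runtime.Object plus metav1.Object), which is presumably why these fixtures moved from []runtime.Object to []client.Object. A minimal sketch of the pattern, assuming the stock fake builder rather than this repo's k8s.NewFakeClient helper:

	package example

	import (
		corev1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"sigs.k8s.io/controller-runtime/pkg/client"
		"sigs.k8s.io/controller-runtime/pkg/client/fake"
	)

	// newTestClient seeds a fake client with typed objects; any struct with
	// ObjectMeta (e.g. *corev1.Secret) satisfies client.Object directly.
	func newTestClient(objs ...client.Object) client.Client {
		return fake.NewClientBuilder().WithObjects(objs...).Build()
	}

	var _ client.Client = newTestClient(&corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "secret-1"},
	})
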
20 changes: 10 additions & 10 deletions config/crds/v1/all-crds.yaml
@@ -4396,8 +4396,8 @@ spec:
of compute resources required. If Requests is
omitted for a container, it defaults to Limits
if that is explicitly specified, otherwise to
an implementation-defined value. More info:
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
an implementation-defined value. Requests cannot
exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
selector:
@@ -4513,7 +4513,7 @@ spec:
volume is being resized then the Condition will
be set to 'ResizeStarted'.
items:
description: PersistentVolumeClaimCondition contails
description: PersistentVolumeClaimCondition contains
details about state of pvc
properties:
lastProbeTime:
@@ -4690,9 +4690,9 @@ spec:
the PDB for eviction. \n Additional policies may be added
in the future. Clients making eviction decisions should
disallow eviction of unhealthy pods if they encounter an
unrecognized policy in this field. \n This field is alpha-level.
unrecognized policy in this field. \n This field is beta-level.
The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy
is enabled (disabled by default)."
is enabled (enabled by default)."
type: string
type: object
type: object
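
Aside: the description churn above tracks PDBUnhealthyPodEvictionPolicy graduating from alpha to beta (enabled by default as of Kubernetes 1.27). A hedged sketch of setting the field through the typed policy/v1 API, with illustrative names:

	package example

	import (
		policyv1 "k8s.io/api/policy/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/util/intstr"
	)

	func examplePDB() *policyv1.PodDisruptionBudget {
		// AlwaysAllow lets unhealthy (not-ready) pods be evicted even when
		// the budget is exhausted; the default behavior is IfHealthyBudget.
		policy := policyv1.AlwaysAllow
		minAvailable := intstr.FromInt(2)
		return &policyv1.PodDisruptionBudget{
			ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "es-default"},
			Spec: policyv1.PodDisruptionBudgetSpec{
				MinAvailable:               &minAvailable,
				UnhealthyPodEvictionPolicy: &policy,
			},
		}
	}
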
@@ -6145,8 +6145,8 @@ spec:
of compute resources required. If Requests is
omitted for a container, it defaults to Limits
if that is explicitly specified, otherwise to
an implementation-defined value. More info:
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
an implementation-defined value. Requests cannot
exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
type: object
selector:
@@ -6262,7 +6262,7 @@ spec:
volume is being resized then the Condition will
be set to 'ResizeStarted'.
items:
description: PersistentVolumeClaimCondition contails
description: PersistentVolumeClaimCondition contains
details about state of pvc
properties:
lastProbeTime:
@@ -6441,9 +6441,9 @@ spec:
the PDB for eviction. \n Additional policies may be added
in the future. Clients making eviction decisions should
disallow eviction of unhealthy pods if they encounter an
unrecognized policy in this field. \n This field is alpha-level.
unrecognized policy in this field. \n This field is beta-level.
The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy
is enabled (disabled by default)."
is enabled (enabled by default)."
type: string
type: object
type: object