Skip to content

Commit

Permalink
Validate Upgrade of the HCO CR's Workloads (#825)
Browse files Browse the repository at this point in the history
* Validate Upgrade of the HCO CR

Add a check to the webhook, to make sure that the workloads change is acceptable by the KubeVirt and CDI operators, by running a dry-run update of their CRs.

Signed-off-by: Nahshon Unna-Tsameret <nunnatsa@redhat.com>

* fix unit test; add happy case

Signed-off-by: Nahshon Unna-Tsameret <nunnatsa@redhat.com>

* gofmt

Signed-off-by: Nahshon Unna-Tsameret <nunnatsa@redhat.com>

* Register "update" to the admission webhook

Signed-off-by: Nahshon Unna-Tsameret <nunnatsa@redhat.com>

* prevent conflict

Signed-off-by: Nahshon Unna-Tsameret <nunnatsa@redhat.com>
  • Loading branch information
nunnatsa authored Sep 23, 2020
1 parent 2345c1f commit 52db560
Show file tree
Hide file tree
Showing 4 changed files with 268 additions and 10 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -2273,6 +2273,7 @@ spec:
operations:
- CREATE
- DELETE
- UPDATE
resources:
- hyperconvergeds
sideEffects: None
Expand Down
47 changes: 37 additions & 10 deletions pkg/apis/hco/v1beta1/hyperconverged_webhook.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@ package v1beta1
import (
"context"
"fmt"
kubevirtv1 "kubevirt.io/client-go/api/v1"
cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1"
"os"
"path/filepath"
"reflect"
Expand Down Expand Up @@ -115,22 +117,47 @@ func (r *HyperConverged) ValidateUpdate(old runtime.Object) error {
if !reflect.DeepEqual(
oldR.Spec.Workloads,
r.Spec.Workloads) {

opts := &client.UpdateOptions{DryRun: []string{metav1.DryRunAll}}
for _, obj := range []runtime.Object{
r.NewKubeVirt(),
r.NewCDI(),
&kubevirtv1.KubeVirt{},
&cdiv1beta1.CDI{},
} {

// TODO: try a dry-run update KubeVirt and CDI CR and refuse the update
// if one of them refuses the update due to existing workloads
err := hcoutil.EnsureDeleted(ctx, cli, obj, r.Name, hcolog, true)
if err != nil {
msg := "updating HCO nodeselectors is not allowed due to existing workload"
hcolog.Error(err, msg, "GVK", obj.GetObjectKind().GroupVersionKind())
return fmt.Errorf(msg)
if err := r.UpdateOperatorCr(ctx, obj, opts); err != nil {
return err
}
}
}

return nil
}

// UpdateOperatorCr updates the given operator CR (currently only KubeVirt and
// CDI are supported) with the Spec generated from this HyperConverged CR.
// When called with dry-run UpdateOptions, this lets the operator's own
// admission logic validate the workloads change without persisting anything.
//
// It returns an error if the CR cannot be read from the cluster or if the
// (dry-run) update is rejected.
func (r *HyperConverged) UpdateOperatorCr(ctx context.Context, exists runtime.Object, opts *client.UpdateOptions) error {
	// Fetch the current CR; a missing CR is surfaced to the caller as-is
	// (e.g. a NotFound error).
	if err := hcoutil.GetRuntimeObject(ctx, cli, exists, hcolog); err != nil {
		hcolog.Error(err, "failed to get object from kubernetes", "kind", exists.GetObjectKind())
		return err
	}

	// Overwrite the existing Spec with the one derived from the HCO CR.
	// Binding the switch variable avoids the redundant type re-assertion.
	switch existing := exists.(type) {
	case *kubevirtv1.KubeVirt:
		existing.Spec = r.NewKubeVirt().Spec

	case *cdiv1beta1.CDI:
		existing.Spec = r.NewCDI().Spec
	}

	if err := cli.Update(ctx, exists, opts); err != nil {
		hcolog.Error(err, "failed to dry-run update the object", "kind", exists.GetObjectKind())
		return err
	}

	hcolog.Info("dry-run update the object passed", "kind", exists.GetObjectKind())
	return nil
}

Expand Down
229 changes: 229 additions & 0 deletions pkg/apis/hco/v1beta1/hyperconverged_webhook_test.go
Original file line number Diff line number Diff line change
@@ -1,11 +1,22 @@
package v1beta1

import (
"context"
"errors"
sdkapi "github.com/kubevirt/controller-lifecycle-operator-sdk/pkg/sdk/api"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
kubevirtv1 "kubevirt.io/client-go/api/v1"
cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1"
"os"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

const (
Expand Down Expand Up @@ -41,4 +52,222 @@ var _ = Describe("Hyperconverged Webhooks", func() {

// TODO: add tests for update validation with existing workload
})

Context("validate update webhook", func() {
// Register the HCO, CDI and KubeVirt types once for all specs in this
// context, so the fake clients below can serve those CRs.
// NOTE(review): these Expect calls run during spec-tree construction, not
// inside an It node — confirm this is acceptable for the Ginkgo version in use.
s := scheme.Scheme
for _, f := range []func(*runtime.Scheme) error{
AddToScheme,
cdiv1beta1.AddToScheme,
kubevirtv1.AddToScheme,
} {
Expect(f(s)).To(BeNil())
}

// Each spec replaces the package-level `cli` with a fake client, so the
// webhook code under test talks to an in-memory store.
It("should return error if KV CR is missing", func() {
hco := &HyperConverged{}
// replace the real client with a mock
cli = fake.NewFakeClientWithScheme(s, hco, hco.NewCDI())

// Only CDI exists in the fake client; the KubeVirt CR is absent, so the
// dry-run update path should fail with a NotFound error.
newHco := &HyperConverged{
Spec: HyperConvergedSpec{
Infra: HyperConvergedConfig{
NodePlacement: newHyperConvergedConfig(),
},
Workloads: HyperConvergedConfig{
NodePlacement: newHyperConvergedConfig(),
},
},
}

err := newHco.ValidateUpdate(hco)
Expect(err).NotTo(BeNil())
Expect(apierrors.IsNotFound(err)).To(BeTrue())
})

It("should return error if dry-run update of KV CR returns error", func() {
hco := &HyperConverged{
Spec: HyperConvergedSpec{
Infra: HyperConvergedConfig{
NodePlacement: newHyperConvergedConfig(),
},
Workloads: HyperConvergedConfig{
NodePlacement: newHyperConvergedConfig(),
},
},
}
// replace the real client with a mock
// errorClient injects ErrFakeKvError on KubeVirt updates only.
c := fake.NewFakeClientWithScheme(s, hco, hco.NewKubeVirt(), hco.NewCDI())
cli = errorClient{c, kvUpdateFailure}

newHco := &HyperConverged{}
hco.DeepCopyInto(newHco)
// change something in workloads to trigger dry-run update
newHco.Spec.Workloads.NodePlacement.NodeSelector["a change"] = "Something else"

err := newHco.ValidateUpdate(hco)
Expect(err).NotTo(BeNil())
Expect(err).Should(Equal(ErrFakeKvError))
})

It("should return error if CDI CR is missing", func() {
hco := &HyperConverged{}
// replace the real client with a mock
// Only KubeVirt exists; the missing CDI CR should produce NotFound.
cli = fake.NewFakeClientWithScheme(s, hco, hco.NewKubeVirt())

newHco := &HyperConverged{
Spec: HyperConvergedSpec{
Infra: HyperConvergedConfig{
NodePlacement: newHyperConvergedConfig(),
},
Workloads: HyperConvergedConfig{
NodePlacement: newHyperConvergedConfig(),
},
},
}

err := newHco.ValidateUpdate(hco)
Expect(err).NotTo(BeNil())
Expect(apierrors.IsNotFound(err)).To(BeTrue())
})

It("should return error if dry-run update of CDI CR returns error", func() {
hco := &HyperConverged{
Spec: HyperConvergedSpec{
Infra: HyperConvergedConfig{
NodePlacement: newHyperConvergedConfig(),
},
Workloads: HyperConvergedConfig{
NodePlacement: newHyperConvergedConfig(),
},
},
}
// replace the real client with a mock
// errorClient injects ErrFakeCdiError on CDI updates only.
c := fake.NewFakeClientWithScheme(s, hco, hco.NewKubeVirt(), hco.NewCDI())
cli = errorClient{c, cdiUpdateFailure}

newHco := &HyperConverged{}
hco.DeepCopyInto(newHco)
// change something in workloads to trigger dry-run update
newHco.Spec.Workloads.NodePlacement.NodeSelector["a change"] = "Something else"

err := newHco.ValidateUpdate(hco)
Expect(err).NotTo(BeNil())
Expect(err).Should(Equal(ErrFakeCdiError))
})

It("should not return error if no different in Spec.Workloads", func() {
hco := &HyperConverged{
Spec: HyperConvergedSpec{
Infra: HyperConvergedConfig{
NodePlacement: newHyperConvergedConfig(),
},
Workloads: HyperConvergedConfig{
NodePlacement: newHyperConvergedConfig(),
},
},
}

// replace the real client with a mock
cli = fake.NewFakeClientWithScheme(s, hco)

newHco := &HyperConverged{}
hco.DeepCopyInto(newHco)
// Change only infra, but leave workloads as is
newHco.Spec.Infra.NodePlacement.NodeSelector["a change"] = "Something else"

// should not return an error, even when there are no CDI and KV CRs,
// because the dry-run update is skipped when Workloads is unchanged
err := newHco.ValidateUpdate(hco)
Expect(err).To(BeNil())
})

It("should not return error if dry-run update of CDI CR passes", func() {
hco := &HyperConverged{
Spec: HyperConvergedSpec{
Infra: HyperConvergedConfig{
NodePlacement: newHyperConvergedConfig(),
},
Workloads: HyperConvergedConfig{
NodePlacement: newHyperConvergedConfig(),
},
},
}
// replace the real client with a mock
// noFailure means the errorClient delegates straight to the fake client.
c := fake.NewFakeClientWithScheme(s, hco, hco.NewKubeVirt(), hco.NewCDI())
cli = errorClient{c, noFailure}

newHco := &HyperConverged{}
hco.DeepCopyInto(newHco)
// change something in workloads to trigger dry-run update
newHco.Spec.Workloads.NodePlacement.NodeSelector["a change"] = "Something else"

err := newHco.ValidateUpdate(hco)
Expect(err).To(BeNil())
})

})
})

// newHyperConvergedConfig returns a fixed NodePlacement fixture for the
// webhook tests: two node-selector entries, a required node affinity whose
// expressions and fields carry the same pair of requirements, and two
// tolerations with distinct toleration-seconds values.
func newHyperConvergedConfig() *sdkapi.NodePlacement {
	tolSeconds1 := int64(1)
	tolSeconds2 := int64(2)

	// MatchExpressions and MatchFields are intentionally identical, so build
	// each copy through the same small factory.
	makeRequirements := func() []corev1.NodeSelectorRequirement {
		return []corev1.NodeSelectorRequirement{
			{Key: "key1", Operator: "operator1", Values: []string{"value11, value12"}},
			{Key: "key2", Operator: "operator2", Values: []string{"value21, value22"}},
		}
	}

	term := corev1.NodeSelectorTerm{
		MatchExpressions: makeRequirements(),
		MatchFields:      makeRequirements(),
	}

	affinity := &corev1.Affinity{
		NodeAffinity: &corev1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
				NodeSelectorTerms: []corev1.NodeSelectorTerm{term},
			},
		},
	}

	return &sdkapi.NodePlacement{
		NodeSelector: map[string]string{
			"key1": "value1",
			"key2": "value2",
		},
		Affinity: affinity,
		Tolerations: []corev1.Toleration{
			{Key: "key1", Operator: "operator1", Value: "value1", Effect: "effect1", TolerationSeconds: &tolSeconds1},
			{Key: "key2", Operator: "operator2", Value: "value2", Effect: "effect2", TolerationSeconds: &tolSeconds2},
		},
	}
}

// fakeFailure selects which operator CR update the errorClient should reject.
type fakeFailure int

const (
// noFailure delegates every update to the wrapped client.
noFailure fakeFailure = iota
// kvUpdateFailure rejects updates of KubeVirt CRs with ErrFakeKvError.
kvUpdateFailure
// cdiUpdateFailure rejects updates of CDI CRs with ErrFakeCdiError.
cdiUpdateFailure
)

// errorClient wraps a controller-runtime client and injects a fake error for
// the operator CR selected by failure; all other calls pass through.
type errorClient struct {
client.Client
failure fakeFailure
}

// Sentinel errors used to simulate operator rejections of a dry-run update.
var (
ErrFakeKvError = errors.New("fake KubeVirt error")
ErrFakeCdiError = errors.New("fake CDI error")
)

// Update simulates a client update: it returns the configured fake error when
// the object's kind matches the injected failure mode, and otherwise
// delegates to the wrapped client unchanged.
func (ec errorClient) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error {
	if _, isKv := obj.(*kubevirtv1.KubeVirt); isKv && ec.failure == kvUpdateFailure {
		return ErrFakeKvError
	}

	if _, isCdi := obj.(*cdiv1beta1.CDI); isCdi && ec.failure == cdiUpdateFailure {
		return ErrFakeCdiError
	}

	return ec.Client.Update(ctx, obj, opts...)
}
1 change: 1 addition & 0 deletions pkg/components/components.go
Original file line number Diff line number Diff line change
Expand Up @@ -771,6 +771,7 @@ func GetCSVBase(name, namespace, displayName, description, image, replaces strin
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
admissionregistrationv1.Delete,
admissionregistrationv1.Update,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{util.APIVersionGroup},
Expand Down

0 comments on commit 52db560

Please sign in to comment.