From 60e2e568051d13d1209bf67b73c65802d6e9e1d9 Mon Sep 17 00:00:00 2001 From: Siyu Wang Date: Fri, 18 Mar 2022 11:03:49 +0800 Subject: [PATCH] Support in-place update containers with launch priority (#909) --- Makefile | 23 +- apis/apps/pub/inplace_update.go | 27 +- apis/apps/pub/zz_generated.deepcopy.go | 71 ++- .../cloneset/sync/cloneset_update.go | 2 +- .../cloneset/sync/cloneset_update_test.go | 38 +- .../container_launch_priority_controller.go | 59 +-- .../statefulset/stateful_set_control.go | 4 +- .../container_launch_prirotiy.go | 53 ++ pkg/util/containermeta/env_hash.go | 20 + pkg/util/inplaceupdate/inplace_update.go | 170 ++++--- .../inplaceupdate/inplace_update_defaults.go | 248 ++++++++-- .../inplace_update_defaults_test.go | 468 ++++++++++++++++++ pkg/util/inplaceupdate/inplace_update_test.go | 219 +++++++- test/e2e/apps/cloneset.go | 247 +++++++++ test/e2e/apps/types.go | 3 + 15 files changed, 1466 insertions(+), 186 deletions(-) create mode 100644 pkg/util/containerlaunchpriority/container_launch_prirotiy.go create mode 100644 pkg/util/inplaceupdate/inplace_update_defaults_test.go diff --git a/Makefile b/Makefile index a09c0c6b4c..9483f80393 100644 --- a/Makefile +++ b/Makefile @@ -81,9 +81,13 @@ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/confi $(KUSTOMIZE) build config/default | kubectl delete -f - -CONTROLLER_GEN = $(shell pwd)/bin/controller-gen-0.7 +CONTROLLER_GEN = $(shell pwd)/bin/controller-gen controller-gen: ## Download controller-gen locally if necessary. - $(call go-get-tool-with-alias,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0,controller-gen) +ifeq ("$(shell $(CONTROLLER_GEN) --version 2> /dev/null)", "Version: v0.7.0") +else + rm -rf $(CONTROLLER_GEN) + $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0) +endif KUSTOMIZE = $(shell pwd)/bin/kustomize kustomize: ## Download kustomize locally if necessary. 
@@ -110,18 +114,3 @@ GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ rm -rf $$TMP_DIR ;\ } endef - -# go-get-tool will 'go get' any package $2 and install it to $1. -PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) -define go-get-tool-with-alias -@[ -f $(1) ] || { \ -set -e ;\ -TMP_DIR=$$(mktemp -d) ;\ -cd $$TMP_DIR ;\ -go mod init tmp ;\ -echo "Downloading $(2)" ;\ -GOBIN=$(PROJECT_DIR)/bin go install $(2) ;\ -rm -rf $$TMP_DIR ;\ -mv $(PROJECT_DIR)/bin/$(3) $(1);\ -} -endef diff --git a/apis/apps/pub/inplace_update.go b/apis/apps/pub/inplace_update.go index b3bafc8ffa..8274473e21 100644 --- a/apis/apps/pub/inplace_update.go +++ b/apis/apps/pub/inplace_update.go @@ -52,7 +52,7 @@ type InPlaceUpdateState struct { // Revision is the updated revision hash. Revision string `json:"revision"` - // UpdateTimestamp is the time when the in-place update happens. + // UpdateTimestamp is the start time when the in-place update happens. UpdateTimestamp metav1.Time `json:"updateTimestamp"` // LastContainerStatuses records the before-in-place-update container statuses. It is a map from ContainerName @@ -61,6 +61,31 @@ type InPlaceUpdateState struct { // UpdateEnvFromMetadata indicates there are envs from annotations/labels that should be in-place update. UpdateEnvFromMetadata bool `json:"updateEnvFromMetadata,omitempty"` + + // NextContainerImages is the containers with lower priority that waiting for in-place update images in next batch. + NextContainerImages map[string]string `json:"nextContainerImages,omitempty"` + + // NextContainerRefMetadata is the containers with lower priority that waiting for in-place update labels/annotations in next batch. + NextContainerRefMetadata map[string]metav1.ObjectMeta `json:"nextContainerRefMetadata,omitempty"` + + // PreCheckBeforeNext is the pre-check that must pass before the next containers can be in-place update. 
+ PreCheckBeforeNext *InPlaceUpdatePreCheckBeforeNext `json:"preCheckBeforeNext,omitempty"` + + // ContainerBatchesRecord records the update batches that have patched in this revision. + ContainerBatchesRecord []InPlaceUpdateContainerBatch `json:"containerBatchesRecord,omitempty"` +} + +// InPlaceUpdatePreCheckBeforeNext contains the pre-check that must pass before the next containers can be in-place update. +type InPlaceUpdatePreCheckBeforeNext struct { + ContainersRequiredReady []string `json:"containersRequiredReady,omitempty"` +} + +// InPlaceUpdateContainerBatch indicates the timestamp and containers for a batch update +type InPlaceUpdateContainerBatch struct { + // Timestamp is the time for this update batch + Timestamp metav1.Time `json:"timestamp"` + // Containers is the name list of containers for this update batch + Containers []string `json:"containers"` } // InPlaceUpdateContainerStatus records the statuses of the container that are mainly used diff --git a/apis/apps/pub/zz_generated.deepcopy.go b/apis/apps/pub/zz_generated.deepcopy.go index b574406080..afa9250c4e 100644 --- a/apis/apps/pub/zz_generated.deepcopy.go +++ b/apis/apps/pub/zz_generated.deepcopy.go @@ -21,7 +21,30 @@ limitations under the License. package pub -import () +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InPlaceUpdateContainerBatch) DeepCopyInto(out *InPlaceUpdateContainerBatch) { + *out = *in + in.Timestamp.DeepCopyInto(&out.Timestamp) + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InPlaceUpdateContainerBatch. 
+func (in *InPlaceUpdateContainerBatch) DeepCopy() *InPlaceUpdateContainerBatch { + if in == nil { + return nil + } + out := new(InPlaceUpdateContainerBatch) + in.DeepCopyInto(out) + return out +} // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InPlaceUpdateContainerStatus) DeepCopyInto(out *InPlaceUpdateContainerStatus) { @@ -38,6 +61,26 @@ func (in *InPlaceUpdateContainerStatus) DeepCopy() *InPlaceUpdateContainerStatus return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InPlaceUpdatePreCheckBeforeNext) DeepCopyInto(out *InPlaceUpdatePreCheckBeforeNext) { + *out = *in + if in.ContainersRequiredReady != nil { + in, out := &in.ContainersRequiredReady, &out.ContainersRequiredReady + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InPlaceUpdatePreCheckBeforeNext. +func (in *InPlaceUpdatePreCheckBeforeNext) DeepCopy() *InPlaceUpdatePreCheckBeforeNext { + if in == nil { + return nil + } + out := new(InPlaceUpdatePreCheckBeforeNext) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *InPlaceUpdateState) DeepCopyInto(out *InPlaceUpdateState) { *out = *in @@ -49,6 +92,32 @@ func (in *InPlaceUpdateState) DeepCopyInto(out *InPlaceUpdateState) { (*out)[key] = val } } + if in.NextContainerImages != nil { + in, out := &in.NextContainerImages, &out.NextContainerImages + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NextContainerRefMetadata != nil { + in, out := &in.NextContainerRefMetadata, &out.NextContainerRefMetadata + *out = make(map[string]v1.ObjectMeta, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.PreCheckBeforeNext != nil { + in, out := &in.PreCheckBeforeNext, &out.PreCheckBeforeNext + *out = new(InPlaceUpdatePreCheckBeforeNext) + (*in).DeepCopyInto(*out) + } + if in.ContainerBatchesRecord != nil { + in, out := &in.ContainerBatchesRecord, &out.ContainerBatchesRecord + *out = make([]InPlaceUpdateContainerBatch, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InPlaceUpdateState. 
diff --git a/pkg/controller/cloneset/sync/cloneset_update.go b/pkg/controller/cloneset/sync/cloneset_update.go index 41171c3cc6..1e8375996a 100644 --- a/pkg/controller/cloneset/sync/cloneset_update.go +++ b/pkg/controller/cloneset/sync/cloneset_update.go @@ -170,7 +170,7 @@ func (c *realControl) refreshPodState(cs *appsv1alpha1.CloneSet, coreControl clo var state appspub.LifecycleStateType switch lifecycle.GetPodLifecycleState(pod) { case appspub.LifecycleStateUpdating: - if opts.CheckUpdateCompleted(pod) == nil { + if opts.CheckPodUpdateCompleted(pod) == nil { if cs.Spec.Lifecycle != nil && !lifecycle.IsPodHooked(cs.Spec.Lifecycle.InPlaceUpdate, pod) { state = appspub.LifecycleStateUpdated } else { diff --git a/pkg/controller/cloneset/sync/cloneset_update_test.go b/pkg/controller/cloneset/sync/cloneset_update_test.go index e48d4b81f2..edf6b0594c 100644 --- a/pkg/controller/cloneset/sync/cloneset_update_test.go +++ b/pkg/controller/cloneset/sync/cloneset_update_test.go @@ -39,6 +39,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/clock" intstrutil "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes/scheme" @@ -294,9 +295,10 @@ func TestUpdate(t *testing.T) { appspub.LifecycleStateKey: string(appspub.LifecycleStateUpdating), }, Annotations: map[string]string{appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{ - Revision: "rev_new", - UpdateTimestamp: now, - LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "image-id-xyz"}}, + Revision: "rev_new", + UpdateTimestamp: now, + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "image-id-xyz"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: now, Containers: []string{"c1"}}}, })}, ResourceVersion: "2", }, @@ -371,9 +373,8 @@ func TestUpdate(t 
*testing.T) { }, Annotations: map[string]string{ appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{ - Revision: "rev_new", - UpdateTimestamp: now, - LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "image-id-xyz"}}, + Revision: "rev_new", + UpdateTimestamp: now, }), appspub.InPlaceUpdateGraceKey: `{"revision":"rev_new","containerImages":{"c1":"foo2"},"graceSeconds":3630}`, }, @@ -421,9 +422,8 @@ func TestUpdate(t *testing.T) { Labels: map[string]string{apps.ControllerRevisionHashLabelKey: "rev_new", appsv1alpha1.CloneSetInstanceID: "id-0"}, Annotations: map[string]string{ appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{ - Revision: "rev_new", - UpdateTimestamp: metav1.NewTime(now.Add(-time.Second * 10)), - LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "image-id-xyz"}}, + Revision: "rev_new", + UpdateTimestamp: metav1.NewTime(now.Add(-time.Second * 10)), }), appspub.InPlaceUpdateGraceKey: `{"revision":"rev_new","containerImages":{"c1":"foo2"},"graceSeconds":3630}`, }, @@ -456,9 +456,8 @@ func TestUpdate(t *testing.T) { }, Annotations: map[string]string{ appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{ - Revision: "rev_new", - UpdateTimestamp: metav1.NewTime(now.Add(-time.Second * 10)), - LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "image-id-xyz"}}, + Revision: "rev_new", + UpdateTimestamp: metav1.NewTime(now.Add(-time.Second * 10)), }), appspub.InPlaceUpdateGraceKey: `{"revision":"rev_new","containerImages":{"c1":"foo2"},"graceSeconds":3630}`, }, @@ -505,9 +504,8 @@ func TestUpdate(t *testing.T) { Labels: map[string]string{apps.ControllerRevisionHashLabelKey: "rev_new", appsv1alpha1.CloneSetInstanceID: "id-0"}, Annotations: map[string]string{ appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{ - Revision: "rev_new", - UpdateTimestamp: 
metav1.NewTime(now.Add(-time.Minute)), - LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "image-id-xyz"}}, + Revision: "rev_new", + UpdateTimestamp: metav1.NewTime(now.Add(-time.Minute)), }), appspub.InPlaceUpdateGraceKey: `{"revision":"rev_new","containerImages":{"c1":"foo2"},"graceSeconds":3630}`, }, @@ -540,9 +538,10 @@ func TestUpdate(t *testing.T) { }, Annotations: map[string]string{ appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{ - Revision: "rev_new", - UpdateTimestamp: metav1.NewTime(now.Add(-time.Minute)), - LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "image-id-xyz"}}, + Revision: "rev_new", + UpdateTimestamp: metav1.NewTime(now.Add(-time.Minute)), + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "image-id-xyz"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: now, Containers: []string{"c1"}}}, }), }, ResourceVersion: "1", @@ -569,13 +568,14 @@ func TestUpdate(t *testing.T) { }, } + inplaceupdate.Clock = clock.NewFakeClock(now.Time) for _, mc := range cases { initialObjs := mc.initial() fakeClient := fake.NewClientBuilder().WithObjects(initialObjs...).Build() ctrl := &realControl{ fakeClient, lifecycle.New(fakeClient), - inplaceupdate.NewForTest(fakeClient, clonesetutils.RevisionAdapterImpl, func() metav1.Time { return now }), + inplaceupdate.New(fakeClient, clonesetutils.RevisionAdapterImpl), record.NewFakeRecorder(10), controllerfinder.NewControllerFinder(fakeClient), } diff --git a/pkg/controller/containerlaunchpriority/container_launch_priority_controller.go b/pkg/controller/containerlaunchpriority/container_launch_priority_controller.go index 61679f9618..ca23137430 100644 --- a/pkg/controller/containerlaunchpriority/container_launch_priority_controller.go +++ b/pkg/controller/containerlaunchpriority/container_launch_priority_controller.go @@ -22,8 +22,8 @@ import ( "strconv" 
"time" - appspub "github.com/openkruise/kruise/apis/apps/pub" "github.com/openkruise/kruise/pkg/util" + utilcontainerlaunchpriority "github.com/openkruise/kruise/pkg/util/containerlaunchpriority" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -43,14 +43,6 @@ import ( const ( concurrentReconciles = 4 - - // priority string parse starting index - priorityStartIndex = 2 - - // this design is to accept any integer value - MaxInt = int(^uint(0) >> 1) - MinInt = -MaxInt - 1 - minAcceptablePriority = MinInt ) func Add(mgr manager.Manager) error { @@ -77,12 +69,12 @@ func add(mgr manager.Manager, r *ReconcileContainerLaunchPriority) error { CreateFunc: func(e event.CreateEvent) bool { pod := e.Object.(*v1.Pod) _, containersReady := podutil.GetPodCondition(&pod.Status, v1.ContainersReady) - return r.validate(pod) && containersReady != nil && containersReady.Status != v1.ConditionTrue + return utilcontainerlaunchpriority.ExistsPriorities(pod) && containersReady != nil && containersReady.Status != v1.ConditionTrue }, UpdateFunc: func(e event.UpdateEvent) bool { pod := e.ObjectNew.(*v1.Pod) _, containersReady := podutil.GetPodCondition(&pod.Status, v1.ContainersReady) - return r.validate(pod) && containersReady != nil && containersReady.Status != v1.ConditionTrue + return utilcontainerlaunchpriority.ExistsPriorities(pod) && containersReady != nil && containersReady.Status != v1.ConditionTrue }, DeleteFunc: func(e event.DeleteEvent) bool { return false @@ -144,10 +136,12 @@ func (r *ReconcileContainerLaunchPriority) Reconcile(_ context.Context, request Name: pod.Name, UID: pod.UID, }) + klog.V(4).Infof("Creating ConfigMap %s for Pod %s/%s", barrier.Name, pod.Namespace, pod.Name) temErr := r.Client.Create(context.TODO(), barrier) - if temErr != nil { + if temErr != nil && !errors.IsAlreadyExists(temErr) { return reconcile.Result{}, temErr } + return reconcile.Result{Requeue: true}, nil } if err != nil { return 
reconcile.Result{}, err @@ -156,8 +150,11 @@ func (r *ReconcileContainerLaunchPriority) Reconcile(_ context.Context, request // set next starting containers _, containersReady := podutil.GetPodCondition(&pod.Status, v1.ContainersReady) if containersReady != nil && containersReady.Status != v1.ConditionTrue { - var patchKey = r.findNextPatchKey(pod) - key := "p_" + strconv.Itoa(patchKey) + patchKey := r.findNextPatchKey(pod) + if patchKey == nil { + return reconcile.Result{}, nil + } + key := "p_" + strconv.Itoa(*patchKey) if err = r.patchOnKeyNotExist(barrier, key); err != nil { return reconcile.Result{}, err } @@ -166,21 +163,8 @@ func (r *ReconcileContainerLaunchPriority) Reconcile(_ context.Context, request return reconcile.Result{}, nil } -func (r *ReconcileContainerLaunchPriority) validate(pod *v1.Pod) bool { - if len(pod.Spec.Containers) == 0 { - return false - } - // Since priorityBarrier env is created by webhook on a all or none basis, we only need to check first container's env. 
- for _, v := range pod.Spec.Containers[0].Env { - if v.Name == appspub.ContainerLaunchBarrierEnvName { - return true - } - } - return false -} - -func (r *ReconcileContainerLaunchPriority) findNextPatchKey(pod *v1.Pod) int { - var priority = minAcceptablePriority +func (r *ReconcileContainerLaunchPriority) findNextPatchKey(pod *v1.Pod) *int { + var priority *int var containerPendingSet = make(map[string]bool) for _, status := range pod.Status.ContainerStatuses { if status.Ready { @@ -190,8 +174,11 @@ func (r *ReconcileContainerLaunchPriority) findNextPatchKey(pod *v1.Pod) int { } for _, c := range pod.Spec.Containers { if _, ok := containerPendingSet[c.Name]; ok { - p := r.getLaunchPriority(c) - if p > priority { + p := utilcontainerlaunchpriority.GetContainerPriority(&c) + if p == nil { + continue + } + if priority == nil || *p > *priority { priority = p } } @@ -199,16 +186,6 @@ func (r *ReconcileContainerLaunchPriority) findNextPatchKey(pod *v1.Pod) int { return priority } -func (r *ReconcileContainerLaunchPriority) getLaunchPriority(c v1.Container) int { - for _, e := range c.Env { - if e.Name == appspub.ContainerLaunchBarrierEnvName { - p, _ := strconv.Atoi(e.ValueFrom.ConfigMapKeyRef.Key[priorityStartIndex:]) - return p - } - } - return minAcceptablePriority -} - func (r *ReconcileContainerLaunchPriority) patchOnKeyNotExist(barrier *v1.ConfigMap, key string) error { if _, ok := barrier.Data[key]; !ok { body := fmt.Sprintf( diff --git a/pkg/controller/statefulset/stateful_set_control.go b/pkg/controller/statefulset/stateful_set_control.go index 5969e1b19e..fc4614fbf2 100644 --- a/pkg/controller/statefulset/stateful_set_control.go +++ b/pkg/controller/statefulset/stateful_set_control.go @@ -712,7 +712,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet( opts = inplaceupdate.SetOptionsDefaults(opts) if getPodRevision(replicas[target]) != updateRevision.Name || !isHealthy(replicas[target]) { unavailablePods = append(unavailablePods, 
replicas[target].Name) - } else if completedErr := opts.CheckUpdateCompleted(replicas[target]); completedErr != nil { + } else if completedErr := opts.CheckPodUpdateCompleted(replicas[target]); completedErr != nil { klog.V(4).Infof("StatefulSet %s/%s check Pod %s in-place update not-ready: %v", set.Namespace, set.Name, @@ -784,7 +784,7 @@ func (ssc *defaultStatefulSetControl) refreshPodState(set *appsv1beta1.StatefulS var state appspub.LifecycleStateType switch lifecycle.GetPodLifecycleState(pod) { case appspub.LifecycleStateUpdating: - if opts.CheckUpdateCompleted(pod) == nil { + if opts.CheckPodUpdateCompleted(pod) == nil { if set.Spec.Lifecycle != nil && !lifecycle.IsPodHooked(set.Spec.Lifecycle.InPlaceUpdate, pod) { state = appspub.LifecycleStateUpdated } else { diff --git a/pkg/util/containerlaunchpriority/container_launch_prirotiy.go b/pkg/util/containerlaunchpriority/container_launch_prirotiy.go new file mode 100644 index 0000000000..cd7a63bd1d --- /dev/null +++ b/pkg/util/containerlaunchpriority/container_launch_prirotiy.go @@ -0,0 +1,53 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package containerlaunchpriority + +import ( + "strconv" + + appspub "github.com/openkruise/kruise/apis/apps/pub" + v1 "k8s.io/api/core/v1" +) + +const ( + // priority string parse starting index + priorityStartIndex = 2 +) + +func ExistsPriorities(pod *v1.Pod) bool { + if len(pod.Spec.Containers) == 0 { + return false + } + for i := range pod.Spec.Containers { + for _, env := range pod.Spec.Containers[i].Env { + if env.Name == appspub.ContainerLaunchBarrierEnvName { + return true + } + } + } + return false +} + +func GetContainerPriority(c *v1.Container) *int { + for _, e := range c.Env { + if e.Name == appspub.ContainerLaunchBarrierEnvName { + p, _ := strconv.Atoi(e.ValueFrom.ConfigMapKeyRef.Key[priorityStartIndex:]) + return &p + } + } + return nil +} diff --git a/pkg/util/containermeta/env_hash.go b/pkg/util/containermeta/env_hash.go index 95d96c66df..1b880f2ad8 100644 --- a/pkg/util/containermeta/env_hash.go +++ b/pkg/util/containermeta/env_hash.go @@ -124,3 +124,23 @@ func hashEnvs(envs []v1.EnvVar) uint64 { hashutil.DeepHashObject(hash, envsJSON) return uint64(hash.Sum32()) } + +func IsContainerReferenceToMeta(c *v1.Container, path, key string) bool { + for i := range c.Env { + if c.Env[i].Value != "" || c.Env[i].ValueFrom == nil || c.Env[i].ValueFrom.FieldRef == nil { + continue + } else if excludeEnvs.Has(c.Env[i].Name) { + continue + } + + reqPath, subscript, ok := fieldpath.SplitMaybeSubscriptedPath(c.Env[i].ValueFrom.FieldRef.FieldPath) + if !ok { + continue + } + + if reqPath == path && subscript == key { + return true + } + } + return false +} diff --git a/pkg/util/inplaceupdate/inplace_update.go b/pkg/util/inplaceupdate/inplace_update.go index 1f60a941c9..930170a5c7 100644 --- a/pkg/util/inplaceupdate/inplace_update.go +++ b/pkg/util/inplaceupdate/inplace_update.go @@ -24,11 +24,13 @@ import ( "time" appspub "github.com/openkruise/kruise/apis/apps/pub" + "github.com/openkruise/kruise/pkg/util" "github.com/openkruise/kruise/pkg/util/podadapter" 
"github.com/openkruise/kruise/pkg/util/revisionadapter" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/clock" coreinformers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/retry" @@ -37,7 +39,10 @@ import ( ) var ( - inPlaceUpdatePatchRexp = regexp.MustCompile("^/spec/containers/([0-9]+)/image$") + containerImagePatchRexp = regexp.MustCompile("^/spec/containers/([0-9]+)/image$") + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") + + Clock clock.Clock = clock.RealClock{} ) type RefreshResult struct { @@ -56,28 +61,29 @@ type UpdateOptions struct { GracePeriodSeconds int32 AdditionalFuncs []func(*v1.Pod) - CalculateSpec func(oldRevision, newRevision *apps.ControllerRevision, opts *UpdateOptions) *UpdateSpec - PatchSpecToPod func(pod *v1.Pod, spec *UpdateSpec) (*v1.Pod, error) - CheckUpdateCompleted func(pod *v1.Pod) error - GetRevision func(rev *apps.ControllerRevision) string + CalculateSpec func(oldRevision, newRevision *apps.ControllerRevision, opts *UpdateOptions) *UpdateSpec + PatchSpecToPod func(pod *v1.Pod, spec *UpdateSpec, state *appspub.InPlaceUpdateState) (*v1.Pod, error) + CheckPodUpdateCompleted func(pod *v1.Pod) error + CheckContainersUpdateCompleted func(pod *v1.Pod, state *appspub.InPlaceUpdateState) error + GetRevision func(rev *apps.ControllerRevision) string } // Interface for managing pods in-place update. type Interface interface { - Refresh(pod *v1.Pod, opts *UpdateOptions) RefreshResult CanUpdateInPlace(oldRevision, newRevision *apps.ControllerRevision, opts *UpdateOptions) bool Update(pod *v1.Pod, oldRevision, newRevision *apps.ControllerRevision, opts *UpdateOptions) UpdateResult + Refresh(pod *v1.Pod, opts *UpdateOptions) RefreshResult } // UpdateSpec records the images of containers which need to in-place update. 
type UpdateSpec struct { - Revision string `json:"revision"` - Annotations map[string]string `json:"annotations,omitempty"` + Revision string `json:"revision"` - ContainerImages map[string]string `json:"containerImages,omitempty"` - MetaDataPatch []byte `json:"metaDataPatch,omitempty"` - UpdateEnvFromMetadata bool `json:"updateEnvFromMetadata,omitempty"` - GraceSeconds int32 `json:"graceSeconds,omitempty"` + ContainerImages map[string]string `json:"containerImages,omitempty"` + ContainerRefMetadata map[string]metav1.ObjectMeta `json:"containerRefMetadata,omitempty"` + MetaDataPatch []byte `json:"metaDataPatch,omitempty"` + UpdateEnvFromMetadata bool `json:"updateEnvFromMetadata,omitempty"` + GraceSeconds int32 `json:"graceSeconds,omitempty"` OldTemplate *v1.PodTemplateSpec `json:"oldTemplate,omitempty"` NewTemplate *v1.PodTemplateSpec `json:"newTemplate,omitempty"` @@ -86,68 +92,73 @@ type UpdateSpec struct { type realControl struct { podAdapter podadapter.Adapter revisionAdapter revisionadapter.Interface - - // just for test - now func() metav1.Time } func New(c client.Client, revisionAdapter revisionadapter.Interface) Interface { - return &realControl{podAdapter: &podadapter.AdapterRuntimeClient{Client: c}, revisionAdapter: revisionAdapter, now: metav1.Now} + return &realControl{podAdapter: &podadapter.AdapterRuntimeClient{Client: c}, revisionAdapter: revisionAdapter} } func NewForTypedClient(c clientset.Interface, revisionAdapter revisionadapter.Interface) Interface { - return &realControl{podAdapter: &podadapter.AdapterTypedClient{Client: c}, revisionAdapter: revisionAdapter, now: metav1.Now} + return &realControl{podAdapter: &podadapter.AdapterTypedClient{Client: c}, revisionAdapter: revisionAdapter} } func NewForInformer(informer coreinformers.PodInformer, revisionAdapter revisionadapter.Interface) Interface { - return &realControl{podAdapter: &podadapter.AdapterInformer{PodInformer: informer}, revisionAdapter: revisionAdapter, now: metav1.Now} -} - -func 
NewForTest(c client.Client, revisionAdapter revisionadapter.Interface, now func() metav1.Time) Interface { - return &realControl{podAdapter: &podadapter.AdapterRuntimeClient{Client: c}, revisionAdapter: revisionAdapter, now: now} + return &realControl{podAdapter: &podadapter.AdapterInformer{PodInformer: informer}, revisionAdapter: revisionAdapter} } func (c *realControl) Refresh(pod *v1.Pod, opts *UpdateOptions) RefreshResult { opts = SetOptionsDefaults(opts) - if err := c.refreshCondition(pod, opts); err != nil { - return RefreshResult{RefreshErr: err} - } - - var delayDuration time.Duration - var err error + // check if it is in grace period if gracePeriod, _ := appspub.GetInPlaceUpdateGrace(pod); gracePeriod != "" { - if delayDuration, err = c.finishGracePeriod(pod, opts); err != nil { + delayDuration, err := c.finishGracePeriod(pod, opts) + if err != nil { return RefreshResult{RefreshErr: err} } + return RefreshResult{DelayDuration: delayDuration} } - return RefreshResult{DelayDuration: delayDuration} -} + if stateStr, ok := appspub.GetInPlaceUpdateState(pod); ok { + state := appspub.InPlaceUpdateState{} + if err := json.Unmarshal([]byte(stateStr), &state); err != nil { + return RefreshResult{RefreshErr: err} + } -func (c *realControl) refreshCondition(pod *v1.Pod, opts *UpdateOptions) error { - // no need to update condition because of no readiness-gate - if !containsReadinessGate(pod) { - return nil - } + // check in-place updating has not completed yet + if checkErr := opts.CheckContainersUpdateCompleted(pod, &state); checkErr != nil { + klog.V(6).Infof("Check Pod %s/%s in-place update not completed yet: %v", pod.Namespace, pod.Name, checkErr) + return RefreshResult{} + } - // in-place updating has not completed yet - if checkErr := opts.CheckUpdateCompleted(pod); checkErr != nil { - klog.V(6).Infof("Check Pod %s/%s in-place update not completed yet: %v", pod.Namespace, pod.Name, checkErr) - return nil + // check if there are containers with lower-priority 
that have to in-place update in next batch + if len(state.NextContainerImages) > 0 || len(state.NextContainerRefMetadata) > 0 { + + // pre-check the previous updated containers + if checkErr := doPreCheckBeforeNext(pod, state.PreCheckBeforeNext); checkErr != nil { + klog.V(5).Infof("Pod %s/%s in-place update pre-check not passed: %v", pod.Namespace, pod.Name, checkErr) + return RefreshResult{} + } + + // do update the next containers + if updated, err := c.updateNextBatch(pod, opts); err != nil { + return RefreshResult{RefreshErr: err} + } else if updated { + return RefreshResult{} + } + } } - // already ready - if existingCondition := GetCondition(pod); existingCondition != nil && existingCondition.Status == v1.ConditionTrue { - return nil + if !containsReadinessGate(pod) { + return RefreshResult{} } newCondition := v1.PodCondition{ Type: appspub.InPlaceUpdateReady, Status: v1.ConditionTrue, - LastTransitionTime: c.now(), + LastTransitionTime: metav1.NewTime(Clock.Now()), } - return c.updateCondition(pod, newCondition) + err := c.updateCondition(pod, newCondition) + return RefreshResult{RefreshErr: err} } func (c *realControl) updateCondition(pod *v1.Pod, condition v1.PodCondition) error { @@ -202,7 +213,7 @@ func (c *realControl) finishGracePeriod(pod *v1.Pod, opts *UpdateOptions) (time. return nil } - if clone, err = opts.PatchSpecToPod(clone, &spec); err != nil { + if clone, err = opts.PatchSpecToPod(clone, &spec, &updateState); err != nil { return err } appspub.RemoveInPlaceUpdateGrace(clone) @@ -215,6 +226,42 @@ func (c *realControl) finishGracePeriod(pod *v1.Pod, opts *UpdateOptions) (time. 
return delayDuration, err } +func (c *realControl) updateNextBatch(pod *v1.Pod, opts *UpdateOptions) (bool, error) { + var updated bool + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + updated = false + clone, err := c.podAdapter.GetPod(pod.Namespace, pod.Name) + if err != nil { + return err + } + + state := appspub.InPlaceUpdateState{} + if stateStr, ok := appspub.GetInPlaceUpdateState(pod); !ok { + return nil + } else if err := json.Unmarshal([]byte(stateStr), &state); err != nil { + return err + } + + if len(state.NextContainerImages) == 0 && len(state.NextContainerRefMetadata) == 0 { + return nil + } + + spec := UpdateSpec{ + ContainerImages: state.NextContainerImages, + ContainerRefMetadata: state.NextContainerRefMetadata, + UpdateEnvFromMetadata: state.UpdateEnvFromMetadata, + } + if clone, err = opts.PatchSpecToPod(clone, &spec, &state); err != nil { + return err + } + + updated = true + _, err = c.podAdapter.UpdatePod(clone) + return err + }) + return updated, err +} + func (c *realControl) CanUpdateInPlace(oldRevision, newRevision *apps.ControllerRevision, opts *UpdateOptions) bool { opts = SetOptionsDefaults(opts) return opts.CalculateSpec(oldRevision, newRevision, opts) != nil @@ -235,7 +282,7 @@ func (c *realControl) Update(pod *v1.Pod, oldRevision, newRevision *apps.Control if containsReadinessGate(pod) { newCondition := v1.PodCondition{ Type: appspub.InPlaceUpdateReady, - LastTransitionTime: c.now(), + LastTransitionTime: metav1.NewTime(Clock.Now()), Status: v1.ConditionFalse, Reason: "StartInPlaceUpdate", } @@ -274,26 +321,17 @@ func (c *realControl) updatePodInPlace(pod *v1.Pod, spec *UpdateSpec, opts *Upda f(clone) } - // record old containerStatuses inPlaceUpdateState := appspub.InPlaceUpdateState{ Revision: spec.Revision, - UpdateTimestamp: c.now(), - LastContainerStatuses: make(map[string]appspub.InPlaceUpdateContainerStatus, len(spec.ContainerImages)), + UpdateTimestamp: metav1.NewTime(Clock.Now()), UpdateEnvFromMetadata: 
spec.UpdateEnvFromMetadata, } - for _, c := range clone.Status.ContainerStatuses { - if _, ok := spec.ContainerImages[c.Name]; ok { - inPlaceUpdateState.LastContainerStatuses[c.Name] = appspub.InPlaceUpdateContainerStatus{ - ImageID: c.ImageID, - } - } - } inPlaceUpdateStateJSON, _ := json.Marshal(inPlaceUpdateState) clone.Annotations[appspub.InPlaceUpdateStateKey] = string(inPlaceUpdateStateJSON) delete(clone.Annotations, appspub.InPlaceUpdateStateKeyOld) if spec.GraceSeconds <= 0 { - if clone, err = opts.PatchSpecToPod(clone, spec); err != nil { + if clone, err = opts.PatchSpecToPod(clone, spec, &inPlaceUpdateState); err != nil { return err } appspub.RemoveInPlaceUpdateGrace(clone) @@ -415,3 +453,19 @@ func roundupSeconds(d time.Duration) time.Duration { } return (d/time.Second + 1) * time.Second } + +func doPreCheckBeforeNext(pod *v1.Pod, preCheck *appspub.InPlaceUpdatePreCheckBeforeNext) error { + if preCheck == nil { + return nil + } + for _, cName := range preCheck.ContainersRequiredReady { + cStatus := util.GetContainerStatus(cName, pod) + if cStatus == nil { + return fmt.Errorf("not found container %s in pod status", cName) + } + if !cStatus.Ready { + return fmt.Errorf("waiting container %s to be ready", cName) + } + } + return nil +} diff --git a/pkg/util/inplaceupdate/inplace_update_defaults.go b/pkg/util/inplaceupdate/inplace_update_defaults.go index 17dded464f..223e79f5fc 100644 --- a/pkg/util/inplaceupdate/inplace_update_defaults.go +++ b/pkg/util/inplaceupdate/inplace_update_defaults.go @@ -25,10 +25,14 @@ import ( "github.com/appscode/jsonpatch" appspub "github.com/openkruise/kruise/apis/apps/pub" "github.com/openkruise/kruise/pkg/features" + "github.com/openkruise/kruise/pkg/util" + utilcontainerlaunchpriority "github.com/openkruise/kruise/pkg/util/containerlaunchpriority" utilcontainermeta "github.com/openkruise/kruise/pkg/util/containermeta" utilfeature "github.com/openkruise/kruise/pkg/util/feature" apps "k8s.io/api/apps/v1" v1 
"k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/klog/v2" kubeletcontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -47,15 +51,30 @@ func SetOptionsDefaults(opts *UpdateOptions) *UpdateOptions { opts.PatchSpecToPod = defaultPatchUpdateSpecToPod } - if opts.CheckUpdateCompleted == nil { - opts.CheckUpdateCompleted = DefaultCheckInPlaceUpdateCompleted + if opts.CheckPodUpdateCompleted == nil { + opts.CheckPodUpdateCompleted = DefaultCheckInPlaceUpdateCompleted + } + + if opts.CheckContainersUpdateCompleted == nil { + opts.CheckContainersUpdateCompleted = defaultCheckContainersInPlaceUpdateCompleted } return opts } // defaultPatchUpdateSpecToPod returns new pod that merges spec into old pod -func defaultPatchUpdateSpecToPod(pod *v1.Pod, spec *UpdateSpec) (*v1.Pod, error) { +func defaultPatchUpdateSpecToPod(pod *v1.Pod, spec *UpdateSpec, state *appspub.InPlaceUpdateState) (*v1.Pod, error) { + + klog.V(5).Infof("Begin to in-place update pod %s/%s with update spec %v, state %v", pod.Namespace, pod.Name, util.DumpJSON(spec), util.DumpJSON(state)) + if pod.Labels == nil { + pod.Labels = make(map[string]string) + } + if pod.Annotations == nil { + pod.Annotations = make(map[string]string) + } + state.NextContainerImages = make(map[string]string) + state.NextContainerRefMetadata = make(map[string]metav1.ObjectMeta) + if spec.MetaDataPatch != nil { cloneBytes, _ := json.Marshal(pod) modified, err := strategicpatch.StrategicMergePatch(cloneBytes, spec.MetaDataPatch, &v1.Pod{}) @@ -68,14 +87,137 @@ func defaultPatchUpdateSpecToPod(pod *v1.Pod, spec *UpdateSpec) (*v1.Pod, error) } } + // prepare containers that should update this time and next time, according to their priorities + containersToUpdate := sets.NewString() + var highestPriority *int + var containersWithHighestPriority []string + for i := range pod.Spec.Containers { + c := &pod.Spec.Containers[i] + _, 
existImage := spec.ContainerImages[c.Name] + _, existMetadata := spec.ContainerRefMetadata[c.Name] + if !existImage && !existMetadata { + continue + } + priority := utilcontainerlaunchpriority.GetContainerPriority(c) + if priority == nil { + containersToUpdate.Insert(c.Name) + } else if highestPriority == nil || *highestPriority < *priority { + highestPriority = priority + containersWithHighestPriority = []string{c.Name} + } else if *highestPriority == *priority { + containersWithHighestPriority = append(containersWithHighestPriority, c.Name) + } + } + for _, cName := range containersWithHighestPriority { + containersToUpdate.Insert(cName) + } + addMetadataSharedContainersToUpdate(pod, containersToUpdate, spec.ContainerRefMetadata) + + // DO NOT modify the fields in spec for it may have to retry on conflict in updatePodInPlace + + // update images and record current imageIDs for the containers to update + containersImageChanged := sets.NewString() for i := range pod.Spec.Containers { - if newImage, ok := spec.ContainerImages[pod.Spec.Containers[i].Name]; ok { + c := &pod.Spec.Containers[i] + newImage, exists := spec.ContainerImages[c.Name] + if !exists { + continue + } + if containersToUpdate.Has(c.Name) { pod.Spec.Containers[i].Image = newImage + containersImageChanged.Insert(c.Name) + } else { + state.NextContainerImages[c.Name] = newImage } } + for _, c := range pod.Status.ContainerStatuses { + if containersImageChanged.Has(c.Name) { + if state.LastContainerStatuses == nil { + state.LastContainerStatuses = map[string]appspub.InPlaceUpdateContainerStatus{} + } + state.LastContainerStatuses[c.Name] = appspub.InPlaceUpdateContainerStatus{ImageID: c.ImageID} + } + } + + // update annotations and labels for the containers to update + for cName, objMeta := range spec.ContainerRefMetadata { + if containersToUpdate.Has(cName) { + for k, v := range objMeta.Labels { + pod.Labels[k] = v + } + for k, v := range objMeta.Annotations { + pod.Annotations[k] = v + } + } else { + 
state.NextContainerRefMetadata[cName] = objMeta + } + } + + // add the containers that update this time into PreCheckBeforeNext, so that next containers can only + // start to update when these containers have updated ready + // TODO: currently we only support ContainersRequiredReady, not sure if we have to add ContainersPreferredReady in future + if len(state.NextContainerImages) > 0 || len(state.NextContainerRefMetadata) > 0 { + state.PreCheckBeforeNext = &appspub.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: containersToUpdate.List()} + } else { + state.PreCheckBeforeNext = nil + } + + state.ContainerBatchesRecord = append(state.ContainerBatchesRecord, appspub.InPlaceUpdateContainerBatch{ + Timestamp: metav1.NewTime(Clock.Now()), + Containers: containersToUpdate.List(), + }) + + klog.V(5).Infof("Decide to in-place update pod %s/%s with state %v", pod.Namespace, pod.Name, util.DumpJSON(state)) + + inPlaceUpdateStateJSON, _ := json.Marshal(state) + pod.Annotations[appspub.InPlaceUpdateStateKey] = string(inPlaceUpdateStateJSON) return pod, nil } +func addMetadataSharedContainersToUpdate(pod *v1.Pod, containersToUpdate sets.String, containerRefMetadata map[string]metav1.ObjectMeta) { + labelsToUpdate := sets.NewString() + annotationsToUpdate := sets.NewString() + newToUpdate := containersToUpdate + // We need a for-loop to merge the indirect shared containers + for newToUpdate.Len() > 0 { + for _, cName := range newToUpdate.UnsortedList() { + if objMeta, exists := containerRefMetadata[cName]; exists { + for key := range objMeta.Labels { + labelsToUpdate.Insert(key) + } + for key := range objMeta.Annotations { + annotationsToUpdate.Insert(key) + } + } + } + newToUpdate = sets.NewString() + + for cName, objMeta := range containerRefMetadata { + if containersToUpdate.Has(cName) { + continue + } + for _, key := range labelsToUpdate.UnsortedList() { + if _, exists := objMeta.Labels[key]; exists { + klog.Warningf("Has to in-place update container %s with lower 
priority in Pod %s/%s, for the label %s it shared has changed", + cName, pod.Namespace, pod.Name, key) + containersToUpdate.Insert(cName) + newToUpdate.Insert(cName) + break + } + } + for _, key := range annotationsToUpdate.UnsortedList() { + if _, exists := objMeta.Annotations[key]; exists { + klog.Warningf("Has to in-place update container %s with lower priority in Pod %s/%s, for the annotation %s it shared has changed", + cName, pod.Namespace, pod.Name, key) + containersToUpdate.Insert(cName) + newToUpdate.Insert(cName) + break + } + } + } + } +} + // defaultCalculateInPlaceUpdateSpec calculates diff between old and update revisions. // If the diff just contains replace operation of spec.containers[x].image, it will returns an UpdateSpec. // Otherwise, it returns nil which means can not use in-place update. @@ -100,35 +242,85 @@ func defaultCalculateInPlaceUpdateSpec(oldRevision, newRevision *apps.Controller } updateSpec := &UpdateSpec{ - Revision: newRevision.Name, - ContainerImages: make(map[string]string), - GraceSeconds: opts.GracePeriodSeconds, + Revision: newRevision.Name, + ContainerImages: make(map[string]string), + ContainerRefMetadata: make(map[string]metav1.ObjectMeta), + GraceSeconds: opts.GracePeriodSeconds, } if opts.GetRevision != nil { updateSpec.Revision = opts.GetRevision(newRevision) } - // all patches for podSpec can just update images - var metadataChanged bool - for _, jsonPatchOperation := range patches { - jsonPatchOperation.Path = strings.Replace(jsonPatchOperation.Path, "/spec/template", "", 1) + // all patches for podSpec can just update images in pod spec + var metadataPatches []jsonpatch.Operation + for _, op := range patches { + op.Path = strings.Replace(op.Path, "/spec/template", "", 1) - if !strings.HasPrefix(jsonPatchOperation.Path, "/spec/") { - metadataChanged = true - continue + if !strings.HasPrefix(op.Path, "/spec/") { + if strings.HasPrefix(op.Path, "/metadata/") { + metadataPatches = append(metadataPatches, op) + continue 
+ } + return nil } - if jsonPatchOperation.Operation != "replace" || !inPlaceUpdatePatchRexp.MatchString(jsonPatchOperation.Path) { + if op.Operation != "replace" || !containerImagePatchRexp.MatchString(op.Path) { return nil } // for example: /spec/containers/0/image - words := strings.Split(jsonPatchOperation.Path, "/") + words := strings.Split(op.Path, "/") idx, _ := strconv.Atoi(words[3]) if len(oldTemp.Spec.Containers) <= idx { return nil } - updateSpec.ContainerImages[oldTemp.Spec.Containers[idx].Name] = jsonPatchOperation.Value.(string) + updateSpec.ContainerImages[oldTemp.Spec.Containers[idx].Name] = op.Value.(string) } - if metadataChanged { + + if len(metadataPatches) > 0 { + if utilfeature.DefaultFeatureGate.Enabled(features.InPlaceUpdateEnvFromMetadata) { + // for example: /metadata/labels/my-label-key + for _, op := range metadataPatches { + if op.Operation != "replace" && op.Operation != "add" { + continue + } + words := strings.SplitN(op.Path, "/", 4) + if len(words) != 4 && words[2] != "labels" && words[2] != "annotations" { + continue + } + key := rfc6901Decoder.Replace(words[3]) + + for i := range newTemp.Spec.Containers { + c := &newTemp.Spec.Containers[i] + objMeta := updateSpec.ContainerRefMetadata[c.Name] + switch words[2] { + case "labels": + if !utilcontainermeta.IsContainerReferenceToMeta(c, "metadata.labels", key) { + continue + } + if objMeta.Labels == nil { + objMeta.Labels = make(map[string]string) + } + objMeta.Labels[key] = op.Value.(string) + delete(oldTemp.ObjectMeta.Labels, key) + delete(newTemp.ObjectMeta.Labels, key) + + case "annotations": + if !utilcontainermeta.IsContainerReferenceToMeta(c, "metadata.annotations", key) { + continue + } + if objMeta.Annotations == nil { + objMeta.Annotations = make(map[string]string) + } + objMeta.Annotations[key] = op.Value.(string) + delete(oldTemp.ObjectMeta.Annotations, key) + delete(newTemp.ObjectMeta.Annotations, key) + } + + updateSpec.ContainerRefMetadata[c.Name] = objMeta + 
updateSpec.UpdateEnvFromMetadata = true + } + } + } + oldBytes, _ := json.Marshal(v1.Pod{ObjectMeta: oldTemp.ObjectMeta}) newBytes, _ := json.Marshal(v1.Pod{ObjectMeta: newTemp.ObjectMeta}) patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldBytes, newBytes, &v1.Pod{}) @@ -136,19 +328,6 @@ func defaultCalculateInPlaceUpdateSpec(oldRevision, newRevision *apps.Controller return nil } updateSpec.MetaDataPatch = patchBytes - - if utilfeature.DefaultFeatureGate.Enabled(features.InPlaceUpdateEnvFromMetadata) { - hasher := utilcontainermeta.NewEnvFromMetadataHasher() - for i := range newTemp.Spec.Containers { - c := &newTemp.Spec.Containers[i] - oldHashWithEnvFromMetadata := hasher.GetExpectHash(c, oldTemp) - newHashWithEnvFromMetadata := hasher.GetExpectHash(c, newTemp) - if oldHashWithEnvFromMetadata != newHashWithEnvFromMetadata { - updateSpec.UpdateEnvFromMetadata = true - break - } - } - } } return updateSpec } @@ -167,7 +346,14 @@ func DefaultCheckInPlaceUpdateCompleted(pod *v1.Pod) error { } else if err := json.Unmarshal([]byte(stateStr), &inPlaceUpdateState); err != nil { return err } + if len(inPlaceUpdateState.NextContainerImages) > 0 || len(inPlaceUpdateState.NextContainerRefMetadata) > 0 { + return fmt.Errorf("existing containers to in-place update in next batches") + } + + return defaultCheckContainersInPlaceUpdateCompleted(pod, &inPlaceUpdateState) +} +func defaultCheckContainersInPlaceUpdateCompleted(pod *v1.Pod, inPlaceUpdateState *appspub.InPlaceUpdateState) error { runtimeContainerMetaSet, err := appspub.GetRuntimeContainerMetaSet(pod) if err != nil { return err diff --git a/pkg/util/inplaceupdate/inplace_update_defaults_test.go b/pkg/util/inplaceupdate/inplace_update_defaults_test.go new file mode 100644 index 0000000000..2ab1cbe06f --- /dev/null +++ b/pkg/util/inplaceupdate/inplace_update_defaults_test.go @@ -0,0 +1,468 @@ +/* +Copyright 2022 The Kruise Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package inplaceupdate + +import ( + "encoding/json" + "testing" + "time" + + appspub "github.com/openkruise/kruise/apis/apps/pub" + "github.com/openkruise/kruise/pkg/util" + v1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/strategicpatch" +) + +func TestDefaultPatchUpdateSpecToPod(t *testing.T) { + now := time.Now() + Clock = clock.NewFakeClock(now) + givenPod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"label-k1": "foo", "label-k2": "foo"}, + Annotations: map[string]string{"annotation-k1": "foo"}, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "c1", + Image: "c1-img", + Env: []v1.EnvVar{ + {Name: appspub.ContainerLaunchBarrierEnvName, ValueFrom: &v1.EnvVarSource{ConfigMapKeyRef: &v1.ConfigMapKeySelector{Key: "p_20"}}}, + {Name: "config", ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels['label-k1']"}}}, + }, + }, + { + Name: "c2", + Image: "c2-img", + Env: []v1.EnvVar{ + {Name: appspub.ContainerLaunchBarrierEnvName, ValueFrom: &v1.EnvVarSource{ConfigMapKeyRef: &v1.ConfigMapKeySelector{Key: "p_10"}}}, + {Name: "config", ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels['label-k2']"}}}, + }, + }, + { + Name: "c3", + Image: "c3-img", + Env: []v1.EnvVar{ + {Name: 
appspub.ContainerLaunchBarrierEnvName, ValueFrom: &v1.EnvVarSource{ConfigMapKeyRef: &v1.ConfigMapKeySelector{Key: "p_0"}}}, + {Name: "config", ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels['label-k2']"}}}, + }, + }, + { + Name: "c4", + Image: "c4-img", + Env: []v1.EnvVar{ + {Name: appspub.ContainerLaunchBarrierEnvName, ValueFrom: &v1.EnvVarSource{ConfigMapKeyRef: &v1.ConfigMapKeySelector{Key: "p_0"}}}, + {Name: "config", ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.annotations['annotation-k1']"}}}, + }, + }, + }, + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "c1", + ImageID: "containerd://c1-img", + }, + { + Name: "c2", + ImageID: "containerd://c2-img", + }, + { + Name: "c3", + ImageID: "containerd://c3-img", + }, + { + Name: "c4", + ImageID: "containerd://c4-img", + }, + }, + }, + } + + cases := []struct { + name string + spec *UpdateSpec + state *appspub.InPlaceUpdateState + expectedState *appspub.InPlaceUpdateState + expectedPatch map[string]interface{} + }{ + { + name: "update a signal container image", + spec: &UpdateSpec{ + ContainerImages: map[string]string{"c1": "c1-img-new"}, + }, + state: &appspub.InPlaceUpdateState{}, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "containerd://c1-img"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}}, + }, + expectedPatch: map[string]interface{}{ + //"metadata": map[string]interface{}{ + // "annotations": map[string]interface{}{ + // appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{ + // LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "containerd://c1-img"}}, + // ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: 
[]string{"c1"}}}, + // }), + // }, + //}, + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c1", + "image": "c1-img-new", + }, + }, + }, + }, + }, + { + name: "update two container images without priority", + spec: &UpdateSpec{ + MetaDataPatch: []byte(`{"metadata":{"annotations":{"new-key": "bar"}}}`), + ContainerImages: map[string]string{"c3": "c3-img-new", "c4": "c4-img-new"}, + }, + state: &appspub.InPlaceUpdateState{}, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c3": {ImageID: "containerd://c3-img"}, "c4": {ImageID: "containerd://c4-img"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c3", "c4"}}}, + }, + expectedPatch: map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + "new-key": "bar", + }, + }, + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c3", + "image": "c3-img-new", + }, + { + "name": "c4", + "image": "c4-img-new", + }, + }, + }, + }, + }, + { + name: "update two container image and env from metadata without priority", + spec: &UpdateSpec{ + MetaDataPatch: []byte(`{"metadata":{"annotations":{"new-key": "bar"}}}`), + ContainerImages: map[string]string{"c3": "c3-img-new"}, + ContainerRefMetadata: map[string]metav1.ObjectMeta{"c4": {Annotations: map[string]string{"annotation-k1": "bar"}}}, + }, + state: &appspub.InPlaceUpdateState{}, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c3": {ImageID: "containerd://c3-img"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c3", "c4"}}}, + }, + expectedPatch: map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + "new-key": "bar", + 
"annotation-k1": "bar", + }, + }, + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c3", + "image": "c3-img-new", + }, + }, + }, + }, + }, + { + name: "update two container images with different priorities, batch 1st", + spec: &UpdateSpec{ + MetaDataPatch: []byte(`{"metadata":{"annotations":{"new-key": "bar"}}}`), + ContainerImages: map[string]string{"c1": "c1-img-new", "c2": "c2-img-new"}, + }, + state: &appspub.InPlaceUpdateState{}, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "containerd://c1-img"}}, + NextContainerImages: map[string]string{"c2": "c2-img-new"}, + PreCheckBeforeNext: &appspub.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: []string{"c1"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}}, + }, + expectedPatch: map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + "new-key": "bar", + }, + }, + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c1", + "image": "c1-img-new", + }, + }, + }, + }, + }, + { + name: "update two container images with different priorities, batch 2nd", + spec: &UpdateSpec{ + ContainerImages: map[string]string{"c2": "c2-img-new"}, + }, + state: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "containerd://c1-img-old"}}, + NextContainerImages: map[string]string{"c2": "c2-img-new"}, + PreCheckBeforeNext: &appspub.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: []string{"c1"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}}, + }, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: 
"containerd://c1-img-old"}, "c2": {ImageID: "containerd://c2-img"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}, {Timestamp: metav1.NewTime(now), Containers: []string{"c2"}}}, + }, + expectedPatch: map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c2", + "image": "c2-img-new", + }, + }, + }, + }, + }, + { + name: "update three container images with different priorities, batch 1st", + spec: &UpdateSpec{ + ContainerImages: map[string]string{"c1": "c1-img-new", "c2": "c2-img-new", "c4": "c4-img-new"}, + }, + state: &appspub.InPlaceUpdateState{}, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "containerd://c1-img"}}, + NextContainerImages: map[string]string{"c2": "c2-img-new", "c4": "c4-img-new"}, + PreCheckBeforeNext: &appspub.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: []string{"c1"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}}, + }, + expectedPatch: map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c1", + "image": "c1-img-new", + }, + }, + }, + }, + }, + { + name: "update three container images with different priorities, batch 2nd", + spec: &UpdateSpec{ + ContainerImages: map[string]string{"c2": "c2-img-new", "c4": "c4-img-new"}, + }, + state: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "containerd://c1-img"}}, + NextContainerImages: map[string]string{"c2": "c2-img-new", "c4": "c4-img-new"}, + PreCheckBeforeNext: &appspub.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: []string{"c1"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: 
[]string{"c1"}}}, + }, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "containerd://c1-img"}, "c2": {ImageID: "containerd://c2-img"}}, + NextContainerImages: map[string]string{"c4": "c4-img-new"}, + PreCheckBeforeNext: &appspub.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: []string{"c2"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}, {Timestamp: metav1.NewTime(now), Containers: []string{"c2"}}}, + }, + expectedPatch: map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c2", + "image": "c2-img-new", + }, + }, + }, + }, + }, + { + name: "update three container images with different priorities, batch 3rd", + spec: &UpdateSpec{ + ContainerImages: map[string]string{"c4": "c4-img-new"}, + }, + state: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "containerd://c1-img"}, "c2": {ImageID: "containerd://c2-img"}}, + NextContainerImages: map[string]string{"c4": "c4-img-new"}, + PreCheckBeforeNext: &appspub.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: []string{"c2"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}, {Timestamp: metav1.NewTime(now), Containers: []string{"c2"}}}, + }, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "containerd://c1-img"}, "c2": {ImageID: "containerd://c2-img"}, "c4": {ImageID: "containerd://c4-img"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{ + {Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}, + {Timestamp: metav1.NewTime(now), Containers: []string{"c2"}}, + {Timestamp: metav1.NewTime(now), Containers: []string{"c4"}}, + }, + }, + 
expectedPatch: map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c4", + "image": "c4-img-new", + }, + }, + }, + }, + }, + { + name: "update four container images and env from metadata with different priorities, batch 1st", + spec: &UpdateSpec{ + ContainerImages: map[string]string{"c1": "c1-img-new", "c2": "c2-img-new", "c4": "c4-img-new"}, + ContainerRefMetadata: map[string]metav1.ObjectMeta{ + "c2": {Labels: map[string]string{"label-k2": "bar"}}, + "c3": {Labels: map[string]string{"label-k2": "bar"}}, + }, + }, + state: &appspub.InPlaceUpdateState{}, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "containerd://c1-img"}}, + NextContainerImages: map[string]string{"c2": "c2-img-new", "c4": "c4-img-new"}, + NextContainerRefMetadata: map[string]metav1.ObjectMeta{ + "c2": {Labels: map[string]string{"label-k2": "bar"}}, + "c3": {Labels: map[string]string{"label-k2": "bar"}}, + }, + PreCheckBeforeNext: &appspub.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: []string{"c1"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}}, + }, + expectedPatch: map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c1", + "image": "c1-img-new", + }, + }, + }, + }, + }, + { + name: "update four container images and env from metadata with different priorities, batch 2nd", + spec: &UpdateSpec{ + ContainerImages: map[string]string{"c2": "c2-img-new", "c4": "c4-img-new"}, + ContainerRefMetadata: map[string]metav1.ObjectMeta{ + "c2": {Labels: map[string]string{"label-k2": "bar"}}, + "c3": {Labels: map[string]string{"label-k2": "bar"}}, + }, + }, + state: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "containerd://c1-img"}}, + 
NextContainerImages: map[string]string{"c2": "c2-img-new", "c4": "c4-img-new"}, + NextContainerRefMetadata: map[string]metav1.ObjectMeta{ + "c2": {Labels: map[string]string{"label-k2": "bar"}}, + "c3": {Labels: map[string]string{"label-k2": "bar"}}, + }, + PreCheckBeforeNext: &appspub.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: []string{"c1"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}}, + }, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "containerd://c1-img"}, "c2": {ImageID: "containerd://c2-img"}}, + NextContainerImages: map[string]string{"c4": "c4-img-new"}, + PreCheckBeforeNext: &appspub.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: []string{"c2", "c3"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{ + {Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}, + {Timestamp: metav1.NewTime(now), Containers: []string{"c2", "c3"}}, + }, + }, + expectedPatch: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{ + "label-k2": "bar", + }, + }, + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c2", + "image": "c2-img-new", + }, + }, + }, + }, + }, + { + name: "update four container images and env from metadata with different priorities, batch 3rd", + spec: &UpdateSpec{ + ContainerImages: map[string]string{"c4": "c4-img-new"}, + }, + state: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "containerd://c1-img"}, "c2": {ImageID: "containerd://c2-img"}}, + NextContainerImages: map[string]string{"c4": "c4-img-new"}, + PreCheckBeforeNext: &appspub.InPlaceUpdatePreCheckBeforeNext{ContainersRequiredReady: []string{"c2", "c3"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{ + {Timestamp: 
metav1.NewTime(now), Containers: []string{"c1"}}, + {Timestamp: metav1.NewTime(now), Containers: []string{"c2", "c3"}}, + }, + }, + expectedState: &appspub.InPlaceUpdateState{ + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "containerd://c1-img"}, "c2": {ImageID: "containerd://c2-img"}, "c4": {ImageID: "containerd://c4-img"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{ + {Timestamp: metav1.NewTime(now), Containers: []string{"c1"}}, + {Timestamp: metav1.NewTime(now), Containers: []string{"c2", "c3"}}, + {Timestamp: metav1.NewTime(now), Containers: []string{"c4"}}, + }, + }, + expectedPatch: map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "c4", + "image": "c4-img-new", + }, + }, + }, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + gotPod, err := defaultPatchUpdateSpecToPod(givenPod.DeepCopy(), tc.spec, tc.state) + if err != nil { + t.Fatal(err) + } + + if !apiequality.Semantic.DeepEqual(tc.state, tc.expectedState) { + t.Fatalf("expected state \n%v\n but got \n%v", util.DumpJSON(tc.expectedState), util.DumpJSON(tc.state)) + } + + originPodJS, _ := json.Marshal(givenPod) + patchJS, _ := json.Marshal(tc.expectedPatch) + expectedPodJS, err := strategicpatch.StrategicMergePatch(originPodJS, patchJS, &v1.Pod{}) + if err != nil { + t.Fatal(err) + } + expectedPod := &v1.Pod{} + if err := json.Unmarshal(expectedPodJS, expectedPod); err != nil { + t.Fatal(err) + } + expectedPod.Annotations[appspub.InPlaceUpdateStateKey] = util.DumpJSON(tc.state) + if !apiequality.Semantic.DeepEqual(gotPod, expectedPod) { + t.Fatalf("expected pod \n%v\n but got \n%v", util.DumpJSON(expectedPod), util.DumpJSON(gotPod)) + } + }) + } +} diff --git a/pkg/util/inplaceupdate/inplace_update_test.go b/pkg/util/inplaceupdate/inplace_update_test.go index 9465e6301f..75353a8ae9 100644 --- a/pkg/util/inplaceupdate/inplace_update_test.go +++ 
b/pkg/util/inplaceupdate/inplace_update_test.go @@ -24,13 +24,16 @@ import ( "time" appspub "github.com/openkruise/kruise/apis/apps/pub" + "github.com/openkruise/kruise/pkg/features" "github.com/openkruise/kruise/pkg/util" + utilfeature "github.com/openkruise/kruise/pkg/util/feature" "github.com/openkruise/kruise/pkg/util/revisionadapter" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/clock" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -58,8 +61,9 @@ func TestCalculateInPlaceUpdateSpec(t *testing.T) { Data: runtime.RawExtension{Raw: []byte(`{"spec":{"template":{"$patch":"replace","spec":{"containers":[{"name":"c1","image":"foo2"}]}}}}`)}, }, expectedSpec: &UpdateSpec{ - Revision: "new-revision", - ContainerImages: map[string]string{"c1": "foo2"}, + Revision: "new-revision", + ContainerImages: map[string]string{"c1": "foo2"}, + ContainerRefMetadata: make(map[string]metav1.ObjectMeta), }, }, { @@ -72,8 +76,9 @@ func TestCalculateInPlaceUpdateSpec(t *testing.T) { Data: runtime.RawExtension{Raw: []byte(`{"metadata":{"labels":{"k":"v"}},"spec":{"template":{"$patch":"replace","spec":{"containers":[{"name":"c1","image":"foo2"}]}}}}`)}, }, expectedSpec: &UpdateSpec{ - Revision: "new-revision", - ContainerImages: map[string]string{"c1": "foo2"}, + Revision: "new-revision", + ContainerImages: map[string]string{"c1": "foo2"}, + ContainerRefMetadata: make(map[string]metav1.ObjectMeta), }, }, { @@ -86,9 +91,27 @@ func TestCalculateInPlaceUpdateSpec(t *testing.T) { Data: runtime.RawExtension{Raw: []byte(`{"spec":{"template":{"$patch":"replace","metadata":{"labels":{"k":"v","k1":"v1"}},"spec":{"containers":[{"name":"c1","image":"foo2"}]}}}}`)}, }, expectedSpec: &UpdateSpec{ - Revision: "new-revision", - ContainerImages: map[string]string{"c1": "foo2"}, - MetaDataPatch: []byte(`{"metadata":{"labels":{"k1":"v1"}}}`), + Revision: 
"new-revision", + ContainerImages: map[string]string{"c1": "foo2"}, + ContainerRefMetadata: make(map[string]metav1.ObjectMeta), + MetaDataPatch: []byte(`{"metadata":{"labels":{"k1":"v1"}}}`), + }, + }, + { + oldRevision: &apps.ControllerRevision{ + ObjectMeta: metav1.ObjectMeta{Name: "old-revision"}, + Data: runtime.RawExtension{Raw: []byte(`{"spec":{"template":{"$patch":"replace","metadata":{"labels":{"k":"v"}},"spec":{"containers":[{"name":"c1","image":"foo1","env":[{"name":"TEST_ENV","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.labels['k']"}}}]}]}}}}`)}, + }, + newRevision: &apps.ControllerRevision{ + ObjectMeta: metav1.ObjectMeta{Name: "new-revision"}, + Data: runtime.RawExtension{Raw: []byte(`{"spec":{"template":{"$patch":"replace","metadata":{"labels":{"k":"v2","k1":"v1"}},"spec":{"containers":[{"name":"c1","image":"foo2","env":[{"name":"TEST_ENV","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"metadata.labels['k']"}}}]}]}}}}`)}, + }, + expectedSpec: &UpdateSpec{ + Revision: "new-revision", + ContainerImages: map[string]string{"c1": "foo2"}, + ContainerRefMetadata: map[string]metav1.ObjectMeta{"c1": {Labels: map[string]string{"k": "v2"}}}, + MetaDataPatch: []byte(`{"metadata":{"labels":{"k1":"v1"}}}`), + UpdateEnvFromMetadata: true, }, }, { @@ -101,9 +124,10 @@ func TestCalculateInPlaceUpdateSpec(t *testing.T) { Data: runtime.RawExtension{Raw: []byte(`{"spec":{"template":{"$patch":"replace","metadata":{"labels":{"k":"v","k1":"v1"},"finalizers":["fz2"]},"spec":{"containers":[{"name":"c1","image":"foo2"}]}}}}`)}, }, expectedSpec: &UpdateSpec{ - Revision: "new-revision", - ContainerImages: map[string]string{"c1": "foo2"}, - MetaDataPatch: []byte(`{"metadata":{"$deleteFromPrimitiveList/finalizers":["fz1"],"$setElementOrder/finalizers":["fz2"],"labels":{"k1":"v1","k2":null}}}`), + Revision: "new-revision", + ContainerImages: map[string]string{"c1": "foo2"}, + ContainerRefMetadata: make(map[string]metav1.ObjectMeta), + MetaDataPatch: 
[]byte(`{"metadata":{"$deleteFromPrimitiveList/finalizers":["fz1"],"$setElementOrder/finalizers":["fz2"],"labels":{"k1":"v1","k2":null}}}`), }, }, { @@ -119,10 +143,12 @@ }, } + // Enable the InPlaceUpdateEnvFromMetadata feature-gate + _ = utilfeature.DefaultMutableFeatureGate.Set(fmt.Sprintf("%s=true", features.InPlaceUpdateEnvFromMetadata)) for i, tc := range cases { res := defaultCalculateInPlaceUpdateSpec(tc.oldRevision, tc.newRevision, nil) if !reflect.DeepEqual(res, tc.expectedSpec) { - t.Fatalf("case #%d failed, expected %v, got %v", i, tc.expectedSpec, res) + t.Fatalf("case #%d failed, expected %+v, got %+v", i, tc.expectedSpec, res) } } } @@ -400,7 +426,11 @@ func TestRefresh(t *testing.T) { apps.StatefulSetRevisionLabel: "new-revision", }, Annotations: map[string]string{ - appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{Revision: "new-revision", UpdateTimestamp: tenSecondsAgo, LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "img01"}}}), + appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{ + Revision: "new-revision", + UpdateTimestamp: tenSecondsAgo, + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "img01"}}, + }), appspub.InPlaceUpdateGraceKey: `{"revision":"new-revision","containerImages":{"main":"img-name02"},"graceSeconds":5}`, }, }, @@ -415,7 +445,12 @@ apps.StatefulSetRevisionLabel: "new-revision", }, Annotations: map[string]string{ - appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{Revision: "new-revision", UpdateTimestamp: tenSecondsAgo, LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "img01"}}}), + appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{ + Revision: "new-revision", + UpdateTimestamp: tenSecondsAgo, + LastContainerStatuses: 
map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "img01"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: aHourAgo, Containers: []string{"main"}}}, + }), }, ResourceVersion: "1", }, @@ -425,8 +460,162 @@ func TestRefresh(t *testing.T) { }, }, }, + { + name: "do not in-place update the next batch if containers not consistent", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + apps.StatefulSetRevisionLabel: "new-revision", + }, + Annotations: map[string]string{ + appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{ + Revision: "new-revision", + UpdateTimestamp: aHourAgo, + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "c1-img1-ID"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: aHourAgo, Containers: []string{"c1"}}}, + NextContainerImages: map[string]string{"c2": "c2-img2"}, + }), + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Name: "c1", Image: "c1-img2"}, {Name: "c2", Image: "c2-img1"}}, + ReadinessGates: []v1.PodReadinessGate{{ConditionType: appspub.InPlaceUpdateReady}}, + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + {Name: "c1", ImageID: "c1-img1-ID"}, + {Name: "c2", ImageID: "c2-img1-ID"}, + }, + Conditions: []v1.PodCondition{ + { + Type: v1.ContainersReady, + Status: v1.ConditionTrue, + }, + { + Type: appspub.InPlaceUpdateReady, + Status: v1.ConditionFalse, + LastTransitionTime: aHourAgo, + }, + }, + }, + }, + expectedPod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + apps.StatefulSetRevisionLabel: "new-revision", + }, + Annotations: map[string]string{ + appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{ + Revision: "new-revision", + UpdateTimestamp: aHourAgo, + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "c1-img1-ID"}}, + ContainerBatchesRecord: 
[]appspub.InPlaceUpdateContainerBatch{{Timestamp: aHourAgo, Containers: []string{"c1"}}}, + NextContainerImages: map[string]string{"c2": "c2-img2"}, + }), + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Name: "c1", Image: "c1-img2"}, {Name: "c2", Image: "c2-img1"}}, + ReadinessGates: []v1.PodReadinessGate{{ConditionType: appspub.InPlaceUpdateReady}}, + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + {Name: "c1", ImageID: "c1-img1-ID"}, + {Name: "c2", ImageID: "c2-img1-ID"}, + }, + Conditions: []v1.PodCondition{ + { + Type: v1.ContainersReady, + Status: v1.ConditionTrue, + }, + { + Type: appspub.InPlaceUpdateReady, + Status: v1.ConditionFalse, + LastTransitionTime: aHourAgo, + }, + }, + }, + }, + }, + { + name: "in-place update the next batch if containers have been consistent", + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + apps.StatefulSetRevisionLabel: "new-revision", + }, + Annotations: map[string]string{ + appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{ + Revision: "new-revision", + UpdateTimestamp: aHourAgo, + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "c1-img1-ID"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: aHourAgo, Containers: []string{"c1"}}}, + NextContainerImages: map[string]string{"c2": "c2-img2"}, + }), + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Name: "c1", Image: "c1-img2"}, {Name: "c2", Image: "c2-img1"}}, + ReadinessGates: []v1.PodReadinessGate{{ConditionType: appspub.InPlaceUpdateReady}}, + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + {Name: "c1", ImageID: "c1-img2-ID"}, + {Name: "c2", ImageID: "c2-img1-ID"}, + }, + Conditions: []v1.PodCondition{ + { + Type: v1.ContainersReady, + Status: v1.ConditionTrue, + }, + { + Type: appspub.InPlaceUpdateReady, + Status: v1.ConditionFalse, + LastTransitionTime: aHourAgo, + }, + }, + }, + }, + 
expectedPod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + apps.StatefulSetRevisionLabel: "new-revision", + }, + Annotations: map[string]string{ + appspub.InPlaceUpdateStateKey: util.DumpJSON(appspub.InPlaceUpdateState{ + Revision: "new-revision", + UpdateTimestamp: aHourAgo, + LastContainerStatuses: map[string]appspub.InPlaceUpdateContainerStatus{"c1": {ImageID: "c1-img1-ID"}, "c2": {ImageID: "c2-img1-ID"}}, + ContainerBatchesRecord: []appspub.InPlaceUpdateContainerBatch{{Timestamp: aHourAgo, Containers: []string{"c1"}}, {Timestamp: aHourAgo, Containers: []string{"c2"}}}, + }), + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Name: "c1", Image: "c1-img2"}, {Name: "c2", Image: "c2-img2"}}, + ReadinessGates: []v1.PodReadinessGate{{ConditionType: appspub.InPlaceUpdateReady}}, + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + {Name: "c1", ImageID: "c1-img2-ID"}, + {Name: "c2", ImageID: "c2-img1-ID"}, + }, + Conditions: []v1.PodCondition{ + { + Type: v1.ContainersReady, + Status: v1.ConditionTrue, + }, + { + Type: appspub.InPlaceUpdateReady, + Status: v1.ConditionFalse, + LastTransitionTime: aHourAgo, + }, + }, + }, + }, + }, } + Clock = clock.NewFakeClock(aHourAgo.Time) for i, testCase := range cases { testCase.pod.Name = fmt.Sprintf("pod-%d", i) testCase.expectedPod.Name = fmt.Sprintf("pod-%d", i) @@ -434,7 +623,7 @@ func TestRefresh(t *testing.T) { testCase.expectedPod.Kind = "Pod" cli := fake.NewClientBuilder().WithObjects(testCase.pod).Build() - ctrl := NewForTest(cli, revisionadapter.NewDefaultImpl(), func() metav1.Time { return aHourAgo }) + ctrl := New(cli, revisionadapter.NewDefaultImpl()) if res := ctrl.Refresh(testCase.pod, nil); res.RefreshErr != nil { t.Fatalf("failed to update condition: %v", res.RefreshErr) } @@ -446,7 +635,7 @@ func TestRefresh(t *testing.T) { testCase.expectedPod.ResourceVersion = got.ResourceVersion if !reflect.DeepEqual(testCase.expectedPod, got) { - t.Fatalf("case %s 
failed, expected \n%v got \n%v", testCase.name, util.DumpJSON(testCase.expectedPod), util.DumpJSON(got)) + t.Fatalf("case %s failed, expected \n%v\n got \n%v", testCase.name, util.DumpJSON(testCase.expectedPod), util.DumpJSON(got)) } } } diff --git a/test/e2e/apps/cloneset.go b/test/e2e/apps/cloneset.go index 5420bd9b04..d901185c49 100644 --- a/test/e2e/apps/cloneset.go +++ b/test/e2e/apps/cloneset.go @@ -17,13 +17,19 @@ limitations under the License. package apps import ( + "encoding/json" + "fmt" "sort" "time" + utilpointer "k8s.io/utils/pointer" + "github.com/onsi/ginkgo" "github.com/onsi/gomega" + appspub "github.com/openkruise/kruise/apis/apps/pub" appsv1alpha1 "github.com/openkruise/kruise/apis/apps/v1alpha1" kruiseclientset "github.com/openkruise/kruise/pkg/client/clientset/versioned" + "github.com/openkruise/kruise/pkg/util" "github.com/openkruise/kruise/test/e2e/framework" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -275,6 +281,247 @@ var _ = SIGDescribe("CloneSet", func() { gomega.Expect(newContainerStatus.ContainerID).NotTo(gomega.Equal(oldContainerStatus.ContainerID)) gomega.Expect(newContainerStatus.RestartCount).Should(gomega.Equal(int32(1))) }) + + framework.ConformanceIt("in-place update two container images with priorities successfully", func() { + cs := tester.NewCloneSet("clone-"+randStr, 1, appsv1alpha1.CloneSetUpdateStrategy{Type: appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType}) + cs.Spec.Template.Spec.Containers = append(cs.Spec.Template.Spec.Containers, v1.Container{ + Name: "redis", + Image: RedisImage, + Command: []string{"sleep", "999"}, + Env: []v1.EnvVar{{Name: appspub.ContainerLaunchPriorityEnvName, Value: "10"}}, + Lifecycle: &v1.Lifecycle{PostStart: &v1.Handler{Exec: &v1.ExecAction{Command: []string{"sleep", "10"}}}}, + }) + cs.Spec.Template.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(3) + cs, err = tester.CreateCloneSet(cs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + 
gomega.Expect(cs.Spec.UpdateStrategy.Type).To(gomega.Equal(appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType)) + + ginkgo.By("Wait for replicas satisfied") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.Replicas + }, 3*time.Second, time.Second).Should(gomega.Equal(int32(1))) + + ginkgo.By("Wait for all pods ready") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.ReadyReplicas + }, 120*time.Second, 3*time.Second).Should(gomega.Equal(int32(1))) + + pods, err := tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + + ginkgo.By("Update images of nginx and redis") + err = tester.UpdateCloneSet(cs.Name, func(cs *appsv1alpha1.CloneSet) { + cs.Spec.Template.Spec.Containers[0].Image = NewNginxImage + cs.Spec.Template.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox) + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait for CloneSet generation consistent") + gomega.Eventually(func() bool { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Generation == cs.Status.ObservedGeneration + }, 10*time.Second, 3*time.Second).Should(gomega.Equal(true)) + + ginkgo.By("Wait for all pods updated and ready") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.UpdatedReadyReplicas + }, 120*time.Second, 3*time.Second).Should(gomega.Equal(int32(1))) + + ginkgo.By("Verify two containers have all updated in-place") + pods, err = tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + + pod := pods[0] + nginxContainerStatus := 
util.GetContainerStatus("nginx", pod) + redisContainerStatus := util.GetContainerStatus("redis", pod) + gomega.Expect(nginxContainerStatus.RestartCount).Should(gomega.Equal(int32(1))) + gomega.Expect(redisContainerStatus.RestartCount).Should(gomega.Equal(int32(1))) + + ginkgo.By("Verify nginx should be stopped after new redis has started 10s") + gomega.Expect(nginxContainerStatus.LastTerminationState.Terminated.FinishedAt.After(redisContainerStatus.State.Running.StartedAt.Time.Add(time.Second*10))). + Should(gomega.Equal(true), fmt.Sprintf("nginx finish at %v is not after redis start %v + 10s", + nginxContainerStatus.LastTerminationState.Terminated.FinishedAt, + redisContainerStatus.State.Running.StartedAt)) + + ginkgo.By("Verify in-place update state in two batches") + inPlaceUpdateState := appspub.InPlaceUpdateState{} + gomega.Expect(pod.Annotations[appspub.InPlaceUpdateStateKey]).ShouldNot(gomega.BeEmpty()) + err = json.Unmarshal([]byte(pod.Annotations[appspub.InPlaceUpdateStateKey]), &inPlaceUpdateState) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(inPlaceUpdateState.ContainerBatchesRecord)).Should(gomega.Equal(2)) + gomega.Expect(inPlaceUpdateState.ContainerBatchesRecord[0].Containers).Should(gomega.Equal([]string{"redis"})) + gomega.Expect(inPlaceUpdateState.ContainerBatchesRecord[1].Containers).Should(gomega.Equal([]string{"nginx"})) + }) + + framework.ConformanceIt("in-place update two container images with priorities, should not update the next when the previous one failed", func() { + cs := tester.NewCloneSet("clone-"+randStr, 1, appsv1alpha1.CloneSetUpdateStrategy{Type: appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType}) + cs.Spec.Template.Spec.Containers = append(cs.Spec.Template.Spec.Containers, v1.Container{ + Name: "redis", + Image: RedisImage, + Env: []v1.EnvVar{{Name: appspub.ContainerLaunchPriorityEnvName, Value: "10"}}, + Lifecycle: &v1.Lifecycle{PostStart: &v1.Handler{Exec: &v1.ExecAction{Command: 
[]string{"sleep", "10"}}}}, + }) + cs.Spec.Template.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(3) + cs, err = tester.CreateCloneSet(cs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(cs.Spec.UpdateStrategy.Type).To(gomega.Equal(appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType)) + + ginkgo.By("Wait for replicas satisfied") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.Replicas + }, 3*time.Second, time.Second).Should(gomega.Equal(int32(1))) + + ginkgo.By("Wait for all pods ready") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.ReadyReplicas + }, 120*time.Second, 3*time.Second).Should(gomega.Equal(int32(1))) + + pods, err := tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + + ginkgo.By("Update images of nginx and redis") + err = tester.UpdateCloneSet(cs.Name, func(cs *appsv1alpha1.CloneSet) { + cs.Spec.Template.Spec.Containers[0].Image = NewNginxImage + cs.Spec.Template.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox) + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait for CloneSet generation consistent") + gomega.Eventually(func() bool { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Generation == cs.Status.ObservedGeneration + }, 10*time.Second, 3*time.Second).Should(gomega.Equal(true)) + + ginkgo.By("Wait for redis failed to start") + var pod *v1.Pod + gomega.Eventually(func() *v1.ContainerStateTerminated { + pods, err = tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + pod = pods[0] + redisContainerStatus := util.GetContainerStatus("redis", pod) + return 
redisContainerStatus.LastTerminationState.Terminated + }, 60*time.Second, time.Second).ShouldNot(gomega.BeNil()) + + gomega.Eventually(func() *v1.ContainerStateWaiting { + pods, err = tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + pod = pods[0] + redisContainerStatus := util.GetContainerStatus("redis", pod) + return redisContainerStatus.State.Waiting + }, 60*time.Second, time.Second).ShouldNot(gomega.BeNil()) + + nginxContainerStatus := util.GetContainerStatus("nginx", pod) + gomega.Expect(nginxContainerStatus.RestartCount).Should(gomega.Equal(int32(0))) + + ginkgo.By("Verify in-place update state only one batch and remain next") + inPlaceUpdateState := appspub.InPlaceUpdateState{} + gomega.Expect(pod.Annotations[appspub.InPlaceUpdateStateKey]).ShouldNot(gomega.BeEmpty()) + err = json.Unmarshal([]byte(pod.Annotations[appspub.InPlaceUpdateStateKey]), &inPlaceUpdateState) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(inPlaceUpdateState.ContainerBatchesRecord)).Should(gomega.Equal(1)) + gomega.Expect(inPlaceUpdateState.ContainerBatchesRecord[0].Containers).Should(gomega.Equal([]string{"redis"})) + gomega.Expect(inPlaceUpdateState.NextContainerImages).Should(gomega.Equal(map[string]string{"nginx": NewNginxImage})) + }) + + // This can't be Conformance yet. 
+ ginkgo.It("in-place update two container image and env from metadata with priorities", func() { + cs := tester.NewCloneSet("clone-"+randStr, 1, appsv1alpha1.CloneSetUpdateStrategy{Type: appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType}) + cs.Spec.Template.Annotations = map[string]string{"config": "foo"} + cs.Spec.Template.Spec.Containers = append(cs.Spec.Template.Spec.Containers, v1.Container{ + Name: "redis", + Image: RedisImage, + Env: []v1.EnvVar{ + {Name: appspub.ContainerLaunchPriorityEnvName, Value: "10"}, + {Name: "CONFIG", ValueFrom: &v1.EnvVarSource{FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.annotations['config']"}}}, + }, + Lifecycle: &v1.Lifecycle{PostStart: &v1.Handler{Exec: &v1.ExecAction{Command: []string{"sleep", "10"}}}}, + }) + cs, err = tester.CreateCloneSet(cs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(cs.Spec.UpdateStrategy.Type).To(gomega.Equal(appsv1alpha1.InPlaceIfPossibleCloneSetUpdateStrategyType)) + + ginkgo.By("Wait for replicas satisfied") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.Replicas + }, 3*time.Second, time.Second).Should(gomega.Equal(int32(1))) + + ginkgo.By("Wait for all pods ready") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.ReadyReplicas + }, 120*time.Second, 3*time.Second).Should(gomega.Equal(int32(1))) + + pods, err := tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + + ginkgo.By("Update nginx image and config annotation") + err = tester.UpdateCloneSet(cs.Name, func(cs *appsv1alpha1.CloneSet) { + cs.Spec.Template.Spec.Containers[0].Image = NewNginxImage + cs.Spec.Template.Annotations["config"] = "bar" + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait for CloneSet 
generation consistent") + gomega.Eventually(func() bool { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Generation == cs.Status.ObservedGeneration + }, 10*time.Second, 3*time.Second).Should(gomega.Equal(true)) + + ginkgo.By("Wait for all pods updated and ready") + gomega.Eventually(func() int32 { + cs, err = tester.GetCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return cs.Status.UpdatedReadyReplicas + }, 120*time.Second, 3*time.Second).Should(gomega.Equal(int32(1))) + + ginkgo.By("Verify two containers have all updated in-place") + pods, err = tester.ListPodsForCloneSet(cs.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(pods)).Should(gomega.Equal(1)) + + pod := pods[0] + nginxContainerStatus := util.GetContainerStatus("nginx", pod) + redisContainerStatus := util.GetContainerStatus("redis", pod) + gomega.Expect(nginxContainerStatus.RestartCount).Should(gomega.Equal(int32(1))) + gomega.Expect(redisContainerStatus.RestartCount).Should(gomega.Equal(int32(1))) + + ginkgo.By("Verify nginx should be stopped after new redis has started") + gomega.Expect(nginxContainerStatus.LastTerminationState.Terminated.FinishedAt.After(redisContainerStatus.State.Running.StartedAt.Time.Add(time.Second*10))). 
+ Should(gomega.Equal(true), fmt.Sprintf("nginx finish at %v is not after redis start %v + 10s", + nginxContainerStatus.LastTerminationState.Terminated.FinishedAt, + redisContainerStatus.State.Running.StartedAt)) + + ginkgo.By("Verify in-place update state in two batches") + inPlaceUpdateState := appspub.InPlaceUpdateState{} + gomega.Expect(pod.Annotations[appspub.InPlaceUpdateStateKey]).ShouldNot(gomega.BeEmpty()) + err = json.Unmarshal([]byte(pod.Annotations[appspub.InPlaceUpdateStateKey]), &inPlaceUpdateState) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(inPlaceUpdateState.ContainerBatchesRecord)).Should(gomega.Equal(2)) + gomega.Expect(inPlaceUpdateState.ContainerBatchesRecord[0].Containers).Should(gomega.Equal([]string{"redis"})) + gomega.Expect(inPlaceUpdateState.ContainerBatchesRecord[1].Containers).Should(gomega.Equal([]string{"nginx"})) + }) }) framework.KruiseDescribe("CloneSet pre-download images", func() { diff --git a/test/e2e/apps/types.go b/test/e2e/apps/types.go index 438b2bf442..07495a8a33 100644 --- a/test/e2e/apps/types.go +++ b/test/e2e/apps/types.go @@ -42,4 +42,7 @@ var ( // InvalidImage is the fully qualified URI to the invalid image InvalidImage = imageutils.GetE2EImage(imageutils.InvalidRegistryImage) + + // RedisImage is the fully qualified URI to the Redis image + RedisImage = imageutils.GetE2EImage(imageutils.Redis) )