Merge pull request kubernetes#123484 from erikgb/csa-upgrade-subresource
Add option to upgrade subresource from CSA to SSA

The csaupgrade helpers previously migrated only managed-fields entries with an empty subresource; this adds a functional Option so callers can target entries on a named subresource such as status.
k8s-ci-robot committed Mar 2, 2024
2 parents f4b0115 + 9633e25 commit 65d7550
Showing 3 changed files with 213 additions and 11 deletions.
30 changes: 30 additions & 0 deletions staging/src/k8s.io/client-go/util/csaupgrade/options.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package csaupgrade
+
+type Option func(*options)
+
+// Subresource sets the subresource to upgrade from CSA to SSA.
+func Subresource(s string) Option {
+	return func(opts *options) {
+		opts.subresource = s
+	}
+}
+
+type options struct {
+	subresource string
+}
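The new variadic Option parameter keeps existing call sites source-compatible. An abbreviated usage sketch (fragment) follows; the pvc object, the surrounding error handling, and the manager names are illustrative assumptions, not part of this change:

// Sketch: upgrade kube-controller-manager's client-side-apply (Update)
// entry on the status subresource to server-side-apply ownership,
// rewriting managedFields in memory.
err := csaupgrade.UpgradeManagedFields(
	pvc, // a runtime.Object fetched earlier (illustrative)
	sets.New("kube-controller-manager"), // CSA manager names to migrate
	"kube-controller-manager",           // SSA manager name to merge into
	csaupgrade.Subresource("status"),
)
if err != nil {
	return err
}
// pvc now carries the rewritten managedFields; persist it with a normal
// update for the change to take effect.

Omitting the option preserves the old behavior: only entries whose subresource is empty are matched.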
33 changes: 23 additions & 10 deletions staging/src/k8s.io/client-go/util/csaupgrade/upgrade.go
@@ -82,7 +82,13 @@ func UpgradeManagedFields(
 	obj runtime.Object,
 	csaManagerNames sets.Set[string],
 	ssaManagerName string,
+	opts ...Option,
 ) error {
+	o := options{}
+	for _, opt := range opts {
+		opt(&o)
+	}
+
 	accessor, err := meta.Accessor(obj)
 	if err != nil {
 		return err
@@ -92,7 +98,7 @@

 	for csaManagerName := range csaManagerNames {
 		filteredManagers, err = upgradedManagedFields(
-			filteredManagers, csaManagerName, ssaManagerName)
+			filteredManagers, csaManagerName, ssaManagerName, o)

 		if err != nil {
 			return err
@@ -116,7 +122,14 @@
 func UpgradeManagedFieldsPatch(
 	obj runtime.Object,
 	csaManagerNames sets.Set[string],
-	ssaManagerName string) ([]byte, error) {
+	ssaManagerName string,
+	opts ...Option,
+) ([]byte, error) {
+	o := options{}
+	for _, opt := range opts {
+		opt(&o)
+	}
+
 	accessor, err := meta.Accessor(obj)
 	if err != nil {
 		return nil, err
@@ -126,7 +139,7 @@
 	filteredManagers := accessor.GetManagedFields()
 	for csaManagerName := range csaManagerNames {
 		filteredManagers, err = upgradedManagedFields(
-			filteredManagers, csaManagerName, ssaManagerName)
+			filteredManagers, csaManagerName, ssaManagerName, o)
 		if err != nil {
 			return nil, err
 		}
@@ -166,6 +179,7 @@ func upgradedManagedFields(
 	managedFields []metav1.ManagedFieldsEntry,
 	csaManagerName string,
 	ssaManagerName string,
+	opts options,
 ) ([]metav1.ManagedFieldsEntry, error) {
 	if managedFields == nil {
 		return nil, nil
@@ -183,7 +197,7 @@
 		func(entry metav1.ManagedFieldsEntry) bool {
 			return entry.Manager == ssaManagerName &&
 				entry.Operation == metav1.ManagedFieldsOperationApply &&
-				entry.Subresource == ""
+				entry.Subresource == opts.subresource
 		})

 	if !managerExists {
@@ -196,7 +210,7 @@
 		func(entry metav1.ManagedFieldsEntry) bool {
 			return entry.Manager == csaManagerName &&
 				entry.Operation == metav1.ManagedFieldsOperationUpdate &&
-				entry.Subresource == ""
+				entry.Subresource == opts.subresource
 		})

 	if !managerExists {
@@ -209,7 +223,7 @@
 		managedFields[replaceIndex].Operation = metav1.ManagedFieldsOperationApply
 		managedFields[replaceIndex].Manager = ssaManagerName
 	}
-	err := unionManagerIntoIndex(managedFields, replaceIndex, csaManagerName)
+	err := unionManagerIntoIndex(managedFields, replaceIndex, csaManagerName, opts)
 	if err != nil {
 		return nil, err
 	}
@@ -218,7 +232,7 @@
 	filteredManagers := filter(managedFields, func(entry metav1.ManagedFieldsEntry) bool {
 		return !(entry.Manager == csaManagerName &&
 			entry.Operation == metav1.ManagedFieldsOperationUpdate &&
-			entry.Subresource == "")
+			entry.Subresource == opts.subresource)
 	})

 	return filteredManagers, nil
@@ -231,6 +245,7 @@ func unionManagerIntoIndex(
 	entries []metav1.ManagedFieldsEntry,
 	targetIndex int,
 	csaManagerName string,
+	opts options,
 ) error {
 	ssaManager := entries[targetIndex]

@@ -240,9 +255,7 @@
 		func(entry metav1.ManagedFieldsEntry) bool {
 			return entry.Manager == csaManagerName &&
 				entry.Operation == metav1.ManagedFieldsOperationUpdate &&
-				//!TODO: some users may want to migrate subresources.
-				// should thread through the args at some point.
-				entry.Subresource == "" &&
+				entry.Subresource == opts.subresource &&
 				entry.APIVersion == ssaManager.APIVersion
 		})

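The patch-based variant accepts the same options. A sketch under stated assumptions: the package, function, client, and names below are illustrative; the nil-patch check relies on UpgradeManagedFieldsPatch returning nil bytes when there is nothing to migrate; and the patch is sent with types.JSONPatchType, matching the JSON-patch format the helper produces.

package csamigration

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/csaupgrade"
)

// migratePVCStatusOwnership (hypothetical helper): compute the
// managed-fields rewrite for kube-controller-manager's CSA entry on the
// status subresource and apply it to the live object as a JSON patch.
func migratePVCStatusOwnership(ctx context.Context, client kubernetes.Interface, pvc *corev1.PersistentVolumeClaim) error {
	patch, err := csaupgrade.UpgradeManagedFieldsPatch(
		pvc,
		sets.New("kube-controller-manager"),
		"kube-controller-manager",
		csaupgrade.Subresource("status"),
	)
	if err != nil {
		return err
	}
	if patch == nil {
		return nil // nothing to migrate
	}
	// managedFields lives in object metadata, so the patch targets the
	// main resource endpoint rather than the status subresource.
	_, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Patch(
		ctx, pvc.Name, types.JSONPatchType, patch, metav1.PatchOptions{})
	return err
}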
161 changes: 160 additions & 1 deletion staging/src/k8s.io/client-go/util/csaupgrade/upgrade_test.go
@@ -294,6 +294,7 @@ func TestUpgradeCSA(t *testing.T) {
 		Name           string
 		CSAManagers    []string
 		SSAManager     string
+		Options        []csaupgrade.Option
 		OriginalObject []byte
 		ExpectedObject []byte
 	}{
@@ -1079,6 +1080,163 @@ metadata:
     time: "2022-11-03T23:22:40Z"
   name: test
   namespace: default
 `),
 		},
+		{
+			// Expect the CSA manager on the status subresource to be upgraded to SSA
+			Name:        "subresource",
+			CSAManagers: []string{"kube-controller-manager"},
+			SSAManager:  "kube-controller-manager",
+			Options:     []csaupgrade.Option{csaupgrade.Subresource("status")},
+			OriginalObject: []byte(`
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  annotations:
+    pv.kubernetes.io/bind-completed: "yes"
+    pv.kubernetes.io/bound-by-controller: "yes"
+    volume.beta.kubernetes.io/storage-provisioner: openshift-storage.cephfs.csi.ceph.com
+    volume.kubernetes.io/storage-provisioner: openshift-storage.cephfs.csi.ceph.com
+  creationTimestamp: "2024-02-24T15:24:31Z"
+  finalizers:
+  - kubernetes.io/pvc-protection
+  managedFields:
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:spec:
+        f:accessModes: {}
+        f:resources:
+          f:requests:
+            .: {}
+            f:storage: {}
+        f:storageClassName: {}
+        f:volumeMode: {}
+    manager: Mozilla
+    operation: Update
+    time: "2024-02-24T15:24:31Z"
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:metadata:
+        f:annotations:
+          .: {}
+          f:pv.kubernetes.io/bind-completed: {}
+          f:pv.kubernetes.io/bound-by-controller: {}
+          f:volume.beta.kubernetes.io/storage-provisioner: {}
+          f:volume.kubernetes.io/storage-provisioner: {}
+      f:spec:
+        f:volumeName: {}
+    manager: kube-controller-manager
+    operation: Update
+    time: "2024-02-24T15:24:32Z"
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:status:
+        f:accessModes: {}
+        f:capacity:
+          .: {}
+          f:storage: {}
+        f:phase: {}
+    manager: kube-controller-manager
+    operation: Update
+    subresource: status
+    time: "2024-02-24T15:24:32Z"
+  name: test
+  namespace: default
+  resourceVersion: "948647140"
+  uid: f0692a61-0ffe-4fd5-b00f-0b95f3654fb9
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: ocs-storagecluster-cephfs
+  volumeMode: Filesystem
+  volumeName: pvc-f0692a61-0ffe-4fd5-b00f-0b95f3654fb9
+status:
+  accessModes:
+  - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  phase: Bound
+`),
+			ExpectedObject: []byte(`
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  annotations:
+    pv.kubernetes.io/bind-completed: "yes"
+    pv.kubernetes.io/bound-by-controller: "yes"
+    volume.beta.kubernetes.io/storage-provisioner: openshift-storage.cephfs.csi.ceph.com
+    volume.kubernetes.io/storage-provisioner: openshift-storage.cephfs.csi.ceph.com
+  creationTimestamp: "2024-02-24T15:24:31Z"
+  finalizers:
+  - kubernetes.io/pvc-protection
+  managedFields:
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:spec:
+        f:accessModes: {}
+        f:resources:
+          f:requests:
+            .: {}
+            f:storage: {}
+        f:storageClassName: {}
+        f:volumeMode: {}
+    manager: Mozilla
+    operation: Update
+    time: "2024-02-24T15:24:31Z"
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:metadata:
+        f:annotations:
+          .: {}
+          f:pv.kubernetes.io/bind-completed: {}
+          f:pv.kubernetes.io/bound-by-controller: {}
+          f:volume.beta.kubernetes.io/storage-provisioner: {}
+          f:volume.kubernetes.io/storage-provisioner: {}
+      f:spec:
+        f:volumeName: {}
+    manager: kube-controller-manager
+    operation: Update
+    time: "2024-02-24T15:24:32Z"
+  - apiVersion: v1
+    fieldsType: FieldsV1
+    fieldsV1:
+      f:status:
+        f:accessModes: {}
+        f:capacity:
+          .: {}
+          f:storage: {}
+        f:phase: {}
+    manager: kube-controller-manager
+    operation: Apply
+    subresource: status
+    time: "2024-02-24T15:24:32Z"
+  name: test
+  namespace: default
+  resourceVersion: "948647140"
+  uid: f0692a61-0ffe-4fd5-b00f-0b95f3654fb9
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: ocs-storagecluster-cephfs
+  volumeMode: Filesystem
+  volumeName: pvc-f0692a61-0ffe-4fd5-b00f-0b95f3654fb9
+status:
+  accessModes:
+  - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  phase: Bound
+`),
+		},
 	}
@@ -1096,6 +1254,7 @@ metadata:
 				upgraded,
 				sets.New(testCase.CSAManagers...),
 				testCase.SSAManager,
+				testCase.Options...,
 			)

 			if err != nil {
@@ -1118,7 +1277,7 @@ metadata:

 			initialCopy := initialObject.DeepCopyObject()
 			patchBytes, err := csaupgrade.UpgradeManagedFieldsPatch(
-				initialCopy, sets.New(testCase.CSAManagers...), testCase.SSAManager)
+				initialCopy, sets.New(testCase.CSAManagers...), testCase.SSAManager, testCase.Options...)

 			if err != nil {
 				t.Fatal(err)