From ddb6a35a394d4b9c33ddb489f23cc4d97918d002 Mon Sep 17 00:00:00 2001 From: Wei Liu Date: Fri, 19 Jan 2024 17:43:48 +0800 Subject: [PATCH] support cloudevents for manifestworkreplicaset Signed-off-by: Wei Liu --- go.mod | 4 +- go.sum | 4 +- ...ger-manifestworkreplicaset-deployment.yaml | 1 + pkg/cmd/hub/work.go | 11 +- pkg/common/options/worksource.go | 18 + ...tworkreplicaset_add_finalizer_reconcile.go | 4 +- ...nifestworkreplicaset_add_finalizer_test.go | 6 +- .../manifestworkreplicaset_controller.go | 33 +- ...manifestworkreplicaset_controllers_test.go | 2 + ...manifestworkreplicaset_deploy_reconcile.go | 12 +- .../manifestworkreplicaset_deploy_test.go | 2 +- ...nifestworkreplicaset_finalize_reconcile.go | 4 +- .../manifestworkreplicaset_finalizer_test.go | 2 +- .../manifestworkreplicaset_index_test.go | 6 +- pkg/work/hub/manager.go | 151 +++- pkg/work/hub/options.go | 24 + pkg/work/spoke/options.go | 16 +- pkg/work/spoke/spokeagent.go | 56 +- .../cloudevents/deleteoption_test.go | 334 ++++++++- .../manifestworkreplicaset_test.go | 206 ++++++ test/integration/cloudevents/source/codec.go | 6 +- .../cloudevents/source/manifestwork.go | 17 +- test/integration/cloudevents/source/source.go | 1 + .../cloudevents/statusfeedback_test.go | 155 ++++- test/integration/cloudevents/suite_test.go | 37 +- .../cloudevents/updatestrategy_test.go | 3 +- test/integration/cloudevents/work_test.go | 214 ++++-- test/integration/work/suite_test.go | 8 +- .../golang/protobuf/ptypes/empty/empty.pb.go | 62 ++ vendor/modules.txt | 10 +- .../pkg/cloudevents/generic/agentclient.go | 7 + .../pkg/cloudevents/generic/baseclient.go | 10 +- .../pkg/cloudevents/generic/interface.go | 4 + .../generic/options/grpc/agentoptions.go | 79 +++ .../generic/options/grpc/options.go | 187 +++++ .../options/grpc/protobuf/v1/README.md | 30 + .../options/grpc/protobuf/v1/cloudevent.pb.go | 656 ++++++++++++++++++ .../options/grpc/protobuf/v1/cloudevent.proto | 84 +++ .../grpc/protobuf/v1/cloudevent_grpc.pb.go 
| 175 +++++ .../generic/options/grpc/protobuf/v1/gen.go | 3 + .../generic/options/grpc/protocol/message.go | 205 ++++++ .../generic/options/grpc/protocol/option.go | 40 ++ .../generic/options/grpc/protocol/protocol.go | 165 +++++ .../options/grpc/protocol/write_message.go | 215 ++++++ .../generic/options/grpc/sourceoptions.go | 77 ++ .../generic/options/mqtt/agentoptions.go | 2 +- .../generic/options/mqtt/sourceoptions.go | 4 +- .../pkg/cloudevents/generic/sourceclient.go | 5 + .../work/agent/client/manifestwork.go | 31 +- .../cloudevents/work/agent/codec/manifest.go | 17 +- .../work/agent/codec/manifestbundle.go | 9 +- .../pkg/cloudevents/work/clientbuilder.go | 103 ++- .../pkg/cloudevents/work/common/common.go | 29 + .../cloudevents/work/internal/clientset.go | 6 +- .../sdk-go/pkg/cloudevents/work/lister.go | 14 +- .../work/source/client/manifestwork.go | 225 ++++++ .../work/source/codec/manifestbundle.go | 101 +++ .../work/source/handler/resourcehandler.go | 167 +++++ .../pkg/cloudevents/work/utils/utils.go | 8 + 59 files changed, 3842 insertions(+), 225 deletions(-) create mode 100644 pkg/common/options/worksource.go create mode 100644 pkg/work/hub/options.go create mode 100644 test/integration/cloudevents/manifestworkreplicaset_test.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/README.md create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto create mode 100644 
vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent_grpc.pb.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/gen.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/message.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/write_message.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/sourceoptions.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec/manifestbundle.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler/resourcehandler.go diff --git a/go.mod b/go.mod index 9d0043327..8f44d1f18 100644 --- a/go.mod +++ b/go.mod @@ -24,6 +24,7 @@ require ( go.uber.org/zap v1.26.0 golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 golang.org/x/net v0.19.0 + gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.29.1 k8s.io/apiextensions-apiserver v0.29.0 k8s.io/apimachinery v0.29.1 @@ -35,7 +36,7 @@ require ( k8s.io/utils v0.0.0-20240102154912-e7106e64919e open-cluster-management.io/addon-framework v0.8.1-0.20240205013730-13fbb6259464 open-cluster-management.io/api v0.13.0 - open-cluster-management.io/sdk-go v0.13.0 + open-cluster-management.io/sdk-go v0.13.1-0.20240227052220-ae7814c4d512 sigs.k8s.io/controller-runtime v0.16.2 sigs.k8s.io/kube-storage-version-migrator 
v0.0.6-0.20230721195810-5c8923c5ff96 ) @@ -142,7 +143,6 @@ require ( google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect helm.sh/helm/v3 v3.11.1 // indirect k8s.io/kms v0.29.0 // indirect diff --git a/go.sum b/go.sum index 14c01ccfd..89dcf5208 100644 --- a/go.sum +++ b/go.sum @@ -452,8 +452,8 @@ open-cluster-management.io/addon-framework v0.8.1-0.20240205013730-13fbb6259464 open-cluster-management.io/addon-framework v0.8.1-0.20240205013730-13fbb6259464/go.mod h1:SBs6wF0Umzr5/miJb9p8uMaTDbcjphHHQLa76nXnbU8= open-cluster-management.io/api v0.13.0 h1:dlcJEZlNlE0DmSDctK2s7iWKg9l+Tgb0V78Z040nMuk= open-cluster-management.io/api v0.13.0/go.mod h1:CuCPEzXDvOyxBB0H1d1eSeajbHqaeGEKq9c63vQc63w= -open-cluster-management.io/sdk-go v0.13.0 h1:ddMGsPUekQr9z03tVN6vF39Uf+WEKMtGU/xSd81HdoA= -open-cluster-management.io/sdk-go v0.13.0/go.mod h1:UnsjzYOrDTF9a8rHEXksoIAtAdO1o5CD5Jtaw6T5B9w= +open-cluster-management.io/sdk-go v0.13.1-0.20240227052220-ae7814c4d512 h1:Fb9laGmR+R2DGYV8k2FMx+IhQQx28904aUeYPN8ssMM= +open-cluster-management.io/sdk-go v0.13.1-0.20240227052220-ae7814c4d512/go.mod h1:UnsjzYOrDTF9a8rHEXksoIAtAdO1o5CD5Jtaw6T5B9w= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/controller-runtime v0.16.2 h1:mwXAVuEk3EQf478PQwQ48zGOXvW27UJc8NHktQVuIPU= diff --git a/manifests/cluster-manager/management/cluster-manager-manifestworkreplicaset-deployment.yaml b/manifests/cluster-manager/management/cluster-manager-manifestworkreplicaset-deployment.yaml index b27512be2..54b8d0a87 100644 --- a/manifests/cluster-manager/management/cluster-manager-manifestworkreplicaset-deployment.yaml +++ 
b/manifests/cluster-manager/management/cluster-manager-manifestworkreplicaset-deployment.yaml @@ -46,6 +46,7 @@ spec: args: - "/work" - "manager" + - "--work-driver=kube" {{ if .HostedMode }} - "--kubeconfig=/var/run/secrets/hub/kubeconfig" {{ end }} diff --git a/pkg/cmd/hub/work.go b/pkg/cmd/hub/work.go index b25f0e76d..4d29ccf4c 100644 --- a/pkg/cmd/hub/work.go +++ b/pkg/cmd/hub/work.go @@ -12,12 +12,17 @@ import ( // NewHubManager generates a command to start hub manager func NewWorkController() *cobra.Command { - opts := commonoptions.NewOptions() - cmdConfig := opts. - NewControllerCommandConfig("work-manager", version.Get(), hub.RunWorkHubManager) + commonOpts := commonoptions.NewOptions() + hubOpts := hub.NewWorkHubManagerOptions() + hubCfg := hub.NewWorkHubManagerConfig(hubOpts) + cmdConfig := commonOpts.NewControllerCommandConfig("work-manager", version.Get(), hubCfg.RunWorkHubManager) cmd := cmdConfig.NewCommandWithContext(context.TODO()) cmd.Use = "manager" cmd.Short = "Start the Work Hub Manager" + flags := cmd.Flags() + commonOpts.AddFlags(flags) + hubOpts.AddFlags(flags) + return cmd } diff --git a/pkg/common/options/worksource.go b/pkg/common/options/worksource.go new file mode 100644 index 000000000..ec58f871e --- /dev/null +++ b/pkg/common/options/worksource.go @@ -0,0 +1,18 @@ +package options + +const ( + KubeDriver = "kube" + MQTTDriver = "mqtt" + GRPCDriver = "grpc" +) + +const ( + ManifestBundleCodec = "manifestbundle" + ManifestCodec = "manifest" +) + +type WorkloadSourceDriver struct { + Type string + Codec string + Config string +} diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_add_finalizer_reconcile.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_add_finalizer_reconcile.go index 91f188644..49e4ecb4b 100644 --- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_add_finalizer_reconcile.go +++ 
b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_add_finalizer_reconcile.go @@ -10,7 +10,7 @@ import ( // addFinalizerReconciler is to add finalizer to the manifestworkreplicaset. type addFinalizerReconciler struct { - workClient workclientset.Interface + replicaSetClient workclientset.Interface } func (a *addFinalizerReconciler) reconcile(ctx context.Context, pw *workapiv1alpha1.ManifestWorkReplicaSet, @@ -22,7 +22,7 @@ func (a *addFinalizerReconciler) reconcile(ctx context.Context, pw *workapiv1alp workSetPatcher := patcher.NewPatcher[ *workapiv1alpha1.ManifestWorkReplicaSet, workapiv1alpha1.ManifestWorkReplicaSetSpec, workapiv1alpha1.ManifestWorkReplicaSetStatus]( - a.workClient.WorkV1alpha1().ManifestWorkReplicaSets(pw.Namespace)) + a.replicaSetClient.WorkV1alpha1().ManifestWorkReplicaSets(pw.Namespace)) updated, err := workSetPatcher.AddFinalizer(ctx, pw, ManifestWorkReplicaSetFinalizer) // if this conflicts, we'll simply try again later diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_add_finalizer_test.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_add_finalizer_test.go index d95a029af..7ad7832b0 100644 --- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_add_finalizer_test.go +++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_add_finalizer_test.go @@ -18,7 +18,7 @@ func TestAddFinalizerReconcile(t *testing.T) { fakeClient := fakeclient.NewSimpleClientset(mwrSetTest) addFinalizerController := &addFinalizerReconciler{ - workClient: fakeClient, + replicaSetClient: fakeClient, } _, _, err := addFinalizerController.reconcile(context.TODO(), mwrSetTest) @@ -58,10 +58,10 @@ func TestAddFinalizerTwiceReconcile(t *testing.T) { fakeClient := fakeclient.NewSimpleClientset(mwrSetTest) addFinalizerController := &addFinalizerReconciler{ - workClient: fakeClient, + replicaSetClient: 
fakeClient, } - mwrSetTest, _, err := addFinalizerController.reconcile(context.TODO(), mwrSetTest) + _, _, err := addFinalizerController.reconcile(context.TODO(), mwrSetTest) if err != nil { t.Fatal(err) } diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go index 8f553157b..d204f798b 100644 --- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go +++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go @@ -53,7 +53,7 @@ const ( ) type ManifestWorkReplicaSetController struct { - workClient workclientset.Interface + replicaSetClient workclientset.Interface manifestWorkReplicaSetLister worklisterv1alpha1.ManifestWorkReplicaSetLister manifestWorkReplicaSetIndexer cache.Indexer @@ -75,14 +75,15 @@ const ( func NewManifestWorkReplicaSetController( recorder events.Recorder, - workClient workclientset.Interface, + replicaSetClient workclientset.Interface, + workApplier *workapplier.WorkApplier, manifestWorkReplicaSetInformer workinformerv1alpha1.ManifestWorkReplicaSetInformer, manifestWorkInformer workinformerv1.ManifestWorkInformer, placementInformer clusterinformerv1beta1.PlacementInformer, placeDecisionInformer clusterinformerv1beta1.PlacementDecisionInformer) factory.Controller { controller := newController( - workClient, manifestWorkReplicaSetInformer, manifestWorkInformer, placementInformer, placeDecisionInformer) + replicaSetClient, workApplier, manifestWorkReplicaSetInformer, manifestWorkInformer, placementInformer, placeDecisionInformer) err := manifestWorkReplicaSetInformer.Informer().AddIndexers( cache.Indexers{ @@ -113,22 +114,32 @@ func NewManifestWorkReplicaSetController( WithSync(controller.sync).ToController("ManifestWorkReplicaSetController", recorder) } -func newController(workClient workclientset.Interface, +func 
newController(replicaSetClient workclientset.Interface, + workApplier *workapplier.WorkApplier, manifestWorkReplicaSetInformer workinformerv1alpha1.ManifestWorkReplicaSetInformer, manifestWorkInformer workinformerv1.ManifestWorkInformer, placementInformer clusterinformerv1beta1.PlacementInformer, placeDecisionInformer clusterinformerv1beta1.PlacementDecisionInformer) *ManifestWorkReplicaSetController { return &ManifestWorkReplicaSetController{ - workClient: workClient, + replicaSetClient: replicaSetClient, manifestWorkReplicaSetLister: manifestWorkReplicaSetInformer.Lister(), manifestWorkReplicaSetIndexer: manifestWorkReplicaSetInformer.Informer().GetIndexer(), reconcilers: []ManifestWorkReplicaSetReconcile{ - &finalizeReconciler{workApplier: workapplier.NewWorkApplierWithTypedClient(workClient, manifestWorkInformer.Lister()), - workClient: workClient, manifestWorkLister: manifestWorkInformer.Lister()}, - &addFinalizerReconciler{workClient: workClient}, - &deployReconciler{workApplier: workapplier.NewWorkApplierWithTypedClient(workClient, manifestWorkInformer.Lister()), - manifestWorkLister: manifestWorkInformer.Lister(), placementLister: placementInformer.Lister(), placeDecisionLister: placeDecisionInformer.Lister()}, + &finalizeReconciler{ + workApplier: workApplier, + replicaSetClient: replicaSetClient, + manifestWorkLister: manifestWorkInformer.Lister(), + }, + &addFinalizerReconciler{ + replicaSetClient: replicaSetClient, + }, + &deployReconciler{ + workApplier: workApplier, + manifestWorkLister: manifestWorkInformer.Lister(), + placementLister: placementInformer.Lister(), + placeDecisionLister: placeDecisionInformer.Lister(), + }, &statusReconciler{manifestWorkLister: manifestWorkInformer.Lister()}, }, } @@ -176,7 +187,7 @@ func (m *ManifestWorkReplicaSetController) sync(ctx context.Context, controllerC workSetPatcher := patcher.NewPatcher[ *workapiv1alpha1.ManifestWorkReplicaSet, workapiv1alpha1.ManifestWorkReplicaSetSpec, 
workapiv1alpha1.ManifestWorkReplicaSetStatus]( - m.workClient.WorkV1alpha1().ManifestWorkReplicaSets(namespace)) + m.replicaSetClient.WorkV1alpha1().ManifestWorkReplicaSets(namespace)) // Patch status if _, err := workSetPatcher.PatchStatus(ctx, manifestWorkReplicaSet, manifestWorkReplicaSet.Status, oldManifestWorkReplicaSet.Status); err != nil { diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controllers_test.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controllers_test.go index a5eec6c1e..98d3f2de5 100644 --- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controllers_test.go +++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controllers_test.go @@ -18,6 +18,7 @@ import ( workinformers "open-cluster-management.io/api/client/work/informers/externalversions" clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1" + workapplier "open-cluster-management.io/sdk-go/pkg/apis/work/v1/applier" testingcommon "open-cluster-management.io/ocm/pkg/common/testing" helpertest "open-cluster-management.io/ocm/pkg/work/hub/test" @@ -237,6 +238,7 @@ func TestManifestWorkReplicaSetControllerPatchStatus(t *testing.T) { ctrl := newController( fakeClient, + workapplier.NewWorkApplierWithTypedClient(fakeClient, workInformers.Work().V1().ManifestWorks().Lister()), workInformers.Work().V1alpha1().ManifestWorkReplicaSets(), workInformers.Work().V1().ManifestWorks(), clusterInformers.Cluster().V1beta1().Placements(), diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go index d48fb4de7..af0e14e13 100644 --- 
a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go +++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go @@ -16,6 +16,8 @@ import ( workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1" clustersdkv1alpha1 "open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1" workapplier "open-cluster-management.io/sdk-go/pkg/apis/work/v1/applier" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" "open-cluster-management.io/ocm/pkg/common/helpers" "open-cluster-management.io/ocm/pkg/work/helper" @@ -271,8 +273,14 @@ func CreateManifestWork(mwrSet *workapiv1alpha1.ManifestWorkReplicaSet, clusterN ObjectMeta: metav1.ObjectMeta{ Name: mwrSet.Name, Namespace: clusterNS, - Labels: map[string]string{ManifestWorkReplicaSetControllerNameLabelKey: manifestWorkReplicaSetKey(mwrSet), - ManifestWorkReplicaSetPlacementNameLabelKey: placementRefName}, + Labels: map[string]string{ + ManifestWorkReplicaSetControllerNameLabelKey: manifestWorkReplicaSetKey(mwrSet), + ManifestWorkReplicaSetPlacementNameLabelKey: placementRefName, + }, + Annotations: map[string]string{ + common.CloudEventsDataTypeAnnotationKey: payload.ManifestBundleEventDataType.String(), + common.CloudEventsGenerationAnnotationKey: fmt.Sprintf("%d", mwrSet.Generation), + }, }, Spec: mwrSet.Spec.ManifestWorkTemplate}, nil } diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go index 679b8a425..b43f338f7 100644 --- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go +++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go @@ -623,7 +623,7 @@ func TestRequeueWithProgressDeadline(t *testing.T) { 
placementLister: placementLister, } - mwrSet, _, err := pmwDeployController.reconcile(context.TODO(), mwrSet) + _, _, err := pmwDeployController.reconcile(context.TODO(), mwrSet) var rqe helpers.RequeueError if !errors.As(err, &rqe) { t.Errorf("expect to get err %t", err) diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go index f28173d46..f1ece483c 100644 --- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go +++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go @@ -16,7 +16,7 @@ import ( // finalizeReconciler is to finalize the manifestWorkReplicaSet by deleting all related manifestWorks. type finalizeReconciler struct { workApplier *workapplier.WorkApplier - workClient workclientset.Interface + replicaSetClient workclientset.Interface manifestWorkLister worklisterv1.ManifestWorkLister } @@ -32,7 +32,7 @@ func (f *finalizeReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alp workSetPatcher := patcher.NewPatcher[ *workapiv1alpha1.ManifestWorkReplicaSet, workapiv1alpha1.ManifestWorkReplicaSetSpec, workapiv1alpha1.ManifestWorkReplicaSetStatus]( - f.workClient.WorkV1alpha1().ManifestWorkReplicaSets(mwrSet.Namespace)) + f.replicaSetClient.WorkV1alpha1().ManifestWorkReplicaSets(mwrSet.Namespace)) // Remove finalizer after delete all created Manifestworks if err := workSetPatcher.RemoveFinalizer(ctx, mwrSet, ManifestWorkReplicaSetFinalizer); err != nil { diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalizer_test.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalizer_test.go index ee5e62f11..fee3a6650 100644 --- 
a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalizer_test.go +++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalizer_test.go @@ -24,7 +24,7 @@ func TestFinalizeReconcile(t *testing.T) { mwLister := manifestWorkInformerFactory.Work().V1().ManifestWorks().Lister() finalizerController := finalizeReconciler{ - workClient: fakeClient, + replicaSetClient: fakeClient, manifestWorkLister: mwLister, workApplier: workapplier.NewWorkApplierWithTypedClient(fakeClient, mwLister), } diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index_test.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index_test.go index 91a679fc0..5635ea64b 100644 --- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index_test.go +++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index_test.go @@ -53,14 +53,14 @@ func TestPlaceMWControllerIndex(t *testing.T) { placementDecisionLister := clusterInformerFactory.Cluster().V1beta1().PlacementDecisions().Lister() pmwController := &ManifestWorkReplicaSetController{ - workClient: fWorkClient, + replicaSetClient: fWorkClient, manifestWorkReplicaSetLister: workInformerFactory.Work().V1alpha1().ManifestWorkReplicaSets().Lister(), manifestWorkReplicaSetIndexer: workInformerFactory.Work().V1alpha1().ManifestWorkReplicaSets().Informer().GetIndexer(), reconcilers: []ManifestWorkReplicaSetReconcile{ &finalizeReconciler{workApplier: workapplier.NewWorkApplierWithTypedClient(fWorkClient, mwLister), - workClient: fWorkClient, manifestWorkLister: mwLister}, - &addFinalizerReconciler{workClient: fWorkClient}, + replicaSetClient: fWorkClient, manifestWorkLister: mwLister}, + &addFinalizerReconciler{replicaSetClient: fWorkClient}, &deployReconciler{workApplier: workapplier.NewWorkApplierWithTypedClient(fWorkClient, mwLister), manifestWorkLister: 
mwLister, placementLister: placementLister, placeDecisionLister: placementDecisionLister}, &statusReconciler{manifestWorkLister: mwLister}, diff --git a/pkg/work/hub/manager.go b/pkg/work/hub/manager.go index 9e83d2272..83e4a5ba1 100644 --- a/pkg/work/hub/manager.go +++ b/pkg/work/hub/manager.go @@ -2,26 +2,47 @@ package hub import ( "context" + "fmt" "time" "github.com/openshift/library-go/pkg/controller/controllercmd" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" clusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned" clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions" workclientset "open-cluster-management.io/api/client/work/clientset/versioned" workinformers "open-cluster-management.io/api/client/work/informers/externalversions" + workapplier "open-cluster-management.io/sdk-go/pkg/apis/work/v1/applier" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" + cloudeventswork "open-cluster-management.io/sdk-go/pkg/cloudevents/work" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec" + "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/pkg/work/hub/controllers/manifestworkreplicasetcontroller" ) -// RunWorkHubManager starts the controllers on hub. 
-func RunWorkHubManager(ctx context.Context, controllerContext *controllercmd.ControllerContext) error { - hubWorkClient, err := workclientset.NewForConfig(controllerContext.KubeConfig) - if err != nil { - return err +const ( + sourceID = "mwrsctrl" + sourceClientID = "mwrsctrl-client" +) + +// WorkHubManagerConfig holds configuration for work hub manager controller +type WorkHubManagerConfig struct { + workOptions *WorkHubManagerOptions +} + +// NewWorkHubManagerConfig returns a WorkHubManagerConfig +func NewWorkHubManagerConfig(opts *WorkHubManagerOptions) *WorkHubManagerConfig { + return &WorkHubManagerConfig{ + workOptions: opts, } +} +// RunWorkHubManager starts the controllers on hub. +func (c *WorkHubManagerConfig) RunWorkHubManager(ctx context.Context, controllerContext *controllercmd.ControllerContext) error { hubClusterClient, err := clusterclientset.NewForConfig(controllerContext.KubeConfig) if err != nil { return err @@ -29,46 +50,118 @@ func RunWorkHubManager(ctx context.Context, controllerContext *controllercmd.Con clusterInformerFactory := clusterinformers.NewSharedInformerFactory(hubClusterClient, 30*time.Minute) - // we need a separated filtered manifestwork informers so we only watch the manifestworks that manifestworkreplicaset cares. 
- // This could reduce a lot of memory consumptions - manifestWorkInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(hubWorkClient, 30*time.Minute, workinformers.WithTweakListOptions( - func(listOptions *metav1.ListOptions) { - selector := &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: manifestworkreplicasetcontroller.ManifestWorkReplicaSetControllerNameLabelKey, - Operator: metav1.LabelSelectorOpExists, - }, - }, - } - listOptions.LabelSelector = metav1.FormatLabelSelector(selector) - }, - )) + // build a hub work client for ManifestWorkReplicaSets + replicaSetsClient, err := workclientset.NewForConfig(controllerContext.KubeConfig) + if err != nil { + return err + } + + // build hub client and informer + clientHolder, err := c.buildHubClientHolder(ctx, controllerContext) + if err != nil { + return err + } - return RunControllerManagerWithInformers(ctx, controllerContext, hubWorkClient, manifestWorkInformerFactory, clusterInformerFactory) + return RunControllerManagerWithInformers(ctx, controllerContext, replicaSetsClient, clientHolder, clusterInformerFactory) } func RunControllerManagerWithInformers( ctx context.Context, controllerContext *controllercmd.ControllerContext, - hubWorkClient workclientset.Interface, - manifestWorkInformers workinformers.SharedInformerFactory, + replicaSetClient workclientset.Interface, + hubWorkClientHolder *cloudeventswork.ClientHolder, clusterInformers clusterinformers.SharedInformerFactory, ) error { - workInformerFactory := workinformers.NewSharedInformerFactory(hubWorkClient, 30*time.Minute) + replicaSetInformerFactory := workinformers.NewSharedInformerFactory(replicaSetClient, 30*time.Minute) + hubWorkInformer := hubWorkClientHolder.ManifestWorkInformer() + manifestWorkReplicaSetController := manifestworkreplicasetcontroller.NewManifestWorkReplicaSetController( controllerContext.EventRecorder, - hubWorkClient, - 
workInformerFactory.Work().V1alpha1().ManifestWorkReplicaSets(), - manifestWorkInformers.Work().V1().ManifestWorks(), + replicaSetClient, + workapplier.NewWorkApplierWithTypedClient(hubWorkClientHolder.WorkInterface(), hubWorkInformer.Lister()), + replicaSetInformerFactory.Work().V1alpha1().ManifestWorkReplicaSets(), + hubWorkInformer, clusterInformers.Cluster().V1beta1().Placements(), clusterInformers.Cluster().V1beta1().PlacementDecisions(), ) + go clusterInformers.Start(ctx.Done()) - go workInformerFactory.Start(ctx.Done()) - go manifestWorkInformers.Start(ctx.Done()) + go replicaSetInformerFactory.Start(ctx.Done()) go manifestWorkReplicaSetController.Run(ctx, 5) + go hubWorkInformer.Informer().Run(ctx.Done()) + <-ctx.Done() return nil } + +func (c *WorkHubManagerConfig) buildHubClientHolder( + ctx context.Context, controllerContext *controllercmd.ControllerContext) (*cloudeventswork.ClientHolder, error) { + switch c.workOptions.WorkloadSourceDriver.Type { + case options.KubeDriver: + var err error + var hubRestConfig *rest.Config + if c.workOptions.WorkloadSourceDriver.Config == "" { + hubRestConfig = controllerContext.KubeConfig + } else { + hubRestConfig, err = clientcmd.BuildConfigFromFlags("", c.workOptions.WorkloadSourceDriver.Config) + if err != nil { + return nil, err + } + } + + // we need a separated filtered manifestwork informers so we only watch the manifestworks that manifestworkreplicaset cares. + // This could reduce a lot of memory consumptions + clientHolder, err := cloudeventswork.NewClientHolderBuilder(sourceClientID, hubRestConfig). 
+ WithInformerConfig(30*time.Minute, workinformers.WithTweakListOptions( + func(listOptions *metav1.ListOptions) { + selector := &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: manifestworkreplicasetcontroller.ManifestWorkReplicaSetControllerNameLabelKey, + Operator: metav1.LabelSelectorOpExists, + }, + }, + } + listOptions.LabelSelector = metav1.FormatLabelSelector(selector) + }, + )). + NewClientHolder(ctx) + if err != nil { + return nil, err + } + + return clientHolder, nil + case options.MQTTDriver: + mqttOptions, err := mqtt.BuildMQTTOptionsFromFlags(c.workOptions.WorkloadSourceDriver.Config) + if err != nil { + return nil, err + } + + clientHolder, err := cloudeventswork.NewClientHolderBuilder(sourceClientID, mqttOptions). + WithSourceID(sourceID). + WithCodecs(codec.NewManifestBundleCodec()). + NewClientHolder(ctx) + if err != nil { + return nil, err + } + + return clientHolder, nil + case options.GRPCDriver: + grpcOptions, err := grpc.BuildGRPCOptionsFromFlags(c.workOptions.WorkloadSourceDriver.Config) + if err != nil { + return nil, err + } + + clientHolder, err := cloudeventswork.NewClientHolderBuilder(sourceClientID, grpcOptions). + WithSourceID(sourceID). + WithCodecs(codec.NewManifestBundleCodec()). 
+ NewClientHolder(ctx) + if err != nil { + return nil, err + } + return clientHolder, nil + } + + return nil, fmt.Errorf("unsupported driver %s", c.workOptions.WorkloadSourceDriver.Type) +} diff --git a/pkg/work/hub/options.go b/pkg/work/hub/options.go new file mode 100644 index 000000000..1afe36a11 --- /dev/null +++ b/pkg/work/hub/options.go @@ -0,0 +1,24 @@ +package hub + +import ( + "github.com/spf13/pflag" + + "open-cluster-management.io/ocm/pkg/common/options" +) + +// WorkHubManagerOptions defines the flags for work hub manager +type WorkHubManagerOptions struct { + WorkloadSourceDriver options.WorkloadSourceDriver +} + +func NewWorkHubManagerOptions() *WorkHubManagerOptions { + return &WorkHubManagerOptions{} +} + +// AddFlags register and binds the default flags +func (o *WorkHubManagerOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.WorkloadSourceDriver.Type, "work-driver", + o.WorkloadSourceDriver.Type, "The type of work driver, currently it can be kube, mqtt or grpc") + fs.StringVar(&o.WorkloadSourceDriver.Config, "work-driver-config", + o.WorkloadSourceDriver.Config, "The config file path of current work driver") +} diff --git a/pkg/work/spoke/options.go b/pkg/work/spoke/options.go index 3f96257bd..9c7f7d922 100644 --- a/pkg/work/spoke/options.go +++ b/pkg/work/spoke/options.go @@ -4,23 +4,15 @@ import ( "time" "github.com/spf13/pflag" -) -const ( - KubeDriver = "kube" - MQTTDriver = "mqtt" + "open-cluster-management.io/ocm/pkg/common/options" ) -type WorkloadSourceDriver struct { - Type string - Config string -} - // WorkloadAgentOptions defines the flags for workload agent type WorkloadAgentOptions struct { StatusSyncInterval time.Duration AppliedManifestWorkEvictionGracePeriod time.Duration - WorkloadSourceDriver WorkloadSourceDriver + WorkloadSourceDriver options.WorkloadSourceDriver } // NewWorkloadAgentOptions returns the flags with default value set @@ -38,7 +30,9 @@ func (o *WorkloadAgentOptions) AddFlags(fs *pflag.FlagSet) { 
fs.DurationVar(&o.AppliedManifestWorkEvictionGracePeriod, "appliedmanifestwork-eviction-grace-period", o.AppliedManifestWorkEvictionGracePeriod, "Grace period for appliedmanifestwork eviction") fs.StringVar(&o.WorkloadSourceDriver.Type, "workload-source-driver", - o.WorkloadSourceDriver.Type, "The type of workload source driver, currently it can be kube or mqtt") + o.WorkloadSourceDriver.Type, "The type of workload source driver, currently it can be kube, mqtt or grpc") fs.StringVar(&o.WorkloadSourceDriver.Config, "workload-source-config", o.WorkloadSourceDriver.Config, "The config file path of current workload source") + fs.StringVar(&o.WorkloadSourceDriver.Codec, "workload-codec", + o.WorkloadSourceDriver.Codec, "The codec of workload, it will be used when the workload source is a message broker, currently it can be manifest or manifestbundle") } diff --git a/pkg/work/spoke/spokeagent.go b/pkg/work/spoke/spokeagent.go index 60a56358c..a4f692d1a 100644 --- a/pkg/work/spoke/spokeagent.go +++ b/pkg/work/spoke/spokeagent.go @@ -17,11 +17,14 @@ import ( workclientset "open-cluster-management.io/api/client/work/clientset/versioned" workinformers "open-cluster-management.io/api/client/work/informers/externalversions" ocmfeature "open-cluster-management.io/api/feature" + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" cloudeventswork "open-cluster-management.io/sdk-go/pkg/cloudevents/work" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec" - commonoptions "open-cluster-management.io/ocm/pkg/common/options" + "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/pkg/features" "open-cluster-management.io/ocm/pkg/work/helper" "open-cluster-management.io/ocm/pkg/work/spoke/auth" @@ -44,12 +47,12 @@ const ( ) type WorkAgentConfig struct { - 
agentOptions *commonoptions.AgentOptions + agentOptions *options.AgentOptions workOptions *WorkloadAgentOptions } // NewWorkAgentConfig returns a WorkAgentConfig -func NewWorkAgentConfig(commonOpts *commonoptions.AgentOptions, opts *WorkloadAgentOptions) *WorkAgentConfig { +func NewWorkAgentConfig(commonOpts *options.AgentOptions, opts *WorkloadAgentOptions) *WorkAgentConfig { return &WorkAgentConfig{ agentOptions: commonOpts, workOptions: opts, @@ -198,7 +201,7 @@ func (o *WorkAgentConfig) buildHubClientHolder(ctx context.Context, clusterName string, restMapper meta.RESTMapper) (*cloudeventswork.ClientHolder, string, string, error) { agentID := o.agentOptions.AgentID switch o.workOptions.WorkloadSourceDriver.Type { - case KubeDriver: + case options.KubeDriver: hubRestConfig, err := clientcmd.BuildConfigFromFlags("", o.workOptions.WorkloadSourceDriver.Config) if err != nil { return nil, "", "", err @@ -218,7 +221,7 @@ func (o *WorkAgentConfig) buildHubClientHolder(ctx context.Context, } return clientHolder, hubHash, agentID, nil - case MQTTDriver: + case options.MQTTDriver: mqttOptions, err := mqtt.BuildMQTTOptionsFromFlags(o.workOptions.WorkloadSourceDriver.Config) if err != nil { return nil, "", "", err @@ -229,9 +232,39 @@ func (o *WorkAgentConfig) buildHubClientHolder(ctx context.Context, agentID = fmt.Sprintf("%s-work-agent", o.agentOptions.SpokeClusterName) } + codec, err := o.getCodec(restMapper) + if err != nil { + return nil, "", "", err + } + clientHolder, err := cloudeventswork.NewClientHolderBuilder(agentID, mqttOptions). WithClusterName(o.agentOptions.SpokeClusterName). - WithCodecs(codec.NewManifestCodec(restMapper)). // TODO support manifestbundles + WithCodecs(codec). 
+ NewClientHolder(ctx) + if err != nil { + return nil, "", "", err + } + + return clientHolder, hubHash, agentID, nil + case options.GRPCDriver: + grpcOptions, err := grpc.BuildGRPCOptionsFromFlags(o.workOptions.WorkloadSourceDriver.Config) + if err != nil { + return nil, "", "", err + } + + hubHash := helper.HubHash(grpcOptions.URL) + if len(agentID) == 0 { + agentID = fmt.Sprintf("%s-work-agent", o.agentOptions.SpokeClusterName) + } + + codec, err := o.getCodec(restMapper) + if err != nil { + return nil, "", "", err + } + + clientHolder, err := cloudeventswork.NewClientHolderBuilder(agentID, grpcOptions). + WithClusterName(o.agentOptions.SpokeClusterName). + WithCodecs(codec). NewClientHolder(ctx) if err != nil { return nil, "", "", err @@ -242,3 +275,14 @@ func (o *WorkAgentConfig) buildHubClientHolder(ctx context.Context, return nil, "", "", fmt.Errorf("unsupported driver %s", o.workOptions.WorkloadSourceDriver.Type) } + +func (o *WorkAgentConfig) getCodec(restMapper meta.RESTMapper) (generic.Codec[*workv1.ManifestWork], error) { + switch o.workOptions.WorkloadSourceDriver.Codec { + case options.ManifestBundleCodec: + return codec.NewManifestBundleCodec(), nil + case options.ManifestCodec: + return codec.NewManifestCodec(restMapper), nil + } + + return nil, fmt.Errorf("unsupported codec %s", o.workOptions.WorkloadSourceDriver.Codec) +} diff --git a/test/integration/cloudevents/deleteoption_test.go b/test/integration/cloudevents/deleteoption_test.go index 2794f8575..932527304 100644 --- a/test/integration/cloudevents/deleteoption_test.go +++ b/test/integration/cloudevents/deleteoption_test.go @@ -3,7 +3,6 @@ package cloudevents import ( "context" "fmt" - "time" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -15,62 +14,47 @@ import ( workapiv1 "open-cluster-management.io/api/work/v1" commonoptions "open-cluster-management.io/ocm/pkg/common/options" - "open-cluster-management.io/ocm/pkg/work/spoke" "open-cluster-management.io/ocm/test/integration/util" ) 
var _ = ginkgo.Describe("ManifestWork Delete Option", func() { - var o *spoke.WorkloadAgentOptions - var commOptions *commonoptions.AgentOptions + var err error var cancel context.CancelFunc + var clusterName string var work *workapiv1.ManifestWork var manifests []workapiv1.Manifest - var err error - ginkgo.BeforeEach(func() { - o = spoke.NewWorkloadAgentOptions() - o.StatusSyncInterval = 3 * time.Second - o.WorkloadSourceDriver.Type = workSourceDriver - o.WorkloadSourceDriver.Config = workSourceConfigFileName - - commOptions = commonoptions.NewAgentOptions() - commOptions.SpokeClusterName = utilrand.String(5) + clusterName = utilrand.String(5) ns := &corev1.Namespace{} - ns.Name = commOptions.SpokeClusterName + ns.Name = clusterName _, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) var ctx context.Context ctx, cancel = context.WithCancel(context.Background()) - go startWorkAgent(ctx, o, commOptions) + startWorkAgent(ctx, clusterName, commonoptions.ManifestCodec) // reset manifests manifests = nil }) - ginkgo.JustBeforeEach(func() { - work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - }) - ginkgo.AfterEach(func() { if cancel != nil { cancel() } - err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{}) + err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) - // TODO test multiple manifests after the manifestbundles is enabled - ginkgo.Context("Delete options", func() { ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(clusterName, cm1, 
map[string]string{"a": "b"}, []string{})), } + work = util.NewManifestWork(clusterName, "", manifests) }) ginkgo.It("Orphan deletion of the whole manifestwork", func() { @@ -78,7 +62,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan, } - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, @@ -91,7 +75,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { // Ensure ownership of configmap is updated gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) if err != nil { return err } @@ -104,12 +88,12 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Delete the work - err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // Wait for deletion of manifest work gomega.Eventually(func() bool { - _, err := workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + _, err := 
workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) return errors.IsNotFound(err) }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) @@ -125,14 +109,14 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { { Group: "", Resource: "configmaps", - Namespace: commOptions.SpokeClusterName, + Namespace: clusterName, Name: cm1, }, }, }, } - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, @@ -145,7 +129,7 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { // Ensure ownership of configmap is updated gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) if err != nil { return err } @@ -159,19 +143,19 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { // Remove the delete option gomega.Eventually(func() error { - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) if err != nil { return err } work.Spec.DeleteOption = nil - _, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + _, err = 
workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Ensure ownership of configmap is updated gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) + cm, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) if err != nil { return err } @@ -184,18 +168,294 @@ var _ = ginkgo.Describe("ManifestWork Delete Option", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) // Delete the work - err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // Wait for deletion of manifest work gomega.Eventually(func() bool { - _, err := workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + _, err := workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) return errors.IsNotFound(err) }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) // All of the resource should be deleted. 
- _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) + _, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) }) }) }) + +var _ = ginkgo.Describe("Delete ManifestWork with sharing resources", func() { + var err error + var cancel context.CancelFunc + + var clusterName string + var work *workapiv1.ManifestWork + var anotherWork *workapiv1.ManifestWork + var manifests []workapiv1.Manifest + var appliedManifestWorkName string + var anotherAppliedManifestWorkName string + + ginkgo.BeforeEach(func() { + clusterName = utilrand.String(5) + + ns := &corev1.Namespace{} + ns.Name = clusterName + _, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + var ctx context.Context + ctx, cancel = context.WithCancel(context.Background()) + startWorkAgent(ctx, clusterName, commonoptions.ManifestBundleCodec) + + // reset manifests + manifests = nil + }) + + ginkgo.AfterEach(func() { + if cancel != nil { + cancel() + } + err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.Context("Resource sharing and adoption between manifestworks", func() { + ginkgo.BeforeEach(func() { + manifests = []workapiv1.Manifest{ + util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, []string{})), + } + work = util.NewManifestWork(clusterName, "", manifests) + // Create another manifestworks with one shared resource. 
+ anotherWork = util.NewManifestWork(clusterName, "sharing-resource-work", []workapiv1.Manifest{ + util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(clusterName, "cm3", map[string]string{"e": "f"}, []string{})), + }) + }) + + ginkgo.JustBeforeEach(func() { + work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + appliedManifestWorkName = fmt.Sprintf("%s-%s", workSourceHash, work.UID) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + anotherWork, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + anotherAppliedManifestWorkName = fmt.Sprintf("%s-%s", workSourceHash, anotherWork.UID) + + util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + }) + + ginkgo.It("shared resource between the manifestwork should be kept when one manifestwork is deleted", func() { + // ensure configmap exists and 
get its uid + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + currentUID := curentConfigMap.UID + + // Ensure that uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct. + gomega.Eventually(func() error { + appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + for _, appliedResource := range appliedManifestWork.Status.AppliedResources { + if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) { + return nil + } + } + + return fmt.Errorf("resource name or uid in appliedmanifestwork does not match") + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get( + context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { + if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) { + return nil + } + } + + return fmt.Errorf("resource name or uid in appliedmanifestwork does not match") + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Delete one manifestwork + err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Ensure the appliedmanifestwork of deleted manifestwork is removed so it won't try to delete shared resource + gomega.Eventually(func() error { + appliedWork, err := 
spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + return fmt.Errorf("appliedmanifestwork should not exist: %v", appliedWork.DeletionTimestamp) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + + // Ensure the configmap is kept and tracked by anotherappliedmanifestwork. + gomega.Eventually(func() error { + configMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) + if err != nil { + return err + } + + if currentUID != configMap.UID { + return fmt.Errorf("UID should be equal") + } + + anotherappliedmanifestwork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get( + context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + hasAppliedResourceName := false + hasAppliedResourceUID := false + for _, appliedResource := range anotherappliedmanifestwork.Status.AppliedResources { + if appliedResource.Name == cm1 { + hasAppliedResourceName = true + } + + if appliedResource.UID != string(currentUID) { + hasAppliedResourceUID = true + } + } + + if !hasAppliedResourceName { + return fmt.Errorf("resource Name should be cm1") + } + + if !hasAppliedResourceUID { + return fmt.Errorf("UID should be equal") + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + + ginkgo.It("shared resource between the manifestwork should be kept when the shared resource is removed from one manifestwork", func() { + // ensure configmap exists and get its uid + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + currentUID := 
curentConfigMap.UID + + // Ensure that uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct. + gomega.Eventually(func() error { + appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + for _, appliedResource := range appliedManifestWork.Status.AppliedResources { + if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) { + return nil + } + } + + return fmt.Errorf("resource name or uid in appliedmanifestwork does not match") + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get( + context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { + if appliedResource.Name == cm1 && appliedResource.UID == string(currentUID) { + return nil + } + } + + return fmt.Errorf("resource name or uid in appliedmanifestwork does not match") + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update one manifestwork to remove the shared resource + work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + work.Spec.Workload.Manifests = []workapiv1.Manifest{ + manifests[1], + util.ToManifest(util.NewConfigmap(clusterName, "cm4", map[string]string{"g": "h"}, []string{})), + } + _, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Ensure the resource is not tracked by the appliedmanifestwork. 
+ gomega.Eventually(func() error { + appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + for _, appliedResource := range appliedManifestWork.Status.AppliedResources { + if appliedResource.Name == cm1 { + return fmt.Errorf("found applied resource name cm1") + } + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Ensure the configmap is kept and tracked by anotherappliedmanifestwork + gomega.Eventually(func() error { + configMap, err := spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get( + context.Background(), cm1, metav1.GetOptions{}) + if err != nil { + return err + } + + if currentUID != configMap.UID { + return fmt.Errorf("UID should be equal") + } + + anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get( + context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + hasAppliedResourceName := false + hasAppliedResourceUID := false + for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { + if appliedResource.Name == cm1 { + hasAppliedResourceName = true + } + + if appliedResource.UID != string(currentUID) { + hasAppliedResourceUID = true + } + } + + if !hasAppliedResourceName { + return fmt.Errorf("resource Name should be cm1") + } + + if !hasAppliedResourceUID { + return fmt.Errorf("UID should be equal") + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + }) +}) diff --git a/test/integration/cloudevents/manifestworkreplicaset_test.go b/test/integration/cloudevents/manifestworkreplicaset_test.go new file mode 100644 index 000000000..0d737caa4 --- /dev/null +++ b/test/integration/cloudevents/manifestworkreplicaset_test.go @@ -0,0 +1,206 @@ +package cloudevents + +import ( + "context" + "fmt" + "time" + + 
"github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/openshift/library-go/pkg/controller/controllercmd" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilrand "k8s.io/apimachinery/pkg/util/rand" + + clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + workapiv1 "open-cluster-management.io/api/work/v1" + workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1" + + commonoptions "open-cluster-management.io/ocm/pkg/common/options" + "open-cluster-management.io/ocm/pkg/work/hub" + "open-cluster-management.io/ocm/pkg/work/spoke" + "open-cluster-management.io/ocm/test/integration/util" +) + +var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() { + var err error + var cancel context.CancelFunc + + var clusterAName, clusterBName string + var namespace string + var placement *clusterv1beta1.Placement + var placementDecision *clusterv1beta1.PlacementDecision + var manifestWorkReplicaSet *workapiv1alpha1.ManifestWorkReplicaSet + + ginkgo.BeforeEach(func() { + var ctx context.Context + ctx, cancel = context.WithCancel(context.Background()) + + namespace = utilrand.String(5) + ns := &corev1.Namespace{} + ns.Name = namespace + _, err = spokeKubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + clusterAName = "cluster-" + utilrand.String(5) + clusterNS := &corev1.Namespace{} + clusterNS.Name = clusterAName + _, err = spokeKubeClient.CoreV1().Namespaces().Create(ctx, clusterNS, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + clusterBName = "cluster-" + utilrand.String(5) + clusterNS = &corev1.Namespace{} + clusterNS.Name = clusterBName + _, err = spokeKubeClient.CoreV1().Namespaces().Create(ctx, clusterNS, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + placement = 
&clusterv1beta1.Placement{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-placement", + Namespace: namespace, + }, + } + _, err = hubClusterClient.ClusterV1beta1().Placements(namespace).Create(ctx, placement, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + placementDecision = &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-placement-decision", + Namespace: namespace, + Labels: map[string]string{ + clusterv1beta1.PlacementLabel: placement.Name, + clusterv1beta1.DecisionGroupIndexLabel: "0", + }, + }, + } + decision, err := hubClusterClient.ClusterV1beta1().PlacementDecisions(namespace).Create(ctx, placementDecision, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + decision.Status.Decisions = []clusterv1beta1.ClusterDecision{ + {ClusterName: clusterAName}, + {ClusterName: clusterBName}, + } + _, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(namespace).UpdateStatus(ctx, decision, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + <-time.After(time.Second) + + startCtrl(ctx) + + // start work agents + startAgent(ctx, clusterAName) + startAgent(ctx, clusterBName) + + manifestWorkReplicaSet = &workapiv1alpha1.ManifestWorkReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: namespace, + }, + Spec: workapiv1alpha1.ManifestWorkReplicaSetSpec{ + ManifestWorkTemplate: workapiv1.ManifestWorkSpec{ + Workload: workapiv1.ManifestsTemplate{ + Manifests: []workapiv1.Manifest{ + util.ToManifest(util.NewConfigmap("default", cm1, map[string]string{"a": "b"}, nil)), + }, + }, + }, + PlacementRefs: []workapiv1alpha1.LocalPlacementReference{ + { + Name: placement.Name, + RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}, + }, + }, + }, + } + _, err = hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(namespace).Create(context.TODO(), manifestWorkReplicaSet, metav1.CreateOptions{}) + 
gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.AfterEach(func() { + err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + if cancel != nil { + cancel() + } + }) + + ginkgo.Context("Create/Update/Delete a manifestWorkReplicaSet", func() { + ginkgo.It("should create/update/delete successfully", func() { + gomega.Eventually(func() error { + return assertSummary(workapiv1alpha1.ManifestWorkReplicaSetSummary{Total: 2, Available: 2, Applied: 2}, manifestWorkReplicaSet) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + + ginkgo.By("Update decision so manifestworks should be updated") + decision, err := hubClusterClient.ClusterV1beta1().PlacementDecisions(namespace).Get(context.TODO(), placementDecision.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + decision.Status.Decisions = decision.Status.Decisions[:1] + _, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(namespace).UpdateStatus(context.TODO(), decision, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + return assertSummary(workapiv1alpha1.ManifestWorkReplicaSetSummary{Total: 1, Available: 1, Applied: 1}, manifestWorkReplicaSet) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + + ginkgo.By("Delete manifestworkreplicaset") + err = hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(namespace).Delete(context.TODO(), manifestWorkReplicaSet.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + _, err := spokeKubeClient.CoreV1().ConfigMaps("default").Get(context.TODO(), cm1, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return nil + } + + return fmt.Errorf("the configmap is not deleted, %v", err) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + }) + }) +}) + 
+func startAgent(ctx context.Context, clusterName string) { + o := spoke.NewWorkloadAgentOptions() + o.StatusSyncInterval = 3 * time.Second + o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second + o.WorkloadSourceDriver.Type = workSourceDriver + o.WorkloadSourceDriver.Config = mwrsConfigFileName + o.WorkloadSourceDriver.Codec = commonoptions.ManifestBundleCodec + + commOptions := commonoptions.NewAgentOptions() + commOptions.SpokeClusterName = clusterName + + go runWorkAgent(ctx, o, commOptions) +} + +func startCtrl(ctx context.Context) { + opts := hub.NewWorkHubManagerOptions() + opts.WorkloadSourceDriver.Type = workSourceDriver + opts.WorkloadSourceDriver.Config = mwrsConfigFileName + hubConfig := hub.NewWorkHubManagerConfig(opts) + + // start hub controller + go func() { + err := hubConfig.RunWorkHubManager(ctx, &controllercmd.ControllerContext{ + KubeConfig: hubRestConfig, + EventRecorder: util.NewIntegrationTestEventRecorder("mwrsctrl"), + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + +} + +func assertSummary(summary workapiv1alpha1.ManifestWorkReplicaSetSummary, mwrs *workapiv1alpha1.ManifestWorkReplicaSet) error { + rs, err := hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(mwrs.Namespace).Get(context.TODO(), mwrs.Name, metav1.GetOptions{}) + + if err != nil { + return err + } + + if rs.Status.Summary != summary { + return fmt.Errorf("unexpected summary expected: %v, got :%v", summary, rs.Status.Summary) + } + + return nil +} diff --git a/test/integration/cloudevents/source/codec.go b/test/integration/cloudevents/source/codec.go index 8f301eba5..3ace7081e 100644 --- a/test/integration/cloudevents/source/codec.go +++ b/test/integration/cloudevents/source/codec.go @@ -152,8 +152,12 @@ func (d *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsT } evt := eventBuilder.NewEvent() + data := &payload.ManifestBundle{} + data.Manifests = work.Spec.Workload.Manifests + data.ManifestConfigs = work.Spec.ManifestConfigs + 
data.DeleteOption = work.Spec.DeleteOption - if err := evt.SetData(cloudevents.ApplicationJSON, &payload.ManifestBundle{Manifests: work.Spec.Workload.Manifests}); err != nil { + if err := evt.SetData(cloudevents.ApplicationJSON, data); err != nil { return nil, fmt.Errorf("failed to encode manifests to cloud event: %v", err) } diff --git a/test/integration/cloudevents/source/manifestwork.go b/test/integration/cloudevents/source/manifestwork.go index 74e165d6c..fd3aee49e 100644 --- a/test/integration/cloudevents/source/manifestwork.go +++ b/test/integration/cloudevents/source/manifestwork.go @@ -72,12 +72,16 @@ func (c *manifestWorkSourceClient) Create(ctx context.Context, manifestWork *wor newObj.Generation = 1 newObj.Namespace = c.namespace - //TODO support manifestbundles eventType := types.CloudEventsType{ CloudEventsDataType: payload.ManifestEventDataType, SubResource: types.SubResourceSpec, Action: "create_request", } + + if len(manifestWork.Spec.Workload.Manifests) > 1 { + eventType.CloudEventsDataType = payload.ManifestBundleEventDataType + } + if err := c.cloudEventsClient.Publish(ctx, eventType, newObj); err != nil { return nil, err } @@ -109,12 +113,16 @@ func (c *manifestWorkSourceClient) Update(ctx context.Context, manifestWork *wor updatedObj.Generation = updatedObj.Generation + 1 updatedObj.ResourceVersion = fmt.Sprintf("%d", updatedObj.Generation) - //TODO support manifestbundles eventType := types.CloudEventsType{ CloudEventsDataType: payload.ManifestEventDataType, SubResource: types.SubResourceSpec, Action: "update_request", } + + if len(manifestWork.Spec.Workload.Manifests) > 1 { + eventType.CloudEventsDataType = payload.ManifestBundleEventDataType + } + if err := c.cloudEventsClient.Publish(ctx, eventType, updatedObj); err != nil { return nil, err } @@ -142,13 +150,16 @@ func (c *manifestWorkSourceClient) Delete(ctx context.Context, name string, opts now := metav1.Now() deletedObj.DeletionTimestamp = &now - //TODO support manifestbundles eventType 
:= types.CloudEventsType{ CloudEventsDataType: payload.ManifestEventDataType, SubResource: types.SubResourceSpec, Action: "delete_request", } + if len(manifestWork.Spec.Workload.Manifests) > 1 { + eventType.CloudEventsDataType = payload.ManifestBundleEventDataType + } + if err := c.cloudEventsClient.Publish(ctx, eventType, deletedObj); err != nil { return err } diff --git a/test/integration/cloudevents/source/source.go b/test/integration/cloudevents/source/source.go index 30783cb4d..fd3998a38 100644 --- a/test/integration/cloudevents/source/source.go +++ b/test/integration/cloudevents/source/source.go @@ -100,6 +100,7 @@ func (m *MQTTSource) Start(ctx context.Context) error { workLister, work.ManifestWorkStatusHash, &ManifestCodec{}, + &ManifestBundleCodec{}, ) if err != nil { return err diff --git a/test/integration/cloudevents/statusfeedback_test.go b/test/integration/cloudevents/statusfeedback_test.go index 21f6282f0..24ce2023a 100644 --- a/test/integration/cloudevents/statusfeedback_test.go +++ b/test/integration/cloudevents/statusfeedback_test.go @@ -38,6 +38,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { o.StatusSyncInterval = 3 * time.Second o.WorkloadSourceDriver.Type = workSourceDriver o.WorkloadSourceDriver.Config = workSourceConfigFileName + o.WorkloadSourceDriver.Codec = commonoptions.ManifestCodec commOptions = commonoptions.NewAgentOptions() commOptions.SpokeClusterName = utilrand.String(5) @@ -69,7 +70,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { var ctx context.Context ctx, cancel = context.WithCancel(context.Background()) - go startWorkAgent(ctx, o, commOptions) + go runWorkAgent(ctx, o, commOptions) }) ginkgo.AfterEach(func() { @@ -315,8 +316,6 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }) }) - // TODO should return none for resources with no wellknown status - ginkgo.Context("Deployment Status feedback with RawJsonString enabled", func() { ginkgo.BeforeEach(func() { u, _, 
err := util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa") @@ -327,7 +326,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) var ctx context.Context ctx, cancel = context.WithCancel(context.Background()) - go startWorkAgent(ctx, o, commOptions) + go runWorkAgent(ctx, o, commOptions) }) ginkgo.AfterEach(func() { @@ -422,3 +421,151 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }) }) }) + +var _ = ginkgo.Describe("ManifestWork Status Feedback for resources with no wellknown status", func() { + var err error + var cancel context.CancelFunc + + var clusterName string + var work *workapiv1.ManifestWork + + ginkgo.BeforeEach(func() { + clusterName = utilrand.String(5) + + ns := &corev1.Namespace{} + ns.Name = clusterName + _, err = spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + var ctx context.Context + ctx, cancel = context.WithCancel(context.Background()) + startWorkAgent(ctx, clusterName, commonoptions.ManifestBundleCodec) + }) + + ginkgo.AfterEach(func() { + err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + if cancel != nil { + cancel() + } + }) + + ginkgo.It("should return none for resources with no wellknown status", func() { + u, _, err := util.NewDeployment(clusterName, "deploy1", "sa") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + sa, _ := util.NewServiceAccount(clusterName, "sa") + + work = util.NewManifestWork(clusterName, "", []workapiv1.Manifest{}) + work.Spec.Workload.Manifests = []workapiv1.Manifest{ + util.ToManifest(u), + util.ToManifest(sa), + } + + work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "apps", + Resource: "deployments", + Namespace: 
clusterName, + Name: "deploy1", + }, + FeedbackRules: []workapiv1.FeedbackRule{ + { + Type: workapiv1.WellKnownStatusType, + }, + }, + }, + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "", + Resource: "serviceaccounts", + Namespace: clusterName, + Name: "sa", + }, + FeedbackRules: []workapiv1.FeedbackRule{ + { + Type: workapiv1.WellKnownStatusType, + }, + }, + }, + } + + work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // Update Deployment status on spoke + gomega.Eventually(func() error { + deploy, err := spokeKubeClient.AppsV1().Deployments(clusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + if err != nil { + return err + } + + deploy.Status.AvailableReplicas = 2 + deploy.Status.Replicas = 3 + deploy.Status.ReadyReplicas = 2 + + _, err = spokeKubeClient.AppsV1().Deployments(clusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Check if we get status of deployment on work api + gomega.Eventually(func() error { + work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(work.Status.ResourceStatus.Manifests) != 2 { + return fmt.Errorf("the size of resource status is not correct, expect to be 2 but got 
%d", len(work.Status.ResourceStatus.Manifests)) + } + + values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values + + expectedValues := []workapiv1.FeedbackValue{ + { + Name: "ReadyReplicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: ptr.To[int64](2), + }, + }, + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: ptr.To[int64](3), + }, + }, + { + Name: "AvailableReplicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: ptr.To[int64](2), + }, + }, + } + if !apiequality.Semantic.DeepEqual(values, expectedValues) { + return fmt.Errorf("status feedback values are not correct, we got %v", work.Status.ResourceStatus.Manifests) + } + + if len(work.Status.ResourceStatus.Manifests[1].StatusFeedbacks.Values) != 0 { + return fmt.Errorf("status feedback values are not correct, we got %v", work.Status.ResourceStatus.Manifests[1].StatusFeedbacks.Values) + } + + if !util.HaveManifestCondition( + work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionFalse}) { + return fmt.Errorf("status sync condition should be True") + } + + return nil + }, eventuallyTimeout*2, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) +}) diff --git a/test/integration/cloudevents/suite_test.go b/test/integration/cloudevents/suite_test.go index 78b8146d9..bc9036e4b 100644 --- a/test/integration/cloudevents/suite_test.go +++ b/test/integration/cloudevents/suite_test.go @@ -10,6 +10,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "go.uber.org/zap/zapcore" + "gopkg.in/yaml.v2" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -17,9 +18,12 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + clusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned" workclientset 
"open-cluster-management.io/api/client/work/clientset/versioned" ocmfeature "open-cluster-management.io/api/feature" workapiv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" "open-cluster-management.io/ocm/pkg/features" "open-cluster-management.io/ocm/pkg/work/helper" @@ -32,7 +36,7 @@ const ( cm1, cm2 = "cm1", "cm2" ) -// TODO consider to use one integration with part +// TODO consider to use one integration with work integration // focus on source is a MQTT broker const workSourceDriver = "mqtt" @@ -47,6 +51,12 @@ var workSourceConfigFileName string var workSourceWorkClient workclientset.Interface var workSourceHash string +var mwrsConfigFileName string + +var hubRestConfig *rest.Config +var hubClusterClient clusterclientset.Interface +var hubWorkClient workclientset.Interface + var spokeRestConfig *rest.Config var spokeKubeClient kubernetes.Interface var spokeWorkClient workclientset.Interface @@ -96,9 +106,16 @@ var _ = ginkgo.BeforeSuite(func() { spokeWorkClient, err = workclientset.NewForConfig(cfg) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + hubRestConfig = cfg + hubClusterClient, err = clusterclientset.NewForConfig(cfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + hubWorkClient, err = workclientset.NewForConfig(cfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + switch workSourceDriver { case "mqtt": - // create kubeconfig file for hub in a tmp dir + // create mqttconfig file for source in a tmp dir workSourceConfigFileName = path.Join(tempDir, "mqttconfig") workSource = source.NewMQTTSource(workSourceConfigFileName) @@ -109,6 +126,22 @@ var _ = ginkgo.BeforeSuite(func() { workSourceWorkClient = workSource.Workclientset() gomega.Expect(workSourceWorkClient).ToNot(gomega.BeNil()) + + // create mqttconfig file for mwrsctrl in a tmp dir + mwrsConfigFileName = path.Join(tempDir, "mwrsctrl-mqttconfig") + 
config := mqtt.MQTTConfig{ + BrokerHost: workSource.Host(), + Topics: &types.Topics{ + SourceEvents: "sources/mwrsctrl/clusters/+/sourceevents", + AgentEvents: "sources/mwrsctrl/clusters/+/agentevents", + SourceBroadcast: "sources/mwrsctrl/sourcebroadcast", + }, + } + + configData, err := yaml.Marshal(config) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = os.WriteFile(mwrsConfigFileName, configData, 0600) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) default: ginkgo.AbortSuite(fmt.Sprintf("unsupported source driver: %s", workSourceDriver)) } diff --git a/test/integration/cloudevents/updatestrategy_test.go b/test/integration/cloudevents/updatestrategy_test.go index fea0f5937..6af528492 100644 --- a/test/integration/cloudevents/updatestrategy_test.go +++ b/test/integration/cloudevents/updatestrategy_test.go @@ -36,6 +36,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { o.StatusSyncInterval = 3 * time.Second o.WorkloadSourceDriver.Type = workSourceDriver o.WorkloadSourceDriver.Config = workSourceConfigFileName + o.WorkloadSourceDriver.Codec = commonoptions.ManifestCodec commOptions = commonoptions.NewAgentOptions() commOptions.SpokeClusterName = utilrand.String(5) @@ -47,7 +48,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { var ctx context.Context ctx, cancel = context.WithCancel(context.Background()) - go startWorkAgent(ctx, o, commOptions) + go runWorkAgent(ctx, o, commOptions) // reset manifests manifests = nil diff --git a/test/integration/cloudevents/work_test.go b/test/integration/cloudevents/work_test.go index fc7edb808..e8942830f 100644 --- a/test/integration/cloudevents/work_test.go +++ b/test/integration/cloudevents/work_test.go @@ -20,7 +20,21 @@ import ( "open-cluster-management.io/ocm/test/integration/util" ) -func startWorkAgent(ctx context.Context, o *spoke.WorkloadAgentOptions, commOption *commonoptions.AgentOptions) { +func startWorkAgent(ctx context.Context, clusterName, codec string) { + o := 
spoke.NewWorkloadAgentOptions() + o.StatusSyncInterval = 3 * time.Second + o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second + o.WorkloadSourceDriver.Type = workSourceDriver + o.WorkloadSourceDriver.Config = workSourceConfigFileName + o.WorkloadSourceDriver.Codec = codec + + commOptions := commonoptions.NewAgentOptions() + commOptions.SpokeClusterName = clusterName + + go runWorkAgent(ctx, o, commOptions) +} + +func runWorkAgent(ctx context.Context, o *spoke.WorkloadAgentOptions, commOption *commonoptions.AgentOptions) { agentConfig := spoke.NewWorkAgentConfig(commOption, o) err := agentConfig.RunWorkloadAgent(ctx, &controllercmd.ControllerContext{ KubeConfig: spokeRestConfig, @@ -29,68 +43,66 @@ func startWorkAgent(ctx context.Context, o *spoke.WorkloadAgentOptions, commOpti gomega.Expect(err).NotTo(gomega.HaveOccurred()) } +func prepareWork(clusterName string, manifests []workapiv1.Manifest) (*workapiv1.ManifestWork, string) { + work := util.NewManifestWork(clusterName, "", manifests) + work, err := workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + // if the source is not kube, the uid will be used as the manifestwork name + return work, fmt.Sprintf("%s-%s", workSourceHash, work.UID) +} + +func cleanup(clusterName, workName string) { + err := workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), workName, metav1.DeleteOptions{}) + if !errors.IsNotFound(err) { + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + } + + gomega.Eventually(func() error { + _, err := workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), workName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + return fmt.Errorf("work %s in namespace %s still exists", workName, clusterName) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + + 
err = spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), clusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) +} + var _ = ginkgo.Describe("ManifestWork", func() { - var o *spoke.WorkloadAgentOptions - var commOptions *commonoptions.AgentOptions + var err error var cancel context.CancelFunc + var clusterName string var work *workapiv1.ManifestWork var manifests []workapiv1.Manifest var appliedManifestWorkName string - var err error - ginkgo.BeforeEach(func() { - o = spoke.NewWorkloadAgentOptions() - o.StatusSyncInterval = 3 * time.Second - o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second - o.WorkloadSourceDriver.Type = workSourceDriver - o.WorkloadSourceDriver.Config = workSourceConfigFileName - - commOptions = commonoptions.NewAgentOptions() - commOptions.SpokeClusterName = utilrand.String(5) + clusterName = utilrand.String(5) ns := &corev1.Namespace{} - ns.Name = commOptions.SpokeClusterName + ns.Name = clusterName _, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) var ctx context.Context ctx, cancel = context.WithCancel(context.Background()) - go startWorkAgent(ctx, o, commOptions) + startWorkAgent(ctx, clusterName, commonoptions.ManifestCodec) // reset manifests manifests = nil }) ginkgo.JustBeforeEach(func() { - work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests) - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) - // if the source is not kube, the uid will be used as the manifestwork name - appliedManifestWorkName = fmt.Sprintf("%s-%s", workSourceHash, work.UID) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) + work, appliedManifestWorkName = prepareWork(clusterName, manifests) }) ginkgo.AfterEach(func() { - err = 
workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) - if !errors.IsNotFound(err) { - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - } - - gomega.Eventually(func() error { - _, err := workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return nil - } - if err != nil { - return err - } - return fmt.Errorf("work %s in namespace %s still exists", work.Name, commOptions.SpokeClusterName) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) - - err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - + cleanup(clusterName, work.Name) if cancel != nil { cancel() } @@ -99,7 +111,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { ginkgo.Context("With a single manifest", func() { ginkgo.BeforeEach(func() { manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, nil)), + util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, nil)), } }) @@ -119,13 +131,13 @@ var _ = ginkgo.Describe("ManifestWork", func() { []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) newManifests := []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"x": "y"}, nil)), + util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"x": "y"}, nil)), } - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) 
gomega.Expect(err).ToNot(gomega.HaveOccurred()) work.Spec.Workload.Manifests = newManifests - work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) @@ -146,20 +158,130 @@ var _ = ginkgo.Describe("ManifestWork", func() { return nil }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) + _, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), cm1, metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) }) ginkgo.It("should delete work successfully", func() { util.AssertFinalizerAdded(work.Namespace, work.Name, workSourceWorkClient, eventuallyTimeout, eventuallyInterval) - err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) util.AssertWorkDeleted(work.Namespace, work.Name, fmt.Sprintf("%s-%s", workSourceHash, work.UID), manifests, workSourceWorkClient, spokeWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) }) +}) + +var _ = ginkgo.Describe("ManifestWorkBundle", func() { + var err error + var cancel context.CancelFunc + + var clusterName string + var work *workapiv1.ManifestWork + var manifests []workapiv1.Manifest + var appliedManifestWorkName string + + ginkgo.BeforeEach(func() { + clusterName = utilrand.String(5) + + ns 
:= &corev1.Namespace{} + ns.Name = clusterName + _, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + var ctx context.Context + ctx, cancel = context.WithCancel(context.Background()) + startWorkAgent(ctx, clusterName, commonoptions.ManifestBundleCodec) + + // reset manifests + manifests = nil + }) - // TODO test multiple manifests after the manifestbundles is enabled + ginkgo.JustBeforeEach(func() { + work, appliedManifestWorkName = prepareWork(clusterName, manifests) + }) + + ginkgo.AfterEach(func() { + cleanup(clusterName, work.Name) + if cancel != nil { + cancel() + } + }) + + ginkgo.Context("With multiple manifests", func() { + ginkgo.BeforeEach(func() { + manifests = []workapiv1.Manifest{ + util.ToManifest(util.NewConfigmap("non-existent-namespace", cm1, map[string]string{"a": "b"}, nil)), + util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"c": "d"}, nil)), + util.ToManifest(util.NewConfigmap(clusterName, "cm3", map[string]string{"e": "f"}, nil)), + } + }) + + ginkgo.It("should create work and then apply it successfully", func() { + util.AssertExistenceOfConfigMaps(manifests[1:], spokeKubeClient, eventuallyTimeout, eventuallyInterval) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse, + []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionFalse, + []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + }) + + ginkgo.It("should update work and then apply it successfully", func() { + util.AssertExistenceOfConfigMaps(manifests[1:], spokeKubeClient, eventuallyTimeout, eventuallyInterval) + + 
util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse, + []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionFalse, + []metav1.ConditionStatus{metav1.ConditionFalse, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + newManifests := []workapiv1.Manifest{ + util.ToManifest(util.NewConfigmap(clusterName, cm1, map[string]string{"a": "b"}, nil)), + util.ToManifest(util.NewConfigmap(clusterName, cm2, map[string]string{"x": "y"}, nil)), + util.ToManifest(util.NewConfigmap(clusterName, "cm4", map[string]string{"e": "f"}, nil)), + } + + work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + work.Spec.Workload.Manifests = newManifests + work, err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + + // check if Available status is updated or not + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // check if resource created by stale manifest is deleted once it is removed from applied resource list + gomega.Eventually(func() error { + appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + for _, 
appliedResource := range appliedManifestWork.Status.AppliedResources { + if appliedResource.Name == "cm3" { + return fmt.Errorf("found appled resource cm3") + } + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + _, err = spokeKubeClient.CoreV1().ConfigMaps(clusterName).Get(context.Background(), "cm3", metav1.GetOptions{}) + gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) + }) + + ginkgo.It("should delete work successfully", func() { + util.AssertFinalizerAdded(work.Namespace, work.Name, workSourceWorkClient, eventuallyTimeout, eventuallyInterval) + + err = workSourceWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkDeleted(work.Namespace, work.Name, fmt.Sprintf("%s-%s", workSourceHash, work.Name), manifests, + workSourceWorkClient, spokeWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + }) + }) }) diff --git a/test/integration/work/suite_test.go b/test/integration/work/suite_test.go index 2c9477626..18967b5ea 100644 --- a/test/integration/work/suite_test.go +++ b/test/integration/work/suite_test.go @@ -21,6 +21,7 @@ import ( ocmfeature "open-cluster-management.io/api/feature" workapiv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/ocm/pkg/common/options" "open-cluster-management.io/ocm/pkg/features" "open-cluster-management.io/ocm/pkg/work/helper" "open-cluster-management.io/ocm/pkg/work/hub" @@ -108,9 +109,14 @@ var _ = ginkgo.BeforeSuite(func() { hubClusterClient, err = clusterclientset.NewForConfig(cfg) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + opts := hub.NewWorkHubManagerOptions() + opts.WorkloadSourceDriver.Type = options.KubeDriver + opts.WorkloadSourceDriver.Config = sourceConfigFileName + hubConfig := hub.NewWorkHubManagerConfig(opts) + // start hub controller go func() { - err := hub.RunWorkHubManager(envCtx, 
&controllercmd.ControllerContext{ + err := hubConfig.RunWorkHubManager(envCtx, &controllercmd.ControllerContext{ KubeConfig: cfg, EventRecorder: util.NewIntegrationTestEventRecorder("hub"), }) diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 000000000..16686a655 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/empty/empty.proto + +package empty + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/empty.proto. + +type Empty = emptypb.Empty + +var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var 
file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() } +func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() { + if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File + file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 2cc595a9c..976637ab6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -143,6 +143,7 @@ github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/timestamp # github.com/google/cel-go v0.17.7 ## explicit; go 1.18 @@ -1578,7 +1579,7 @@ open-cluster-management.io/api/utils/work/v1/workapplier 
open-cluster-management.io/api/utils/work/v1/workvalidator open-cluster-management.io/api/work/v1 open-cluster-management.io/api/work/v1alpha1 -# open-cluster-management.io/sdk-go v0.13.0 +# open-cluster-management.io/sdk-go v0.13.1-0.20240227052220-ae7814c4d512 ## explicit; go 1.21 open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1 open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1 @@ -1588,6 +1589,9 @@ open-cluster-management.io/sdk-go/pkg/apis/work/v1/builder open-cluster-management.io/sdk-go/pkg/apis/work/v1/validator open-cluster-management.io/sdk-go/pkg/cloudevents/generic open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1 +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types @@ -1595,8 +1599,12 @@ open-cluster-management.io/sdk-go/pkg/cloudevents/work open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler +open-cluster-management.io/sdk-go/pkg/cloudevents/work/common open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload +open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client +open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec +open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher open-cluster-management.io/sdk-go/pkg/patcher diff --git 
a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go index c341f1f2b..fc451156f 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go @@ -44,6 +44,7 @@ func NewCloudEventAgentClient[T ResourceObject]( baseClient := &baseClient{ cloudEventsOptions: agentOptions.CloudEventsOptions, cloudEventsRateLimiter: NewRateLimiter(agentOptions.EventRateLimit), + reconnectedChan: make(chan struct{}), } if err := baseClient.connect(ctx); err != nil { @@ -65,6 +66,12 @@ func NewCloudEventAgentClient[T ResourceObject]( }, nil } +// ReconnectedChan returns a chan which indicates the source/agent client is reconnected. +// The source/agent client callers should consider sending a resync request when receiving this signal. +func (c *CloudEventAgentClient[T]) ReconnectedChan() <-chan struct{} { + return c.reconnectedChan +} + // Resync the resources spec by sending a spec resync request from the current to the given source. 
func (c *CloudEventAgentClient[T]) Resync(ctx context.Context, source string) error { // list the resource objects that are maintained by the current agent with the given source diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go index 271e1c97f..5c78bf7a9 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go @@ -30,6 +30,7 @@ type baseClient struct { cloudEventsClient cloudevents.Client cloudEventsRateLimiter flowcontrol.RateLimiter receiverChan chan int + reconnectedChan chan struct{} } func (c *baseClient) connect(ctx context.Context) error { @@ -71,6 +72,7 @@ func (c *baseClient) connect(ctx context.Context) error { klog.V(4).Infof("the cloudevents client is reconnected") c.resetClient(cloudEventsClient) c.sendReceiverSignal(restartReceiverSignal) + c.sendReconnectedSignal() } select { @@ -153,8 +155,6 @@ func (c *baseClient) subscribe(ctx context.Context, receive receiveFn) { for { if cloudEventsClient != nil { - // TODO send a resync request - go func() { if err := cloudEventsClient.StartReceiver(receiverCtx, func(evt cloudevents.Event) { receive(receiverCtx, evt) @@ -213,3 +213,9 @@ func (c *baseClient) sendReceiverSignal(signal int) { c.receiverChan <- signal } } + +func (c *baseClient) sendReconnectedSignal() { + c.RLock() + defer c.RUnlock() + c.reconnectedChan <- struct{}{} +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/interface.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/interface.go index ee8b77b37..06cb8fd45 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/interface.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/interface.go @@ -67,4 +67,8 @@ type CloudEventsClient[T ResourceObject] interface { // 
Subscribe the resources status/spec event to the broker to receive the resources status/spec and use // ResourceHandler to handle them. Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) + + // ReconnectedChan returns a chan which indicates the source/agent client is reconnected. + // The source/agent client callers should consider sending a resync request when receiving this signal. + ReconnectedChan() <-chan struct{} } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go new file mode 100644 index 000000000..81534ff87 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go @@ -0,0 +1,79 @@ +package grpc + +import ( + "context" + "fmt" + "strings" + + cloudevents "github.com/cloudevents/sdk-go/v2" + cecontext "github.com/cloudevents/sdk-go/v2/context" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +type grpcAgentOptions struct { + GRPCOptions + errorChan chan error // grpc client connection doesn't have error channel, it will handle reconnecting automatically + clusterName string +} + +func NewAgentOptions(grpcOptions *GRPCOptions, clusterName, agentID string) *options.CloudEventsAgentOptions { + return &options.CloudEventsAgentOptions{ + CloudEventsOptions: &grpcAgentOptions{ + GRPCOptions: *grpcOptions, + errorChan: make(chan error), + clusterName: clusterName, + }, + AgentID: agentID, + ClusterName: clusterName, + } +} + +func (o *grpcAgentOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + eventType, err := types.ParseCloudEventsType(evtCtx.GetType()) + if err != nil { + return nil, fmt.Errorf("unsupported event 
type %s, %v", eventType, err) + } + + if eventType.Action == types.ResyncRequestAction { + // agent publishes event to spec resync topic to request to get resources spec from all sources + topic := strings.Replace(SpecResyncTopic, "+", o.clusterName, -1) + return cecontext.WithTopic(ctx, topic), nil + } + + // agent publishes event to status topic to send the resource status from a specified cluster + originalSource, err := evtCtx.GetExtension(types.ExtensionOriginalSource) + if err != nil { + return nil, err + } + + statusTopic := strings.Replace(StatusTopic, "+", fmt.Sprintf("%s", originalSource), 1) + statusTopic = strings.Replace(statusTopic, "+", o.clusterName, -1) + return cecontext.WithTopic(ctx, statusTopic), nil +} + +func (o *grpcAgentOptions) Client(ctx context.Context) (cloudevents.Client, error) { + receiver, err := o.GetCloudEventsClient( + ctx, + func(err error) { + o.errorChan <- err + }, + protocol.WithPublishOption(&protocol.PublishOption{}), + protocol.WithSubscribeOption(&protocol.SubscribeOption{ + Topics: []string{ + replaceNth(SpecTopic, "+", o.clusterName, 2), // receiving the resources spec from sources with spec topic + StatusResyncTopic, // receiving the resources status resync request from sources with status resync topic + }, + }), + ) + if err != nil { + return nil, err + } + return receiver, nil +} + +func (o *grpcAgentOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go new file mode 100644 index 000000000..23a72eeb7 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go @@ -0,0 +1,187 @@ +package grpc + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "os" + "strings" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + 
"google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "gopkg.in/yaml.v2" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol" +) + +const ( + // SpecTopic is a pubsub topic for resource spec. + SpecTopic = "sources/+/clusters/+/spec" + + // StatusTopic is a pubsub topic for resource status. + StatusTopic = "sources/+/clusters/+/status" + + // SpecResyncTopic is a pubsub topic for resource spec resync. + SpecResyncTopic = "sources/clusters/+/specresync" + + // StatusResyncTopic is a pubsub topic for resource status resync. + StatusResyncTopic = "sources/+/clusters/statusresync" +) + +// GRPCOptions holds the options that are used to build gRPC client. +type GRPCOptions struct { + URL string + CAFile string + ClientCertFile string + ClientKeyFile string +} + +// GRPCConfig holds the information needed to build connect to gRPC server as a given user. +type GRPCConfig struct { + // URL is the address of the gRPC server (host:port). + URL string `json:"url" yaml:"url"` + // CAFile is the file path to a cert file for the gRPC server certificate authority. + CAFile string `json:"caFile,omitempty" yaml:"caFile,omitempty"` + // ClientCertFile is the file path to a client cert file for TLS. + ClientCertFile string `json:"clientCertFile,omitempty" yaml:"clientCertFile,omitempty"` + // ClientKeyFile is the file path to a client key file for TLS. + ClientKeyFile string `json:"clientKeyFile,omitempty" yaml:"clientKeyFile,omitempty"` +} + +// BuildGRPCOptionsFromFlags builds configs from a config filepath. 
+func BuildGRPCOptionsFromFlags(configPath string) (*GRPCOptions, error) { + configData, err := os.ReadFile(configPath) + if err != nil { + return nil, err + } + + config := &GRPCConfig{} + if err := yaml.Unmarshal(configData, config); err != nil { + return nil, err + } + + if config.URL == "" { + return nil, fmt.Errorf("url is required") + } + + if (config.ClientCertFile == "" && config.ClientKeyFile != "") || + (config.ClientCertFile != "" && config.ClientKeyFile == "") { + return nil, fmt.Errorf("either both or none of clientCertFile and clientKeyFile must be set") + } + if config.ClientCertFile != "" && config.ClientKeyFile != "" && config.CAFile == "" { + return nil, fmt.Errorf("setting clientCertFile and clientKeyFile requires caFile") + } + + return &GRPCOptions{ + URL: config.URL, + CAFile: config.CAFile, + ClientCertFile: config.ClientCertFile, + ClientKeyFile: config.ClientKeyFile, + }, nil +} + +func NewGRPCOptions() *GRPCOptions { + return &GRPCOptions{} +} + +func (o *GRPCOptions) GetGRPCClientConn() (*grpc.ClientConn, error) { + if len(o.CAFile) != 0 { + certPool, err := x509.SystemCertPool() + if err != nil { + return nil, err + } + + caPEM, err := os.ReadFile(o.CAFile) + if err != nil { + return nil, err + } + + if ok := certPool.AppendCertsFromPEM(caPEM); !ok { + return nil, fmt.Errorf("invalid CA %s", o.CAFile) + } + + clientCerts, err := tls.LoadX509KeyPair(o.ClientCertFile, o.ClientKeyFile) + if err != nil { + return nil, err + } + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{clientCerts}, + RootCAs: certPool, + MinVersion: tls.VersionTLS13, + MaxVersion: tls.VersionTLS13, + } + + conn, err := grpc.Dial(o.URL, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + if err != nil { + return nil, fmt.Errorf("failed to connect to grpc server %s, %v", o.URL, err) + } + + return conn, nil + } + + conn, err := grpc.Dial(o.URL, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, 
fmt.Errorf("failed to connect to grpc server %s, %v", o.URL, err) + } + + return conn, nil +} + +func (o *GRPCOptions) GetCloudEventsClient(ctx context.Context, errorHandler func(error), clientOpts ...protocol.Option) (cloudevents.Client, error) { + conn, err := o.GetGRPCClientConn() + if err != nil { + return nil, err + } + + // Periodically (every 100ms) check the connection status and reconnect if necessary. + go func() { + ticker := time.NewTicker(100 * time.Millisecond) + for { + select { + case <-ctx.Done(): + ticker.Stop() + conn.Close() + case <-ticker.C: + if conn.GetState() == connectivity.TransientFailure { + errorHandler(fmt.Errorf("grpc connection is disconnected")) + ticker.Stop() + conn.Close() + return // exit the goroutine as the error handler function will handle the reconnection. + } + } + } + }() + + opts := []protocol.Option{} + opts = append(opts, clientOpts...) + p, err := protocol.NewProtocol(conn, opts...) + if err != nil { + return nil, err + } + + return cloudevents.NewClient(p) +} + +// Replace the nth occurrence of old in str by new. +func replaceNth(str, old, new string, n int) string { + i := 0 + for m := 1; m <= n; m++ { + x := strings.Index(str[i:], old) + if x < 0 { + break + } + i += x + if m == n { + return str[:i] + new + str[i+len(old):] + } + i += len(old) + } + return str +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/README.md b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/README.md new file mode 100644 index 000000000..0bc2eeba6 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/README.md @@ -0,0 +1,30 @@ +# CloudEvent gRPC Protobuf Definitions + +## Overview + +This repository includes the protobuf message and RPC method definitions for CloudEvent gRPC service, along with the corresponding Go code generated from these definitions. 
+ +## Getting Started + +### Prerequisites + +Make sure you have the following tools installed: + +- [Protocol Compiler (protoc)](https://grpc.io/docs/protoc-installation/) +- Go plugins for the protocol compiler: + +```bash +$ go install google.golang.org/protobuf/cmd/protoc-gen-go +$ go install google.golang.org/grpc/cmd/protoc-gen-go-grpc +``` + +### Updating CloudEvent gRPC Service + +1. Modify the `*.proto` files to reflect your desired changes. +2. Run the following command to update the generated code: + + ```bash + go generate + ``` + + This step is crucial to ensure that your changes are applied to both the gRPC server and client stub. diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go new file mode 100644 index 000000000..c85f45f4c --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go @@ -0,0 +1,656 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.12.4 +// source: cloudevent.proto + +package v1 + +import ( + any1 "github.com/golang/protobuf/ptypes/any" + empty "github.com/golang/protobuf/ptypes/empty" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// CloudEvent is copied from +// https://github.com/cloudevents/spec/blob/main/cloudevents/formats/protobuf-format.md. 
+type CloudEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique event identifier. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // URI of the event source. + Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + // Version of the spec in use. + SpecVersion string `protobuf:"bytes,3,opt,name=spec_version,json=specVersion,proto3" json:"spec_version,omitempty"` + // Event type identifier. + Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` + // Optional & Extension Attributes + Attributes map[string]*CloudEventAttributeValue `protobuf:"bytes,5,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // CloudEvent Data (Bytes, Text, or Proto) + // + // Types that are assignable to Data: + // + // *CloudEvent_BinaryData + // *CloudEvent_TextData + // *CloudEvent_ProtoData + Data isCloudEvent_Data `protobuf_oneof:"data"` +} + +func (x *CloudEvent) Reset() { + *x = CloudEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_cloudevent_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CloudEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloudEvent) ProtoMessage() {} + +func (x *CloudEvent) ProtoReflect() protoreflect.Message { + mi := &file_cloudevent_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloudEvent.ProtoReflect.Descriptor instead. 
+func (*CloudEvent) Descriptor() ([]byte, []int) { + return file_cloudevent_proto_rawDescGZIP(), []int{0} +} + +func (x *CloudEvent) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *CloudEvent) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +func (x *CloudEvent) GetSpecVersion() string { + if x != nil { + return x.SpecVersion + } + return "" +} + +func (x *CloudEvent) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *CloudEvent) GetAttributes() map[string]*CloudEventAttributeValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (m *CloudEvent) GetData() isCloudEvent_Data { + if m != nil { + return m.Data + } + return nil +} + +func (x *CloudEvent) GetBinaryData() []byte { + if x, ok := x.GetData().(*CloudEvent_BinaryData); ok { + return x.BinaryData + } + return nil +} + +func (x *CloudEvent) GetTextData() string { + if x, ok := x.GetData().(*CloudEvent_TextData); ok { + return x.TextData + } + return "" +} + +func (x *CloudEvent) GetProtoData() *any1.Any { + if x, ok := x.GetData().(*CloudEvent_ProtoData); ok { + return x.ProtoData + } + return nil +} + +type isCloudEvent_Data interface { + isCloudEvent_Data() +} + +type CloudEvent_BinaryData struct { + // If the event is binary data then the datacontenttype attribute + // should be set to an appropriate media-type. + BinaryData []byte `protobuf:"bytes,6,opt,name=binary_data,json=binaryData,proto3,oneof"` +} + +type CloudEvent_TextData struct { + // If the event is string data then the datacontenttype attribute + // should be set to an appropriate media-type such as application/json. + TextData string `protobuf:"bytes,7,opt,name=text_data,json=textData,proto3,oneof"` +} + +type CloudEvent_ProtoData struct { + // If the event is a protobuf then it must be encoded using this Any + // type. 
The datacontenttype attribute should be set to + // application/protobuf and the dataschema attribute set to the message + // type. + ProtoData *any1.Any `protobuf:"bytes,8,opt,name=proto_data,json=protoData,proto3,oneof"` +} + +func (*CloudEvent_BinaryData) isCloudEvent_Data() {} + +func (*CloudEvent_TextData) isCloudEvent_Data() {} + +func (*CloudEvent_ProtoData) isCloudEvent_Data() {} + +// CloudEventAttribute enables extensions to use any of the seven allowed +// data types as the value of an envelope key. +type CloudEventAttributeValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value can be any one of these types. + // + // Types that are assignable to Attr: + // + // *CloudEventAttributeValue_CeBoolean + // *CloudEventAttributeValue_CeInteger + // *CloudEventAttributeValue_CeString + // *CloudEventAttributeValue_CeBytes + // *CloudEventAttributeValue_CeUri + // *CloudEventAttributeValue_CeUriRef + // *CloudEventAttributeValue_CeTimestamp + Attr isCloudEventAttributeValue_Attr `protobuf_oneof:"attr"` +} + +func (x *CloudEventAttributeValue) Reset() { + *x = CloudEventAttributeValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cloudevent_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CloudEventAttributeValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloudEventAttributeValue) ProtoMessage() {} + +func (x *CloudEventAttributeValue) ProtoReflect() protoreflect.Message { + mi := &file_cloudevent_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloudEventAttributeValue.ProtoReflect.Descriptor instead. 
+func (*CloudEventAttributeValue) Descriptor() ([]byte, []int) { + return file_cloudevent_proto_rawDescGZIP(), []int{1} +} + +func (m *CloudEventAttributeValue) GetAttr() isCloudEventAttributeValue_Attr { + if m != nil { + return m.Attr + } + return nil +} + +func (x *CloudEventAttributeValue) GetCeBoolean() bool { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeBoolean); ok { + return x.CeBoolean + } + return false +} + +func (x *CloudEventAttributeValue) GetCeInteger() int32 { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeInteger); ok { + return x.CeInteger + } + return 0 +} + +func (x *CloudEventAttributeValue) GetCeString() string { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeString); ok { + return x.CeString + } + return "" +} + +func (x *CloudEventAttributeValue) GetCeBytes() []byte { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeBytes); ok { + return x.CeBytes + } + return nil +} + +func (x *CloudEventAttributeValue) GetCeUri() string { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeUri); ok { + return x.CeUri + } + return "" +} + +func (x *CloudEventAttributeValue) GetCeUriRef() string { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeUriRef); ok { + return x.CeUriRef + } + return "" +} + +func (x *CloudEventAttributeValue) GetCeTimestamp() *timestamp.Timestamp { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeTimestamp); ok { + return x.CeTimestamp + } + return nil +} + +type isCloudEventAttributeValue_Attr interface { + isCloudEventAttributeValue_Attr() +} + +type CloudEventAttributeValue_CeBoolean struct { + // Boolean value. + CeBoolean bool `protobuf:"varint,1,opt,name=ce_boolean,json=ceBoolean,proto3,oneof"` +} + +type CloudEventAttributeValue_CeInteger struct { + // Integer value. + CeInteger int32 `protobuf:"varint,2,opt,name=ce_integer,json=ceInteger,proto3,oneof"` +} + +type CloudEventAttributeValue_CeString struct { + // String value. 
+ CeString string `protobuf:"bytes,3,opt,name=ce_string,json=ceString,proto3,oneof"` +} + +type CloudEventAttributeValue_CeBytes struct { + // Byte string value. + CeBytes []byte `protobuf:"bytes,4,opt,name=ce_bytes,json=ceBytes,proto3,oneof"` +} + +type CloudEventAttributeValue_CeUri struct { + // URI value. + CeUri string `protobuf:"bytes,5,opt,name=ce_uri,json=ceUri,proto3,oneof"` +} + +type CloudEventAttributeValue_CeUriRef struct { + // URI reference value. + CeUriRef string `protobuf:"bytes,6,opt,name=ce_uri_ref,json=ceUriRef,proto3,oneof"` +} + +type CloudEventAttributeValue_CeTimestamp struct { + // Timestamp value. + CeTimestamp *timestamp.Timestamp `protobuf:"bytes,7,opt,name=ce_timestamp,json=ceTimestamp,proto3,oneof"` +} + +func (*CloudEventAttributeValue_CeBoolean) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeInteger) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeString) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeBytes) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeUri) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeUriRef) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeTimestamp) isCloudEventAttributeValue_Attr() {} + +type PublishRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The topic to which event should be published. + // Format is `myhome/groundfloor/livingroom/temperature`. 
+ Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` + Event *CloudEvent `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty"` +} + +func (x *PublishRequest) Reset() { + *x = PublishRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_cloudevent_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishRequest) ProtoMessage() {} + +func (x *PublishRequest) ProtoReflect() protoreflect.Message { + mi := &file_cloudevent_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. +func (*PublishRequest) Descriptor() ([]byte, []int) { + return file_cloudevent_proto_rawDescGZIP(), []int{2} +} + +func (x *PublishRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *PublishRequest) GetEvent() *CloudEvent { + if x != nil { + return x.Event + } + return nil +} + +type SubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The topic from which event should be pulled. + // Format is `myhome/groundfloor/livingroom/temperature`. 
+ Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *SubscriptionRequest) Reset() { + *x = SubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_cloudevent_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionRequest) ProtoMessage() {} + +func (x *SubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_cloudevent_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionRequest.ProtoReflect.Descriptor instead. +func (*SubscriptionRequest) Descriptor() ([]byte, []int) { + return file_cloudevent_proto_rawDescGZIP(), []int{3} +} + +func (x *SubscriptionRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +var File_cloudevent_proto protoreflect.FileDescriptor + +var file_cloudevent_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x11, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa7, + 0x03, 0x0a, 0x0a, 0x43, 
0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x70, 0x65, + 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x4d, 0x0a, 0x0a, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0b, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, + 0x48, 0x00, 0x52, 0x0a, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1d, + 0x0a, 0x09, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x08, 0x74, 0x65, 0x78, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x35, 0x0a, + 0x0a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x44, 0x61, 0x74, 0x61, 0x1a, 0x6a, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 
0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x9a, 0x02, 0x0a, 0x18, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x63, 0x65, 0x5f, 0x62, 0x6f, 0x6f, 0x6c, + 0x65, 0x61, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x63, 0x65, 0x42, + 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x12, 0x1f, 0x0a, 0x0a, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x67, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x09, 0x63, 0x65, + 0x49, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x09, 0x63, 0x65, 0x5f, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x63, 0x65, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1b, 0x0a, 0x08, 0x63, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x63, 0x65, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x17, 0x0a, 0x06, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x63, 0x65, 0x55, 0x72, 0x69, 0x12, 0x1e, 0x0a, 0x0a, + 0x63, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x08, 0x63, 0x65, 0x55, 0x72, 0x69, 0x52, 0x65, 0x66, 0x12, 0x3f, 0x0a, 0x0c, + 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x07, 0x20, 0x01, + 
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, + 0x52, 0x0b, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x06, 0x0a, + 0x04, 0x61, 0x74, 0x74, 0x72, 0x22, 0x5b, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x33, 0x0a, + 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, + 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x22, 0x2b, 0x0a, 0x13, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x32, + 0xb3, 0x01, 0x0a, 0x11, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, + 0x12, 0x21, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x56, 0x0a, + 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x26, 0x2e, 0x69, 0x6f, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x22, 0x00, 0x30, 0x01, 0x42, 0x4d, 0x5a, 0x4b, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cloudevent_proto_rawDescOnce sync.Once + file_cloudevent_proto_rawDescData = file_cloudevent_proto_rawDesc +) + +func file_cloudevent_proto_rawDescGZIP() []byte { + file_cloudevent_proto_rawDescOnce.Do(func() { + file_cloudevent_proto_rawDescData = protoimpl.X.CompressGZIP(file_cloudevent_proto_rawDescData) + }) + return file_cloudevent_proto_rawDescData +} + +var file_cloudevent_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cloudevent_proto_goTypes = []interface{}{ + (*CloudEvent)(nil), // 0: io.cloudevents.v1.CloudEvent + (*CloudEventAttributeValue)(nil), // 1: io.cloudevents.v1.CloudEventAttributeValue + (*PublishRequest)(nil), // 2: io.cloudevents.v1.PublishRequest + (*SubscriptionRequest)(nil), // 3: io.cloudevents.v1.SubscriptionRequest + nil, // 4: io.cloudevents.v1.CloudEvent.AttributesEntry + (*any1.Any)(nil), // 5: google.protobuf.Any + (*timestamp.Timestamp)(nil), // 6: google.protobuf.Timestamp + (*empty.Empty)(nil), // 7: google.protobuf.Empty +} +var file_cloudevent_proto_depIdxs = []int32{ + 4, // 0: io.cloudevents.v1.CloudEvent.attributes:type_name -> io.cloudevents.v1.CloudEvent.AttributesEntry + 5, // 1: 
io.cloudevents.v1.CloudEvent.proto_data:type_name -> google.protobuf.Any + 6, // 2: io.cloudevents.v1.CloudEventAttributeValue.ce_timestamp:type_name -> google.protobuf.Timestamp + 0, // 3: io.cloudevents.v1.PublishRequest.event:type_name -> io.cloudevents.v1.CloudEvent + 1, // 4: io.cloudevents.v1.CloudEvent.AttributesEntry.value:type_name -> io.cloudevents.v1.CloudEventAttributeValue + 2, // 5: io.cloudevents.v1.CloudEventService.Publish:input_type -> io.cloudevents.v1.PublishRequest + 3, // 6: io.cloudevents.v1.CloudEventService.Subscribe:input_type -> io.cloudevents.v1.SubscriptionRequest + 7, // 7: io.cloudevents.v1.CloudEventService.Publish:output_type -> google.protobuf.Empty + 0, // 8: io.cloudevents.v1.CloudEventService.Subscribe:output_type -> io.cloudevents.v1.CloudEvent + 7, // [7:9] is the sub-list for method output_type + 5, // [5:7] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_cloudevent_proto_init() } +func file_cloudevent_proto_init() { + if File_cloudevent_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cloudevent_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloudEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cloudevent_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloudEventAttributeValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cloudevent_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_cloudevent_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cloudevent_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*CloudEvent_BinaryData)(nil), + (*CloudEvent_TextData)(nil), + (*CloudEvent_ProtoData)(nil), + } + file_cloudevent_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*CloudEventAttributeValue_CeBoolean)(nil), + (*CloudEventAttributeValue_CeInteger)(nil), + (*CloudEventAttributeValue_CeString)(nil), + (*CloudEventAttributeValue_CeBytes)(nil), + (*CloudEventAttributeValue_CeUri)(nil), + (*CloudEventAttributeValue_CeUriRef)(nil), + (*CloudEventAttributeValue_CeTimestamp)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cloudevent_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_cloudevent_proto_goTypes, + DependencyIndexes: file_cloudevent_proto_depIdxs, + MessageInfos: file_cloudevent_proto_msgTypes, + }.Build() + File_cloudevent_proto = out.File + file_cloudevent_proto_rawDesc = nil + file_cloudevent_proto_goTypes = nil + file_cloudevent_proto_depIdxs = nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto new file mode 100644 index 000000000..266c9691c --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto @@ -0,0 +1,84 @@ +// After making changes to the *.proto files, always run the following +// command in current directory to update the generated code: +// go generate + +syntax = "proto3"; + +package io.cloudevents.v1; 
+ +option go_package = "open-cluster-management.io/sdk-go/cloudevents/generic/options/grpc/protobuf/v1"; + +import "google/protobuf/empty.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +// CloudEvent is copied from +// https://github.com/cloudevents/spec/blob/main/cloudevents/formats/protobuf-format.md. +message CloudEvent { + // Unique event identifier. + string id = 1; + // URI of the event source. + string source = 2; + // Version of the spec in use. + string spec_version = 3; + // Event type identifier. + string type = 4; + + // Optional & Extension Attributes + map attributes = 5; + + // CloudEvent Data (Bytes, Text, or Proto) + oneof data { + // If the event is binary data then the datacontenttype attribute + // should be set to an appropriate media-type. + bytes binary_data = 6; + // If the event is string data then the datacontenttype attribute + // should be set to an appropriate media-type such as application/json. + string text_data = 7; + // If the event is a protobuf then it must be encoded using this Any + // type. The datacontenttype attribute should be set to + // application/protobuf and the dataschema attribute set to the message + // type. + google.protobuf.Any proto_data = 8; + } +} + +// CloudEventAttribute enables extensions to use any of the seven allowed +// data types as the value of an envelope key. +message CloudEventAttributeValue { + // The value can be any one of these types. + oneof attr { + // Boolean value. + bool ce_boolean = 1; + // Integer value. + int32 ce_integer = 2; + // String value. + string ce_string = 3; + // Byte string value. + bytes ce_bytes = 4; + // URI value. + string ce_uri = 5; + // URI reference value. + string ce_uri_ref = 6; + // Timestamp value. + google.protobuf.Timestamp ce_timestamp = 7; + } +} + +message PublishRequest { + // Required. The topic to which event should be published. + // Format is `myhome/groundfloor/livingroom/temperature`. 
+ string topic = 1; + CloudEvent event = 2; +} + +message SubscriptionRequest { + // Required. The topic from which event should be pulled. + // Format is `myhome/groundfloor/livingroom/temperature`. + string topic = 1; +} + +service CloudEventService { + rpc Publish(PublishRequest) returns (google.protobuf.Empty) {} + rpc Subscribe(SubscriptionRequest) returns (stream CloudEvent) {} +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent_grpc.pb.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent_grpc.pb.go new file mode 100644 index 000000000..3dfd989fd --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent_grpc.pb.go @@ -0,0 +1,175 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.12.4 +// source: cloudevent.proto + +package v1 + +import ( + context "context" + empty "github.com/golang/protobuf/ptypes/empty" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + CloudEventService_Publish_FullMethodName = "/io.cloudevents.v1.CloudEventService/Publish" + CloudEventService_Subscribe_FullMethodName = "/io.cloudevents.v1.CloudEventService/Subscribe" +) + +// CloudEventServiceClient is the client API for CloudEventService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type CloudEventServiceClient interface { + Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Subscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (CloudEventService_SubscribeClient, error) +} + +type cloudEventServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewCloudEventServiceClient(cc grpc.ClientConnInterface) CloudEventServiceClient { + return &cloudEventServiceClient{cc} +} + +func (c *cloudEventServiceClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, CloudEventService_Publish_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudEventServiceClient) Subscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (CloudEventService_SubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &CloudEventService_ServiceDesc.Streams[0], CloudEventService_Subscribe_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &cloudEventServiceSubscribeClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type CloudEventService_SubscribeClient interface { + Recv() (*CloudEvent, error) + grpc.ClientStream +} + +type cloudEventServiceSubscribeClient struct { + grpc.ClientStream +} + +func (x *cloudEventServiceSubscribeClient) Recv() (*CloudEvent, error) { + m := new(CloudEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// CloudEventServiceServer is the server API for CloudEventService service. 
+// All implementations must embed UnimplementedCloudEventServiceServer +// for forward compatibility +type CloudEventServiceServer interface { + Publish(context.Context, *PublishRequest) (*empty.Empty, error) + Subscribe(*SubscriptionRequest, CloudEventService_SubscribeServer) error + mustEmbedUnimplementedCloudEventServiceServer() +} + +// UnimplementedCloudEventServiceServer must be embedded to have forward compatible implementations. +type UnimplementedCloudEventServiceServer struct { +} + +func (UnimplementedCloudEventServiceServer) Publish(context.Context, *PublishRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Publish not implemented") +} +func (UnimplementedCloudEventServiceServer) Subscribe(*SubscriptionRequest, CloudEventService_SubscribeServer) error { + return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") +} +func (UnimplementedCloudEventServiceServer) mustEmbedUnimplementedCloudEventServiceServer() {} + +// UnsafeCloudEventServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CloudEventServiceServer will +// result in compilation errors. 
+type UnsafeCloudEventServiceServer interface { + mustEmbedUnimplementedCloudEventServiceServer() +} + +func RegisterCloudEventServiceServer(s grpc.ServiceRegistrar, srv CloudEventServiceServer) { + s.RegisterService(&CloudEventService_ServiceDesc, srv) +} + +func _CloudEventService_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PublishRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudEventServiceServer).Publish(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CloudEventService_Publish_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudEventServiceServer).Publish(ctx, req.(*PublishRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudEventService_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscriptionRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(CloudEventServiceServer).Subscribe(m, &cloudEventServiceSubscribeServer{stream}) +} + +type CloudEventService_SubscribeServer interface { + Send(*CloudEvent) error + grpc.ServerStream +} + +type cloudEventServiceSubscribeServer struct { + grpc.ServerStream +} + +func (x *cloudEventServiceSubscribeServer) Send(m *CloudEvent) error { + return x.ServerStream.SendMsg(m) +} + +// CloudEventService_ServiceDesc is the grpc.ServiceDesc for CloudEventService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var CloudEventService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "io.cloudevents.v1.CloudEventService", + HandlerType: (*CloudEventServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Publish", + Handler: _CloudEventService_Publish_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Subscribe", + Handler: _CloudEventService_Subscribe_Handler, + ServerStreams: true, + }, + }, + Metadata: "cloudevent.proto", +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/gen.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/gen.go new file mode 100644 index 000000000..f643ef818 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/gen.go @@ -0,0 +1,3 @@ +package v1 + +//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative cloudevent.proto diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/message.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/message.go new file mode 100644 index 000000000..b03e77dc9 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/message.go @@ -0,0 +1,205 @@ +package protocol + +import ( + "bytes" + "context" + "fmt" + "net/url" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" + + pbv1 "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1" +) + +const ( + prefix = "ce-" + contenttype = "contenttype" + // dataSchema = "dataschema" + subject = "subject" + time = "time" +) + +var specs = spec.WithPrefix(prefix) + +// Message represents a gRPC message. 
+// This message *can* be read several times safely +type Message struct { + internal *pbv1.CloudEvent + version spec.Version + format format.Format +} + +// Check if Message implements binding.Message +var ( + _ binding.Message = (*Message)(nil) + _ binding.MessageMetadataReader = (*Message)(nil) +) + +func NewMessage(msg *pbv1.CloudEvent) *Message { + var f format.Format + var v spec.Version + if msg.Attributes != nil { + if contentType, ok := msg.Attributes[contenttype]; ok && format.IsFormat(contentType.GetCeString()) { + f = format.Lookup(contentType.GetCeString()) + } else if s := msg.SpecVersion; s != "" { + v = specs.Version(s) + } + } + return &Message{ + internal: msg, + version: v, + format: f, + } +} + +func (m *Message) ReadEncoding() binding.Encoding { + if m.version != nil { + return binding.EncodingBinary + } + if m.format != nil { + return binding.EncodingStructured + } + + return binding.EncodingUnknown +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + if m.format == nil { + return binding.ErrNotStructured + } + + return encoder.SetStructuredEvent(ctx, m.format, bytes.NewReader(m.internal.GetBinaryData())) +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) error { + if m.version == nil { + return binding.ErrNotBinary + } + + if m.format != nil { + return binding.ErrNotBinary + } + + if m.internal.SpecVersion != "" { + err := encoder.SetAttribute(m.version.AttributeFromKind(spec.SpecVersion), m.internal.SpecVersion) + if err != nil { + return err + } + } + if m.internal.Id != "" { + err := encoder.SetAttribute(m.version.AttributeFromKind(spec.ID), m.internal.Id) + if err != nil { + return err + } + } + if m.internal.Source != "" { + err := encoder.SetAttribute(m.version.AttributeFromKind(spec.Source), m.internal.Source) + if err != nil { + return err + } + } + if m.internal.Type != "" { + err := encoder.SetAttribute(m.version.AttributeFromKind(spec.Type), 
m.internal.Type) + if err != nil { + return err + } + } + + for name, value := range m.internal.Attributes { + attrVal, err := valueFrom(value) + if err != nil { + return fmt.Errorf("failed to convert attribute %s: %s", name, err) + } + + if strings.HasPrefix(name, prefix) { + attr := m.version.Attribute(name) + if attr != nil { + err = encoder.SetAttribute(attr, attrVal) + if err != nil { + return err + } + } else { + err = encoder.SetExtension(strings.TrimPrefix(name, prefix), attrVal) + if err != nil { + return err + } + } + } else if name == contenttype { + err = encoder.SetAttribute(m.version.AttributeFromKind(spec.DataContentType), attrVal) + if err != nil { + return err + } + } + } + + if m.internal.GetBinaryData() != nil { + return encoder.SetData(bytes.NewBuffer(m.internal.GetBinaryData())) + } + + return nil +} + +func (m *Message) Finish(error) error { + return nil +} + +func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + attr := m.version.AttributeFromKind(k) + if attr != nil { + switch attr.Kind() { + case spec.SpecVersion: + return attr, m.internal.SpecVersion + case spec.Type: + return attr, m.internal.Type + case spec.Source: + return attr, m.internal.Source + case spec.ID: + return attr, m.internal.Id + // case spec.DataContentType: + // return attr, m.internal.Attributes[contenttype].GetCeString() + default: + return attr, m.internal.Attributes[prefix+attr.Name()] + } + } + + return nil, nil +} + +func (m *Message) GetExtension(name string) interface{} { + return m.internal.Attributes[prefix+name] +} + +func valueFrom(attr *pbv1.CloudEventAttributeValue) (interface{}, error) { + var v interface{} + switch vt := attr.Attr.(type) { + case *pbv1.CloudEventAttributeValue_CeBoolean: + v = vt.CeBoolean + case *pbv1.CloudEventAttributeValue_CeInteger: + v = vt.CeInteger + case *pbv1.CloudEventAttributeValue_CeString: + v = vt.CeString + case *pbv1.CloudEventAttributeValue_CeBytes: + v = vt.CeBytes + case 
*pbv1.CloudEventAttributeValue_CeUri: + uri, err := url.Parse(vt.CeUri) + if err != nil { + return nil, fmt.Errorf("failed to parse URI value %s: %s", vt.CeUri, err.Error()) + } + v = uri + case *pbv1.CloudEventAttributeValue_CeUriRef: + uri, err := url.Parse(vt.CeUriRef) + if err != nil { + return nil, fmt.Errorf("failed to parse URIRef value %s: %s", vt.CeUriRef, err.Error()) + } + v = types.URIRef{URL: *uri} + case *pbv1.CloudEventAttributeValue_CeTimestamp: + v = vt.CeTimestamp.AsTime() + default: + return nil, fmt.Errorf("unsupported attribute type: %T", vt) + } + return types.Validate(v) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go new file mode 100644 index 000000000..4ac79e1f8 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go @@ -0,0 +1,40 @@ +package protocol + +import ( + "fmt" +) + +// Option is the function signature +type Option func(*Protocol) error + +// PublishOption +type PublishOption struct { + Topic string +} + +// SubscribeOption +type SubscribeOption struct { + Topics []string +} + +// WithPublishOption sets the Publish configuration for the client. This option is required if you want to send messages. +func WithPublishOption(publishOpt *PublishOption) Option { + return func(p *Protocol) error { + if publishOpt == nil { + return fmt.Errorf("the publish option must not be nil") + } + p.publishOption = publishOpt + return nil + } +} + +// WithSubscribeOption sets the Subscribe configuration for the client. This option is required if you want to receive messages. 
+func WithSubscribeOption(subscribeOpt *SubscribeOption) Option { + return func(p *Protocol) error { + if subscribeOpt == nil { + return fmt.Errorf("the subscribe option must not be nil") + } + p.subscribeOption = subscribeOpt + return nil + } +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go new file mode 100644 index 000000000..801cc8a38 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go @@ -0,0 +1,165 @@ +package protocol + +import ( + "context" + "fmt" + "io" + "sync" + + "google.golang.org/grpc" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/protocol" + + pbv1 "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1" +) + +// protocol for grpc +// define protocol for grpc + +type Protocol struct { + client pbv1.CloudEventServiceClient + publishOption *PublishOption + subscribeOption *SubscribeOption + // receiver + incoming chan *pbv1.CloudEvent + // inOpen + openerMutex sync.Mutex + + closeChan chan struct{} +} + +var ( + _ protocol.Sender = (*Protocol)(nil) + _ protocol.Opener = (*Protocol)(nil) + _ protocol.Receiver = (*Protocol)(nil) + _ protocol.Closer = (*Protocol)(nil) +) + +// new create grpc protocol +func NewProtocol(clientConn grpc.ClientConnInterface, opts ...Option) (*Protocol, error) { + if clientConn == nil { + return nil, fmt.Errorf("the client connection must not be nil") + } + + // TODO: support clientID and error handling in grpc connection + p := &Protocol{ + client: pbv1.NewCloudEventServiceClient(clientConn), + // subClient: + incoming: make(chan *pbv1.CloudEvent), + closeChan: make(chan struct{}), + } + + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + return p, nil 
+} + +func (p *Protocol) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(p); err != nil { + return err + } + } + return nil +} + +func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error { + // if p.publishOption == nil { + // return fmt.Errorf("the publish option must not be nil") + // } + var err error + defer func() { + err = m.Finish(err) + }() + + msg := &pbv1.CloudEvent{} + err = WritePBMessage(ctx, m, msg, transformers...) + if err != nil { + return err + } + + var topic string + if p.publishOption != nil { + topic = p.publishOption.Topic + } + if cecontext.TopicFrom(ctx) != "" { + topic = cecontext.TopicFrom(ctx) + cecontext.WithTopic(ctx, "") + } + + logger := cecontext.LoggerFrom(ctx) + logger.Infof("publishing event to topic: %v", topic) + _, err = p.client.Publish(ctx, &pbv1.PublishRequest{ + Topic: topic, + Event: msg, + }) + if err != nil { + return err + } + return err +} + +func (p *Protocol) OpenInbound(ctx context.Context) error { + if p.subscribeOption == nil { + return fmt.Errorf("the subscribe option must not be nil") + } + + if len(p.subscribeOption.Topics) == 0 { + return fmt.Errorf("the subscribe option topics must not be empty") + } + + p.openerMutex.Lock() + defer p.openerMutex.Unlock() + + logger := cecontext.LoggerFrom(ctx) + for _, topic := range p.subscribeOption.Topics { + subClient, err := p.client.Subscribe(ctx, &pbv1.SubscriptionRequest{ + Topic: topic, + }) + if err != nil { + return err + } + + logger.Infof("subscribing to topic: %v", topic) + go func() { + for { + msg, err := subClient.Recv() + if err != nil { + return + } + p.incoming <- msg + } + }() + } + + // Wait until external or internal context done + select { + case <-ctx.Done(): + case <-p.closeChan: + } + + return nil +} + +// Receive implements Receiver.Receive +func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + select { + case m, ok := <-p.incoming: + if !ok { 
+ return nil, io.EOF + } + msg := NewMessage(m) + return msg, nil + case <-ctx.Done(): + return nil, io.EOF + } +} + +func (p *Protocol) Close(ctx context.Context) error { + close(p.closeChan) + return nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/write_message.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/write_message.go new file mode 100644 index 000000000..1c6149344 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/write_message.go @@ -0,0 +1,215 @@ +package protocol + +import ( + "bytes" + "context" + "fmt" + "io" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" + "google.golang.org/protobuf/types/known/timestamppb" + + pbv1 "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1" +) + +// WritePBMessage fills the provided pubMessage with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). 
+func WritePBMessage(ctx context.Context, m binding.Message, pbEvt *pbv1.CloudEvent, transformers ...binding.Transformer) error { + structuredWriter := (*pbEventWriter)(pbEvt) + binaryWriter := (*pbEventWriter)(pbEvt) + + _, err := binding.Write( + ctx, + m, + structuredWriter, + binaryWriter, + transformers..., + ) + return err +} + +type pbEventWriter pbv1.CloudEvent + +var ( + _ binding.StructuredWriter = (*pbEventWriter)(nil) + _ binding.BinaryWriter = (*pbEventWriter)(nil) +) + +func (b *pbEventWriter) SetStructuredEvent(ctx context.Context, f format.Format, event io.Reader) error { + if b.Attributes == nil { + b.Attributes = make(map[string]*pbv1.CloudEventAttributeValue) + } + + b.Attributes[contenttype], _ = attributeFor(f.MediaType()) + + var buf bytes.Buffer + _, err := io.Copy(&buf, event) + if err != nil { + return err + } + + // TODO: check the data content type and set the right data format + b.Data = &pbv1.CloudEvent_BinaryData{ + BinaryData: buf.Bytes(), + } + + return nil +} + +func (b *pbEventWriter) Start(ctx context.Context) error { + if b.Attributes == nil { + b.Attributes = make(map[string]*pbv1.CloudEventAttributeValue) + } + + return nil +} + +func (b *pbEventWriter) End(ctx context.Context) error { + return nil +} + +func (b *pbEventWriter) SetData(reader io.Reader) error { + buf, ok := reader.(*bytes.Buffer) + if !ok { + buf = new(bytes.Buffer) + _, err := io.Copy(buf, reader) + if err != nil { + return err + } + } + + b.Data = &pbv1.CloudEvent_BinaryData{ + BinaryData: buf.Bytes(), + } + + return nil +} + +func (b *pbEventWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + switch attribute.Kind() { + case spec.SpecVersion: + val, ok := value.(string) + if !ok { + return fmt.Errorf("invalid SpecVersion type, expected string got %T", value) + } + b.SpecVersion = val + case spec.ID: + val, ok := value.(string) + if !ok { + return fmt.Errorf("invalid ID type, expected string got %T", value) + } + b.Id = val + case 
spec.Source: + val, ok := value.(string) + if !ok { + return fmt.Errorf("invalid Source type, expected string got %T", value) + } + b.Source = val + case spec.Type: + val, ok := value.(string) + if !ok { + return fmt.Errorf("invalid Type type, expected string got %T", value) + } + b.Type = val + case spec.DataContentType: + if value == nil { + delete(b.Attributes, contenttype) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[contenttype] = attrVal + } + case spec.Subject: + if value == nil { + delete(b.Attributes, prefix+subject) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[prefix+subject] = attrVal + } + case spec.Time: + if value == nil { + delete(b.Attributes, prefix+time) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[prefix+time] = attrVal + } + default: + if value == nil { + delete(b.Attributes, prefix+attribute.Name()) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[prefix+attribute.Name()] = attrVal + } + } + + return nil +} + +func (b *pbEventWriter) SetExtension(name string, value interface{}) error { + if value == nil { + delete(b.Attributes, prefix+name) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[prefix+name] = attrVal + } + + return nil +} + +func attributeFor(v interface{}) (*pbv1.CloudEventAttributeValue, error) { + vv, err := types.Validate(v) + if err != nil { + return nil, err + } + attr := &pbv1.CloudEventAttributeValue{} + switch vt := vv.(type) { + case bool: + attr.Attr = &pbv1.CloudEventAttributeValue_CeBoolean{ + CeBoolean: vt, + } + case int32: + attr.Attr = &pbv1.CloudEventAttributeValue_CeInteger{ + CeInteger: vt, + } + case string: + attr.Attr = &pbv1.CloudEventAttributeValue_CeString{ + CeString: vt, + } + case []byte: + attr.Attr = &pbv1.CloudEventAttributeValue_CeBytes{ + 
CeBytes: vt, + } + case types.URI: + attr.Attr = &pbv1.CloudEventAttributeValue_CeUri{ + CeUri: vt.String(), + } + case types.URIRef: + attr.Attr = &pbv1.CloudEventAttributeValue_CeUriRef{ + CeUriRef: vt.String(), + } + case types.Timestamp: + attr.Attr = &pbv1.CloudEventAttributeValue_CeTimestamp{ + CeTimestamp: timestamppb.New(vt.Time), + } + default: + return nil, fmt.Errorf("unsupported attribute type: %T", v) + } + return attr, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/sourceoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/sourceoptions.go new file mode 100644 index 000000000..d043c1136 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/sourceoptions.go @@ -0,0 +1,77 @@ +package grpc + +import ( + "context" + "fmt" + "strings" + + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventscontext "github.com/cloudevents/sdk-go/v2/context" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +type gRPCSourceOptions struct { + GRPCOptions + errorChan chan error // grpc client connection doesn't have error channel, it will handle reconnecting automatically + sourceID string +} + +func NewSourceOptions(gRPCOptions *GRPCOptions, sourceID string) *options.CloudEventsSourceOptions { + return &options.CloudEventsSourceOptions{ + CloudEventsOptions: &gRPCSourceOptions{ + GRPCOptions: *gRPCOptions, + errorChan: make(chan error), + sourceID: sourceID, + }, + SourceID: sourceID, + } +} + +func (o *gRPCSourceOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + eventType, err := types.ParseCloudEventsType(evtCtx.GetType()) + if err != nil { + return nil, fmt.Errorf("unsupported event type %s, %v", eventType, 
err) + } + + if eventType.Action == types.ResyncRequestAction { + // source publishes event to status resync topic to request to get resources status from all clusters + return cloudeventscontext.WithTopic(ctx, strings.Replace(StatusResyncTopic, "+", o.sourceID, -1)), nil + } + + clusterName, err := evtCtx.GetExtension(types.ExtensionClusterName) + if err != nil { + return nil, err + } + + // source publishes event to spec topic to send the resource spec to a specified cluster + specTopic := strings.Replace(SpecTopic, "+", o.sourceID, 1) + specTopic = strings.Replace(specTopic, "+", fmt.Sprintf("%s", clusterName), -1) + return cloudeventscontext.WithTopic(ctx, specTopic), nil +} + +func (o *gRPCSourceOptions) Client(ctx context.Context) (cloudevents.Client, error) { + receiver, err := o.GetCloudEventsClient( + ctx, + func(err error) { + o.errorChan <- err + }, + protocol.WithPublishOption(&protocol.PublishOption{}), + protocol.WithSubscribeOption(&protocol.SubscribeOption{ + Topics: []string{ + strings.Replace(StatusTopic, "+", o.sourceID, 1), // receiving the resources status from agents with status topic + SpecResyncTopic, // receiving the resources spec resync request from agents with spec resync topic + }, + }), + ) + if err != nil { + return nil, err + } + return receiver, nil +} + +func (o *gRPCSourceOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/agentoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/agentoptions.go index 0f9e3ffbf..b3f584e5e 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/agentoptions.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/agentoptions.go @@ -51,7 +51,7 @@ func (o *mqttAgentOptions) WithContext(ctx context.Context, evtCtx cloudevents.E // agent request to sync resource spec from all sources if eventType.Action 
== types.ResyncRequestAction && originalSource == types.SourceAll { if len(o.Topics.AgentBroadcast) == 0 { - klog.Warningf("the source wild card resync topic not set, fall back to the agent events topic") + klog.Warningf("the agent broadcast topic not set, fall back to the agent events topic") // TODO after supporting multiple sources, we should list each source eventsTopic := replaceLast(o.Topics.AgentEvents, "+", o.clusterName) diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/sourceoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/sourceoptions.go index 0a9f9ac6b..e059b9812 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/sourceoptions.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/sourceoptions.go @@ -47,9 +47,9 @@ func (o *mqttSourceOptions) WithContext(ctx context.Context, evtCtx cloudevents. } if eventType.Action == types.ResyncRequestAction && clusterName == types.ClusterAll { - // source request to get resources status from all sources + // source request to get resources status from all agents if len(o.Topics.SourceBroadcast) == 0 { - return nil, fmt.Errorf("the source wild card resync topic not set") + return nil, fmt.Errorf("the source broadcast topic not set") } resyncTopic := strings.Replace(o.Topics.SourceBroadcast, "+", o.sourceID, 1) diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go index bb783901d..58da81556 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go @@ -44,6 +44,7 @@ func NewCloudEventSourceClient[T ResourceObject]( baseClient := &baseClient{ cloudEventsOptions: sourceOptions.CloudEventsOptions, cloudEventsRateLimiter: 
NewRateLimiter(sourceOptions.EventRateLimit), + reconnectedChan: make(chan struct{}), } if err := baseClient.connect(ctx); err != nil { @@ -64,6 +65,10 @@ func NewCloudEventSourceClient[T ResourceObject]( }, nil } +func (c *CloudEventSourceClient[T]) ReconnectedChan() <-chan struct{} { + return c.reconnectedChan +} + // Resync the resources status by sending a status resync request from the current source to a specified cluster. func (c *CloudEventSourceClient[T]) Resync(ctx context.Context, clusterName string) error { // list the resource objects that are maintained by the current source with a specified cluster diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go index d66bac83c..2b0624241 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go @@ -7,7 +7,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" kubetypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/klog/v2" @@ -17,18 +16,11 @@ import ( workv1 "open-cluster-management.io/api/work/v1" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" - "open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" ) -const ManifestsDeleted = "Deleted" - -const ( - UpdateRequestAction = "update_request" - DeleteRequestAction = "delete_request" -) - // ManifestWorkAgentClient implements the ManifestWorkInterface. 
It sends the manifestworks status back to source by // CloudEventAgentClient. type ManifestWorkAgentClient struct { @@ -37,8 +29,6 @@ type ManifestWorkAgentClient struct { lister workv1lister.ManifestWorkNamespaceLister } -var manifestWorkGR = schema.GroupResource{Group: workv1.GroupName, Resource: "manifestworks"} - var _ workv1client.ManifestWorkInterface = &ManifestWorkAgentClient{} func NewManifestWorkAgentClient(cloudEventsClient *generic.CloudEventAgentClient[*workv1.ManifestWork], watcher *watcher.ManifestWorkWatcher) *ManifestWorkAgentClient { @@ -53,23 +43,23 @@ func (c *ManifestWorkAgentClient) SetLister(lister workv1lister.ManifestWorkName } func (c *ManifestWorkAgentClient) Create(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.CreateOptions) (*workv1.ManifestWork, error) { - return nil, errors.NewMethodNotSupported(manifestWorkGR, "create") + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "create") } func (c *ManifestWorkAgentClient) Update(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { - return nil, errors.NewMethodNotSupported(manifestWorkGR, "update") + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "update") } func (c *ManifestWorkAgentClient) UpdateStatus(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { - return nil, errors.NewMethodNotSupported(manifestWorkGR, "updatestatus") + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "updatestatus") } func (c *ManifestWorkAgentClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return errors.NewMethodNotSupported(manifestWorkGR, "delete") + return errors.NewMethodNotSupported(common.ManifestWorkGR, "delete") } func (c *ManifestWorkAgentClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - return 
errors.NewMethodNotSupported(manifestWorkGR, "deletecollection") + return errors.NewMethodNotSupported(common.ManifestWorkGR, "deletecollection") } func (c *ManifestWorkAgentClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) { @@ -88,7 +78,6 @@ func (c *ManifestWorkAgentClient) List(ctx context.Context, opts metav1.ListOpti } func (c *ManifestWorkAgentClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - // TODO (skeeey) consider resync the manifestworks when the ManifestWorkInformer reconnected return c.watcher, nil } @@ -105,7 +94,7 @@ func (c *ManifestWorkAgentClient) Patch(ctx context.Context, name string, pt kub return nil, err } - eventDataType, err := types.ParseCloudEventsDataType(patchedWork.Annotations[codec.CloudEventsDataTypeAnnotationKey]) + eventDataType, err := types.ParseCloudEventsDataType(patchedWork.Annotations[common.CloudEventsDataTypeAnnotationKey]) if err != nil { return nil, err } @@ -123,7 +112,7 @@ func (c *ManifestWorkAgentClient) Patch(ctx context.Context, name string, pt kub } if statusUpdated { - eventType.Action = UpdateRequestAction + eventType.Action = common.UpdateRequestAction if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { return nil, err } @@ -137,13 +126,13 @@ func (c *ManifestWorkAgentClient) Patch(ctx context.Context, name string, pt kub // it back to source if !newWork.DeletionTimestamp.IsZero() && len(newWork.Finalizers) == 0 { meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{ - Type: ManifestsDeleted, + Type: common.ManifestsDeleted, Status: metav1.ConditionTrue, Reason: "ManifestsDeleted", Message: fmt.Sprintf("The manifests are deleted from the cluster %s", newWork.Namespace), }) - eventType.Action = DeleteRequestAction + eventType.Action = common.DeleteRequestAction if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { return nil, err } diff --git 
a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifest.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifest.go index 2e86952f1..368ee789e 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifest.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifest.go @@ -17,17 +17,10 @@ import ( "open-cluster-management.io/api/utils/work/v1/workvalidator" workv1 "open-cluster-management.io/api/work/v1" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" ) -const ( - // CloudEventsDataTypeAnnotationKey is the key of the cloudevents data type annotation. - CloudEventsDataTypeAnnotationKey = "cloudevents.open-cluster-management.io/datatype" - - // CloudEventsDataTypeAnnotationKey is the key of the cloudevents original source annotation. 
- CloudEventsOriginalSourceAnnotationKey = "cloudevents.open-cluster-management.io/originalsource" -) - var sequenceGenerator *snowflake.Node func init() { @@ -67,7 +60,7 @@ func (c *ManifestCodec) Encode(source string, eventType types.CloudEventsType, w return nil, fmt.Errorf("failed to parse the resourceversion of the work %s, %v", work.UID, err) } - originalSource, ok := work.Annotations[CloudEventsOriginalSourceAnnotationKey] + originalSource, ok := work.Labels[common.CloudEventsOriginalSourceLabelKey] if !ok { return nil, fmt.Errorf("failed to find originalsource from the work %s", work.UID) } @@ -134,9 +127,11 @@ func (c *ManifestCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWork, er ResourceVersion: resourceVersion, Name: resourceID, Namespace: clusterName, + Labels: map[string]string{ + common.CloudEventsOriginalSourceLabelKey: evt.Source(), + }, Annotations: map[string]string{ - CloudEventsDataTypeAnnotationKey: eventType.CloudEventsDataType.String(), - CloudEventsOriginalSourceAnnotationKey: evt.Source(), + common.CloudEventsDataTypeAnnotationKey: eventType.CloudEventsDataType.String(), }, }, } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifestbundle.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifestbundle.go index 072701cbf..f2f44d6d3 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifestbundle.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifestbundle.go @@ -13,6 +13,7 @@ import ( workv1 "open-cluster-management.io/api/work/v1" "open-cluster-management.io/sdk-go/pkg/apis/work/v1/validator" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" ) @@ -39,7 +40,7 @@ func (c *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsT 
return nil, fmt.Errorf("failed to parse the resourceversion of the work %s, %v", work.UID, err) } - originalSource, ok := work.Annotations[CloudEventsOriginalSourceAnnotationKey] + originalSource, ok := work.Labels[common.CloudEventsOriginalSourceLabelKey] if !ok { return nil, fmt.Errorf("failed to find originalsource from the work %s", work.UID) } @@ -99,9 +100,11 @@ func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWo ResourceVersion: resourceVersion, Name: resourceID, Namespace: clusterName, + Labels: map[string]string{ + common.CloudEventsOriginalSourceLabelKey: evt.Source(), + }, Annotations: map[string]string{ - CloudEventsDataTypeAnnotationKey: eventType.CloudEventsDataType.String(), - CloudEventsOriginalSourceAnnotationKey: evt.Source(), + common.CloudEventsDataTypeAnnotationKey: eventType.CloudEventsDataType.String(), }, }, } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go index 35ac25284..2832d6fd3 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go @@ -6,6 +6,7 @@ import ( "time" "k8s.io/client-go/rest" + "k8s.io/klog/v2" workclientset "open-cluster-management.io/api/client/work/clientset/versioned" workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" @@ -13,10 +14,15 @@ import ( workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" workv1 "open-cluster-management.io/api/work/v1" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" + 
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" agentclient "open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client" agenthandler "open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal" + sourceclient "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client" + sourcehandler "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" ) @@ -27,15 +33,20 @@ const defaultInformerResyncTime = 10 * time.Minute // // ClientHolder also implements the ManifestWorksGetter interface. type ClientHolder struct { - workClient workv1client.WorkV1Interface + workClientSet workclientset.Interface manifestWorkInformer workv1informers.ManifestWorkInformer } var _ workv1client.ManifestWorksGetter = &ClientHolder{} +// WorkInterface returns a workclientset Interface +func (h *ClientHolder) WorkInterface() workclientset.Interface { + return h.workClientSet +} + // ManifestWorks returns a ManifestWorkInterface func (h *ClientHolder) ManifestWorks(namespace string) workv1client.ManifestWorkInterface { - return h.workClient.ManifestWorks(namespace) + return h.workClientSet.WorkV1().ManifestWorks(namespace) } // ManifestWorkInformer returns a ManifestWorkInformer @@ -49,6 +60,7 @@ type ClientHolderBuilder struct { codecs []generic.Codec[*workv1.ManifestWork] informerOptions []workinformers.SharedInformerOption informerResyncTime time.Duration + sourceID string clusterName string clientID string } @@ -58,6 +70,7 @@ type ClientHolderBuilder struct { // Available configurations: // - Kubeconfig (*rest.Config): builds a manifestwork client with kubeconfig // - MQTTOptions (*mqtt.MQTTOptions): builds a manifestwork client based on cloudevents with MQTT +// - GRPCOptions (*grpc.GRPCOptions): builds a manifestwork client based on cloudevents with GRPC func NewClientHolderBuilder(clientID string, 
config any) *ClientHolderBuilder { return &ClientHolderBuilder{ clientID: clientID, @@ -66,6 +79,11 @@ func NewClientHolderBuilder(clientID string, config any) *ClientHolderBuilder { } } +func (b *ClientHolderBuilder) WithSourceID(sourceID string) *ClientHolderBuilder { + b.sourceID = sourceID + return b +} + // WithClusterName set the managed cluster name when building a manifestwork client for an agent. func (b *ClientHolderBuilder) WithClusterName(clusterName string) *ClientHolderBuilder { b.clusterName = clusterName @@ -99,25 +117,29 @@ func (b *ClientHolderBuilder) NewClientHolder(ctx context.Context) (*ClientHolde factory := workinformers.NewSharedInformerFactoryWithOptions(kubeWorkClientSet, b.informerResyncTime, b.informerOptions...) return &ClientHolder{ - workClient: kubeWorkClientSet.WorkV1(), + workClientSet: kubeWorkClientSet, manifestWorkInformer: factory.Work().V1().ManifestWorks(), }, nil case *mqtt.MQTTOptions: if len(b.clusterName) != 0 { - return b.newAgentClients(ctx, config) + return b.newAgentClients(ctx, mqtt.NewAgentOptions(config, b.clusterName, b.clientID)) } - //TODO build manifestwork clients for source - return nil, nil + return b.newSourceClients(ctx, mqtt.NewSourceOptions(config, b.clientID, b.sourceID)) + case *grpc.GRPCOptions: + if len(b.clusterName) != 0 { + return b.newAgentClients(ctx, grpc.NewAgentOptions(config, b.clusterName, b.clientID)) + } + + return b.newSourceClients(ctx, grpc.NewSourceOptions(config, b.sourceID)) default: return nil, fmt.Errorf("unsupported client configuration type %T", config) } } -func (b *ClientHolderBuilder) newAgentClients(ctx context.Context, config *mqtt.MQTTOptions) (*ClientHolder, error) { +func (b *ClientHolderBuilder) newAgentClients(ctx context.Context, agentOptions *options.CloudEventsAgentOptions) (*ClientHolder, error) { workLister := &ManifestWorkLister{} watcher := watcher.NewManifestWorkWatcher() - agentOptions := mqtt.NewAgentOptions(config, b.clusterName, b.clientID) 
cloudEventsClient, err := generic.NewCloudEventAgentClient[*workv1.ManifestWork]( ctx, agentOptions, @@ -145,8 +167,71 @@ func (b *ClientHolderBuilder) newAgentClients(ctx context.Context, config *mqtt. cloudEventsClient.Subscribe(ctx, agenthandler.NewManifestWorkAgentHandler(namespacedLister, watcher)) + go func() { + for { + select { + case <-ctx.Done(): + return + case <-cloudEventsClient.ReconnectedChan(): + // when receiving a client reconnected signal, we resync all sources for this agent + // TODO after supporting multiple sources, we should only resync agent known sources + if err := cloudEventsClient.Resync(ctx, types.SourceAll); err != nil { + klog.Errorf("failed to send resync request, %v", err) + } + } + } + }() + + return &ClientHolder{ + workClientSet: workClientSet, + manifestWorkInformer: informers, + }, nil +} + +func (b *ClientHolderBuilder) newSourceClients(ctx context.Context, sourceOptions *options.CloudEventsSourceOptions) (*ClientHolder, error) { + workLister := &ManifestWorkLister{} + watcher := watcher.NewManifestWorkWatcher() + cloudEventsClient, err := generic.NewCloudEventSourceClient[*workv1.ManifestWork]( + ctx, + sourceOptions, + workLister, + ManifestWorkStatusHash, + b.codecs..., + ) + if err != nil { + return nil, err + } + + manifestWorkClient := sourceclient.NewManifestWorkSourceClient(b.sourceID, cloudEventsClient, watcher) + workClient := &internal.WorkV1ClientWrapper{ManifestWorkClient: manifestWorkClient} + workClientSet := &internal.WorkClientSetWrapper{WorkV1ClientWrapper: workClient} + factory := workinformers.NewSharedInformerFactoryWithOptions(workClientSet, b.informerResyncTime, b.informerOptions...) + informers := factory.Work().V1().ManifestWorks() + manifestWorkLister := informers.Lister() + // Set informer lister back to work lister and client. 
+ workLister.Lister = manifestWorkLister + manifestWorkClient.SetLister(manifestWorkLister) + + sourceHandler := sourcehandler.NewManifestWorkSourceHandler(manifestWorkLister, watcher) + cloudEventsClient.Subscribe(ctx, sourceHandler.HandlerFunc()) + + go sourceHandler.Run(ctx.Done()) + go func() { + for { + select { + case <-ctx.Done(): + return + case <-cloudEventsClient.ReconnectedChan(): + // when receiving a client reconnected signal, we resync all clusters for this source + if err := cloudEventsClient.Resync(ctx, types.ClusterAll); err != nil { + klog.Errorf("failed to send resync request, %v", err) + } + } + } + }() + return &ClientHolder{ - workClient: workClient, + workClientSet: workClientSet, manifestWorkInformer: informers, }, nil } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go new file mode 100644 index 000000000..53134eb89 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go @@ -0,0 +1,29 @@ +package common + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + + workv1 "open-cluster-management.io/api/work/v1" +) + +const ( + // CloudEventsDataTypeAnnotationKey is the key of the cloudevents data type annotation. + CloudEventsDataTypeAnnotationKey = "cloudevents.open-cluster-management.io/datatype" + + // CloudEventsGenerationAnnotationKey is the key of the manifestwork generation annotation. + CloudEventsGenerationAnnotationKey = "cloudevents.open-cluster-management.io/generation" +) + +// CloudEventsOriginalSourceLabelKey is the key of the cloudevents original source label. +const CloudEventsOriginalSourceLabelKey = "cloudevents.open-cluster-management.io/originalsource" + +// ManifestsDeleted represents the manifests are deleted. 
+const ManifestsDeleted = "Deleted" + +const ( + CreateRequestAction = "create_request" + UpdateRequestAction = "update_request" + DeleteRequestAction = "delete_request" +) + +var ManifestWorkGR = schema.GroupResource{Group: workv1.GroupName, Resource: "manifestworks"} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal/clientset.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal/clientset.go index 9b629d83f..768a6339b 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal/clientset.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal/clientset.go @@ -7,6 +7,7 @@ import ( workclientset "open-cluster-management.io/api/client/work/clientset/versioned" workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" workv1alpha1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1alpha1" + sourceclient "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client" ) // WorkClientSetWrapper wraps a work client that has a manifestwork client to a work clientset interface, this wrapper @@ -37,7 +38,10 @@ type WorkV1ClientWrapper struct { var _ workv1client.WorkV1Interface = &WorkV1ClientWrapper{} func (c *WorkV1ClientWrapper) ManifestWorks(namespace string) workv1client.ManifestWorkInterface { - // TODO if the ManifestWorkClient is ManifestWorkSourceClient, we need set namespace here + if sourceManifestWorkClient, ok := c.ManifestWorkClient.(*sourceclient.ManifestWorkSourceClient); ok { + sourceManifestWorkClient.SetNamespace(namespace) + return sourceManifestWorkClient + } return c.ManifestWorkClient } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/lister.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/lister.go index dc38d61fc..0911ee381 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/lister.go +++ 
b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/lister.go @@ -2,10 +2,12 @@ package work import ( "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" workv1 "open-cluster-management.io/api/work/v1" "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" ) // ManifestWorkLister list the ManifestWorks from a ManifestWorkInformer's local cache. @@ -15,5 +17,15 @@ type ManifestWorkLister struct { // List returns the ManifestWorks from a ManifestWorkInformer's local cache. func (l *ManifestWorkLister) List(options types.ListOptions) ([]*workv1.ManifestWork, error) { - return l.Lister.ManifestWorks(options.ClusterName).List(labels.Everything()) + selector := labels.Everything() + if options.Source != types.SourceAll { + req, err := labels.NewRequirement(common.CloudEventsOriginalSourceLabelKey, selection.Equals, []string{options.Source}) + if err != nil { + return nil, err + } + + selector = labels.NewSelector().Add(*req) + } + + return l.Lister.ManifestWorks(options.ClusterName).List(selector) } diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go new file mode 100644 index 000000000..bc0e92eff --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go @@ -0,0 +1,225 @@ +package client + +import ( + "context" + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + workv1 
"open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" +) + +// ManifestWorkSourceClient implements the ManifestWorkInterface. +type ManifestWorkSourceClient struct { + cloudEventsClient *generic.CloudEventSourceClient[*workv1.ManifestWork] + watcher *watcher.ManifestWorkWatcher + lister workv1lister.ManifestWorkLister + namespace string + sourceID string +} + +var _ workv1client.ManifestWorkInterface = &ManifestWorkSourceClient{} + +func NewManifestWorkSourceClient(sourceID string, + cloudEventsClient *generic.CloudEventSourceClient[*workv1.ManifestWork], + watcher *watcher.ManifestWorkWatcher) *ManifestWorkSourceClient { + return &ManifestWorkSourceClient{ + cloudEventsClient: cloudEventsClient, + watcher: watcher, + sourceID: sourceID, + } +} + +func (c *ManifestWorkSourceClient) SetLister(lister workv1lister.ManifestWorkLister) { + c.lister = lister +} + +func (mw *ManifestWorkSourceClient) SetNamespace(namespace string) { + mw.namespace = namespace +} + +func (c *ManifestWorkSourceClient) Create(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.CreateOptions) (*workv1.ManifestWork, error) { + _, err := c.lister.ManifestWorks(c.namespace).Get(manifestWork.Name) + if err == nil { + return nil, errors.NewAlreadyExists(common.ManifestWorkGR, manifestWork.Name) + } + + if !errors.IsNotFound(err) { + return nil, err + } + + eventDataType, err := types.ParseCloudEventsDataType(manifestWork.Annotations[common.CloudEventsDataTypeAnnotationKey]) + if err != nil { + return nil, err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: *eventDataType, + SubResource: types.SubResourceSpec, + Action: common.CreateRequestAction, + } + + 
generation, err := getWorkGeneration(manifestWork) + if err != nil { + return nil, err + } + + newWork := manifestWork.DeepCopy() + newWork.UID = kubetypes.UID(utils.UID(c.sourceID, c.namespace, newWork.Name)) + newWork.Generation = generation + ensureSourceLabel(c.sourceID, newWork) + if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err + } + + // add the new work to the ManifestWorkInformer local cache. + c.watcher.Receive(watch.Event{Type: watch.Added, Object: newWork}) + return newWork.DeepCopy(), nil +} + +func (c *ManifestWorkSourceClient) Update(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "update") +} + +func (c *ManifestWorkSourceClient) UpdateStatus(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "updatestatus") +} + +func (c *ManifestWorkSourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + work, err := c.lister.ManifestWorks(c.namespace).Get(name) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + eventDataType, err := types.ParseCloudEventsDataType(work.Annotations[common.CloudEventsDataTypeAnnotationKey]) + if err != nil { + return err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: *eventDataType, + SubResource: types.SubResourceSpec, + Action: common.DeleteRequestAction, + } + + deletingWork := work.DeepCopy() + now := metav1.Now() + deletingWork.DeletionTimestamp = &now + + if err := c.cloudEventsClient.Publish(ctx, eventType, deletingWork); err != nil { + return err + } + + // update the deleting work in the ManifestWorkInformer local cache. 
+ c.watcher.Receive(watch.Event{Type: watch.Modified, Object: deletingWork}) + return nil +} + +func (c *ManifestWorkSourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + return errors.NewMethodNotSupported(common.ManifestWorkGR, "deletecollection") +} + +func (c *ManifestWorkSourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) { + klog.V(4).Infof("getting manifestwork %s", name) + return c.lister.ManifestWorks(c.namespace).Get(name) +} + +func (c *ManifestWorkSourceClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) { + klog.V(4).Infof("list manifestworks") + // send resync request to fetch manifestwork status from agents when the ManifestWorkInformer starts + if err := c.cloudEventsClient.Resync(ctx, types.ClusterAll); err != nil { + return nil, err + } + + return &workv1.ManifestWorkList{}, nil +} + +func (c *ManifestWorkSourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.watcher, nil +} + +func (c *ManifestWorkSourceClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) { + klog.V(4).Infof("patching manifestwork %s", name) + + if len(subresources) != 0 { + return nil, fmt.Errorf("unsupported to update subresources %v", subresources) + } + + lastWork, err := c.lister.ManifestWorks(c.namespace).Get(name) + if err != nil { + return nil, err + } + + patchedWork, err := utils.Patch(pt, lastWork, data) + if err != nil { + return nil, err + } + + generation, err := getWorkGeneration(patchedWork) + if err != nil { + return nil, err + } + + if generation <= lastWork.Generation { + return nil, fmt.Errorf("the work %s/%s current generation %d is less than or equal to the last generation %d", + c.namespace, name, generation, lastWork.Generation) + 
} + + eventDataType, err := types.ParseCloudEventsDataType(lastWork.Annotations[common.CloudEventsDataTypeAnnotationKey]) + if err != nil { + return nil, err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: *eventDataType, + SubResource: types.SubResourceSpec, + Action: common.UpdateRequestAction, + } + + newWork := patchedWork.DeepCopy() + newWork.Generation = generation + if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err + } + + // refresh the work in the ManifestWorkInformer local cache with patched work. + c.watcher.Receive(watch.Event{Type: watch.Modified, Object: newWork}) + return newWork.DeepCopy(), nil +} + +func getWorkGeneration(work *workv1.ManifestWork) (int64, error) { + generation, ok := work.Annotations[common.CloudEventsGenerationAnnotationKey] + if !ok { + return -1, fmt.Errorf("the annotation %s is not found from work %s", common.CloudEventsGenerationAnnotationKey, work.UID) + } + + generationInt, err := strconv.Atoi(generation) + if err != nil { + return -1, err + } + + return int64(generationInt), nil +} + +func ensureSourceLabel(sourceID string, work *workv1.ManifestWork) { + if work.Labels == nil { + work.Labels = map[string]string{} + } + + work.Labels[common.CloudEventsOriginalSourceLabelKey] = sourceID +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec/manifestbundle.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec/manifestbundle.go new file mode 100644 index 000000000..eafc90e8f --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec/manifestbundle.go @@ -0,0 +1,101 @@ +package codec + +import ( + "fmt" + + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventstypes "github.com/cloudevents/sdk-go/v2/types" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + + workv1 "open-cluster-management.io/api/work/v1" + 
"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" +) + +// ManifestBundleCodec is a codec to encode/decode a ManifestWork/cloudevent with ManifestBundle for a source. +type ManifestBundleCodec struct{} + +func NewManifestBundleCodec() *ManifestBundleCodec { + return &ManifestBundleCodec{} +} + +// EventDataType always returns the event data type `io.open-cluster-management.works.v1alpha1.manifestbundles`. +func (c *ManifestBundleCodec) EventDataType() types.CloudEventsDataType { + return payload.ManifestBundleEventDataType +} + +// Encode the spec of a ManifestWork to a cloudevent with ManifestBundle. +func (c *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsType, work *workv1.ManifestWork) (*cloudevents.Event, error) { + if eventType.CloudEventsDataType != payload.ManifestBundleEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + evt := types.NewEventBuilder(source, eventType). + WithClusterName(work.Namespace). + WithResourceID(string(work.UID)). + WithResourceVersion(work.Generation). + NewEvent() + if !work.DeletionTimestamp.IsZero() { + evt.SetExtension(types.ExtensionDeletionTimestamp, work.DeletionTimestamp.Time) + return &evt, nil + } + + manifests := &payload.ManifestBundle{ + Manifests: work.Spec.Workload.Manifests, + DeleteOption: work.Spec.DeleteOption, + ManifestConfigs: work.Spec.ManifestConfigs, + } + if err := evt.SetData(cloudevents.ApplicationJSON, manifests); err != nil { + return nil, fmt.Errorf("failed to encode manifestwork status to a cloudevent: %v", err) + } + + return &evt, nil +} + +// Decode a cloudevent whose data is ManifestBundle to a ManifestWork. 
+func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWork, error) { + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + return nil, fmt.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + } + + if eventType.CloudEventsDataType != payload.ManifestBundleEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + evtExtensions := evt.Context.GetExtensions() + + resourceID, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceID]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceid extension: %v", err) + } + + resourceVersion, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceVersion]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceversion extension: %v", err) + } + + work := &workv1.ManifestWork{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + UID: kubetypes.UID(resourceID), + ResourceVersion: resourceVersion, + }, + } + + manifestStatus := &payload.ManifestBundleStatus{} + if err := evt.DataAs(manifestStatus); err != nil { + return nil, fmt.Errorf("failed to unmarshal event data %s, %v", string(evt.Data()), err) + } + + work.Status = workv1.ManifestWorkStatus{ + Conditions: manifestStatus.Conditions, + ResourceStatus: workv1.ManifestResourceStatus{ + Manifests: manifestStatus.ResourceStatus, + }, + } + + return work, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler/resourcehandler.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler/resourcehandler.go new file mode 100644 index 000000000..0ad31f15d --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler/resourcehandler.go @@ -0,0 +1,167 @@ +package handler + +import ( + "fmt" + "strconv" + "time" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + 
"k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + kubetypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" +) + +const ManifestWorkFinalizer = "cloudevents.open-cluster-management.io/manifest-work-cleanup" + +type ManifestWorkSourceHandler struct { + works workqueue.RateLimitingInterface + lister workv1lister.ManifestWorkLister + watcher *watcher.ManifestWorkWatcher +} + +// NewManifestWorkSourceHandler returns a ResourceHandler for a ManifestWork source client. It sends the kube events +// with ManifestWorWatcher after CloudEventSourceClient received the ManifestWork status from agent, then the +// ManifestWorkInformer handles the kube events in its local cache. 
+func NewManifestWorkSourceHandler(lister workv1lister.ManifestWorkLister, watcher *watcher.ManifestWorkWatcher) *ManifestWorkSourceHandler { + return &ManifestWorkSourceHandler{ + works: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "manifestwork-source-handler"), + lister: lister, + watcher: watcher, + } +} + +func (h *ManifestWorkSourceHandler) Run(stopCh <-chan struct{}) { + defer h.works.ShutDown() + + // start a goroutine to handle the works from the queue + // the .Until will re-kick the runWorker one second after the runWorker completes + go wait.Until(h.runWorker, time.Second, stopCh) + + // wait until we're told to stop + <-stopCh +} + +func (h *ManifestWorkSourceHandler) HandlerFunc() generic.ResourceHandler[*workv1.ManifestWork] { + return func(action types.ResourceAction, obj *workv1.ManifestWork) error { + switch action { + case types.StatusModified: + h.works.Add(obj) + default: + return fmt.Errorf("unsupported resource action %s", action) + } + return nil + } +} + +func (h *ManifestWorkSourceHandler) runWorker() { + // hot loop until we're told to stop. processNextEvent will automatically wait until there's work available, so + // we don't worry about secondary waits + for h.processNextWork() { + } +} + +// processNextWork deals with one key off the queue. +func (h *ManifestWorkSourceHandler) processNextWork() bool { + // pull the next event item from queue. 
+ // events queue blocks until it can return an item to be processed + key, quit := h.works.Get() + if quit { + // the current queue is shutdown and becomes empty, quit this process + return false + } + defer h.works.Done(key) + + if err := h.handleWork(key.(*workv1.ManifestWork)); err != nil { + // we failed to handle the work, we should requeue the item to work on later + // this method will add a backoff to avoid hotlooping on particular items + h.works.AddRateLimited(key) + return true + } + + // we handle the event successfully, tell the queue to stop tracking history for this event + h.works.Forget(key) + return true +} + +func (h *ManifestWorkSourceHandler) handleWork(work *workv1.ManifestWork) error { + lastWork := h.getWorkByUID(work.UID) + if lastWork == nil { + // the work is not found, this may be the client is restarted and the local cache is not ready, requeue this + // work + return errors.NewNotFound(common.ManifestWorkGR, string(work.UID)) + } + + updatedWork := lastWork.DeepCopy() + if meta.IsStatusConditionTrue(work.Status.Conditions, common.ManifestsDeleted) { + updatedWork.Finalizers = []string{} + h.watcher.Receive(watch.Event{Type: watch.Deleted, Object: updatedWork}) + return nil + } + + resourceVersion, err := strconv.Atoi(work.ResourceVersion) + if err != nil { + klog.Errorf("invalid resource version for work %s/%s, %v", lastWork.Namespace, lastWork.Name, err) + return nil + } + + if int64(resourceVersion) > lastWork.Generation { + klog.Warningf("the work %s/%s resource version %d is great than its generation %d, ignore", + lastWork.Namespace, lastWork.Name, resourceVersion, work.Generation) + return nil + } + + // no status change + if equality.Semantic.DeepEqual(lastWork.Status, work.Status) { + return nil + } + + // the work has been handled by agent, we ensure a finalizer on the work + updatedWork.Finalizers = ensureFinalizers(updatedWork.Finalizers) + updatedWork.Status = work.Status + h.watcher.Receive(watch.Event{Type: 
watch.Modified, Object: updatedWork}) + return nil +} + +func (h *ManifestWorkSourceHandler) getWorkByUID(uid kubetypes.UID) *workv1.ManifestWork { + works, err := h.lister.List(labels.Everything()) + if err != nil { + klog.Errorf("failed to lists works, %v", err) + return nil + } + + for _, work := range works { + if work.UID == uid { + return work + } + } + + return nil +} + +func ensureFinalizers(workFinalizers []string) []string { + has := false + for _, f := range workFinalizers { + if f == ManifestWorkFinalizer { + has = true + break + } + } + + if !has { + workFinalizers = append(workFinalizers, ManifestWorkFinalizer) + } + + return workFinalizers +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go index 7a4afcc72..51b0b3354 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go @@ -5,8 +5,10 @@ import ( "fmt" jsonpatch "github.com/evanphx/json-patch" + "github.com/google/uuid" "k8s.io/apimachinery/pkg/types" workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" ) // Patch applies the patch to a work with the patch type. @@ -45,3 +47,9 @@ func Patch(patchType types.PatchType, work *workv1.ManifestWork, patchData []byt return patchedWork, nil } + +// UID returns a v5 UUID based on sourceID, work name and namespace to make sure it is consistent +func UID(sourceID, namespace, name string) string { + id := fmt.Sprintf("%s-%s-%s-%s", sourceID, common.ManifestWorkGR.String(), namespace, name) + return uuid.NewSHA1(uuid.NameSpaceOID, []byte(id)).String() +}