diff --git a/cmd/clusterctl/client/alpha/client.go b/cmd/clusterctl/client/alpha/client.go index c72386b650dc..0f4bd230a1c3 100644 --- a/cmd/clusterctl/client/alpha/client.go +++ b/cmd/clusterctl/client/alpha/client.go @@ -16,12 +16,6 @@ limitations under the License. package alpha -import "context" - -var ( - ctx = context.TODO() -) - // Client is the alpha client. type Client interface { Rollout() Rollout diff --git a/cmd/clusterctl/client/alpha/kubeadmcontrolplane.go b/cmd/clusterctl/client/alpha/kubeadmcontrolplane.go index 4ff821f8e88e..8f77d19fd3e0 100644 --- a/cmd/clusterctl/client/alpha/kubeadmcontrolplane.go +++ b/cmd/clusterctl/client/alpha/kubeadmcontrolplane.go @@ -17,6 +17,7 @@ limitations under the License. package alpha import ( + "context" "fmt" "time" @@ -29,7 +30,7 @@ import ( ) // getKubeadmControlPlane retrieves the KubeadmControlPlane object corresponding to the name and namespace specified. -func getKubeadmControlPlane(proxy cluster.Proxy, name, namespace string) (*controlplanev1.KubeadmControlPlane, error) { +func getKubeadmControlPlane(ctx context.Context, proxy cluster.Proxy, name, namespace string) (*controlplanev1.KubeadmControlPlane, error) { kcpObj := &controlplanev1.KubeadmControlPlane{} c, err := proxy.NewClient() if err != nil { @@ -47,13 +48,13 @@ func getKubeadmControlPlane(proxy cluster.Proxy, name, namespace string) (*contr } // setRolloutAfterOnKCP sets KubeadmControlPlane.spec.rolloutAfter. -func setRolloutAfterOnKCP(proxy cluster.Proxy, name, namespace string) error { +func setRolloutAfterOnKCP(ctx context.Context, proxy cluster.Proxy, name, namespace string) error { patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf(`{"spec":{"rolloutAfter":"%v"}}`, time.Now().Format(time.RFC3339)))) - return patchKubeadmControlPlane(proxy, name, namespace, patch) + return patchKubeadmControlPlane(ctx, proxy, name, namespace, patch) } // patchKubeadmControlPlane applies a patch to a KubeadmControlPlane. 
-func patchKubeadmControlPlane(proxy cluster.Proxy, name, namespace string, patch client.Patch) error { +func patchKubeadmControlPlane(ctx context.Context, proxy cluster.Proxy, name, namespace string, patch client.Patch) error { cFrom, err := proxy.NewClient() if err != nil { return err diff --git a/cmd/clusterctl/client/alpha/machinedeployment.go b/cmd/clusterctl/client/alpha/machinedeployment.go index 4bf1609ba91e..ec9fcc78b1f9 100644 --- a/cmd/clusterctl/client/alpha/machinedeployment.go +++ b/cmd/clusterctl/client/alpha/machinedeployment.go @@ -17,6 +17,7 @@ limitations under the License. package alpha import ( + "context" "fmt" "strconv" "time" @@ -35,7 +36,7 @@ import ( ) // getMachineDeployment retrieves the MachineDeployment object corresponding to the name and namespace specified. -func getMachineDeployment(proxy cluster.Proxy, name, namespace string) (*clusterv1.MachineDeployment, error) { +func getMachineDeployment(ctx context.Context, proxy cluster.Proxy, name, namespace string) (*clusterv1.MachineDeployment, error) { mdObj := &clusterv1.MachineDeployment{} c, err := proxy.NewClient() if err != nil { @@ -53,13 +54,13 @@ func getMachineDeployment(proxy cluster.Proxy, name, namespace string) (*cluster } // setRolloutAfterOnMachineDeployment sets MachineDeployment.spec.rolloutAfter. -func setRolloutAfterOnMachineDeployment(proxy cluster.Proxy, name, namespace string) error { +func setRolloutAfterOnMachineDeployment(ctx context.Context, proxy cluster.Proxy, name, namespace string) error { patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf(`{"spec":{"rolloutAfter":"%v"}}`, time.Now().Format(time.RFC3339)))) - return patchMachineDeployment(proxy, name, namespace, patch) + return patchMachineDeployment(ctx, proxy, name, namespace, patch) } // patchMachineDeployment applies a patch to a machinedeployment. 
-func patchMachineDeployment(proxy cluster.Proxy, name, namespace string, patch client.Patch) error { +func patchMachineDeployment(ctx context.Context, proxy cluster.Proxy, name, namespace string, patch client.Patch) error { cFrom, err := proxy.NewClient() if err != nil { return err @@ -118,7 +119,7 @@ func findMachineDeploymentRevision(toRevision int64, allMSs []*clusterv1.Machine } // getMachineSetsForDeployment returns a list of MachineSets associated with a MachineDeployment. -func getMachineSetsForDeployment(proxy cluster.Proxy, md *clusterv1.MachineDeployment) ([]*clusterv1.MachineSet, error) { +func getMachineSetsForDeployment(ctx context.Context, proxy cluster.Proxy, md *clusterv1.MachineDeployment) ([]*clusterv1.MachineSet, error) { log := logf.Log c, err := proxy.NewClient() if err != nil { diff --git a/cmd/clusterctl/client/alpha/rollout.go b/cmd/clusterctl/client/alpha/rollout.go index 0e67bba75dd5..8736ae79df0d 100644 --- a/cmd/clusterctl/client/alpha/rollout.go +++ b/cmd/clusterctl/client/alpha/rollout.go @@ -17,6 +17,8 @@ limitations under the License. package alpha import ( + "context" + corev1 "k8s.io/api/core/v1" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" @@ -40,10 +42,10 @@ var validRollbackResourceTypes = []string{ // Rollout defines the behavior of a rollout implementation. 
type Rollout interface { - ObjectRestarter(cluster.Proxy, corev1.ObjectReference) error - ObjectPauser(cluster.Proxy, corev1.ObjectReference) error - ObjectResumer(cluster.Proxy, corev1.ObjectReference) error - ObjectRollbacker(cluster.Proxy, corev1.ObjectReference, int64) error + ObjectRestarter(context.Context, cluster.Proxy, corev1.ObjectReference) error + ObjectPauser(context.Context, cluster.Proxy, corev1.ObjectReference) error + ObjectResumer(context.Context, cluster.Proxy, corev1.ObjectReference) error + ObjectRollbacker(context.Context, cluster.Proxy, corev1.ObjectReference, int64) error } var _ Rollout = &rollout{} diff --git a/cmd/clusterctl/client/alpha/rollout_pauser.go b/cmd/clusterctl/client/alpha/rollout_pauser.go index e6407c043a6f..582c2cb7409d 100644 --- a/cmd/clusterctl/client/alpha/rollout_pauser.go +++ b/cmd/clusterctl/client/alpha/rollout_pauser.go @@ -17,6 +17,7 @@ limitations under the License. package alpha import ( + "context" "fmt" "github.com/pkg/errors" @@ -30,28 +31,28 @@ import ( ) // ObjectPauser will issue a pause on the specified cluster-api resource. -func (r *rollout) ObjectPauser(proxy cluster.Proxy, ref corev1.ObjectReference) error { +func (r *rollout) ObjectPauser(ctx context.Context, proxy cluster.Proxy, ref corev1.ObjectReference) error { switch ref.Kind { case MachineDeployment: - deployment, err := getMachineDeployment(proxy, ref.Name, ref.Namespace) + deployment, err := getMachineDeployment(ctx, proxy, ref.Name, ref.Namespace) if err != nil || deployment == nil { return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name) } if deployment.Spec.Paused { return errors.Errorf("MachineDeployment is already paused: %v/%v\n", ref.Kind, ref.Name) //nolint:revive // MachineDeployment is intentionally capitalized. 
} - if err := pauseMachineDeployment(proxy, ref.Name, ref.Namespace); err != nil { + if err := pauseMachineDeployment(ctx, proxy, ref.Name, ref.Namespace); err != nil { return err } case KubeadmControlPlane: - kcp, err := getKubeadmControlPlane(proxy, ref.Name, ref.Namespace) + kcp, err := getKubeadmControlPlane(ctx, proxy, ref.Name, ref.Namespace) if err != nil || kcp == nil { return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name) } if annotations.HasPaused(kcp.GetObjectMeta()) { return errors.Errorf("KubeadmControlPlane is already paused: %v/%v\n", ref.Kind, ref.Name) //nolint:revive // KubeadmControlPlane is intentionally capitalized. } - if err := pauseKubeadmControlPlane(proxy, ref.Name, ref.Namespace); err != nil { + if err := pauseKubeadmControlPlane(ctx, proxy, ref.Name, ref.Namespace); err != nil { return err } default: @@ -61,13 +62,13 @@ func (r *rollout) ObjectPauser(proxy cluster.Proxy, ref corev1.ObjectReference) } // pauseMachineDeployment sets Paused to true in the MachineDeployment's spec. -func pauseMachineDeployment(proxy cluster.Proxy, name, namespace string) error { +func pauseMachineDeployment(ctx context.Context, proxy cluster.Proxy, name, namespace string) error { patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"spec\":{\"paused\":%t}}", true))) - return patchMachineDeployment(proxy, name, namespace, patch) + return patchMachineDeployment(ctx, proxy, name, namespace, patch) } // pauseKubeadmControlPlane sets paused annotation to true. 
-func pauseKubeadmControlPlane(proxy cluster.Proxy, name, namespace string) error { +func pauseKubeadmControlPlane(ctx context.Context, proxy cluster.Proxy, name, namespace string) error { patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"metadata\":{\"annotations\":{%q: \"%t\"}}}", clusterv1.PausedAnnotation, true))) - return patchKubeadmControlPlane(proxy, name, namespace, patch) + return patchKubeadmControlPlane(ctx, proxy, name, namespace, patch) } diff --git a/cmd/clusterctl/client/alpha/rollout_pauser_test.go b/cmd/clusterctl/client/alpha/rollout_pauser_test.go index 35e6a3622371..10b952bb7033 100644 --- a/cmd/clusterctl/client/alpha/rollout_pauser_test.go +++ b/cmd/clusterctl/client/alpha/rollout_pauser_test.go @@ -146,7 +146,7 @@ func Test_ObjectPauser(t *testing.T) { g := NewWithT(t) r := newRolloutClient() proxy := test.NewFakeProxy().WithObjs(tt.fields.objs...) - err := r.ObjectPauser(proxy, tt.fields.ref) + err := r.ObjectPauser(context.Background(), proxy, tt.fields.ref) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/alpha/rollout_restarter.go b/cmd/clusterctl/client/alpha/rollout_restarter.go index 78da02962221..d16392d5591a 100644 --- a/cmd/clusterctl/client/alpha/rollout_restarter.go +++ b/cmd/clusterctl/client/alpha/rollout_restarter.go @@ -17,6 +17,7 @@ limitations under the License. package alpha import ( + "context" "time" "github.com/pkg/errors" @@ -27,10 +28,10 @@ import ( ) // ObjectRestarter will issue a restart on the specified cluster-api resource. 
-func (r *rollout) ObjectRestarter(proxy cluster.Proxy, ref corev1.ObjectReference) error { +func (r *rollout) ObjectRestarter(ctx context.Context, proxy cluster.Proxy, ref corev1.ObjectReference) error { switch ref.Kind { case MachineDeployment: - deployment, err := getMachineDeployment(proxy, ref.Name, ref.Namespace) + deployment, err := getMachineDeployment(ctx, proxy, ref.Name, ref.Namespace) if err != nil || deployment == nil { return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name) } @@ -40,11 +41,11 @@ func (r *rollout) ObjectRestarter(proxy cluster.Proxy, ref corev1.ObjectReferenc if deployment.Spec.RolloutAfter != nil && deployment.Spec.RolloutAfter.After(time.Now()) { return errors.Errorf("can't update MachineDeployment (remove 'spec.rolloutAfter' first): %v/%v", ref.Kind, ref.Name) } - if err := setRolloutAfterOnMachineDeployment(proxy, ref.Name, ref.Namespace); err != nil { + if err := setRolloutAfterOnMachineDeployment(ctx, proxy, ref.Name, ref.Namespace); err != nil { return err } case KubeadmControlPlane: - kcp, err := getKubeadmControlPlane(proxy, ref.Name, ref.Namespace) + kcp, err := getKubeadmControlPlane(ctx, proxy, ref.Name, ref.Namespace) if err != nil || kcp == nil { return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name) } @@ -54,7 +55,7 @@ func (r *rollout) ObjectRestarter(proxy cluster.Proxy, ref corev1.ObjectReferenc if kcp.Spec.RolloutAfter != nil && kcp.Spec.RolloutAfter.After(time.Now()) { return errors.Errorf("can't update KubeadmControlPlane (remove 'spec.rolloutAfter' first): %v/%v", ref.Kind, ref.Name) } - if err := setRolloutAfterOnKCP(proxy, ref.Name, ref.Namespace); err != nil { + if err := setRolloutAfterOnKCP(ctx, proxy, ref.Name, ref.Namespace); err != nil { return err } default: diff --git a/cmd/clusterctl/client/alpha/rollout_restarter_test.go b/cmd/clusterctl/client/alpha/rollout_restarter_test.go index 2b13542d0e35..f0a3b181c3bc 100644 --- a/cmd/clusterctl/client/alpha/rollout_restarter_test.go 
+++ b/cmd/clusterctl/client/alpha/rollout_restarter_test.go @@ -204,7 +204,7 @@ func Test_ObjectRestarter(t *testing.T) { g := NewWithT(t) r := newRolloutClient() proxy := test.NewFakeProxy().WithObjs(tt.fields.objs...) - err := r.ObjectRestarter(proxy, tt.fields.ref) + err := r.ObjectRestarter(context.Background(), proxy, tt.fields.ref) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/alpha/rollout_resumer.go b/cmd/clusterctl/client/alpha/rollout_resumer.go index 7be784b8dfd3..b224e91e87a0 100644 --- a/cmd/clusterctl/client/alpha/rollout_resumer.go +++ b/cmd/clusterctl/client/alpha/rollout_resumer.go @@ -17,6 +17,7 @@ limitations under the License. package alpha import ( + "context" "fmt" "strings" @@ -31,28 +32,28 @@ import ( ) // ObjectResumer will issue a resume on the specified cluster-api resource. -func (r *rollout) ObjectResumer(proxy cluster.Proxy, ref corev1.ObjectReference) error { +func (r *rollout) ObjectResumer(ctx context.Context, proxy cluster.Proxy, ref corev1.ObjectReference) error { switch ref.Kind { case MachineDeployment: - deployment, err := getMachineDeployment(proxy, ref.Name, ref.Namespace) + deployment, err := getMachineDeployment(ctx, proxy, ref.Name, ref.Namespace) if err != nil || deployment == nil { return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name) } if !deployment.Spec.Paused { return errors.Errorf("MachineDeployment is not currently paused: %v/%v\n", ref.Kind, ref.Name) //nolint:revive // MachineDeployment is intentionally capitalized. 
} - if err := resumeMachineDeployment(proxy, ref.Name, ref.Namespace); err != nil { + if err := resumeMachineDeployment(ctx, proxy, ref.Name, ref.Namespace); err != nil { return err } case KubeadmControlPlane: - kcp, err := getKubeadmControlPlane(proxy, ref.Name, ref.Namespace) + kcp, err := getKubeadmControlPlane(ctx, proxy, ref.Name, ref.Namespace) if err != nil || kcp == nil { return errors.Wrapf(err, "failed to fetch %v/%v", ref.Kind, ref.Name) } if !annotations.HasPaused(kcp.GetObjectMeta()) { return errors.Errorf("KubeadmControlPlane is not currently paused: %v/%v\n", ref.Kind, ref.Name) //nolint:revive // KubeadmControlPlane is intentionally capitalized. } - if err := resumeKubeadmControlPlane(proxy, ref.Name, ref.Namespace); err != nil { + if err := resumeKubeadmControlPlane(ctx, proxy, ref.Name, ref.Namespace); err != nil { return err } default: @@ -62,17 +63,17 @@ func (r *rollout) ObjectResumer(proxy cluster.Proxy, ref corev1.ObjectReference) } // resumeMachineDeployment sets Paused to true in the MachineDeployment's spec. -func resumeMachineDeployment(proxy cluster.Proxy, name, namespace string) error { +func resumeMachineDeployment(ctx context.Context, proxy cluster.Proxy, name, namespace string) error { patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"spec\":{\"paused\":%t}}", false))) - return patchMachineDeployment(proxy, name, namespace, patch) + return patchMachineDeployment(ctx, proxy, name, namespace, patch) } // resumeKubeadmControlPlane removes paused annotation. -func resumeKubeadmControlPlane(proxy cluster.Proxy, name, namespace string) error { +func resumeKubeadmControlPlane(ctx context.Context, proxy cluster.Proxy, name, namespace string) error { // In the paused annotation we must replace slashes to ~1, see https://datatracker.ietf.org/doc/html/rfc6901#section-3. 
pausedAnnotation := strings.Replace(clusterv1.PausedAnnotation, "/", "~1", -1) patch := client.RawPatch(types.JSONPatchType, []byte(fmt.Sprintf("[{\"op\": \"remove\", \"path\": \"/metadata/annotations/%s\"}]", pausedAnnotation))) - return patchKubeadmControlPlane(proxy, name, namespace, patch) + return patchKubeadmControlPlane(ctx, proxy, name, namespace, patch) } diff --git a/cmd/clusterctl/client/alpha/rollout_resumer_test.go b/cmd/clusterctl/client/alpha/rollout_resumer_test.go index 92ec8da5f757..727a9574b56d 100644 --- a/cmd/clusterctl/client/alpha/rollout_resumer_test.go +++ b/cmd/clusterctl/client/alpha/rollout_resumer_test.go @@ -149,7 +149,7 @@ func Test_ObjectResumer(t *testing.T) { g := NewWithT(t) r := newRolloutClient() proxy := test.NewFakeProxy().WithObjs(tt.fields.objs...) - err := r.ObjectResumer(proxy, tt.fields.ref) + err := r.ObjectResumer(context.Background(), proxy, tt.fields.ref) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/alpha/rollout_rollbacker.go b/cmd/clusterctl/client/alpha/rollout_rollbacker.go index d9832e7dbee6..d7b90ec3bb51 100644 --- a/cmd/clusterctl/client/alpha/rollout_rollbacker.go +++ b/cmd/clusterctl/client/alpha/rollout_rollbacker.go @@ -17,6 +17,8 @@ limitations under the License. package alpha import ( + "context" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -27,17 +29,17 @@ import ( ) // ObjectRollbacker will issue a rollback on the specified cluster-api resource. 
-func (r *rollout) ObjectRollbacker(proxy cluster.Proxy, ref corev1.ObjectReference, toRevision int64) error { +func (r *rollout) ObjectRollbacker(ctx context.Context, proxy cluster.Proxy, ref corev1.ObjectReference, toRevision int64) error { switch ref.Kind { case MachineDeployment: - deployment, err := getMachineDeployment(proxy, ref.Name, ref.Namespace) + deployment, err := getMachineDeployment(ctx, proxy, ref.Name, ref.Namespace) if err != nil || deployment == nil { return errors.Wrapf(err, "failed to get %v/%v", ref.Kind, ref.Name) } if deployment.Spec.Paused { return errors.Errorf("can't rollback a paused MachineDeployment: please run 'clusterctl rollout resume %v/%v' first", ref.Kind, ref.Name) } - if err := rollbackMachineDeployment(proxy, deployment, toRevision); err != nil { + if err := rollbackMachineDeployment(ctx, proxy, deployment, toRevision); err != nil { return err } default: @@ -47,7 +49,7 @@ func (r *rollout) ObjectRollbacker(proxy cluster.Proxy, ref corev1.ObjectReferen } // rollbackMachineDeployment will rollback to a previous MachineSet revision used by this MachineDeployment. 
-func rollbackMachineDeployment(proxy cluster.Proxy, md *clusterv1.MachineDeployment, toRevision int64) error { +func rollbackMachineDeployment(ctx context.Context, proxy cluster.Proxy, md *clusterv1.MachineDeployment, toRevision int64) error { log := logf.Log c, err := proxy.NewClient() if err != nil { @@ -57,7 +59,7 @@ func rollbackMachineDeployment(proxy cluster.Proxy, md *clusterv1.MachineDeploym if toRevision < 0 { return errors.Errorf("revision number cannot be negative: %v", toRevision) } - msList, err := getMachineSetsForDeployment(proxy, md) + msList, err := getMachineSetsForDeployment(ctx, proxy, md) if err != nil { return err } diff --git a/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go b/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go index 03e3fbc4dd66..374b5c040ec3 100644 --- a/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go +++ b/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go @@ -241,7 +241,7 @@ func Test_ObjectRollbacker(t *testing.T) { g := NewWithT(t) r := newRolloutClient() proxy := test.NewFakeProxy().WithObjs(tt.fields.objs...) - err := r.ObjectRollbacker(proxy, tt.fields.ref, tt.fields.toRevision) + err := r.ObjectRollbacker(context.Background(), proxy, tt.fields.ref, tt.fields.toRevision) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/client.go b/cmd/clusterctl/client/client.go index 1f7d4b093d65..dd76da457b33 100644 --- a/cmd/clusterctl/client/client.go +++ b/cmd/clusterctl/client/client.go @@ -17,6 +17,8 @@ limitations under the License. package client import ( + "context" + clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/alpha" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" @@ -31,44 +33,44 @@ type Client interface { GetProvidersConfig() ([]Provider, error) // GetProviderComponents returns the provider components for a given provider with options including targetNamespace. 
- GetProviderComponents(provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) + GetProviderComponents(ctx context.Context, provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) // GenerateProvider returns the provider components for a given provider with options including targetNamespace. - GenerateProvider(provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) + GenerateProvider(ctx context.Context, provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) // Init initializes a management cluster by adding the requested list of providers. - Init(options InitOptions) ([]Components, error) + Init(ctx context.Context, options InitOptions) ([]Components, error) // InitImages returns the list of images required for executing the init command. - InitImages(options InitOptions) ([]string, error) + InitImages(ctx context.Context, options InitOptions) ([]string, error) // GetClusterTemplate returns a workload cluster template. - GetClusterTemplate(options GetClusterTemplateOptions) (Template, error) + GetClusterTemplate(ctx context.Context, options GetClusterTemplateOptions) (Template, error) // GetKubeconfig returns the kubeconfig of the workload cluster. - GetKubeconfig(options GetKubeconfigOptions) (string, error) + GetKubeconfig(ctx context.Context, options GetKubeconfigOptions) (string, error) // Delete deletes providers from a management cluster. - Delete(options DeleteOptions) error + Delete(ctx context.Context, options DeleteOptions) error // Move moves all the Cluster API objects existing in a namespace (or from all the namespaces if empty) to a target management cluster. - Move(options MoveOptions) error + Move(ctx context.Context, options MoveOptions) error // PlanUpgrade returns a set of suggested Upgrade plans for the cluster. 
- PlanUpgrade(options PlanUpgradeOptions) ([]UpgradePlan, error) + PlanUpgrade(ctx context.Context, options PlanUpgradeOptions) ([]UpgradePlan, error) // PlanCertManagerUpgrade returns a CertManagerUpgradePlan. - PlanCertManagerUpgrade(options PlanUpgradeOptions) (CertManagerUpgradePlan, error) + PlanCertManagerUpgrade(ctx context.Context, options PlanUpgradeOptions) (CertManagerUpgradePlan, error) // ApplyUpgrade executes an upgrade plan. - ApplyUpgrade(options ApplyUpgradeOptions) error + ApplyUpgrade(ctx context.Context, options ApplyUpgradeOptions) error // ProcessYAML provides a direct way to process a yaml and inspect its // variables. - ProcessYAML(options ProcessYAMLOptions) (YamlPrinter, error) + ProcessYAML(ctx context.Context, options ProcessYAMLOptions) (YamlPrinter, error) // DescribeCluster returns the object tree representing the status of a Cluster API cluster. - DescribeCluster(options DescribeClusterOptions) (*tree.ObjectTree, error) + DescribeCluster(ctx context.Context, options DescribeClusterOptions) (*tree.ObjectTree, error) // AlphaClient is an Interface for alpha features in clusterctl AlphaClient @@ -77,15 +79,15 @@ type Client interface { // AlphaClient exposes the alpha features in clusterctl high-level client library. 
type AlphaClient interface { // RolloutRestart provides rollout restart of cluster-api resources - RolloutRestart(options RolloutRestartOptions) error + RolloutRestart(ctx context.Context, options RolloutRestartOptions) error // RolloutPause provides rollout pause of cluster-api resources - RolloutPause(options RolloutPauseOptions) error + RolloutPause(ctx context.Context, options RolloutPauseOptions) error // RolloutResume provides rollout resume of paused cluster-api resources - RolloutResume(options RolloutResumeOptions) error + RolloutResume(ctx context.Context, options RolloutResumeOptions) error // RolloutUndo provides rollout rollback of cluster-api resources - RolloutUndo(options RolloutUndoOptions) error + RolloutUndo(ctx context.Context, options RolloutUndoOptions) error // TopologyPlan dry runs the topology reconciler - TopologyPlan(options TopologyPlanOptions) (*TopologyPlanOutput, error) + TopologyPlan(ctx context.Context, options TopologyPlanOptions) (*TopologyPlanOutput, error) } // YamlPrinter exposes methods that prints the processed template and @@ -113,7 +115,7 @@ type RepositoryClientFactoryInput struct { } // RepositoryClientFactory is a factory of repository.Client from a given input. -type RepositoryClientFactory func(RepositoryClientFactoryInput) (repository.Client, error) +type RepositoryClientFactory func(context.Context, RepositoryClientFactoryInput) (repository.Client, error) // ClusterClientFactoryInput represents the inputs required by the factory. type ClusterClientFactoryInput struct { @@ -154,11 +156,11 @@ func InjectClusterClientFactory(factory ClusterClientFactory) Option { } // New returns a configClient. -func New(path string, options ...Option) (Client, error) { - return newClusterctlClient(path, options...) +func New(ctx context.Context, path string, options ...Option) (Client, error) { + return newClusterctlClient(ctx, path, options...) 
} -func newClusterctlClient(path string, options ...Option) (*clusterctlClient, error) { +func newClusterctlClient(ctx context.Context, path string, options ...Option) (*clusterctlClient, error) { client := &clusterctlClient{} for _, o := range options { o(client) @@ -167,7 +169,7 @@ func newClusterctlClient(path string, options ...Option) (*clusterctlClient, err // if there is an injected config, use it, otherwise use the default one // provided by the config low level library. if client.configClient == nil { - c, err := config.New(path) + c, err := config.New(ctx, path) if err != nil { return nil, err } @@ -195,8 +197,9 @@ func newClusterctlClient(path string, options ...Option) (*clusterctlClient, err // defaultRepositoryFactory is a RepositoryClientFactory func the uses the default client provided by the repository low level library. func defaultRepositoryFactory(configClient config.Client) RepositoryClientFactory { - return func(input RepositoryClientFactoryInput) (repository.Client, error) { + return func(ctx context.Context, input RepositoryClientFactoryInput) (repository.Client, error) { return repository.New( + ctx, input.Provider, configClient, repository.InjectYamlProcessor(input.Processor), diff --git a/cmd/clusterctl/client/client_test.go b/cmd/clusterctl/client/client_test.go index 95a25c28690b..90c93f92110a 100644 --- a/cmd/clusterctl/client/client_test.go +++ b/cmd/clusterctl/client/client_test.go @@ -43,12 +43,14 @@ func TestNewFakeClient(_ *testing.T) { // create a fake config with a provider named P1 and a variable named var repository1Config := config.NewProvider("p1", "url", clusterctlv1.CoreProviderType) - config1 := newFakeConfig(). + ctx := context.Background() + + config1 := newFakeConfig(ctx). WithVar("var", "value"). WithProvider(repository1Config) // create a fake repository with some YAML files in it (usually matching the list of providers defined in the config) - repository1 := newFakeRepository(repository1Config, config1). 
+ repository1 := newFakeRepository(ctx, repository1Config, config1). WithPaths("root", "components"). WithDefaultVersion("v1.0"). WithFile("v1.0", "components.yaml", []byte("content")) @@ -58,7 +60,7 @@ func TestNewFakeClient(_ *testing.T) { WithObjs() // create a new fakeClient that allows to execute tests on the fake config, the fake repositories and the fake cluster. - newFakeClient(config1). + newFakeClient(context.Background(), config1). WithRepository(repository1). WithCluster(cluster1) } @@ -77,81 +79,81 @@ func (f fakeClient) GetProvidersConfig() ([]Provider, error) { return f.internalClient.GetProvidersConfig() } -func (f fakeClient) GetProviderComponents(provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { - return f.internalClient.GetProviderComponents(provider, providerType, options) +func (f fakeClient) GetProviderComponents(ctx context.Context, provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { + return f.internalClient.GetProviderComponents(ctx, provider, providerType, options) } -func (f fakeClient) GenerateProvider(provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { - return f.internalClient.GenerateProvider(provider, providerType, options) +func (f fakeClient) GenerateProvider(ctx context.Context, provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { + return f.internalClient.GenerateProvider(ctx, provider, providerType, options) } -func (f fakeClient) GetClusterTemplate(options GetClusterTemplateOptions) (Template, error) { - return f.internalClient.GetClusterTemplate(options) +func (f fakeClient) GetClusterTemplate(ctx context.Context, options GetClusterTemplateOptions) (Template, error) { + return f.internalClient.GetClusterTemplate(ctx, options) } -func (f fakeClient) GetKubeconfig(options GetKubeconfigOptions) (string, error) { - 
return f.internalClient.GetKubeconfig(options) +func (f fakeClient) GetKubeconfig(ctx context.Context, options GetKubeconfigOptions) (string, error) { + return f.internalClient.GetKubeconfig(ctx, options) } -func (f fakeClient) Init(options InitOptions) ([]Components, error) { - return f.internalClient.Init(options) +func (f fakeClient) Init(ctx context.Context, options InitOptions) ([]Components, error) { + return f.internalClient.Init(ctx, options) } -func (f fakeClient) InitImages(options InitOptions) ([]string, error) { - return f.internalClient.InitImages(options) +func (f fakeClient) InitImages(ctx context.Context, options InitOptions) ([]string, error) { + return f.internalClient.InitImages(ctx, options) } -func (f fakeClient) Delete(options DeleteOptions) error { - return f.internalClient.Delete(options) +func (f fakeClient) Delete(ctx context.Context, options DeleteOptions) error { + return f.internalClient.Delete(ctx, options) } -func (f fakeClient) Move(options MoveOptions) error { - return f.internalClient.Move(options) +func (f fakeClient) Move(ctx context.Context, options MoveOptions) error { + return f.internalClient.Move(ctx, options) } -func (f fakeClient) PlanUpgrade(options PlanUpgradeOptions) ([]UpgradePlan, error) { - return f.internalClient.PlanUpgrade(options) +func (f fakeClient) PlanUpgrade(ctx context.Context, options PlanUpgradeOptions) ([]UpgradePlan, error) { + return f.internalClient.PlanUpgrade(ctx, options) } -func (f fakeClient) PlanCertManagerUpgrade(options PlanUpgradeOptions) (CertManagerUpgradePlan, error) { - return f.internalClient.PlanCertManagerUpgrade(options) +func (f fakeClient) PlanCertManagerUpgrade(ctx context.Context, options PlanUpgradeOptions) (CertManagerUpgradePlan, error) { + return f.internalClient.PlanCertManagerUpgrade(ctx, options) } -func (f fakeClient) ApplyUpgrade(options ApplyUpgradeOptions) error { - return f.internalClient.ApplyUpgrade(options) +func (f fakeClient) ApplyUpgrade(ctx context.Context, 
options ApplyUpgradeOptions) error { + return f.internalClient.ApplyUpgrade(ctx, options) } -func (f fakeClient) ProcessYAML(options ProcessYAMLOptions) (YamlPrinter, error) { - return f.internalClient.ProcessYAML(options) +func (f fakeClient) ProcessYAML(ctx context.Context, options ProcessYAMLOptions) (YamlPrinter, error) { + return f.internalClient.ProcessYAML(ctx, options) } -func (f fakeClient) RolloutRestart(options RolloutRestartOptions) error { - return f.internalClient.RolloutRestart(options) +func (f fakeClient) RolloutRestart(ctx context.Context, options RolloutRestartOptions) error { + return f.internalClient.RolloutRestart(ctx, options) } -func (f fakeClient) DescribeCluster(options DescribeClusterOptions) (*tree.ObjectTree, error) { - return f.internalClient.DescribeCluster(options) +func (f fakeClient) DescribeCluster(ctx context.Context, options DescribeClusterOptions) (*tree.ObjectTree, error) { + return f.internalClient.DescribeCluster(ctx, options) } -func (f fakeClient) RolloutPause(options RolloutPauseOptions) error { - return f.internalClient.RolloutPause(options) +func (f fakeClient) RolloutPause(ctx context.Context, options RolloutPauseOptions) error { + return f.internalClient.RolloutPause(ctx, options) } -func (f fakeClient) RolloutResume(options RolloutResumeOptions) error { - return f.internalClient.RolloutResume(options) +func (f fakeClient) RolloutResume(ctx context.Context, options RolloutResumeOptions) error { + return f.internalClient.RolloutResume(ctx, options) } -func (f fakeClient) RolloutUndo(options RolloutUndoOptions) error { - return f.internalClient.RolloutUndo(options) +func (f fakeClient) RolloutUndo(ctx context.Context, options RolloutUndoOptions) error { + return f.internalClient.RolloutUndo(ctx, options) } -func (f fakeClient) TopologyPlan(options TopologyPlanOptions) (*cluster.TopologyPlanOutput, error) { - return f.internalClient.TopologyPlan(options) +func (f fakeClient) TopologyPlan(ctx context.Context, options 
TopologyPlanOptions) (*cluster.TopologyPlanOutput, error) { + return f.internalClient.TopologyPlan(ctx, options) } // newFakeClient returns a clusterctl client that allows to execute tests on a set of fake config, fake repositories and fake clusters. // you can use WithCluster and WithRepository to prepare for the test case. -func newFakeClient(configClient config.Client) *fakeClient { +func newFakeClient(ctx context.Context, configClient config.Client) *fakeClient { fake := &fakeClient{ clusters: map[cluster.Kubeconfig]cluster.Client{}, repositories: map[string]repository.Client{}, @@ -159,7 +161,7 @@ func newFakeClient(configClient config.Client) *fakeClient { fake.configClient = configClient if fake.configClient == nil { - fake.configClient = newFakeConfig() + fake.configClient = newFakeConfig(ctx) } var clusterClientFactory = func(i ClusterClientFactoryInput) (cluster.Client, error) { @@ -171,10 +173,10 @@ func newFakeClient(configClient config.Client) *fakeClient { return fake.clusters[k], nil } - fake.internalClient, _ = newClusterctlClient("fake-config", + fake.internalClient, _ = newClusterctlClient(ctx, "fake-config", InjectConfig(fake.configClient), InjectClusterClientFactory(clusterClientFactory), - InjectRepositoryFactory(func(input RepositoryClientFactoryInput) (repository.Client, error) { + InjectRepositoryFactory(func(ctx context.Context, input RepositoryClientFactoryInput) (repository.Client, error) { if _, ok := fake.repositories[input.Provider.ManifestLabel()]; !ok { return nil, errors.Errorf("repository for kubeconfig %q does not exist", input.Provider.ManifestLabel()) } @@ -217,7 +219,7 @@ func newFakeCluster(kubeconfig cluster.Kubeconfig, configClient config.Client) * fake.internalclient = cluster.New(kubeconfig, configClient, cluster.InjectProxy(fake.fakeProxy), cluster.InjectPollImmediateWaiter(pollImmediateWaiter), - cluster.InjectRepositoryFactory(func(provider config.Provider, configClient config.Client, options ...repository.Option) 
(repository.Client, error) { + cluster.InjectRepositoryFactory(func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { if _, ok := fake.repositories[provider.Name()]; !ok { return nil, errors.Errorf("repository for kubeconfig %q does not exist", provider.Name()) } @@ -244,19 +246,19 @@ type fakeCertManagerClient struct { var _ cluster.CertManagerClient = &fakeCertManagerClient{} -func (p *fakeCertManagerClient) EnsureInstalled() error { +func (p *fakeCertManagerClient) EnsureInstalled(_ context.Context) error { return nil } -func (p *fakeCertManagerClient) EnsureLatestVersion() error { +func (p *fakeCertManagerClient) EnsureLatestVersion(_ context.Context) error { return nil } -func (p *fakeCertManagerClient) PlanUpgrade() (cluster.CertManagerUpgradePlan, error) { +func (p *fakeCertManagerClient) PlanUpgrade(_ context.Context) (cluster.CertManagerUpgradePlan, error) { return p.certManagerPlan, nil } -func (p *fakeCertManagerClient) Images() ([]string, error) { +func (p *fakeCertManagerClient) Images(_ context.Context) ([]string, error) { return p.images, p.imagesError } @@ -351,10 +353,10 @@ func (f *fakeClusterClient) WithCertManagerClient(client cluster.CertManagerClie // newFakeConfig return a fake implementation of the client for low-level config library. // The implementation uses a FakeReader that stores configuration settings in a map; you can use // the WithVar or WithProvider methods to set the map values. 
-func newFakeConfig() *fakeConfigClient { +func newFakeConfig(ctx context.Context) *fakeConfigClient { fakeReader := test.NewFakeReader() - client, _ := config.New("fake-config", config.InjectReader(fakeReader)) + client, _ := config.New(ctx, "fake-config", config.InjectReader(fakeReader)) return &fakeConfigClient{ fakeReader: fakeReader, @@ -398,11 +400,11 @@ func (f *fakeConfigClient) WithProvider(provider config.Provider) *fakeConfigCli // newFakeRepository return a fake implementation of the client for low-level repository library. // The implementation stores configuration settings in a map; you can use // the WithPaths or WithDefaultVersion methods to configure the repository and WithFile to set the map values. -func newFakeRepository(provider config.Provider, configClient config.Client) *fakeRepositoryClient { +func newFakeRepository(ctx context.Context, provider config.Provider, configClient config.Client) *fakeRepositoryClient { fakeRepository := repository.NewMemoryRepository() if configClient == nil { - configClient = newFakeConfig() + configClient = newFakeConfig(ctx) } return &fakeRepositoryClient{ @@ -426,8 +428,8 @@ func (f fakeRepositoryClient) DefaultVersion() string { return f.fakeRepository.DefaultVersion() } -func (f fakeRepositoryClient) GetVersions() ([]string, error) { - return f.fakeRepository.GetVersions() +func (f fakeRepositoryClient) GetVersions(ctx context.Context) ([]string, error) { + return f.fakeRepository.GetVersions(ctx) } func (f fakeRepositoryClient) Components() repository.ComponentsClient { @@ -501,14 +503,14 @@ type fakeTemplateClient struct { processor yaml.Processor } -func (f *fakeTemplateClient) Get(flavor, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { +func (f *fakeTemplateClient) Get(ctx context.Context, flavor, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { name := "cluster-template" if flavor != "" { name = fmt.Sprintf("%s-%s", name, flavor) } name = 
fmt.Sprintf("%s.yaml", name) - content, err := f.fakeRepository.GetFile(f.version, name) + content, err := f.fakeRepository.GetFile(ctx, f.version, name) if err != nil { return nil, err } @@ -529,9 +531,9 @@ type fakeClusterClassClient struct { processor yaml.Processor } -func (f *fakeClusterClassClient) Get(class, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { +func (f *fakeClusterClassClient) Get(ctx context.Context, class, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { name := fmt.Sprintf("clusterclass-%s.yaml", class) - content, err := f.fakeRepository.GetFile(f.version, name) + content, err := f.fakeRepository.GetFile(ctx, f.version, name) if err != nil { return nil, err } @@ -550,8 +552,8 @@ type fakeMetadataClient struct { fakeRepository *repository.MemoryRepository } -func (f *fakeMetadataClient) Get() (*clusterctlv1.Metadata, error) { - content, err := f.fakeRepository.GetFile(f.version, "metadata.yaml") +func (f *fakeMetadataClient) Get(ctx context.Context) (*clusterctlv1.Metadata, error) { + content, err := f.fakeRepository.GetFile(ctx, f.version, "metadata.yaml") if err != nil { return nil, err } @@ -573,12 +575,12 @@ type fakeComponentClient struct { processor yaml.Processor } -func (f *fakeComponentClient) Raw(options repository.ComponentsOptions) ([]byte, error) { - return f.getRawBytes(&options) +func (f *fakeComponentClient) Raw(ctx context.Context, options repository.ComponentsOptions) ([]byte, error) { + return f.getRawBytes(ctx, &options) } -func (f *fakeComponentClient) Get(options repository.ComponentsOptions) (repository.Components, error) { - content, err := f.getRawBytes(&options) +func (f *fakeComponentClient) Get(ctx context.Context, options repository.ComponentsOptions) (repository.Components, error) { + content, err := f.getRawBytes(ctx, &options) if err != nil { return nil, err } @@ -594,11 +596,11 @@ func (f *fakeComponentClient) Get(options 
repository.ComponentsOptions) (reposit ) } -func (f *fakeComponentClient) getRawBytes(options *repository.ComponentsOptions) ([]byte, error) { +func (f *fakeComponentClient) getRawBytes(ctx context.Context, options *repository.ComponentsOptions) ([]byte, error) { if options.Version == "" { options.Version = f.fakeRepository.DefaultVersion() } path := f.fakeRepository.ComponentsPath() - return f.fakeRepository.GetFile(options.Version, path) + return f.fakeRepository.GetFile(ctx, options.Version, path) } diff --git a/cmd/clusterctl/client/cluster/cert_manager.go b/cmd/clusterctl/client/cluster/cert_manager.go index 2c73c5d4b744..62752aa09221 100644 --- a/cmd/clusterctl/client/cluster/cert_manager.go +++ b/cmd/clusterctl/client/cluster/cert_manager.go @@ -67,18 +67,18 @@ type CertManagerUpgradePlan struct { type CertManagerClient interface { // EnsureInstalled makes sure cert-manager is running and its API is available. // This is required to install a new provider. - EnsureInstalled() error + EnsureInstalled(ctx context.Context) error // EnsureLatestVersion checks the cert-manager version currently installed, and if it is // older than the version currently suggested by clusterctl, upgrades it. - EnsureLatestVersion() error + EnsureLatestVersion(ctx context.Context) error // PlanUpgrade returns a CertManagerUpgradePlan with information regarding // a cert-manager upgrade if necessary. - PlanUpgrade() (CertManagerUpgradePlan, error) + PlanUpgrade(ctx context.Context) (CertManagerUpgradePlan, error) // Images returns the list of images required for installing the cert-manager. - Images() ([]string, error) + Images(ctx context.Context) ([]string, error) } // certManagerClient implements CertManagerClient. @@ -103,9 +103,9 @@ func newCertManagerClient(configClient config.Client, repositoryClientFactory Re } // Images returns the list of images required for installing the cert-manager.
-func (cm *certManagerClient) Images() ([]string, error) { +func (cm *certManagerClient) Images(ctx context.Context) ([]string, error) { // If cert manager already exists in the cluster, there is no need of additional images for cert-manager. - exists, err := cm.certManagerNamespaceExists() + exists, err := cm.certManagerNamespaceExists(ctx) if err != nil { return nil, err } @@ -119,7 +119,7 @@ func (cm *certManagerClient) Images() ([]string, error) { return nil, err } - objs, err := cm.getManifestObjs(config) + objs, err := cm.getManifestObjs(ctx, config) if err != nil { return nil, err } @@ -131,7 +131,7 @@ func (cm *certManagerClient) Images() ([]string, error) { return images, nil } -func (cm *certManagerClient) certManagerNamespaceExists() (bool, error) { +func (cm *certManagerClient) certManagerNamespaceExists(ctx context.Context) (bool, error) { ns := &corev1.Namespace{} key := client.ObjectKey{Name: certManagerNamespace} c, err := cm.proxy.NewClient() @@ -150,7 +150,7 @@ func (cm *certManagerClient) certManagerNamespaceExists() (bool, error) { // EnsureInstalled makes sure cert-manager is running and its API is available. // This is required to install a new provider. -func (cm *certManagerClient) EnsureInstalled() error { +func (cm *certManagerClient) EnsureInstalled(ctx context.Context) error { log := logf.Log // Checking if a version of cert manager supporting cert-manager-test-resources.yaml is already installed and properly working. @@ -162,10 +162,10 @@ func (cm *certManagerClient) EnsureInstalled() error { // Otherwise install cert manager. // NOTE: this instance of cert-manager will have clusterctl specific annotations that will be used to // manage the lifecycle of all the components. 
- return cm.install() + return cm.install(ctx) } -func (cm *certManagerClient) install() error { +func (cm *certManagerClient) install(ctx context.Context) error { log := logf.Log config, err := cm.configClient.CertManager().Get() @@ -175,7 +175,7 @@ func (cm *certManagerClient) install() error { log.Info("Installing cert-manager", "Version", config.Version()) // Gets the cert-manager components from the repository. - objs, err := cm.getManifestObjs(config) + objs, err := cm.getManifestObjs(ctx, config) if err != nil { return err } @@ -188,7 +188,7 @@ func (cm *certManagerClient) install() error { // Create the Kubernetes object. // Nb. The operation is wrapped in a retry loop to make ensureCerts more resilient to unexpected conditions. if err := retryWithExponentialBackoff(createCertManagerBackoff, func() error { - return cm.createObj(o) + return cm.createObj(ctx, o) }); err != nil { return err } @@ -198,12 +198,12 @@ func (cm *certManagerClient) install() error { return cm.waitForAPIReady(ctx, true) } -// PlanUpgrade retruns a CertManagerUpgradePlan with information regarding +// PlanUpgrade returns a CertManagerUpgradePlan with information regarding // a cert-manager upgrade if necessary. 
-func (cm *certManagerClient) PlanUpgrade() (CertManagerUpgradePlan, error) { +func (cm *certManagerClient) PlanUpgrade(ctx context.Context) (CertManagerUpgradePlan, error) { log := logf.Log - objs, err := cm.proxy.ListResources(map[string]string{clusterctlv1.ClusterctlCoreLabel: clusterctlv1.ClusterctlCoreLabelCertManagerValue}, certManagerNamespace) + objs, err := cm.proxy.ListResources(ctx, map[string]string{clusterctlv1.ClusterctlCoreLabel: clusterctlv1.ClusterctlCoreLabelCertManagerValue}, certManagerNamespace) if err != nil { return CertManagerUpgradePlan{}, errors.Wrap(err, "failed get cert manager components") } @@ -229,10 +229,10 @@ func (cm *certManagerClient) PlanUpgrade() (CertManagerUpgradePlan, error) { // EnsureLatestVersion checks the cert-manager version currently installed, and if it is // older than the version currently suggested by clusterctl, upgrades it. -func (cm *certManagerClient) EnsureLatestVersion() error { +func (cm *certManagerClient) EnsureLatestVersion(ctx context.Context) error { log := logf.Log - objs, err := cm.proxy.ListResources(map[string]string{clusterctlv1.ClusterctlCoreLabel: clusterctlv1.ClusterctlCoreLabelCertManagerValue}, certManagerNamespace) + objs, err := cm.proxy.ListResources(ctx, map[string]string{clusterctlv1.ClusterctlCoreLabel: clusterctlv1.ClusterctlCoreLabelCertManagerValue}, certManagerNamespace) if err != nil { return errors.Wrap(err, "failed get cert manager components") } @@ -256,7 +256,7 @@ func (cm *certManagerClient) EnsureLatestVersion() error { // Migrate CRs to latest CRD storage version, if necessary. // Note: We have to do this before cert-manager is deleted so conversion webhooks still work. 
- if err := cm.migrateCRDs(); err != nil { + if err := cm.migrateCRDs(ctx); err != nil { return err } @@ -264,22 +264,22 @@ func (cm *certManagerClient) EnsureLatestVersion() error { // NOTE: CRDs, and namespace are preserved in order to avoid deletion of user objects; // web-hooks are preserved to avoid a user attempting to CREATE a cert-manager resource while the upgrade is in progress. log.Info("Deleting cert-manager", "Version", currentVersion) - if err := cm.deleteObjs(objs); err != nil { + if err := cm.deleteObjs(ctx, objs); err != nil { return err } // Install cert-manager. - return cm.install() + return cm.install(ctx) } -func (cm *certManagerClient) migrateCRDs() error { +func (cm *certManagerClient) migrateCRDs(ctx context.Context) error { config, err := cm.configClient.CertManager().Get() if err != nil { return err } // Gets the new cert-manager components from the repository. - objs, err := cm.getManifestObjs(config) + objs, err := cm.getManifestObjs(ctx, config) if err != nil { return err } @@ -292,7 +292,7 @@ func (cm *certManagerClient) migrateCRDs() error { return newCRDMigrator(c).Run(ctx, objs) } -func (cm *certManagerClient) deleteObjs(objs []unstructured.Unstructured) error { +func (cm *certManagerClient) deleteObjs(ctx context.Context, objs []unstructured.Unstructured) error { deleteCertManagerBackoff := newWriteBackoff() for i := range objs { obj := objs[i] @@ -307,7 +307,7 @@ func (cm *certManagerClient) deleteObjs(objs []unstructured.Unstructured) error } if err := retryWithExponentialBackoff(deleteCertManagerBackoff, func() error { - if err := cm.deleteObj(obj); err != nil { + if err := cm.deleteObj(ctx, obj); err != nil { // tolerate NotFound errors when deleting the test resources if apierrors.IsNotFound(err) { return nil @@ -394,18 +394,18 @@ func (cm *certManagerClient) getWaitTimeout() time.Duration { return timeoutDuration } -func (cm *certManagerClient) getManifestObjs(certManagerConfig config.CertManager) 
([]unstructured.Unstructured, error) { +func (cm *certManagerClient) getManifestObjs(ctx context.Context, certManagerConfig config.CertManager) ([]unstructured.Unstructured, error) { // Given that cert manager components yaml are stored in a repository like providers components yaml, // we are using the same machinery to retrieve the file by using a fake provider object using // the cert manager repository url. certManagerFakeProvider := config.NewProvider("cert-manager", certManagerConfig.URL(), "") - certManagerRepository, err := cm.repositoryClientFactory(certManagerFakeProvider, cm.configClient) + certManagerRepository, err := cm.repositoryClientFactory(ctx, certManagerFakeProvider, cm.configClient) if err != nil { return nil, err } // Gets the cert-manager component yaml from the repository. - file, err := certManagerRepository.Components().Raw(repository.ComponentsOptions{ + file, err := certManagerRepository.Components().Raw(ctx, repository.ComponentsOptions{ Version: certManagerConfig.Version(), }) if err != nil { @@ -468,7 +468,7 @@ func getTestResourcesManifestObjs() ([]unstructured.Unstructured, error) { return objs, nil } -func (cm *certManagerClient) createObj(obj unstructured.Unstructured) error { +func (cm *certManagerClient) createObj(ctx context.Context, obj unstructured.Unstructured) error { log := logf.Log c, err := cm.proxy.NewClient() @@ -508,7 +508,7 @@ func (cm *certManagerClient) createObj(obj unstructured.Unstructured) error { return nil } -func (cm *certManagerClient) deleteObj(obj unstructured.Unstructured) error { +func (cm *certManagerClient) deleteObj(ctx context.Context, obj unstructured.Unstructured) error { log := logf.Log log.V(5).Info("Deleting", logf.UnstructuredToValues(obj)...) @@ -545,7 +545,7 @@ func (cm *certManagerClient) waitForAPIReady(ctx context.Context, retry bool) er // This is wrapped with a retry as the cert-manager API may not be available // yet, so we need to keep retrying until it is. 
if err := cm.pollImmediateWaiter(ctx, waitCertManagerInterval, cm.getWaitTimeout(), func(ctx context.Context) (bool, error) { - if err := cm.createObj(o); err != nil { + if err := cm.createObj(ctx, o); err != nil { // If retrying is disabled, return the error here. if !retry { return false, err @@ -561,7 +561,7 @@ func (cm *certManagerClient) waitForAPIReady(ctx context.Context, retry bool) er for i := range testObjs { obj := testObjs[i] if err := retryWithExponentialBackoff(deleteCertManagerBackoff, func() error { - if err := cm.deleteObj(obj); err != nil { + if err := cm.deleteObj(ctx, obj); err != nil { // tolerate NotFound errors when deleting the test resources if apierrors.IsNotFound(err) { return nil diff --git a/cmd/clusterctl/client/cluster/cert_manager_test.go b/cmd/clusterctl/client/cluster/cert_manager_test.go index e85ae5d828e2..fda43d62e49e 100644 --- a/cmd/clusterctl/client/cluster/cert_manager_test.go +++ b/cmd/clusterctl/client/cluster/cert_manager_test.go @@ -61,7 +61,7 @@ var certManagerNamespaceYaml = []byte("apiVersion: v1\n" + func Test_getManifestObjs(t *testing.T) { g := NewWithT(t) - defaultConfigClient, err := config.New("", config.InjectReader(test.NewFakeReader().WithImageMeta(config.CertManagerImageComponent, "bar-repository.io", ""))) + defaultConfigClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader().WithImageMeta(config.CertManagerImageComponent, "bar-repository.io", ""))) g.Expect(err).ToNot(HaveOccurred()) type fields struct { @@ -109,7 +109,7 @@ func Test_getManifestObjs(t *testing.T) { name: "successfully gets the cert-manager components for a custom release", fields: fields{ configClient: func() config.Client { - configClient, err := config.New("", config.InjectReader(test.NewFakeReader().WithImageMeta(config.CertManagerImageComponent, "bar-repository.io", "").WithCertManager("", "v1.0.0", ""))) + configClient, err := config.New(context.Background(), "", 
config.InjectReader(test.NewFakeReader().WithImageMeta(config.CertManagerImageComponent, "bar-repository.io", "").WithCertManager("", "v1.0.0", ""))) g.Expect(err).ToNot(HaveOccurred()) return configClient }(), @@ -125,17 +125,19 @@ func Test_getManifestObjs(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + cm := &certManagerClient{ configClient: defaultConfigClient, - repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { - return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repository)) + repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { + return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repository)) }, } certManagerConfig, err := cm.configClient.CertManager().Get() g.Expect(err).ToNot(HaveOccurred()) - got, err := cm.getManifestObjs(certManagerConfig) + got, err := cm.getManifestObjs(ctx, certManagerConfig) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -194,6 +196,7 @@ func Test_GetTimeout(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + cm := newCertManagerClient(tt.config, nil, nil, pollImmediateWaiter) tm := cm.getWaitTimeout() @@ -420,6 +423,7 @@ func Test_shouldUpgrade(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + proxy := test.NewFakeProxy() fakeConfigClient := newFakeConfig().WithCertManager("", tt.configVersion, "") pollImmediateWaiter := func(ctx context.Context, interval, timeout time.Duration, condition wait.ConditionWithContextFunc) error { @@ -561,16 +565,18 @@ func Test_certManagerClient_deleteObjs(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + proxy := 
test.NewFakeProxy().WithObjs(tt.fields.objs...) cm := &certManagerClient{ pollImmediateWaiter: fakePollImmediateWaiter, proxy: proxy, } - objBefore, err := proxy.ListResources(map[string]string{clusterctlv1.ClusterctlCoreLabel: clusterctlv1.ClusterctlCoreLabelCertManagerValue}) + objBefore, err := proxy.ListResources(ctx, map[string]string{clusterctlv1.ClusterctlCoreLabel: clusterctlv1.ClusterctlCoreLabelCertManagerValue}) g.Expect(err).ToNot(HaveOccurred()) - err = cm.deleteObjs(objBefore) + err = cm.deleteObjs(ctx, objBefore) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -591,7 +597,7 @@ func Test_certManagerClient_deleteObjs(t *testing.T) { cl, err := proxy.NewClient() g.Expect(err).ToNot(HaveOccurred()) - err = cl.Get(ctx, client.ObjectKeyFromObject(obj), obj) + err = cl.Get(context.Background(), client.ObjectKeyFromObject(obj), obj) switch objShouldStillExist { case true: g.Expect(err).ToNot(HaveOccurred()) @@ -705,6 +711,8 @@ func Test_certManagerClient_PlanUpgrade(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + proxy := test.NewFakeProxy().WithObjs(tt.objs...) 
fakeConfigClient := newFakeConfig() pollImmediateWaiter := func(ctx context.Context, interval, timeout time.Duration, condition wait.ConditionWithContextFunc) error { @@ -712,7 +720,7 @@ func Test_certManagerClient_PlanUpgrade(t *testing.T) { } cm := newCertManagerClient(fakeConfigClient, nil, proxy, pollImmediateWaiter) - actualPlan, err := cm.PlanUpgrade() + actualPlan, err := cm.PlanUpgrade(ctx) if tt.expectErr { g.Expect(err).To(HaveOccurred()) g.Expect(actualPlan).To(BeComparableTo(CertManagerUpgradePlan{})) @@ -755,7 +763,7 @@ func Test_certManagerClient_EnsureLatestVersion(t *testing.T) { proxy: tt.fields.proxy, } - err := cm.EnsureLatestVersion() + err := cm.EnsureLatestVersion(context.Background()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -768,7 +776,7 @@ func Test_certManagerClient_EnsureLatestVersion(t *testing.T) { func newFakeConfig() *fakeConfigClient { fakeReader := test.NewFakeReader() - client, _ := config.New("fake-config", config.InjectReader(fakeReader)) + client, _ := config.New(context.Background(), "fake-config", config.InjectReader(fakeReader)) return &fakeConfigClient{ fakeReader: fakeReader, internalclient: client, diff --git a/cmd/clusterctl/client/cluster/client.go b/cmd/clusterctl/client/cluster/client.go index 5e3fc833df9b..84f3efda3334 100644 --- a/cmd/clusterctl/client/cluster/client.go +++ b/cmd/clusterctl/client/cluster/client.go @@ -29,10 +29,6 @@ import ( logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" ) -var ( - ctx = context.TODO() -) - // Kubeconfig is a type that specifies inputs related to the actual // kubeconfig. type Kubeconfig struct { @@ -102,7 +98,7 @@ type clusterClient struct { } // RepositoryClientFactory defines a function that returns a new repository.Client. 
-type RepositoryClientFactory func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) +type RepositoryClientFactory func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) // ensure clusterClient implements Client. var _ Client = &clusterClient{} diff --git a/cmd/clusterctl/client/cluster/components.go b/cmd/clusterctl/client/cluster/components.go index 3f2334610483..ae6800283ae7 100644 --- a/cmd/clusterctl/client/cluster/components.go +++ b/cmd/clusterctl/client/cluster/components.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "fmt" "strings" @@ -54,17 +55,17 @@ type DeleteOptions struct { // ComponentsClient has methods to work with provider components in the cluster. type ComponentsClient interface { // Create creates the provider components in the management cluster. - Create(objs []unstructured.Unstructured) error + Create(ctx context.Context, objs []unstructured.Unstructured) error // Delete deletes the provider components from the management cluster. // The operation is designed to prevent accidental deletion of user created objects, so // it is required to explicitly opt-in for the deletion of the namespace where the provider components are hosted // and for the deletion of the provider's CRDs. - Delete(options DeleteOptions) error + Delete(ctx context.Context, options DeleteOptions) error // DeleteWebhookNamespace deletes the core provider webhook namespace (eg. capi-webhook-system). // This is required when upgrading to v1alpha4 where webhooks are included in the controller itself. - DeleteWebhookNamespace() error + DeleteWebhookNamespace(ctx context.Context) error } // providerComponents implements ComponentsClient. 
@@ -72,7 +73,7 @@ type providerComponents struct { proxy Proxy } -func (p *providerComponents) Create(objs []unstructured.Unstructured) error { +func (p *providerComponents) Create(ctx context.Context, objs []unstructured.Unstructured) error { createComponentObjectBackoff := newWriteBackoff() for i := range objs { obj := objs[i] @@ -80,7 +81,7 @@ func (p *providerComponents) Create(objs []unstructured.Unstructured) error { // Create the Kubernetes object. // Nb. The operation is wrapped in a retry loop to make Create more resilient to unexpected conditions. if err := retryWithExponentialBackoff(createComponentObjectBackoff, func() error { - return p.createObj(obj) + return p.createObj(ctx, obj) }); err != nil { return err } @@ -89,7 +90,7 @@ func (p *providerComponents) Create(objs []unstructured.Unstructured) error { return nil } -func (p *providerComponents) createObj(obj unstructured.Unstructured) error { +func (p *providerComponents) createObj(ctx context.Context, obj unstructured.Unstructured) error { log := logf.Log c, err := p.proxy.NewClient() if err != nil { @@ -127,7 +128,7 @@ func (p *providerComponents) createObj(obj unstructured.Unstructured) error { return nil } -func (p *providerComponents) Delete(options DeleteOptions) error { +func (p *providerComponents) Delete(ctx context.Context, options DeleteOptions) error { log := logf.Log log.Info("Deleting", "Provider", options.Provider.Name, "Version", options.Provider.Version, "Namespace", options.Provider.Namespace) @@ -139,7 +140,7 @@ func (p *providerComponents) Delete(options DeleteOptions) error { } namespaces := []string{options.Provider.Namespace} - resources, err := p.proxy.ListResources(labels, namespaces...) + resources, err := p.proxy.ListResources(ctx, labels, namespaces...) 
if err != nil { return err } @@ -234,7 +235,7 @@ func (p *providerComponents) Delete(options DeleteOptions) error { return kerrors.NewAggregate(errList) } -func (p *providerComponents) DeleteWebhookNamespace() error { +func (p *providerComponents) DeleteWebhookNamespace(ctx context.Context) error { const webhookNamespaceName = "capi-webhook-system" log := logf.Log diff --git a/cmd/clusterctl/client/cluster/components_test.go b/cmd/clusterctl/client/cluster/components_test.go index 473ba9524ff6..d2affd1ddaa5 100644 --- a/cmd/clusterctl/client/cluster/components_test.go +++ b/cmd/clusterctl/client/cluster/components_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "fmt" "testing" @@ -256,7 +257,7 @@ func Test_providerComponents_Delete(t *testing.T) { c := newComponentsClient(proxy) - err := c.Delete(DeleteOptions{ + err := c.Delete(context.Background(), DeleteOptions{ Provider: tt.args.provider, IncludeNamespace: tt.args.includeNamespace, IncludeCRDs: tt.args.includeCRD, @@ -282,7 +283,7 @@ func Test_providerComponents_Delete(t *testing.T) { Name: want.object.Name, } - err := cs.Get(ctx, key, obj) + err := cs.Get(context.Background(), key, obj) if err != nil && !apierrors.IsNotFound(err) { t.Fatalf("Failed to get %v from the cluster: %v", key, err) } @@ -325,15 +326,15 @@ func Test_providerComponents_DeleteCoreProviderWebhookNamespace(t *testing.T) { var nsList corev1.NamespaceList // assert length before deleting - _ = proxyClient.List(ctx, &nsList) + _ = proxyClient.List(context.Background(), &nsList) g.Expect(nsList.Items).Should(HaveLen(1)) c := newComponentsClient(proxy) - err := c.DeleteWebhookNamespace() + err := c.DeleteWebhookNamespace(context.Background()) g.Expect(err).To(Not(HaveOccurred())) // assert length after deleting - _ = proxyClient.List(ctx, &nsList) + _ = proxyClient.List(context.Background(), &nsList) g.Expect(nsList.Items).Should(BeEmpty()) }) } @@ -448,7 +449,7 @@ func 
Test_providerComponents_Create(t *testing.T) { } unstructuredObjectsToCreate = append(unstructuredObjectsToCreate, *uns) } - err := c.Create(unstructuredObjectsToCreate) + err := c.Create(context.Background(), unstructuredObjectsToCreate) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -468,7 +469,7 @@ func Test_providerComponents_Create(t *testing.T) { Name: item.GetName(), } - err := cs.Get(ctx, key, obj) + err := cs.Get(context.Background(), key, obj) if err != nil && !apierrors.IsNotFound(err) { t.Fatalf("Failed to get %v from the cluster: %v", key, err) diff --git a/cmd/clusterctl/client/cluster/crd_migration_test.go b/cmd/clusterctl/client/cluster/crd_migration_test.go index 8edc510852d4..b3013c298f6c 100644 --- a/cmd/clusterctl/client/cluster/crd_migration_test.go +++ b/cmd/clusterctl/client/cluster/crd_migration_test.go @@ -272,7 +272,7 @@ func Test_CRDMigrator(t *testing.T) { Client: countingClient, } - isMigrated, err := m.run(ctx, tt.newCRD) + isMigrated, err := m.run(context.Background(), tt.newCRD) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { @@ -289,7 +289,7 @@ func Test_CRDMigrator(t *testing.T) { // Check storage versions has been cleaned up. currentCRD := &apiextensionsv1.CustomResourceDefinition{} - err = c.Get(ctx, client.ObjectKeyFromObject(tt.newCRD), currentCRD) + err = c.Get(context.Background(), client.ObjectKeyFromObject(tt.newCRD), currentCRD) g.Expect(err).ToNot(HaveOccurred()) g.Expect(currentCRD.Status.StoredVersions).To(Equal(tt.wantStoredVersions)) } diff --git a/cmd/clusterctl/client/cluster/installer.go b/cmd/clusterctl/client/cluster/installer.go index 908c37ce747c..018f5cc10aee 100644 --- a/cmd/clusterctl/client/cluster/installer.go +++ b/cmd/clusterctl/client/cluster/installer.go @@ -52,7 +52,7 @@ type ProviderInstaller interface { Add(repository.Components) // Install performs the installation of the providers ready in the install queue. 
- Install(InstallOptions) ([]repository.Components, error) + Install(context.Context, InstallOptions) ([]repository.Components, error) // Validate performs steps to validate a management cluster by looking at the current state and the providers in the queue. // The following checks are performed in order to ensure a fully operational cluster: @@ -60,7 +60,7 @@ type ProviderInstaller interface { // - All the providers in must support the same API Version of Cluster API (contract) // - All provider CRDs that are referenced in core Cluster API CRDs must comply with the CRD naming scheme, // otherwise a warning is logged. - Validate() error + Validate(context.Context) error // Images returns the list of images required for installing the providers ready in the install queue. Images() []string @@ -93,36 +93,36 @@ func (i *providerInstaller) Add(components repository.Components) { }) } -func (i *providerInstaller) Install(opts InstallOptions) ([]repository.Components, error) { +func (i *providerInstaller) Install(ctx context.Context, opts InstallOptions) ([]repository.Components, error) { ret := make([]repository.Components, 0, len(i.installQueue)) for _, components := range i.installQueue { - if err := installComponentsAndUpdateInventory(components, i.providerComponents, i.providerInventory); err != nil { + if err := installComponentsAndUpdateInventory(ctx, components, i.providerComponents, i.providerInventory); err != nil { return nil, err } ret = append(ret, components) } - return ret, waitForProvidersReady(opts, i.installQueue, i.proxy) + return ret, waitForProvidersReady(ctx, opts, i.installQueue, i.proxy) } -func installComponentsAndUpdateInventory(components repository.Components, providerComponents ComponentsClient, providerInventory InventoryClient) error { +func installComponentsAndUpdateInventory(ctx context.Context, components repository.Components, providerComponents ComponentsClient, providerInventory InventoryClient) error { log := logf.Log 
log.Info("Installing", "Provider", components.ManifestLabel(), "Version", components.Version(), "TargetNamespace", components.TargetNamespace()) inventoryObject := components.InventoryObject() log.V(1).Info("Creating objects", "Provider", components.ManifestLabel(), "Version", components.Version(), "TargetNamespace", components.TargetNamespace()) - if err := providerComponents.Create(components.Objs()); err != nil { + if err := providerComponents.Create(ctx, components.Objs()); err != nil { return err } log.V(1).Info("Creating inventory entry", "Provider", components.ManifestLabel(), "Version", components.Version(), "TargetNamespace", components.TargetNamespace()) - return providerInventory.Create(inventoryObject) + return providerInventory.Create(ctx, inventoryObject) } // waitForProvidersReady waits till the installed components are ready. -func waitForProvidersReady(opts InstallOptions, installQueue []repository.Components, proxy Proxy) error { +func waitForProvidersReady(ctx context.Context, opts InstallOptions, installQueue []repository.Components, proxy Proxy) error { // If we dont have to wait for providers to be installed // return early. if !opts.WaitProviders { @@ -132,15 +132,15 @@ func waitForProvidersReady(opts InstallOptions, installQueue []repository.Compon log := logf.Log log.Info("Waiting for providers to be available...") - return waitManagerDeploymentsReady(opts, installQueue, proxy) + return waitManagerDeploymentsReady(ctx, opts, installQueue, proxy) } // waitManagerDeploymentsReady waits till the installed manager deployments are ready. 
-func waitManagerDeploymentsReady(opts InstallOptions, installQueue []repository.Components, proxy Proxy) error { +func waitManagerDeploymentsReady(ctx context.Context, opts InstallOptions, installQueue []repository.Components, proxy Proxy) error { for _, components := range installQueue { for _, obj := range components.Objs() { if util.IsDeploymentWithManager(obj) { - if err := waitDeploymentReady(obj, opts.WaitProviderTimeout, proxy); err != nil { + if err := waitDeploymentReady(ctx, obj, opts.WaitProviderTimeout, proxy); err != nil { return errors.Wrapf(err, "deployment %q is not ready after %s", obj.GetName(), opts.WaitProviderTimeout) } } @@ -149,8 +149,8 @@ func waitManagerDeploymentsReady(opts InstallOptions, installQueue []repository. return nil } -func waitDeploymentReady(deployment unstructured.Unstructured, timeout time.Duration, proxy Proxy) error { - return wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, false, func(ctx context.Context) (bool, error) { +func waitDeploymentReady(ctx context.Context, deployment unstructured.Unstructured, timeout time.Duration, proxy Proxy) error { + return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, timeout, false, func(ctx context.Context) (bool, error) { c, err := proxy.NewClient() if err != nil { return false, err @@ -172,9 +172,9 @@ func waitDeploymentReady(deployment unstructured.Unstructured, timeout time.Dura }) } -func (i *providerInstaller) Validate() error { +func (i *providerInstaller) Validate(ctx context.Context) error { // Get the list of providers currently in the cluster. 
- providerList, err := i.providerInventory.List() + providerList, err := i.providerInventory.List(ctx) if err != nil { return err } @@ -198,7 +198,7 @@ func (i *providerInstaller) Validate() error { } coreProvider := coreProviders[0] - managementClusterContract, err := i.getProviderContract(providerInstanceContracts, coreProvider) + managementClusterContract, err := i.getProviderContract(ctx, providerInstanceContracts, coreProvider) if err != nil { return err } @@ -208,7 +208,7 @@ func (i *providerInstaller) Validate() error { provider := components.InventoryObject() // Gets the API Version of Cluster API (contract) the provider support and compare it with the management cluster contract. - providerContract, err := i.getProviderContract(providerInstanceContracts, provider) + providerContract, err := i.getProviderContract(ctx, providerInstanceContracts, provider) if err != nil { return err } @@ -286,7 +286,7 @@ func validateCRDName(obj unstructured.Unstructured, gk *schema.GroupKind) error } // getProviderContract returns the API Version of Cluster API (contract) for a provider instance. -func (i *providerInstaller) getProviderContract(providerInstanceContracts map[string]string, provider clusterctlv1.Provider) (string, error) { +func (i *providerInstaller) getProviderContract(ctx context.Context, providerInstanceContracts map[string]string, provider clusterctlv1.Provider) (string, error) { // If the contract for the provider instance is already known, return it. 
if contract, ok := providerInstanceContracts[provider.InstanceName()]; ok { return contract, nil @@ -300,12 +300,12 @@ func (i *providerInstaller) getProviderContract(providerInstanceContracts map[st return "", err } - providerRepository, err := i.repositoryClientFactory(configRepository, i.configClient) + providerRepository, err := i.repositoryClientFactory(ctx, configRepository, i.configClient) if err != nil { return "", err } - latestMetadata, err := providerRepository.Metadata(provider.Version).Get() + latestMetadata, err := providerRepository.Metadata(provider.Version).Get(ctx) if err != nil { return "", err } diff --git a/cmd/clusterctl/client/cluster/installer_test.go b/cmd/clusterctl/client/cluster/installer_test.go index c22adfae69bb..b5cee3dd8444 100644 --- a/cmd/clusterctl/client/cluster/installer_test.go +++ b/cmd/clusterctl/client/cluster/installer_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "testing" . "github.com/onsi/gomega" @@ -236,19 +237,21 @@ func Test_providerInstaller_Validate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - configClient, _ := config.New("", config.InjectReader(fakeReader)) + ctx := context.Background() + + configClient, _ := config.New(ctx, "", config.InjectReader(fakeReader)) i := &providerInstaller{ configClient: configClient, proxy: tt.fields.proxy, providerInventory: newInventoryClient(tt.fields.proxy, nil), - repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { - return repository.New(provider, configClient, repository.InjectRepository(repositoryMap[provider.ManifestLabel()])) + repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { + return repository.New(ctx, provider, configClient, repository.InjectRepository(repositoryMap[provider.ManifestLabel()])) 
}, installQueue: tt.fields.installQueue, } - err := i.Validate() + err := i.Validate(ctx) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { diff --git a/cmd/clusterctl/client/cluster/inventory.go b/cmd/clusterctl/client/cluster/inventory.go index 6f621bf43dc8..8412e78c6752 100644 --- a/cmd/clusterctl/client/cluster/inventory.go +++ b/cmd/clusterctl/client/cluster/inventory.go @@ -94,34 +94,34 @@ type InventoryClient interface { // EnsureCustomResourceDefinitions installs the CRD required for creating inventory items, if necessary. // Nb. In order to provide a simpler out-of-the box experience, the inventory CRD // is embedded in the clusterctl binary. - EnsureCustomResourceDefinitions() error + EnsureCustomResourceDefinitions(ctx context.Context) error // Create an inventory item for a provider instance installed in the cluster. - Create(clusterctlv1.Provider) error + Create(context.Context, clusterctlv1.Provider) error // List returns the inventory items for all the provider instances installed in the cluster. - List() (*clusterctlv1.ProviderList, error) + List(ctx context.Context) (*clusterctlv1.ProviderList, error) // GetDefaultProviderName returns the default provider for a given ProviderType. // In case there is only a single provider for a given type, e.g. only the AWS infrastructure Provider, it returns // this as the default provider; In case there are more provider of the same type, there is no default provider. - GetDefaultProviderName(providerType clusterctlv1.ProviderType) (string, error) + GetDefaultProviderName(ctx context.Context, providerType clusterctlv1.ProviderType) (string, error) // GetProviderVersion returns the version for a given provider. - GetProviderVersion(provider string, providerType clusterctlv1.ProviderType) (string, error) + GetProviderVersion(ctx context.Context, provider string, providerType clusterctlv1.ProviderType) (string, error) // GetProviderNamespace returns the namespace for a given provider. 
- GetProviderNamespace(provider string, providerType clusterctlv1.ProviderType) (string, error) + GetProviderNamespace(ctx context.Context, provider string, providerType clusterctlv1.ProviderType) (string, error) // CheckCAPIContract checks the Cluster API version installed in the management cluster, and fails if this version // does not match the current one supported by clusterctl. - CheckCAPIContract(...CheckCAPIContractOption) error + CheckCAPIContract(context.Context, ...CheckCAPIContractOption) error // CheckCAPIInstalled checks if Cluster API is installed on the management cluster. - CheckCAPIInstalled() (bool, error) + CheckCAPIInstalled(ctx context.Context) (bool, error) // CheckSingleProviderInstance ensures that only one instance of a provider is running, returns error otherwise. - CheckSingleProviderInstance() error + CheckSingleProviderInstance(ctx context.Context) error } // inventoryClient implements InventoryClient. @@ -141,7 +141,7 @@ func newInventoryClient(proxy Proxy, pollImmediateWaiter PollImmediateWaiter) *i } } -func (p *inventoryClient) EnsureCustomResourceDefinitions() error { +func (p *inventoryClient) EnsureCustomResourceDefinitions(ctx context.Context) error { log := logf.Log if err := p.proxy.ValidateKubernetesVersion(); err != nil { @@ -164,7 +164,7 @@ func (p *inventoryClient) EnsureCustomResourceDefinitions() error { listInventoryBackoff := newReadBackoff() if err := retryWithExponentialBackoff(listInventoryBackoff, func() error { var err error - crdIsIstalled, err = checkInventoryCRDs(p.proxy) + crdIsIstalled, err = checkInventoryCRDs(ctx, p.proxy) return err }); err != nil { return err @@ -190,7 +190,7 @@ func (p *inventoryClient) EnsureCustomResourceDefinitions() error { // Create the Kubernetes object. // Nb. The operation is wrapped in a retry loop to make EnsureCustomResourceDefinitions more resilient to unexpected conditions. 
if err := retryWithExponentialBackoff(createInventoryObjectBackoff, func() error { - return p.createObj(o) + return p.createObj(ctx, o) }); err != nil { return err } @@ -225,7 +225,7 @@ func (p *inventoryClient) EnsureCustomResourceDefinitions() error { } // checkInventoryCRDs checks if the inventory CRDs are installed in the cluster. -func checkInventoryCRDs(proxy Proxy) (bool, error) { +func checkInventoryCRDs(ctx context.Context, proxy Proxy) (bool, error) { c, err := proxy.NewClient() if err != nil { return false, err @@ -247,7 +247,7 @@ func checkInventoryCRDs(proxy Proxy) (bool, error) { return true, errors.Errorf("clusterctl inventory CRD does not defines the %s version", clusterctlv1.GroupVersion.Version) } -func (p *inventoryClient) createObj(o unstructured.Unstructured) error { +func (p *inventoryClient) createObj(ctx context.Context, o unstructured.Unstructured) error { c, err := p.proxy.NewClient() if err != nil { return err @@ -269,7 +269,7 @@ func (p *inventoryClient) createObj(o unstructured.Unstructured) error { return nil } -func (p *inventoryClient) Create(m clusterctlv1.Provider) error { +func (p *inventoryClient) Create(ctx context.Context, m clusterctlv1.Provider) error { // Create the Kubernetes object. 
createInventoryObjectBackoff := newWriteBackoff() return retryWithExponentialBackoff(createInventoryObjectBackoff, func() error { @@ -306,12 +306,12 @@ func (p *inventoryClient) Create(m clusterctlv1.Provider) error { }) } -func (p *inventoryClient) List() (*clusterctlv1.ProviderList, error) { +func (p *inventoryClient) List(ctx context.Context) (*clusterctlv1.ProviderList, error) { providerList := &clusterctlv1.ProviderList{} listProvidersBackoff := newReadBackoff() if err := retryWithExponentialBackoff(listProvidersBackoff, func() error { - return listProviders(p.proxy, providerList) + return listProviders(ctx, p.proxy, providerList) }); err != nil { return nil, err } @@ -320,7 +320,7 @@ func (p *inventoryClient) List() (*clusterctlv1.ProviderList, error) { } // listProviders retrieves the list of provider inventory objects. -func listProviders(proxy Proxy, providerList *clusterctlv1.ProviderList) error { +func listProviders(ctx context.Context, proxy Proxy, providerList *clusterctlv1.ProviderList) error { cl, err := proxy.NewClient() if err != nil { return err @@ -332,8 +332,8 @@ func listProviders(proxy Proxy, providerList *clusterctlv1.ProviderList) error { return nil } -func (p *inventoryClient) GetDefaultProviderName(providerType clusterctlv1.ProviderType) (string, error) { - providerList, err := p.List() +func (p *inventoryClient) GetDefaultProviderName(ctx context.Context, providerType clusterctlv1.ProviderType) (string, error) { + providerList, err := p.List(ctx) if err != nil { return "", err } @@ -353,8 +353,8 @@ func (p *inventoryClient) GetDefaultProviderName(providerType clusterctlv1.Provi return "", nil } -func (p *inventoryClient) GetProviderVersion(provider string, providerType clusterctlv1.ProviderType) (string, error) { - providerList, err := p.List() +func (p *inventoryClient) GetProviderVersion(ctx context.Context, provider string, providerType clusterctlv1.ProviderType) (string, error) { + providerList, err := p.List(ctx) if err != nil { 
return "", err } @@ -373,8 +373,8 @@ func (p *inventoryClient) GetProviderVersion(provider string, providerType clust return "", nil } -func (p *inventoryClient) GetProviderNamespace(provider string, providerType clusterctlv1.ProviderType) (string, error) { - providerList, err := p.List() +func (p *inventoryClient) GetProviderNamespace(ctx context.Context, provider string, providerType clusterctlv1.ProviderType) (string, error) { + providerList, err := p.List(ctx) if err != nil { return "", err } @@ -393,7 +393,7 @@ func (p *inventoryClient) GetProviderNamespace(provider string, providerType clu return "", nil } -func (p *inventoryClient) CheckCAPIContract(options ...CheckCAPIContractOption) error { +func (p *inventoryClient) CheckCAPIContract(ctx context.Context, options ...CheckCAPIContractOption) error { opt := &CheckCAPIContractOptions{} for _, o := range options { o.Apply(opt) @@ -432,8 +432,8 @@ func (p *inventoryClient) CheckCAPIContract(options ...CheckCAPIContractOption) return errors.Errorf("failed to check Cluster API version") } -func (p *inventoryClient) CheckCAPIInstalled() (bool, error) { - if err := p.CheckCAPIContract(AllowCAPIAnyContract{}); err != nil { +func (p *inventoryClient) CheckCAPIInstalled(ctx context.Context) (bool, error) { + if err := p.CheckCAPIContract(ctx, AllowCAPIAnyContract{}); err != nil { if apierrors.IsNotFound(err) { // The expected CRDs are not installed on the management. This would mean that Cluster API is not installed on the cluster. 
return false, nil @@ -443,8 +443,8 @@ func (p *inventoryClient) CheckCAPIInstalled() (bool, error) { return true, nil } -func (p *inventoryClient) CheckSingleProviderInstance() error { - providers, err := p.List() +func (p *inventoryClient) CheckSingleProviderInstance(ctx context.Context) error { + providers, err := p.List(ctx) if err != nil { return err } diff --git a/cmd/clusterctl/client/cluster/inventory_test.go b/cmd/clusterctl/client/cluster/inventory_test.go index 5c73f320947c..d3a80f3b995e 100644 --- a/cmd/clusterctl/client/cluster/inventory_test.go +++ b/cmd/clusterctl/client/cluster/inventory_test.go @@ -67,14 +67,16 @@ func Test_inventoryClient_CheckInventoryCRDs(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + proxy := test.NewFakeProxy() p := newInventoryClient(proxy, fakePollImmediateWaiter) if tt.fields.alreadyHasCRD { // forcing creation of metadata before test - g.Expect(p.EnsureCustomResourceDefinitions()).To(Succeed()) + g.Expect(p.EnsureCustomResourceDefinitions(ctx)).To(Succeed()) } - res, err := checkInventoryCRDs(proxy) + res, err := checkInventoryCRDs(ctx, proxy) g.Expect(res).To(Equal(tt.want)) if tt.wantErr { g.Expect(err).To(HaveOccurred()) @@ -115,7 +117,7 @@ func Test_inventoryClient_List(t *testing.T) { g := NewWithT(t) p := newInventoryClient(test.NewFakeProxy().WithObjs(tt.fields.initObjs...), fakePollImmediateWaiter) - got, err := p.List() + got, err := p.List(context.Background()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -177,10 +179,12 @@ func Test_inventoryClient_Create(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + p := &inventoryClient{ proxy: tt.fields.proxy, } - err := p.Create(tt.args.m) + err := p.Create(ctx, tt.args.m) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -188,7 +192,7 @@ func Test_inventoryClient_Create(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) - got, err := p.List() + 
got, err := p.List(ctx) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -351,7 +355,7 @@ func Test_CheckCAPIContract(t *testing.T) { p := &inventoryClient{ proxy: tt.fields.proxy, } - err := p.CheckCAPIContract(tt.args.options...) + err := p.CheckCAPIContract(context.Background(), tt.args.options...) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -398,7 +402,7 @@ func Test_inventoryClient_CheckSingleProviderInstance(t *testing.T) { g := NewWithT(t) p := newInventoryClient(test.NewFakeProxy().WithObjs(tt.fields.initObjs...), fakePollImmediateWaiter) - err := p.CheckSingleProviderInstance() + err := p.CheckSingleProviderInstance(context.Background()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/cluster/mover.go b/cmd/clusterctl/client/cluster/mover.go index 5b9ba98dc4cc..0246c9fe4a5a 100644 --- a/cmd/clusterctl/client/cluster/mover.go +++ b/cmd/clusterctl/client/cluster/mover.go @@ -51,13 +51,13 @@ type ResourceMutatorFunc func(u *unstructured.Unstructured) error // ObjectMover defines methods for moving Cluster API objects to another management cluster. type ObjectMover interface { // Move moves all the Cluster API objects existing in a namespace (or from all the namespaces if empty) to a target management cluster. - Move(namespace string, toCluster Client, dryRun bool, mutators ...ResourceMutatorFunc) error + Move(ctx context.Context, namespace string, toCluster Client, dryRun bool, mutators ...ResourceMutatorFunc) error // ToDirectory writes all the Cluster API objects existing in a namespace (or from all the namespaces if empty) to a target directory. - ToDirectory(namespace string, directory string) error + ToDirectory(ctx context.Context, namespace string, directory string) error // FromDirectory reads all the Cluster API objects existing in a configured directory to a target management cluster. 
- FromDirectory(toCluster Client, directory string) error + FromDirectory(ctx context.Context, toCluster Client, directory string) error } // objectMover implements the ObjectMover interface. @@ -70,7 +70,7 @@ type objectMover struct { // ensure objectMover implements the ObjectMover interface. var _ ObjectMover = &objectMover{} -func (o *objectMover) Move(namespace string, toCluster Client, dryRun bool, mutators ...ResourceMutatorFunc) error { +func (o *objectMover) Move(ctx context.Context, namespace string, toCluster Client, dryRun bool, mutators ...ResourceMutatorFunc) error { log := logf.Log log.Info("Performing move...") o.dryRun = dryRun @@ -82,12 +82,12 @@ func (o *objectMover) Move(namespace string, toCluster Client, dryRun bool, muta // checks that all the required providers in place in the target cluster. if !o.dryRun { - if err := o.checkTargetProviders(toCluster.ProviderInventory()); err != nil { + if err := o.checkTargetProviders(ctx, toCluster.ProviderInventory()); err != nil { return errors.Wrap(err, "failed to check providers in target cluster") } } - objectGraph, err := o.getObjectGraph(namespace) + objectGraph, err := o.getObjectGraph(ctx, namespace) if err != nil { return errors.Wrap(err, "failed to get object graph") } @@ -98,22 +98,22 @@ func (o *objectMover) Move(namespace string, toCluster Client, dryRun bool, muta proxy = toCluster.Proxy() } - return o.move(objectGraph, proxy, mutators...) + return o.move(ctx, objectGraph, proxy, mutators...) 
} -func (o *objectMover) ToDirectory(namespace string, directory string) error { +func (o *objectMover) ToDirectory(ctx context.Context, namespace string, directory string) error { log := logf.Log log.Info("Moving to directory...") - objectGraph, err := o.getObjectGraph(namespace) + objectGraph, err := o.getObjectGraph(ctx, namespace) if err != nil { return errors.Wrap(err, "failed to get object graph") } - return o.toDirectory(objectGraph, directory) + return o.toDirectory(ctx, objectGraph, directory) } -func (o *objectMover) FromDirectory(toCluster Client, directory string) error { +func (o *objectMover) FromDirectory(ctx context.Context, toCluster Client, directory string) error { log := logf.Log log.Info("Moving from directory...") @@ -121,7 +121,7 @@ func (o *objectMover) FromDirectory(toCluster Client, directory string) error { objectGraph := newObjectGraph(o.fromProxy, o.fromProviderInventory) // Gets all the types defined by the CRDs installed by clusterctl plus the ConfigMap/Secret core types. - err := objectGraph.getDiscoveryTypes() + err := objectGraph.getDiscoveryTypes(ctx) if err != nil { return errors.Wrap(err, "failed to retrieve discovery types") } @@ -150,7 +150,7 @@ func (o *objectMover) FromDirectory(toCluster Client, directory string) error { // Restore the objects to the target cluster. 
proxy := toCluster.Proxy() - return o.fromDirectory(objectGraph, proxy) + return o.fromDirectory(ctx, objectGraph, proxy) } func (o *objectMover) filesToObjs(dir string) ([]unstructured.Unstructured, error) { @@ -184,11 +184,11 @@ func (o *objectMover) filesToObjs(dir string) ([]unstructured.Unstructured, erro return objs, nil } -func (o *objectMover) getObjectGraph(namespace string) (*objectGraph, error) { +func (o *objectMover) getObjectGraph(ctx context.Context, namespace string) (*objectGraph, error) { objectGraph := newObjectGraph(o.fromProxy, o.fromProviderInventory) // Gets all the types defined by the CRDs installed by clusterctl plus the ConfigMap/Secret core types. - err := objectGraph.getDiscoveryTypes() + err := objectGraph.getDiscoveryTypes(ctx) if err != nil { return nil, errors.Wrap(err, "failed to retrieve discovery types") } @@ -196,7 +196,7 @@ func (o *objectMover) getObjectGraph(namespace string) (*objectGraph, error) { // Discovery the object graph for the selected types: // - Nodes are defined the Kubernetes objects (Clusters, Machines etc.) identified during the discovery process. // - Edges are derived by the OwnerReferences between nodes. - if err := objectGraph.Discovery(namespace); err != nil { + if err := objectGraph.Discovery(ctx, namespace); err != nil { return nil, errors.Wrap(err, "failed to discover the object graph") } @@ -204,7 +204,7 @@ func (o *objectMover) getObjectGraph(namespace string) (*objectGraph, error) { // This is required because if the infrastructure is provisioned, then we can reasonably assume that the objects we are moving/backing up are // not currently waiting for long-running reconciliation loops, and so we can safely rely on the pause field on the Cluster object // for blocking any further object reconciliation on the source objects. 
- if err := o.checkProvisioningCompleted(objectGraph); err != nil { + if err := o.checkProvisioningCompleted(ctx, objectGraph); err != nil { return nil, errors.Wrap(err, "failed to check for provisioned infrastructure") } @@ -222,7 +222,7 @@ func newObjectMover(fromProxy Proxy, fromProviderInventory InventoryClient) *obj } // checkProvisioningCompleted checks if Cluster API has already completed the provisioning of the infrastructure for the objects involved in the move operation. -func (o *objectMover) checkProvisioningCompleted(graph *objectGraph) error { +func (o *objectMover) checkProvisioningCompleted(ctx context.Context, graph *objectGraph) error { if o.dryRun { return nil } @@ -235,7 +235,7 @@ func (o *objectMover) checkProvisioningCompleted(graph *objectGraph) error { cluster := clusters[i] clusterObj := &clusterv1.Cluster{} if err := retryWithExponentialBackoff(readClusterBackoff, func() error { - return getClusterObj(o.fromProxy, cluster, clusterObj) + return getClusterObj(ctx, o.fromProxy, cluster, clusterObj) }); err != nil { return err } @@ -265,7 +265,7 @@ func (o *objectMover) checkProvisioningCompleted(graph *objectGraph) error { machine := machines[i] machineObj := &clusterv1.Machine{} if err := retryWithExponentialBackoff(readMachinesBackoff, func() error { - return getMachineObj(o.fromProxy, machine, machineObj) + return getMachineObj(ctx, o.fromProxy, machine, machineObj) }); err != nil { return err } @@ -279,7 +279,7 @@ func (o *objectMover) checkProvisioningCompleted(graph *objectGraph) error { } // getClusterObj retrieves the clusterObj corresponding to a node with type Cluster. 
-func getClusterObj(proxy Proxy, cluster *node, clusterObj *clusterv1.Cluster) error { +func getClusterObj(ctx context.Context, proxy Proxy, cluster *node, clusterObj *clusterv1.Cluster) error { c, err := proxy.NewClient() if err != nil { return err @@ -297,7 +297,7 @@ func getClusterObj(proxy Proxy, cluster *node, clusterObj *clusterv1.Cluster) er } // getMachineObj retrieves the machineObj corresponding to a node with type Machine. -func getMachineObj(proxy Proxy, machine *node, machineObj *clusterv1.Machine) error { +func getMachineObj(ctx context.Context, proxy Proxy, machine *node, machineObj *clusterv1.Machine) error { c, err := proxy.NewClient() if err != nil { return err @@ -315,7 +315,7 @@ func getMachineObj(proxy Proxy, machine *node, machineObj *clusterv1.Machine) er } // Move moves all the Cluster API objects existing in a namespace (or from all the namespaces if empty) to a target management cluster. -func (o *objectMover) move(graph *objectGraph, toProxy Proxy, mutators ...ResourceMutatorFunc) error { +func (o *objectMover) move(ctx context.Context, graph *objectGraph, toProxy Proxy, mutators ...ResourceMutatorFunc) error { log := logf.Log clusters := graph.getClusters() @@ -326,12 +326,12 @@ func (o *objectMover) move(graph *objectGraph, toProxy Proxy, mutators ...Resour // Sets the pause field on the Cluster object in the source management cluster, so the controllers stop reconciling it. 
log.V(1).Info("Pausing the source cluster") - if err := setClusterPause(o.fromProxy, clusters, true, o.dryRun); err != nil { + if err := setClusterPause(ctx, o.fromProxy, clusters, true, o.dryRun); err != nil { return err } log.V(1).Info("Pausing the source ClusterClasses") - if err := setClusterClassPause(o.fromProxy, clusterClasses, true, o.dryRun); err != nil { + if err := setClusterClassPause(ctx, o.fromProxy, clusterClasses, true, o.dryRun); err != nil { return errors.Wrap(err, "error pausing ClusterClasses") } @@ -344,7 +344,7 @@ func (o *objectMover) move(graph *objectGraph, toProxy Proxy, mutators ...Resour Steps: 10, Jitter: 0.1, } - if err := waitReadyForMove(o.fromProxy, graph.getMoveNodes(), o.dryRun, waitForMoveUnblockedBackoff); err != nil { + if err := waitReadyForMove(ctx, o.fromProxy, graph.getMoveNodes(), o.dryRun, waitForMoveUnblockedBackoff); err != nil { return errors.Wrap(err, "error waiting for resources to be ready to move") } @@ -362,7 +362,7 @@ func (o *objectMover) move(graph *objectGraph, toProxy Proxy, mutators ...Resour // Create all objects group by group, ensuring all the ownerReferences are re-created. log.Info("Creating objects in the target cluster") for groupIndex := 0; groupIndex < len(moveSequence.groups); groupIndex++ { - if err := o.createGroup(moveSequence.getGroup(groupIndex), toProxy, mutators...); err != nil { + if err := o.createGroup(ctx, moveSequence.getGroup(groupIndex), toProxy, mutators...); err != nil { return err } } @@ -374,23 +374,23 @@ func (o *objectMover) move(graph *objectGraph, toProxy Proxy, mutators ...Resour // Delete all objects group by group in reverse order. 
log.Info("Deleting objects from the source cluster") for groupIndex := len(moveSequence.groups) - 1; groupIndex >= 0; groupIndex-- { - if err := o.deleteGroup(moveSequence.getGroup(groupIndex)); err != nil { + if err := o.deleteGroup(ctx, moveSequence.getGroup(groupIndex)); err != nil { return err } } // Resume the ClusterClasses in the target management cluster, so the controllers start reconciling it. log.V(1).Info("Resuming the target ClusterClasses") - if err := setClusterClassPause(toProxy, clusterClasses, false, o.dryRun, mutators...); err != nil { + if err := setClusterClassPause(ctx, toProxy, clusterClasses, false, o.dryRun, mutators...); err != nil { return errors.Wrap(err, "error resuming ClusterClasses") } // Reset the pause field on the Cluster object in the target management cluster, so the controllers start reconciling it. log.V(1).Info("Resuming the target cluster") - return setClusterPause(toProxy, clusters, false, o.dryRun, mutators...) + return setClusterPause(ctx, toProxy, clusters, false, o.dryRun, mutators...) } -func (o *objectMover) toDirectory(graph *objectGraph, directory string) error { +func (o *objectMover) toDirectory(ctx context.Context, graph *objectGraph, directory string) error { log := logf.Log clusters := graph.getClusters() @@ -401,12 +401,12 @@ func (o *objectMover) toDirectory(graph *objectGraph, directory string) error { // Sets the pause field on the Cluster object in the source management cluster, so the controllers stop reconciling it. 
log.V(1).Info("Pausing the source cluster") - if err := setClusterPause(o.fromProxy, clusters, true, o.dryRun); err != nil { + if err := setClusterPause(ctx, o.fromProxy, clusters, true, o.dryRun); err != nil { return err } log.V(1).Info("Pausing the source ClusterClasses") - if err := setClusterClassPause(o.fromProxy, clusterClasses, true, o.dryRun); err != nil { + if err := setClusterClassPause(ctx, o.fromProxy, clusterClasses, true, o.dryRun); err != nil { return errors.Wrap(err, "error pausing ClusterClasses") } @@ -420,23 +420,23 @@ func (o *objectMover) toDirectory(graph *objectGraph, directory string) error { // Save all objects group by group log.Info(fmt.Sprintf("Saving files to %s", directory)) for groupIndex := 0; groupIndex < len(moveSequence.groups); groupIndex++ { - if err := o.backupGroup(moveSequence.getGroup(groupIndex), directory); err != nil { + if err := o.backupGroup(ctx, moveSequence.getGroup(groupIndex), directory); err != nil { return err } } // Resume the ClusterClasses in the target management cluster, so the controllers start reconciling it. log.V(1).Info("Resuming the target ClusterClasses") - if err := setClusterClassPause(o.fromProxy, clusterClasses, false, o.dryRun); err != nil { + if err := setClusterClassPause(ctx, o.fromProxy, clusterClasses, false, o.dryRun); err != nil { return errors.Wrap(err, "error resuming ClusterClasses") } // Reset the pause field on the Cluster object in the target management cluster, so the controllers start reconciling it. 
log.V(1).Info("Resuming the source cluster") - return setClusterPause(o.fromProxy, clusters, false, o.dryRun) + return setClusterPause(ctx, o.fromProxy, clusters, false, o.dryRun) } -func (o *objectMover) fromDirectory(graph *objectGraph, toProxy Proxy) error { +func (o *objectMover) fromDirectory(ctx context.Context, graph *objectGraph, toProxy Proxy) error { log := logf.Log // Get clusters from graph @@ -446,7 +446,7 @@ func (o *objectMover) fromDirectory(graph *objectGraph, toProxy Proxy) error { // Ensure all the expected target namespaces are in place before creating objects. log.V(1).Info("Creating target namespaces, if missing") - if err := o.ensureNamespaces(graph, toProxy); err != nil { + if err := o.ensureNamespaces(ctx, graph, toProxy); err != nil { return err } @@ -460,7 +460,7 @@ func (o *objectMover) fromDirectory(graph *objectGraph, toProxy Proxy) error { // Create all objects group by group, ensuring all the ownerReferences are re-created. log.Info("Restoring objects into the target cluster") for groupIndex := 0; groupIndex < len(moveSequence.groups); groupIndex++ { - if err := o.restoreGroup(moveSequence.getGroup(groupIndex), toProxy); err != nil { + if err := o.restoreGroup(ctx, moveSequence.getGroup(groupIndex), toProxy); err != nil { return err } } @@ -468,14 +468,14 @@ func (o *objectMover) fromDirectory(graph *objectGraph, toProxy Proxy) error { // Resume reconciling the ClusterClasses after being restored from a backup. // By default, during backup, ClusterClasses are paused so they must be unpaused to be used again log.V(1).Info("Resuming the target ClusterClasses") - if err := setClusterClassPause(toProxy, clusterClasses, false, o.dryRun); err != nil { + if err := setClusterClassPause(ctx, toProxy, clusterClasses, false, o.dryRun); err != nil { return errors.Wrap(err, "error resuming ClusterClasses") } // Resume reconciling the Clusters after being restored from a directory. 
// By default, when moved to a directory, Clusters are paused, so they must be unpaused to be used again. log.V(1).Info("Resuming the target cluster") - return setClusterPause(toProxy, clusters, false, o.dryRun) + return setClusterPause(ctx, toProxy, clusters, false, o.dryRun) } // moveSequence defines a list of group of moveGroups. @@ -554,7 +554,7 @@ func getMoveSequence(graph *objectGraph) *moveSequence { } // setClusterPause sets the paused field on nodes referring to Cluster objects. -func setClusterPause(proxy Proxy, clusters []*node, value bool, dryRun bool, mutators ...ResourceMutatorFunc) error { +func setClusterPause(ctx context.Context, proxy Proxy, clusters []*node, value bool, dryRun bool, mutators ...ResourceMutatorFunc) error { if dryRun { return nil } @@ -575,7 +575,7 @@ func setClusterPause(proxy Proxy, clusters []*node, value bool, dryRun bool, mut // Nb. The operation is wrapped in a retry loop to make setClusterPause more resilient to unexpected conditions. if err := retryWithExponentialBackoff(setClusterPauseBackoff, func() error { - return patchCluster(proxy, cluster, patch, mutators...) + return patchCluster(ctx, proxy, cluster, patch, mutators...) }); err != nil { return errors.Wrapf(err, "error setting Cluster.Spec.Paused=%t", value) } @@ -584,7 +584,7 @@ func setClusterPause(proxy Proxy, clusters []*node, value bool, dryRun bool, mut } // setClusterClassPause sets the paused annotation on nodes referring to ClusterClass objects. -func setClusterClassPause(proxy Proxy, clusterclasses []*node, pause bool, dryRun bool, mutators ...ResourceMutatorFunc) error { +func setClusterClassPause(ctx context.Context, proxy Proxy, clusterclasses []*node, pause bool, dryRun bool, mutators ...ResourceMutatorFunc) error { if dryRun { return nil } @@ -602,7 +602,7 @@ func setClusterClassPause(proxy Proxy, clusterclasses []*node, pause bool, dryRu // Nb. 
The operation is wrapped in a retry loop to make setClusterClassPause more resilient to unexpected conditions. if err := retryWithExponentialBackoff(setClusterClassPauseBackoff, func() error { - return pauseClusterClass(proxy, clusterclass, pause, mutators...) + return pauseClusterClass(ctx, proxy, clusterclass, pause, mutators...) }); err != nil { return errors.Wrapf(err, "error updating ClusterClass %s/%s", clusterclass.identity.Namespace, clusterclass.identity.Name) } @@ -610,7 +610,7 @@ func setClusterClassPause(proxy Proxy, clusterclasses []*node, pause bool, dryRu return nil } -func waitReadyForMove(proxy Proxy, nodes []*node, dryRun bool, backoff wait.Backoff) error { +func waitReadyForMove(ctx context.Context, proxy Proxy, nodes []*node, dryRun bool, backoff wait.Backoff) error { if dryRun { return nil } @@ -660,7 +660,7 @@ func waitReadyForMove(proxy Proxy, nodes []*node, dryRun bool, backoff wait.Back } // patchCluster applies a patch to a node referring to a Cluster object. -func patchCluster(proxy Proxy, n *node, patch client.Patch, mutators ...ResourceMutatorFunc) error { +func patchCluster(ctx context.Context, proxy Proxy, n *node, patch client.Patch, mutators ...ResourceMutatorFunc) error { cFrom, err := proxy.NewClient() if err != nil { return err @@ -695,7 +695,7 @@ func patchCluster(proxy Proxy, n *node, patch client.Patch, mutators ...Resource return nil } -func pauseClusterClass(proxy Proxy, n *node, pause bool, mutators ...ResourceMutatorFunc) error { +func pauseClusterClass(ctx context.Context, proxy Proxy, n *node, pause bool, mutators ...ResourceMutatorFunc) error { cFrom, err := proxy.NewClient() if err != nil { return errors.Wrap(err, "error creating client") @@ -756,7 +756,7 @@ func pauseClusterClass(proxy Proxy, n *node, pause bool, mutators ...ResourceMut } // ensureNamespaces ensures all the expected target namespaces are in place before creating objects. 
-func (o *objectMover) ensureNamespaces(graph *objectGraph, toProxy Proxy) error { +func (o *objectMover) ensureNamespaces(ctx context.Context, graph *objectGraph, toProxy Proxy) error { if o.dryRun { return nil } @@ -778,7 +778,7 @@ func (o *objectMover) ensureNamespaces(graph *objectGraph, toProxy Proxy) error namespaces.Insert(namespace) if err := retryWithExponentialBackoff(ensureNamespaceBackoff, func() error { - return o.ensureNamespace(toProxy, namespace) + return o.ensureNamespace(ctx, toProxy, namespace) }); err != nil { return err } @@ -788,7 +788,7 @@ func (o *objectMover) ensureNamespaces(graph *objectGraph, toProxy Proxy) error } // ensureNamespace ensures a target namespaces is in place before creating objects. -func (o *objectMover) ensureNamespace(toProxy Proxy, namespace string) error { +func (o *objectMover) ensureNamespace(ctx context.Context, toProxy Proxy, namespace string) error { log := logf.Log cs, err := toProxy.NewClient() @@ -851,7 +851,7 @@ func (o *objectMover) ensureNamespace(toProxy Proxy, namespace string) error { } // createGroup creates all the Kubernetes objects into the target management cluster corresponding to the object graph nodes in a moveGroup. -func (o *objectMover) createGroup(group moveGroup, toProxy Proxy, mutators ...ResourceMutatorFunc) error { +func (o *objectMover) createGroup(ctx context.Context, group moveGroup, toProxy Proxy, mutators ...ResourceMutatorFunc) error { createTargetObjectBackoff := newWriteBackoff() errList := []error{} @@ -862,7 +862,7 @@ func (o *objectMover) createGroup(group moveGroup, toProxy Proxy, mutators ...Re // Creates the Kubernetes object corresponding to the nodeToCreate. // Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions. 
err := retryWithExponentialBackoff(createTargetObjectBackoff, func() error { - return o.createTargetObject(nodeToCreate, toProxy, mutators, existingNamespaces) + return o.createTargetObject(ctx, nodeToCreate, toProxy, mutators, existingNamespaces) }) if err != nil { errList = append(errList, err) @@ -876,7 +876,7 @@ func (o *objectMover) createGroup(group moveGroup, toProxy Proxy, mutators ...Re return nil } -func (o *objectMover) backupGroup(group moveGroup, directory string) error { +func (o *objectMover) backupGroup(ctx context.Context, group moveGroup, directory string) error { backupTargetObjectBackoff := newWriteBackoff() errList := []error{} @@ -884,7 +884,7 @@ func (o *objectMover) backupGroup(group moveGroup, directory string) error { // Backs-up the Kubernetes object corresponding to the nodeToBackup. // Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions. err := retryWithExponentialBackoff(backupTargetObjectBackoff, func() error { - return o.backupTargetObject(nodeToBackup, directory) + return o.backupTargetObject(ctx, nodeToBackup, directory) }) if err != nil { errList = append(errList, err) @@ -898,7 +898,7 @@ func (o *objectMover) backupGroup(group moveGroup, directory string) error { return nil } -func (o *objectMover) restoreGroup(group moveGroup, toProxy Proxy) error { +func (o *objectMover) restoreGroup(ctx context.Context, group moveGroup, toProxy Proxy) error { restoreTargetObjectBackoff := newWriteBackoff() errList := []error{} @@ -906,7 +906,7 @@ func (o *objectMover) restoreGroup(group moveGroup, toProxy Proxy) error { // Creates the Kubernetes object corresponding to the nodeToRestore. // Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions. 
err := retryWithExponentialBackoff(restoreTargetObjectBackoff, func() error { - return o.restoreTargetObject(nodeToRestore, toProxy) + return o.restoreTargetObject(ctx, nodeToRestore, toProxy) }) if err != nil { errList = append(errList, err) @@ -921,7 +921,7 @@ func (o *objectMover) restoreGroup(group moveGroup, toProxy Proxy) error { } // createTargetObject creates the Kubernetes object in the target Management cluster corresponding to the object graph node, taking care of restoring the OwnerReference with the owner nodes, if any. -func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy, mutators []ResourceMutatorFunc, existingNamespaces sets.Set[string]) error { +func (o *objectMover) createTargetObject(ctx context.Context, nodeToCreate *node, toProxy Proxy, mutators []ResourceMutatorFunc, existingNamespaces sets.Set[string]) error { log := logf.Log log.V(1).Info("Creating", nodeToCreate.identity.Kind, nodeToCreate.identity.Name, "Namespace", nodeToCreate.identity.Namespace) @@ -975,7 +975,7 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy, muta } // Applying mutators MAY change the namespace, so ensure the namespace exists before creating the resource. 
if !nodeToCreate.isGlobal && !existingNamespaces.Has(obj.GetNamespace()) { - if err = o.ensureNamespace(toProxy, obj.GetNamespace()); err != nil { + if err = o.ensureNamespace(ctx, toProxy, obj.GetNamespace()); err != nil { return err } existingNamespaces.Insert(obj.GetNamespace()) @@ -1022,7 +1022,7 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy, muta return nil } -func (o *objectMover) backupTargetObject(nodeToCreate *node, directory string) error { +func (o *objectMover) backupTargetObject(ctx context.Context, nodeToCreate *node, directory string) error { log := logf.Log log.V(1).Info("Saving", nodeToCreate.identity.Kind, nodeToCreate.identity.Name, "Namespace", nodeToCreate.identity.Namespace) @@ -1073,7 +1073,7 @@ func (o *objectMover) backupTargetObject(nodeToCreate *node, directory string) e return nil } -func (o *objectMover) restoreTargetObject(nodeToCreate *node, toProxy Proxy) error { +func (o *objectMover) restoreTargetObject(ctx context.Context, nodeToCreate *node, toProxy Proxy) error { log := logf.Log log.V(1).Info("Restoring", nodeToCreate.identity.Kind, nodeToCreate.identity.Name, "Namespace", nodeToCreate.identity.Namespace) @@ -1155,7 +1155,7 @@ func (o *objectMover) buildOwnerChain(obj *unstructured.Unstructured, n *node) { } // deleteGroup deletes all the Kubernetes objects from the source management cluster corresponding to the object graph nodes in a moveGroup. -func (o *objectMover) deleteGroup(group moveGroup) error { +func (o *objectMover) deleteGroup(ctx context.Context, group moveGroup) error { deleteSourceObjectBackoff := newWriteBackoff() errList := []error{} for i := range group { @@ -1164,7 +1164,7 @@ func (o *objectMover) deleteGroup(group moveGroup) error { // Delete the Kubernetes object corresponding to the current node. // Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions. 
err := retryWithExponentialBackoff(deleteSourceObjectBackoff, func() error { - return o.deleteSourceObject(nodeToDelete) + return o.deleteSourceObject(ctx, nodeToDelete) }) if err != nil { @@ -1182,7 +1182,7 @@ var ( // deleteSourceObject deletes the Kubernetes object corresponding to the node from the source management cluster, taking care of removing all the finalizers so // the objects gets immediately deleted (force delete). -func (o *objectMover) deleteSourceObject(nodeToDelete *node) error { +func (o *objectMover) deleteSourceObject(ctx context.Context, nodeToDelete *node) error { // Don't delete cluster-wide nodes or nodes that are below a hierarchy that starts with a global object (e.g. a secrets owned by a global identity object). if nodeToDelete.isGlobal || nodeToDelete.isGlobalHierarchy { return nil @@ -1240,18 +1240,18 @@ func (o *objectMover) deleteSourceObject(nodeToDelete *node) error { } // checkTargetProviders checks that all the providers installed in the source cluster exists in the target cluster as well (with a version >= of the current version). -func (o *objectMover) checkTargetProviders(toInventory InventoryClient) error { +func (o *objectMover) checkTargetProviders(ctx context.Context, toInventory InventoryClient) error { if o.dryRun { return nil } // Gets the list of providers in the source/target cluster. 
- fromProviders, err := o.fromProviderInventory.List() + fromProviders, err := o.fromProviderInventory.List(ctx) if err != nil { return errors.Wrapf(err, "failed to get provider list from the source cluster") } - toProviders, err := toInventory.List() + toProviders, err := toInventory.List(ctx) if err != nil { return errors.Wrapf(err, "failed to get provider list from the target cluster") } diff --git a/cmd/clusterctl/client/cluster/mover_test.go b/cmd/clusterctl/client/cluster/mover_test.go index a5832887c31b..1f8653119245 100644 --- a/cmd/clusterctl/client/cluster/mover_test.go +++ b/cmd/clusterctl/client/cluster/mover_test.go @@ -729,14 +729,16 @@ func Test_objectMover_backupTargetObject(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.fields.objs) // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(getFakeDiscoveryTypes(ctx, graph)).To(Succeed()) // trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) // Run backupTargetObject on nodes in graph mover := objectMover{ @@ -750,7 +752,7 @@ func Test_objectMover_backupTargetObject(t *testing.T) { defer os.RemoveAll(dir) for _, node := range graph.uidToNode { - err = mover.backupTargetObject(node, dir) + err = mover.backupTargetObject(ctx, node, dir) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -785,7 +787,7 @@ func Test_objectMover_backupTargetObject(t *testing.T) { time.Sleep(time.Millisecond * 50) // Running backupTargetObject should override any existing files since it represents a new toDirectory - err = mover.backupTargetObject(node, dir) + err = mover.backupTargetObject(ctx, node, dir) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -811,6 
+813,8 @@ func Test_objectMover_restoreTargetObject(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // temporary directory dir, err := os.MkdirTemp("/tmp", "cluster-api") if err != nil { @@ -822,10 +826,10 @@ func Test_objectMover_restoreTargetObject(t *testing.T) { graph := getObjectGraph() // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(getFakeDiscoveryTypes(ctx, graph)).To(Succeed()) // trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) // gets a fakeProxy to an empty cluster with all the required CRDs toProxy := getFakeProxyWithCRDs() @@ -853,7 +857,7 @@ func Test_objectMover_restoreTargetObject(t *testing.T) { } for _, node := range graph.uidToNode { - err = mover.restoreTargetObject(node, toProxy) + err = mover.restoreTargetObject(ctx, node, toProxy) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -881,7 +885,7 @@ func Test_objectMover_restoreTargetObject(t *testing.T) { } // Re-running restoreTargetObjects won't override existing objects - err = mover.restoreTargetObject(node, toProxy) + err = mover.restoreTargetObject(ctx, node, toProxy) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -924,14 +928,16 @@ func Test_objectMover_toDirectory(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. 
graph := getObjectGraphWithObjs(tt.fields.objs) // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(getFakeDiscoveryTypes(ctx, graph)).To(Succeed()) // trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) // Run toDirectory mover := objectMover{ @@ -944,7 +950,7 @@ func Test_objectMover_toDirectory(t *testing.T) { } defer os.RemoveAll(dir) - err = mover.toDirectory(graph, dir) + err = mover.toDirectory(ctx, graph, dir) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -1058,6 +1064,8 @@ func Test_objectMover_fromDirectory(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // temporary directory dir, err := os.MkdirTemp("/tmp", "cluster-api") if err != nil { @@ -1069,7 +1077,7 @@ func Test_objectMover_fromDirectory(t *testing.T) { graph := getObjectGraph() // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(getFakeDiscoveryTypes(ctx, graph)).To(Succeed()) // gets a fakeProxy to an empty cluster with all the required CRDs toProxy := getFakeProxyWithCRDs() @@ -1103,7 +1111,7 @@ func Test_objectMover_fromDirectory(t *testing.T) { graph.setTenants() graph.checkVirtualNode() - err = mover.fromDirectory(graph, toProxy) + err = mover.fromDirectory(ctx, graph, toProxy) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -1141,14 +1149,16 @@ func Test_getMoveSequence(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. 
graph := getObjectGraphWithObjs(tt.fields.objs) // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(getFakeDiscoveryTypes(ctx, graph)).To(Succeed()) // trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) moveSequence := getMoveSequence(graph) g.Expect(moveSequence.groups).To(HaveLen(len(tt.wantMoveGroups))) @@ -1172,14 +1182,16 @@ func Test_objectMover_move_dryRun(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.fields.objs) // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(getFakeDiscoveryTypes(ctx, graph)).To(Succeed()) // trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) // gets a fakeProxy to an empty cluster with all the required CRDs toProxy := getFakeProxyWithCRDs() @@ -1190,7 +1202,7 @@ func Test_objectMover_move_dryRun(t *testing.T) { dryRun: true, } - err := mover.move(graph, toProxy, nil) + err := mover.move(ctx, graph, toProxy, nil) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -1245,14 +1257,16 @@ func Test_objectMover_move(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test. 
graph := getObjectGraphWithObjs(tt.fields.objs) // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(getFakeDiscoveryTypes(ctx, graph)).To(Succeed()) // trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) // gets a fakeProxy to an empty cluster with all the required CRDs toProxy := getFakeProxyWithCRDs() @@ -1261,7 +1275,7 @@ func Test_objectMover_move(t *testing.T) { mover := objectMover{ fromProxy: graph.proxy, } - err := mover.move(graph, toProxy) + err := mover.move(ctx, graph, toProxy) if tt.wantErr { g.Expect(err).To(HaveOccurred()) @@ -1319,6 +1333,9 @@ func Test_objectMover_move_with_Mutator(t *testing.T) { for _, tt := range moveTests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + toNamespace := "foobar" updateKnownKinds := map[string][][]string{ "Cluster": { @@ -1358,10 +1375,10 @@ func Test_objectMover_move_with_Mutator(t *testing.T) { graph := getObjectGraphWithObjs(tt.fields.objs) // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(getFakeDiscoveryTypes(ctx, graph)).To(Succeed()) // trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) // gets a fakeProxy to an empty cluster with all the required CRDs toProxy := getFakeProxyWithCRDs() @@ -1371,7 +1388,7 @@ func Test_objectMover_move_with_Mutator(t *testing.T) { fromProxy: graph.proxy, } - err := mover.move(graph, toProxy, namespaceMutator) + err := mover.move(ctx, graph, toProxy, namespaceMutator) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -1637,19 +1654,21 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an 
objectGraph bound a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.fields.objs) // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(getFakeDiscoveryTypes(ctx, graph)).To(Succeed()) // trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) o := &objectMover{ fromProxy: graph.proxy, } - err := o.checkProvisioningCompleted(graph) + err := o.checkProvisioningCompleted(ctx, graph) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { @@ -1728,10 +1747,12 @@ func Test_objectsMoverService_checkTargetProviders(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + o := &objectMover{ fromProviderInventory: newInventoryClient(tt.fields.fromProxy, nil), } - err := o.checkTargetProviders(newInventoryClient(tt.args.toProxy, nil)) + err := o.checkTargetProviders(ctx, newInventoryClient(tt.args.toProxy, nil)) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { @@ -1781,11 +1802,13 @@ func Test_objectMoverService_ensureNamespace(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + mover := objectMover{ fromProxy: test.NewFakeProxy(), } - err := mover.ensureNamespace(tt.args.toProxy, tt.args.namespace) + err := mover.ensureNamespace(ctx, tt.args.toProxy, tt.args.namespace) g.Expect(err).ToNot(HaveOccurred()) // Check that the namespaces either existed or were created in the @@ -1878,19 +1901,21 @@ func Test_objectMoverService_ensureNamespaces(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + graph := getObjectGraphWithObjs(tt.fields.objs) // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(getFakeDiscoveryTypes(ctx, graph)).To(Succeed()) // Trigger 
discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) mover := objectMover{ fromProxy: graph.proxy, } - err := mover.ensureNamespaces(graph, tt.args.toProxy) + err := mover.ensureNamespaces(ctx, graph, tt.args.toProxy) g.Expect(err).ToNot(HaveOccurred()) // Check that the namespaces either existed or were created in the @@ -2000,13 +2025,13 @@ func Test_createTargetObject(t *testing.T) { nsKey := client.ObjectKey{ Name: "ns1", } - g.Expect(toClient.Get(ctx, nsKey, ns)).To(Succeed()) + g.Expect(toClient.Get(context.Background(), nsKey, ns)).To(Succeed()) c := &clusterv1.Cluster{} key := client.ObjectKey{ Namespace: "ns1", Name: "foo", } - g.Expect(toClient.Get(ctx, key, c)).ToNot(HaveOccurred()) + g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred()) g.Expect(c.OwnerReferences).To(HaveLen(1)) g.Expect(c.OwnerReferences[0].Controller).To(Equal(pointer.Bool(true))) }, @@ -2046,7 +2071,7 @@ func Test_createTargetObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(toClient.Get(ctx, key, c)).ToNot(HaveOccurred()) + g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred()) g.Expect(c.Annotations).To(BeEmpty()) }, }, @@ -2082,7 +2107,7 @@ func Test_createTargetObject(t *testing.T) { key := client.ObjectKey{ Name: "foo", } - g.Expect(toClient.Get(ctx, key, c)).ToNot(HaveOccurred()) + g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred()) g.Expect(c.Annotations).ToNot(BeEmpty()) }, }, @@ -2122,7 +2147,7 @@ func Test_createTargetObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(toClient.Get(ctx, key, c)).ToNot(HaveOccurred()) + g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred()) g.Expect(c.Annotations).ToNot(BeEmpty()) }, }, @@ -2132,11 +2157,13 @@ func Test_createTargetObject(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + 
mover := objectMover{ fromProxy: tt.args.fromProxy, } - err := mover.createTargetObject(tt.args.node, tt.args.toProxy, nil, sets.New[string]()) + err := mover.createTargetObject(ctx, tt.args.node, tt.args.toProxy, nil, sets.New[string]()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -2181,7 +2208,7 @@ func Test_deleteSourceObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(apierrors.IsNotFound(toClient.Get(ctx, key, c))).To(BeTrue()) + g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue()) }, }, { @@ -2210,7 +2237,7 @@ func Test_deleteSourceObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(apierrors.IsNotFound(toClient.Get(ctx, key, c))).To(BeTrue()) + g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue()) }, }, { @@ -2238,7 +2265,7 @@ func Test_deleteSourceObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(apierrors.IsNotFound(toClient.Get(ctx, key, c))).To(BeTrue()) + g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue()) }, }, { @@ -2268,7 +2295,7 @@ func Test_deleteSourceObject(t *testing.T) { Namespace: "ns1", Name: "foo", } - g.Expect(apierrors.IsNotFound(toClient.Get(ctx, key, c))).To(BeTrue()) + g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue()) }, }, } @@ -2277,11 +2304,13 @@ func Test_deleteSourceObject(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + mover := objectMover{ fromProxy: tt.args.fromProxy, } - err := mover.deleteSourceObject(tt.args.node) + err := mover.deleteSourceObject(ctx, tt.args.node) g.Expect(err).ToNot(HaveOccurred()) fromClient, err := tt.args.fromProxy.NewClient() @@ -2318,6 +2347,8 @@ func TestWaitReadyForMove(t *testing.T) { clusterNamespace := "ns1" objs := test.NewFakeCluster(clusterNamespace, clusterName).Objs() + ctx := context.Background() + // Create an objectGraph bound a 
source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(objs) @@ -2325,7 +2356,6 @@ func TestWaitReadyForMove(t *testing.T) { c, err := graph.proxy.NewClient() g.Expect(err).NotTo(HaveOccurred()) - ctx := context.Background() cluster := &clusterv1.Cluster{} err = c.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, cluster) g.Expect(err).NotTo(HaveOccurred()) @@ -2340,15 +2370,15 @@ func TestWaitReadyForMove(t *testing.T) { } // Get all the types to be considered for discovery - g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + g.Expect(getFakeDiscoveryTypes(ctx, graph)).To(Succeed()) // trigger discovery the content of the source cluster - g.Expect(graph.Discovery("")).To(Succeed()) + g.Expect(graph.Discovery(ctx, "")).To(Succeed()) backoff := wait.Backoff{ Steps: 1, } - err := waitReadyForMove(graph.proxy, graph.getMoveNodes(), false, backoff) + err := waitReadyForMove(ctx, graph.proxy, graph.getMoveNodes(), false, backoff) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { diff --git a/cmd/clusterctl/client/cluster/objectgraph.go b/cmd/clusterctl/client/cluster/objectgraph.go index b0934b45c7dc..2bfd157f423e 100644 --- a/cmd/clusterctl/client/cluster/objectgraph.go +++ b/cmd/clusterctl/client/cluster/objectgraph.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "fmt" "strings" @@ -323,11 +324,11 @@ func (o *objectGraph) objMetaToNode(obj *unstructured.Unstructured, n *node) { // getDiscoveryTypes returns the list of TypeMeta to be considered for the move discovery phase. // This list includes all the types defines by the CRDs installed by clusterctl and the ConfigMap/Secret core types. 
-func (o *objectGraph) getDiscoveryTypes() error { +func (o *objectGraph) getDiscoveryTypes(ctx context.Context) error { crdList := &apiextensionsv1.CustomResourceDefinitionList{} getDiscoveryTypesBackoff := newReadBackoff() if err := retryWithExponentialBackoff(getDiscoveryTypesBackoff, func() error { - return getCRDList(o.proxy, crdList) + return getCRDList(ctx, o.proxy, crdList) }); err != nil { return err } @@ -397,7 +398,7 @@ func getKindAPIString(typeMeta metav1.TypeMeta) string { return fmt.Sprintf("%ss.%s", strings.ToLower(typeMeta.Kind), api) } -func getCRDList(proxy Proxy, crdList *apiextensionsv1.CustomResourceDefinitionList) error { +func getCRDList(ctx context.Context, proxy Proxy, crdList *apiextensionsv1.CustomResourceDefinitionList) error { c, err := proxy.NewClient() if err != nil { return err @@ -411,7 +412,7 @@ func getCRDList(proxy Proxy, crdList *apiextensionsv1.CustomResourceDefinitionLi // Discovery reads all the Kubernetes objects existing in a namespace (or in all namespaces if empty) for the types received in input, and then adds // everything to the objects graph. -func (o *objectGraph) Discovery(namespace string) error { +func (o *objectGraph) Discovery(ctx context.Context, namespace string) error { log := logf.Log log.Info("Discovering Cluster API objects") @@ -426,14 +427,14 @@ func (o *objectGraph) Discovery(namespace string) error { objList := new(unstructured.UnstructuredList) if err := retryWithExponentialBackoff(discoveryBackoff, func() error { - return getObjList(o.proxy, typeMeta, selectors, objList) + return getObjList(ctx, o.proxy, typeMeta, selectors, objList) }); err != nil { return err } // if we are discovering Secrets, also secrets from the providers namespace should be included. 
if discoveryType.typeMeta.GetObjectKind().GroupVersionKind().GroupKind() == corev1.SchemeGroupVersion.WithKind("SecretList").GroupKind() { - providers, err := o.providerInventory.List() + providers, err := o.providerInventory.List(ctx) if err != nil { return err } @@ -442,7 +443,7 @@ func (o *objectGraph) Discovery(namespace string) error { providerNamespaceSelector := []client.ListOption{client.InNamespace(p.Namespace)} providerNamespaceSecretList := new(unstructured.UnstructuredList) if err := retryWithExponentialBackoff(discoveryBackoff, func() error { - return getObjList(o.proxy, typeMeta, providerNamespaceSelector, providerNamespaceSecretList) + return getObjList(ctx, o.proxy, typeMeta, providerNamespaceSelector, providerNamespaceSecretList) }); err != nil { return err } @@ -476,7 +477,7 @@ func (o *objectGraph) Discovery(namespace string) error { return nil } -func getObjList(proxy Proxy, typeMeta metav1.TypeMeta, selectors []client.ListOption, objList *unstructured.UnstructuredList) error { +func getObjList(ctx context.Context, proxy Proxy, typeMeta metav1.TypeMeta, selectors []client.ListOption, objList *unstructured.UnstructuredList) error { c, err := proxy.NewClient() if err != nil { return err diff --git a/cmd/clusterctl/client/cluster/objectgraph_test.go b/cmd/clusterctl/client/cluster/objectgraph_test.go index df835eea4d66..be24869135f7 100644 --- a/cmd/clusterctl/client/cluster/objectgraph_test.go +++ b/cmd/clusterctl/client/cluster/objectgraph_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package cluster import ( + "context" "fmt" "sort" "testing" @@ -221,8 +222,10 @@ func TestObjectGraph_getDiscoveryTypeMetaList(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + graph := newObjectGraph(tt.fields.proxy, nil) - err := graph.getDiscoveryTypes() + err := graph.getDiscoveryTypes(ctx) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -1771,8 +1774,8 @@ func getFakeProxyWithCRDs() *test.FakeProxy { return proxy } -func getFakeDiscoveryTypes(graph *objectGraph) error { - if err := graph.getDiscoveryTypes(); err != nil { +func getFakeDiscoveryTypes(ctx context.Context, graph *objectGraph) error { + if err := graph.getDiscoveryTypes(ctx); err != nil { return err } @@ -1789,15 +1792,17 @@ func TestObjectGraph_Discovery(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an objectGraph bound to a source cluster with all the CRDs for the types involved in the test. graph := getObjectGraphWithObjs(tt.args.objs) // Get all the types to be considered for discovery - err := getFakeDiscoveryTypes(graph) + err := getFakeDiscoveryTypes(ctx, graph) g.Expect(err).ToNot(HaveOccurred()) // finally test discovery - err = graph.Discovery("") + err = graph.Discovery(ctx, "") if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -1945,15 +1950,17 @@ func TestObjectGraph_DiscoveryByNamespace(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create an objectGraph bound to a source cluster with all the CRDs for the types involved in the test. 
graph := getObjectGraphWithObjs(tt.args.objs) // Get all the types to be considered for discovery - err := getFakeDiscoveryTypes(graph) + err := getFakeDiscoveryTypes(ctx, graph) g.Expect(err).ToNot(HaveOccurred()) // finally test discovery - err = graph.Discovery(tt.args.namespace) + err = graph.Discovery(ctx, tt.args.namespace) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/cluster/ownergraph.go b/cmd/clusterctl/client/cluster/ownergraph.go index 6d1601b743c9..487cd7de8cc5 100644 --- a/cmd/clusterctl/client/cluster/ownergraph.go +++ b/cmd/clusterctl/client/cluster/ownergraph.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "strings" "github.com/pkg/errors" @@ -42,28 +43,28 @@ type OwnerGraphNode struct { // NOTE: this data structure is exposed to allow implementation of E2E tests verifying that CAPI can properly rebuild its // own owner references; there is no guarantee about the stability of this API. Using this test with providers may require // a custom implementation of this function, or the OwnerGraph it returns. -func GetOwnerGraph(namespace, kubeconfigPath string) (OwnerGraph, error) { +func GetOwnerGraph(ctx context.Context, namespace, kubeconfigPath string) (OwnerGraph, error) { p := newProxy(Kubeconfig{Path: kubeconfigPath, Context: ""}) invClient := newInventoryClient(p, nil) graph := newObjectGraph(p, invClient) // Gets all the types defined by the CRDs installed by clusterctl plus the ConfigMap/Secret core types. - err := graph.getDiscoveryTypes() + err := graph.getDiscoveryTypes(ctx) if err != nil { return OwnerGraph{}, errors.Wrap(err, "failed to retrieve discovery types") } // graph.Discovery can not be used here as it will use the latest APIVersion for ownerReferences - not those // present in the object 'metadata.ownerReferences`. 
- owners, err := discoverOwnerGraph(namespace, graph) + owners, err := discoverOwnerGraph(ctx, namespace, graph) if err != nil { return OwnerGraph{}, errors.Wrap(err, "failed to discovery ownerGraph types") } return owners, nil } -func discoverOwnerGraph(namespace string, o *objectGraph) (OwnerGraph, error) { +func discoverOwnerGraph(ctx context.Context, namespace string, o *objectGraph) (OwnerGraph, error) { selectors := []client.ListOption{} if namespace != "" { selectors = append(selectors, client.InNamespace(namespace)) @@ -76,14 +77,14 @@ func discoverOwnerGraph(namespace string, o *objectGraph) (OwnerGraph, error) { objList := new(unstructured.UnstructuredList) if err := retryWithExponentialBackoff(discoveryBackoff, func() error { - return getObjList(o.proxy, typeMeta, selectors, objList) + return getObjList(ctx, o.proxy, typeMeta, selectors, objList) }); err != nil { return nil, err } // if we are discovering Secrets, also secrets from the providers namespace should be included. if discoveryType.typeMeta.GetObjectKind().GroupVersionKind().GroupKind() == corev1.SchemeGroupVersion.WithKind("SecretList").GroupKind() { - providers, err := o.providerInventory.List() + providers, err := o.providerInventory.List(ctx) if err != nil { return nil, err } @@ -92,7 +93,7 @@ func discoverOwnerGraph(namespace string, o *objectGraph) (OwnerGraph, error) { providerNamespaceSelector := []client.ListOption{client.InNamespace(p.Namespace)} providerNamespaceSecretList := new(unstructured.UnstructuredList) if err := retryWithExponentialBackoff(discoveryBackoff, func() error { - return getObjList(o.proxy, typeMeta, providerNamespaceSelector, providerNamespaceSecretList) + return getObjList(ctx, o.proxy, typeMeta, providerNamespaceSelector, providerNamespaceSecretList) }); err != nil { return nil, err } diff --git a/cmd/clusterctl/client/cluster/proxy.go b/cmd/clusterctl/client/cluster/proxy.go index 5c114beac458..9337b802e373 100644 --- a/cmd/clusterctl/client/cluster/proxy.go +++ 
b/cmd/clusterctl/client/cluster/proxy.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "fmt" "os" "strconv" @@ -68,13 +69,13 @@ type Proxy interface { // Certificates for cert-manager, Clusters for CAPI, AWSCluster for CAPA and so on). // This is done to avoid errors when listing resources of providers which have already been deleted/scaled down to 0 replicas/with // malfunctioning webhooks. - ListResources(labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error) + ListResources(ctx context.Context, labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error) // GetContexts returns the list of contexts in kubeconfig which begin with prefix. GetContexts(prefix string) ([]string, error) // GetResourceNames returns the list of resource names which begin with prefix. - GetResourceNames(groupVersion, kind string, options []client.ListOption, prefix string) ([]string, error) + GetResourceNames(ctx context.Context, groupVersion, kind string, options []client.ListOption, prefix string) ([]string, error) } type proxy struct { @@ -209,7 +210,7 @@ func (k *proxy) CheckClusterAvailable() error { // - If we now want to delete e.g. the kubeadm bootstrap provider, we cannot list AWSClusterControllerIdentity resources // as the conversion would fail, because the AWS controller hosting the conversion webhook has already been deleted. // - Thus we exclude resources of other providers if we detect that ListResources is called to list resources of a provider. 
-func (k *proxy) ListResources(labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error) { +func (k *proxy) ListResources(ctx context.Context, labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error) { cs, err := k.newClientSet() if err != nil { return nil, err @@ -282,14 +283,14 @@ func (k *proxy) ListResources(labels map[string]string, namespaces ...string) ([ // List all the object instances of this resourceKind with the given labels if resourceKind.Namespaced { for _, namespace := range namespaces { - objList, err := listObjByGVK(c, resourceGroup.GroupVersion, resourceKind.Kind, []client.ListOption{client.MatchingLabels(labels), client.InNamespace(namespace)}) + objList, err := listObjByGVK(ctx, c, resourceGroup.GroupVersion, resourceKind.Kind, []client.ListOption{client.MatchingLabels(labels), client.InNamespace(namespace)}) if err != nil { return nil, err } ret = append(ret, objList.Items...) } } else { - objList, err := listObjByGVK(c, resourceGroup.GroupVersion, resourceKind.Kind, []client.ListOption{client.MatchingLabels(labels)}) + objList, err := listObjByGVK(ctx, c, resourceGroup.GroupVersion, resourceKind.Kind, []client.ListOption{client.MatchingLabels(labels)}) if err != nil { return nil, err } @@ -318,13 +319,13 @@ func (k *proxy) GetContexts(prefix string) ([]string, error) { } // GetResourceNames returns the list of resource names which begin with prefix. 
-func (k *proxy) GetResourceNames(groupVersion, kind string, options []client.ListOption, prefix string) ([]string, error) { +func (k *proxy) GetResourceNames(ctx context.Context, groupVersion, kind string, options []client.ListOption, prefix string) ([]string, error) { client, err := k.NewClient() if err != nil { return nil, err } - objList, err := listObjByGVK(client, groupVersion, kind, options) + objList, err := listObjByGVK(ctx, client, groupVersion, kind, options) if err != nil { return nil, err } @@ -341,7 +342,7 @@ func (k *proxy) GetResourceNames(groupVersion, kind string, options []client.Lis return comps, nil } -func listObjByGVK(c client.Client, groupVersion, kind string, options []client.ListOption) (*unstructured.UnstructuredList, error) { +func listObjByGVK(ctx context.Context, c client.Client, groupVersion, kind string, options []client.ListOption) (*unstructured.UnstructuredList, error) { objList := new(unstructured.UnstructuredList) objList.SetAPIVersion(groupVersion) objList.SetKind(kind) diff --git a/cmd/clusterctl/client/cluster/template.go b/cmd/clusterctl/client/cluster/template.go index 0dab1df7f6d3..a07821489de3 100644 --- a/cmd/clusterctl/client/cluster/template.go +++ b/cmd/clusterctl/client/cluster/template.go @@ -40,17 +40,17 @@ import ( // TemplateClient has methods to work with templates stored in the cluster/out of the provider repository. type TemplateClient interface { // GetFromConfigMap returns a workload cluster template from the given ConfigMap. - GetFromConfigMap(namespace, name, dataKey, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) + GetFromConfigMap(ctx context.Context, namespace, name, dataKey, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) // GetFromURL returns a workload cluster template from the given URL. 
- GetFromURL(templateURL, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) + GetFromURL(ctx context.Context, templateURL, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) } // templateClient implements TemplateClient. type templateClient struct { proxy Proxy configClient config.Client - gitHubClientFactory func(configVariablesClient config.VariablesClient) (*github.Client, error) + gitHubClientFactory func(ctx context.Context, configVariablesClient config.VariablesClient) (*github.Client, error) processor yaml.Processor httpClient *http.Client } @@ -76,7 +76,7 @@ func newTemplateClient(input TemplateClientInput) *templateClient { } } -func (t *templateClient) GetFromConfigMap(configMapNamespace, configMapName, configMapDataKey, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { +func (t *templateClient) GetFromConfigMap(ctx context.Context, configMapNamespace, configMapName, configMapDataKey, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { if configMapNamespace == "" { return nil, errors.New("invalid GetFromConfigMap operation: missing configMapNamespace value") } @@ -113,12 +113,12 @@ func (t *templateClient) GetFromConfigMap(configMapNamespace, configMapName, con }) } -func (t *templateClient) GetFromURL(templateURL, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { +func (t *templateClient) GetFromURL(ctx context.Context, templateURL, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) { if templateURL == "" { return nil, errors.New("invalid GetFromURL operation: missing templateURL value") } - content, err := t.getURLContent(templateURL) + content, err := t.getURLContent(ctx, templateURL) if err != nil { return nil, errors.Wrapf(err, "invalid GetFromURL operation") } @@ -132,7 +132,7 @@ func (t *templateClient) GetFromURL(templateURL, targetNamespace string, skipTem }) } -func (t 
*templateClient) getURLContent(templateURL string) ([]byte, error) { +func (t *templateClient) getURLContent(ctx context.Context, templateURL string) ([]byte, error) { if templateURL == "-" { b, err := io.ReadAll(os.Stdin) if err != nil { @@ -148,9 +148,9 @@ func (t *templateClient) getURLContent(templateURL string) ([]byte, error) { if rURL.Scheme == "https" { if rURL.Host == "github.com" { - return t.getGitHubFileContent(rURL) + return t.getGitHubFileContent(ctx, rURL) } - return t.getRawURLFileContent(templateURL) + return t.getRawURLFileContent(ctx, templateURL) } if rURL.Scheme == "file" || rURL.Scheme == "" { @@ -176,7 +176,7 @@ func (t *templateClient) getLocalFileContent(rURL *url.URL) ([]byte, error) { return content, nil } -func (t *templateClient) getGitHubFileContent(rURL *url.URL) ([]byte, error) { +func (t *templateClient) getGitHubFileContent(ctx context.Context, rURL *url.URL) ([]byte, error) { // Check if the path is in the expected format, urlSplit := strings.Split(strings.TrimPrefix(rURL.Path, "/"), "/") if len(urlSplit) < 5 { @@ -193,7 +193,7 @@ func (t *templateClient) getGitHubFileContent(rURL *url.URL) ([]byte, error) { linkType := urlSplit[2] // gets the GitHub client - ghClient, err := t.gitHubClientFactory(t.configClient.Variables()) + ghClient, err := t.gitHubClientFactory(ctx, t.configClient.Variables()) if err != nil { return nil, err } @@ -204,7 +204,7 @@ func (t *templateClient) getGitHubFileContent(rURL *url.URL) ([]byte, error) { branch := urlSplit[3] path := strings.Join(urlSplit[4:], "/") - return getGithubFileContentFromCode(ghClient, rURL.Path, owner, repo, path, branch) + return getGithubFileContentFromCode(ctx, ghClient, rURL.Path, owner, repo, path, branch) case "releases": // get a github release asset if urlSplit[3] != "download" { @@ -213,13 +213,13 @@ func (t *templateClient) getGitHubFileContent(rURL *url.URL) ([]byte, error) { tag := urlSplit[4] assetName := urlSplit[5] - return getGithubAssetFromRelease(ghClient, 
rURL.Path, owner, repo, tag, assetName) + return getGithubAssetFromRelease(ctx, ghClient, rURL.Path, owner, repo, tag, assetName) } return nil, fmt.Errorf("unknown github URL: %v", rURL) } -func getGithubFileContentFromCode(ghClient *github.Client, fullPath string, owner string, repo string, path string, branch string) ([]byte, error) { +func getGithubFileContentFromCode(ctx context.Context, ghClient *github.Client, fullPath string, owner string, repo string, path string, branch string) ([]byte, error) { fileContent, _, _, err := ghClient.Repositories.GetContents(ctx, owner, repo, path, &github.RepositoryContentGetOptions{Ref: branch}) if err != nil { return nil, handleGithubErr(err, "failed to get %q", fullPath) @@ -237,7 +237,7 @@ func getGithubFileContentFromCode(ghClient *github.Client, fullPath string, owne return content, nil } -func (t *templateClient) getRawURLFileContent(rURL string) ([]byte, error) { +func (t *templateClient) getRawURLFileContent(ctx context.Context, rURL string) ([]byte, error) { request, err := http.NewRequestWithContext(ctx, http.MethodGet, rURL, http.NoBody) if err != nil { return nil, err @@ -261,7 +261,7 @@ func (t *templateClient) getRawURLFileContent(rURL string) ([]byte, error) { return content, nil } -func getGithubAssetFromRelease(ghClient *github.Client, path string, owner string, repo string, tag string, assetName string) ([]byte, error) { +func getGithubAssetFromRelease(ctx context.Context, ghClient *github.Client, path string, owner string, repo string, tag string, assetName string) ([]byte, error) { release, _, err := ghClient.Repositories.GetReleaseByTag(ctx, owner, repo, tag) if err != nil { return nil, handleGithubErr(err, "failed to get release '%s' from %s/%s repository", tag, owner, repo) @@ -291,13 +291,13 @@ func getGithubAssetFromRelease(ghClient *github.Client, path string, owner strin return io.ReadAll(rc) } -func getGitHubClient(configVariablesClient config.VariablesClient) (*github.Client, error) { +func 
getGitHubClient(ctx context.Context, configVariablesClient config.VariablesClient) (*github.Client, error) { var authenticatingHTTPClient *http.Client if token, err := configVariablesClient.Get(config.GitHubTokenVariable); err == nil { ts := oauth2.StaticTokenSource( &oauth2.Token{AccessToken: token}, ) - authenticatingHTTPClient = oauth2.NewClient(context.TODO(), ts) + authenticatingHTTPClient = oauth2.NewClient(ctx, ts) } return github.NewClient(authenticatingHTTPClient), nil diff --git a/cmd/clusterctl/client/cluster/template_test.go b/cmd/clusterctl/client/cluster/template_test.go index 06da5c44812b..a374e553968b 100644 --- a/cmd/clusterctl/client/cluster/template_test.go +++ b/cmd/clusterctl/client/cluster/template_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "encoding/base64" "fmt" "net/http" @@ -46,7 +47,7 @@ kind: Machine` func Test_templateClient_GetFromConfigMap(t *testing.T) { g := NewWithT(t) - configClient, err := config.New("", config.InjectReader(test.NewFakeReader())) + configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader())) g.Expect(err).ToNot(HaveOccurred()) configMap := &corev1.ConfigMap{ @@ -134,9 +135,11 @@ func Test_templateClient_GetFromConfigMap(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + processor := yaml.NewSimpleProcessor() tc := newTemplateClient(TemplateClientInput{tt.fields.proxy, tt.fields.configClient, processor}) - got, err := tc.GetFromConfigMap(tt.args.configMapNamespace, tt.args.configMapName, tt.args.configMapDataKey, tt.args.targetNamespace, tt.args.skipTemplateProcess) + got, err := tc.GetFromConfigMap(ctx, tt.args.configMapNamespace, tt.args.configMapName, tt.args.configMapDataKey, tt.args.targetNamespace, tt.args.skipTemplateProcess) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -162,7 +165,7 @@ func Test_templateClient_getGitHubFileContent(t *testing.T) { client, 
mux, teardown := test.NewFakeGitHub() defer teardown() - configClient, err := config.New("", config.InjectReader(test.NewFakeReader())) + configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader())) g.Expect(err).ToNot(HaveOccurred()) mux.HandleFunc("/repos/kubernetes-sigs/cluster-api/contents/config/default/cluster-template.yaml", func(w http.ResponseWriter, r *http.Request) { @@ -207,13 +210,15 @@ func Test_templateClient_getGitHubFileContent(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + c := &templateClient{ configClient: configClient, - gitHubClientFactory: func(configVariablesClient config.VariablesClient) (*github.Client, error) { + gitHubClientFactory: func(ctx context.Context, configVariablesClient config.VariablesClient) (*github.Client, error) { return client, nil }, } - got, err := c.getGitHubFileContent(tt.args.rURL) + got, err := c.getGitHubFileContent(ctx, tt.args.rURL) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -255,8 +260,10 @@ func Test_templateClient_getRawUrlFileContent(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + c := newTemplateClient(TemplateClientInput{}) - got, err := c.getRawURLFileContent(tt.args.rURL) + got, err := c.getRawURLFileContent(ctx, tt.args.rURL) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -330,7 +337,7 @@ func Test_templateClient_GetFromURL(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(tmpDir) - configClient, err := config.New("", config.InjectReader(test.NewFakeReader())) + configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader())) g.Expect(err).ToNot(HaveOccurred()) fakeGithubClient, mux, teardown := test.NewFakeGitHub() @@ -479,7 +486,9 @@ func Test_templateClient_GetFromURL(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - gitHubClientFactory := 
func(configVariablesClient config.VariablesClient) (*github.Client, error) { + ctx := context.Background() + + gitHubClientFactory := func(ctx context.Context, configVariablesClient config.VariablesClient) (*github.Client, error) { return fakeGithubClient, nil } processor := yaml.NewSimpleProcessor() @@ -487,7 +496,7 @@ func Test_templateClient_GetFromURL(t *testing.T) { // override the github client factory c.gitHubClientFactory = gitHubClientFactory - got, err := c.GetFromURL(tt.args.templateURL, tt.args.targetNamespace, tt.args.skipTemplateProcess) + got, err := c.GetFromURL(ctx, tt.args.templateURL, tt.args.targetNamespace, tt.args.skipTemplateProcess) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/cluster/topology.go b/cmd/clusterctl/client/cluster/topology.go index a8d6f9d082f2..8998578e9d80 100644 --- a/cmd/clusterctl/client/cluster/topology.go +++ b/cmd/clusterctl/client/cluster/topology.go @@ -55,7 +55,7 @@ const ( // TopologyClient has methods to work with ClusterClass and ManagedTopologies. type TopologyClient interface { - Plan(in *TopologyPlanInput) (*TopologyPlanOutput, error) + Plan(ctx context.Context, in *TopologyPlanInput) (*TopologyPlanOutput, error) } // topologyClient implements TopologyClient. @@ -106,8 +106,7 @@ type TopologyPlanOutput struct { // Plan performs a dry run execution of the topology reconciler using the given inputs. // It returns a summary of the changes observed during the execution. -func (t *topologyClient) Plan(in *TopologyPlanInput) (*TopologyPlanOutput, error) { - ctx := context.TODO() +func (t *topologyClient) Plan(ctx context.Context, in *TopologyPlanInput) (*TopologyPlanOutput, error) { log := logf.Log // Make sure the inputs are valid. @@ -122,7 +121,7 @@ func (t *topologyClient) Plan(in *TopologyPlanInput) (*TopologyPlanOutput, error // only has a Cluster object. 
var c client.Client if err := t.proxy.CheckClusterAvailable(); err == nil { - if initialized, err := t.inventoryClient.CheckCAPIInstalled(); err == nil && initialized { + if initialized, err := t.inventoryClient.CheckCAPIInstalled(ctx); err == nil && initialized { c, err = t.proxy.NewClient() if err != nil { return nil, errors.Wrap(err, "failed to create a client to the cluster") @@ -481,7 +480,7 @@ func (t *topologyClient) reconcileClusterClasses(ctx context.Context, inputObjec // This is required as Clusters are validated based of variable definitions in the ClusterClass `.status.variables`. reconciledClusterClasses := []client.Object{} for _, class := range allClusterClasses { - reconciledClusterClass, err := reconcileClusterClass(apiReader, class, reconciliationObjects) + reconciledClusterClass, err := reconcileClusterClass(ctx, apiReader, class, reconciliationObjects) if err != nil { return nil, errors.Wrapf(err, "ClusterClass %s could not be reconciled for dry run", class.GetName()) } @@ -507,7 +506,7 @@ func (t *topologyClient) reconcileClusterClasses(ctx context.Context, inputObjec return reconciledClusterClasses, nil } -func reconcileClusterClass(apiReader client.Reader, class client.Object, reconciliationObjects []client.Object) (*unstructured.Unstructured, error) { +func reconcileClusterClass(ctx context.Context, apiReader client.Reader, class client.Object, reconciliationObjects []client.Object) (*unstructured.Unstructured, error) { targetClusterClass := client.ObjectKey{Namespace: class.GetNamespace(), Name: class.GetName()} reconciliationObjects = append(reconciliationObjects, class) diff --git a/cmd/clusterctl/client/cluster/topology_test.go b/cmd/clusterctl/client/cluster/topology_test.go index ee97af34defd..6b5b5f6a4a24 100644 --- a/cmd/clusterctl/client/cluster/topology_test.go +++ b/cmd/clusterctl/client/cluster/topology_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package cluster import ( + "context" _ "embed" "fmt" "strings" @@ -264,6 +265,8 @@ func Test_topologyClient_Plan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + existingObjects := []client.Object{} for _, o := range tt.existingObjects { existingObjects = append(existingObjects, o) @@ -275,7 +278,7 @@ func Test_topologyClient_Plan(t *testing.T) { inventoryClient, ) - res, err := tc.Plan(tt.args.in) + res, err := tc.Plan(ctx, tt.args.in) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/cluster/upgrader.go b/cmd/clusterctl/client/cluster/upgrader.go index 1ca5ea4f1e26..6d035a749791 100644 --- a/cmd/clusterctl/client/cluster/upgrader.go +++ b/cmd/clusterctl/client/cluster/upgrader.go @@ -39,13 +39,13 @@ import ( // ProviderUpgrader defines methods for supporting provider upgrade. type ProviderUpgrader interface { // Plan returns a set of suggested Upgrade plans for the management cluster. - Plan() ([]UpgradePlan, error) + Plan(ctx context.Context) ([]UpgradePlan, error) // ApplyPlan executes an upgrade following an UpgradePlan generated by clusterctl. - ApplyPlan(opts UpgradeOptions, clusterAPIVersion string) error + ApplyPlan(ctx context.Context, opts UpgradeOptions, clusterAPIVersion string) error // ApplyCustomPlan plan executes an upgrade using the UpgradeItems provided by the user. - ApplyCustomPlan(opts UpgradeOptions, providersToUpgrade ...UpgradeItem) error + ApplyCustomPlan(ctx context.Context, opts UpgradeOptions, providersToUpgrade ...UpgradeItem) error } // UpgradePlan defines a list of possible upgrade targets for a management cluster. 
@@ -91,11 +91,11 @@ type providerUpgrader struct { var _ ProviderUpgrader = &providerUpgrader{} -func (u *providerUpgrader) Plan() ([]UpgradePlan, error) { +func (u *providerUpgrader) Plan(ctx context.Context) ([]UpgradePlan, error) { log := logf.Log log.Info("Checking new release availability...") - providerList, err := u.providerInventory.List() + providerList, err := u.providerInventory.List(ctx) if err != nil { return nil, err } @@ -116,7 +116,7 @@ func (u *providerUpgrader) Plan() ([]UpgradePlan, error) { } coreProvider := coreProviders[0] - coreUpgradeInfo, err := u.getUpgradeInfo(coreProvider) + coreUpgradeInfo, err := u.getUpgradeInfo(ctx, coreProvider) if err != nil { return nil, err } @@ -135,7 +135,7 @@ func (u *providerUpgrader) Plan() ([]UpgradePlan, error) { // e.g. v1alpha4, cluster-api --> v0.5.1, kubeadm bootstrap --> v0.5.1, aws --> v0.Y.4 (not supported in current clusterctl release, but upgrade plan should report these options). ret := make([]UpgradePlan, 0) for _, contract := range contractsForUpgrade { - upgradePlan, err := u.getUpgradePlan(providerList.Items, contract) + upgradePlan, err := u.getUpgradePlan(ctx, providerList.Items, contract) if err != nil { return nil, err } @@ -153,7 +153,7 @@ func (u *providerUpgrader) Plan() ([]UpgradePlan, error) { return ret, nil } -func (u *providerUpgrader) ApplyPlan(opts UpgradeOptions, contract string) error { +func (u *providerUpgrader) ApplyPlan(ctx context.Context, opts UpgradeOptions, contract string) error { if contract != clusterv1.GroupVersion.Version { return errors.Errorf("current version of clusterctl could only upgrade to %s contract, requested %s", clusterv1.GroupVersion.Version, contract) } @@ -162,42 +162,42 @@ func (u *providerUpgrader) ApplyPlan(opts UpgradeOptions, contract string) error log.Info("Performing upgrade...") // Gets the upgrade plan for the selected API Version of Cluster API (contract). 
- providerList, err := u.providerInventory.List() + providerList, err := u.providerInventory.List(ctx) if err != nil { return err } - upgradePlan, err := u.getUpgradePlan(providerList.Items, contract) + upgradePlan, err := u.getUpgradePlan(ctx, providerList.Items, contract) if err != nil { return err } // Do the upgrade - return u.doUpgrade(upgradePlan, opts) + return u.doUpgrade(ctx, upgradePlan, opts) } -func (u *providerUpgrader) ApplyCustomPlan(opts UpgradeOptions, upgradeItems ...UpgradeItem) error { +func (u *providerUpgrader) ApplyCustomPlan(ctx context.Context, opts UpgradeOptions, upgradeItems ...UpgradeItem) error { log := logf.Log log.Info("Performing upgrade...") // Create a custom upgrade plan from the upgrade items, taking care of ensuring all the providers in a management // cluster are consistent with the API Version of Cluster API (contract). - upgradePlan, err := u.createCustomPlan(upgradeItems) + upgradePlan, err := u.createCustomPlan(ctx, upgradeItems) if err != nil { return err } // Do the upgrade - return u.doUpgrade(upgradePlan, opts) + return u.doUpgrade(ctx, upgradePlan, opts) } // getUpgradePlan returns the upgrade plan for a specific set of providers/contract // NB. this function is used both for upgrade plan and upgrade apply. -func (u *providerUpgrader) getUpgradePlan(providers []clusterctlv1.Provider, contract string) (*UpgradePlan, error) { +func (u *providerUpgrader) getUpgradePlan(ctx context.Context, providers []clusterctlv1.Provider, contract string) (*UpgradePlan, error) { upgradeItems := []UpgradeItem{} for _, provider := range providers { // Gets the upgrade info for the provider. 
- providerUpgradeInfo, err := u.getUpgradeInfo(provider) + providerUpgradeInfo, err := u.getUpgradeInfo(ctx, provider) if err != nil { return nil, err } @@ -220,14 +220,14 @@ func (u *providerUpgrader) getUpgradePlan(providers []clusterctlv1.Provider, con // createCustomPlan creates a custom upgrade plan from a set of upgrade items, taking care of ensuring all the providers // in a management cluster are consistent with the API Version of Cluster API (contract). -func (u *providerUpgrader) createCustomPlan(upgradeItems []UpgradeItem) (*UpgradePlan, error) { +func (u *providerUpgrader) createCustomPlan(ctx context.Context, upgradeItems []UpgradeItem) (*UpgradePlan, error) { // Gets the API Version of Cluster API (contract). // The this is required to ensure all the providers in a management cluster are consistent with the contract supported by the core provider. // e.g if the core provider is v1alpha3, all the provider should be v1alpha3 as well. // The target contract is derived from the current version of the core provider, or, if the core provider is included in the upgrade list, // from its target version. - providerList, err := u.providerInventory.List() + providerList, err := u.providerInventory.List(ctx) if err != nil { return nil, err } @@ -245,7 +245,7 @@ func (u *providerUpgrader) createCustomPlan(upgradeItems []UpgradeItem) (*Upgrad } } - targetContract, err := u.getProviderContractByVersion(coreProvider, targetCoreProviderVersion) + targetContract, err := u.getProviderContractByVersion(ctx, coreProvider, targetCoreProviderVersion) if err != nil { return nil, err } @@ -274,7 +274,7 @@ func (u *providerUpgrader) createCustomPlan(upgradeItems []UpgradeItem) (*Upgrad } // Retrieves the contract that is supported by the target version of the provider. 
- contract, err := u.getProviderContractByVersion(*provider, upgradeItem.NextVersion) + contract, err := u.getProviderContractByVersion(ctx, *provider, upgradeItem.NextVersion) if err != nil { return nil, err } @@ -295,7 +295,7 @@ func (u *providerUpgrader) createCustomPlan(upgradeItems []UpgradeItem) (*Upgrad } // Retrieves the contract that is supported by the current version of the provider. - contract, err := u.getProviderContractByVersion(provider, provider.Version) + contract, err := u.getProviderContractByVersion(ctx, provider, provider.Version) if err != nil { return nil, err } @@ -308,14 +308,14 @@ func (u *providerUpgrader) createCustomPlan(upgradeItems []UpgradeItem) (*Upgrad } // getProviderContractByVersion returns the contract that a provider will support if updated to the given target version. -func (u *providerUpgrader) getProviderContractByVersion(provider clusterctlv1.Provider, targetVersion string) (string, error) { +func (u *providerUpgrader) getProviderContractByVersion(ctx context.Context, provider clusterctlv1.Provider, targetVersion string) (string, error) { targetSemVersion, err := version.ParseSemantic(targetVersion) if err != nil { return "", errors.Wrapf(err, "failed to parse target version for the %s provider", provider.InstanceName()) } // Gets the metadata for the core Provider - upgradeInfo, err := u.getUpgradeInfo(provider) + upgradeInfo, err := u.getUpgradeInfo(ctx, provider) if err != nil { return "", err } @@ -328,13 +328,13 @@ func (u *providerUpgrader) getProviderContractByVersion(provider clusterctlv1.Pr } // getUpgradeComponents returns the provider components for the selected target version. 
-func (u *providerUpgrader) getUpgradeComponents(provider UpgradeItem) (repository.Components, error) { +func (u *providerUpgrader) getUpgradeComponents(ctx context.Context, provider UpgradeItem) (repository.Components, error) { configRepository, err := u.configClient.Providers().Get(provider.ProviderName, provider.GetProviderType()) if err != nil { return nil, err } - providerRepository, err := u.repositoryClientFactory(configRepository, u.configClient) + providerRepository, err := u.repositoryClientFactory(ctx, configRepository, u.configClient) if err != nil { return nil, err } @@ -343,17 +343,17 @@ func (u *providerUpgrader) getUpgradeComponents(provider UpgradeItem) (repositor Version: provider.NextVersion, TargetNamespace: provider.Namespace, } - components, err := providerRepository.Components().Get(options) + components, err := providerRepository.Components().Get(ctx, options) if err != nil { return nil, err } return components, nil } -func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan, opts UpgradeOptions) error { +func (u *providerUpgrader) doUpgrade(ctx context.Context, upgradePlan *UpgradePlan, opts UpgradeOptions) error { // Check for multiple instances of the same provider if current contract is v1alpha3. if upgradePlan.Contract == clusterv1.GroupVersion.Version { - if err := u.providerInventory.CheckSingleProviderInstance(); err != nil { + if err := u.providerInventory.CheckSingleProviderInstance(ctx); err != nil { return err } } @@ -374,7 +374,7 @@ func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan, opts UpgradeOptio } // Gets the provider components for the target version. - components, err := u.getUpgradeComponents(upgradeItem) + components, err := u.getUpgradeComponents(ctx, upgradeItem) if err != nil { return err } @@ -404,7 +404,7 @@ func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan, opts UpgradeOptio } // Scale down provider. 
- if err := u.scaleDownProvider(upgradeItem.Provider); err != nil { + if err := u.scaleDownProvider(ctx, upgradeItem.Provider); err != nil { return err } } @@ -419,7 +419,7 @@ func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan, opts UpgradeOptio } // Gets the provider components for the target version. - components, err := u.getUpgradeComponents(upgradeItem) + components, err := u.getUpgradeComponents(ctx, upgradeItem) if err != nil { return err } @@ -427,7 +427,7 @@ func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan, opts UpgradeOptio installQueue = append(installQueue, components) // Delete the provider, preserving CRD, namespace and the inventory. - if err := u.providerComponents.Delete(DeleteOptions{ + if err := u.providerComponents.Delete(ctx, DeleteOptions{ Provider: upgradeItem.Provider, IncludeNamespace: false, IncludeCRDs: false, @@ -437,22 +437,22 @@ func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan, opts UpgradeOptio } // Install the new version of the provider components. - if err := installComponentsAndUpdateInventory(components, u.providerComponents, u.providerInventory); err != nil { + if err := installComponentsAndUpdateInventory(ctx, components, u.providerComponents, u.providerInventory); err != nil { return err } } // Delete webhook namespace since it's not needed from v1alpha4. 
if upgradePlan.Contract == clusterv1.GroupVersion.Version { - if err := u.providerComponents.DeleteWebhookNamespace(); err != nil { + if err := u.providerComponents.DeleteWebhookNamespace(ctx); err != nil { return err } } - return waitForProvidersReady(InstallOptions(opts), installQueue, u.proxy) + return waitForProvidersReady(ctx, InstallOptions(opts), installQueue, u.proxy) } -func (u *providerUpgrader) scaleDownProvider(provider clusterctlv1.Provider) error { +func (u *providerUpgrader) scaleDownProvider(ctx context.Context, provider clusterctlv1.Provider) error { log := logf.Log log.Info("Scaling down", "Provider", provider.Name, "Version", provider.Version, "Namespace", provider.Namespace) diff --git a/cmd/clusterctl/client/cluster/upgrader_info.go b/cmd/clusterctl/client/cluster/upgrader_info.go index 8033b79b7e0a..40dc6b3f9a2c 100644 --- a/cmd/clusterctl/client/cluster/upgrader_info.go +++ b/cmd/clusterctl/client/cluster/upgrader_info.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "fmt" "sort" @@ -46,19 +47,19 @@ type upgradeInfo struct { // getUpgradeInfo returns all the info required for taking upgrade decisions for a provider. // NOTE: This could contain also versions for the previous or next Cluster API contract (not supported in current clusterctl release, but upgrade plan should report this options). -func (u *providerUpgrader) getUpgradeInfo(provider clusterctlv1.Provider) (*upgradeInfo, error) { +func (u *providerUpgrader) getUpgradeInfo(ctx context.Context, provider clusterctlv1.Provider) (*upgradeInfo, error) { // Gets the list of versions available in the provider repository. 
configRepository, err := u.configClient.Providers().Get(provider.ProviderName, provider.GetProviderType()) if err != nil { return nil, err } - providerRepository, err := u.repositoryClientFactory(configRepository, u.configClient) + providerRepository, err := u.repositoryClientFactory(ctx, configRepository, u.configClient) if err != nil { return nil, err } - repositoryVersions, err := providerRepository.GetVersions() + repositoryVersions, err := providerRepository.GetVersions(ctx) if err != nil { return nil, err } @@ -80,7 +81,7 @@ func (u *providerUpgrader) getUpgradeInfo(provider clusterctlv1.Provider) (*upgr } } - latestMetadata, err := providerRepository.Metadata(versionTag(latestVersion)).Get() + latestMetadata, err := providerRepository.Metadata(versionTag(latestVersion)).Get(ctx) if err != nil { return nil, err } diff --git a/cmd/clusterctl/client/cluster/upgrader_info_test.go b/cmd/clusterctl/client/cluster/upgrader_info_test.go index 5a2267074c09..52ce9de876e0 100644 --- a/cmd/clusterctl/client/cluster/upgrader_info_test.go +++ b/cmd/clusterctl/client/cluster/upgrader_info_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "testing" . 
"github.com/onsi/gomega" @@ -228,15 +229,15 @@ func Test_providerUpgrader_getUpgradeInfo(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - configClient, _ := config.New("", config.InjectReader(tt.fields.reader)) + configClient, _ := config.New(context.Background(), "", config.InjectReader(tt.fields.reader)) u := &providerUpgrader{ configClient: configClient, - repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { - return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repo)) + repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { + return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repo)) }, } - got, err := u.getUpgradeInfo(tt.args.provider) + got, err := u.getUpgradeInfo(context.Background(), tt.args.provider) if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { diff --git a/cmd/clusterctl/client/cluster/upgrader_test.go b/cmd/clusterctl/client/cluster/upgrader_test.go index 8c93a536e51a..55fbad586fb0 100644 --- a/cmd/clusterctl/client/cluster/upgrader_test.go +++ b/cmd/clusterctl/client/cluster/upgrader_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package cluster import ( + "context" "testing" "github.com/google/go-cmp/cmp" @@ -384,16 +385,18 @@ func Test_providerUpgrader_Plan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - configClient, _ := config.New("", config.InjectReader(tt.fields.reader)) + ctx := context.Background() + + configClient, _ := config.New(ctx, "", config.InjectReader(tt.fields.reader)) u := &providerUpgrader{ configClient: configClient, - repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { - return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()])) + repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { + return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()])) }, providerInventory: newInventoryClient(tt.fields.proxy, nil), } - got, err := u.Plan() + got, err := u.Plan(ctx) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -853,16 +856,18 @@ func Test_providerUpgrader_createCustomPlan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - configClient, _ := config.New("", config.InjectReader(tt.fields.reader)) + ctx := context.Background() + + configClient, _ := config.New(ctx, "", config.InjectReader(tt.fields.reader)) u := &providerUpgrader{ configClient: configClient, - repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { - return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repository[provider.Name()])) + repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { + return 
repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repository[provider.Name()])) }, providerInventory: newInventoryClient(tt.fields.proxy, nil), } - got, err := u.createCustomPlan(tt.args.providersToUpgrade) + got, err := u.createCustomPlan(ctx, tt.args.providersToUpgrade) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -970,16 +975,18 @@ func Test_providerUpgrader_ApplyPlan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - configClient, _ := config.New("", config.InjectReader(tt.fields.reader)) + ctx := context.Background() + + configClient, _ := config.New(ctx, "", config.InjectReader(tt.fields.reader)) u := &providerUpgrader{ configClient: configClient, - repositoryClientFactory: func(provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { - return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()])) + repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { + return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()])) }, providerInventory: newInventoryClient(tt.fields.proxy, nil), } - err := u.ApplyPlan(tt.opts, tt.contract) + err := u.ApplyPlan(ctx, tt.opts, tt.contract) if tt.wantErr { g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).Should(ContainSubstring(tt.errorMsg)) @@ -1109,16 +1116,18 @@ func Test_providerUpgrader_ApplyCustomPlan(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - configClient, _ := config.New("", config.InjectReader(tt.fields.reader)) + ctx := context.Background() + + configClient, _ := config.New(ctx, "", config.InjectReader(tt.fields.reader)) u := &providerUpgrader{ configClient: configClient, - repositoryClientFactory: func(provider config.Provider, 
configClient config.Client, options ...repository.Option) (repository.Client, error) { - return repository.New(provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()])) + repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) { + return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()])) }, providerInventory: newInventoryClient(tt.fields.proxy, nil), } - err := u.ApplyCustomPlan(tt.opts, tt.providersToUpgrade...) + err := u.ApplyCustomPlan(ctx, tt.opts, tt.providersToUpgrade...) if tt.wantErr { g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).Should(ContainSubstring(tt.errorMsg)) diff --git a/cmd/clusterctl/client/cluster/workload_cluster.go b/cmd/clusterctl/client/cluster/workload_cluster.go index a5e085dae2c9..0e92d9d5038e 100644 --- a/cmd/clusterctl/client/cluster/workload_cluster.go +++ b/cmd/clusterctl/client/cluster/workload_cluster.go @@ -17,6 +17,8 @@ limitations under the License. package cluster import ( + "context" + "github.com/pkg/errors" "sigs.k8s.io/controller-runtime/pkg/client" @@ -26,7 +28,7 @@ import ( // WorkloadCluster has methods for fetching kubeconfig of workload cluster from management cluster. type WorkloadCluster interface { // GetKubeconfig returns the kubeconfig of the workload cluster. - GetKubeconfig(workloadClusterName string, namespace string) (string, error) + GetKubeconfig(ctx context.Context, workloadClusterName string, namespace string) (string, error) } // workloadCluster implements WorkloadCluster. 
@@ -41,7 +43,7 @@ func newWorkloadCluster(proxy Proxy) *workloadCluster { } } -func (p *workloadCluster) GetKubeconfig(workloadClusterName string, namespace string) (string, error) { +func (p *workloadCluster) GetKubeconfig(ctx context.Context, workloadClusterName string, namespace string) (string, error) { cs, err := p.proxy.NewClient() if err != nil { return "", err diff --git a/cmd/clusterctl/client/cluster/workload_cluster_test.go b/cmd/clusterctl/client/cluster/workload_cluster_test.go index 50333d3efb08..4759a7b67796 100644 --- a/cmd/clusterctl/client/cluster/workload_cluster_test.go +++ b/cmd/clusterctl/client/cluster/workload_cluster_test.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "context" "testing" . "github.com/onsi/gomega" @@ -84,8 +85,10 @@ users: t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + wc := newWorkloadCluster(tt.proxy) - data, err := wc.GetKubeconfig("test1", "test") + data, err := wc.GetKubeconfig(ctx, "test1", "test") if tt.expectErr { g.Expect(err).To(HaveOccurred()) diff --git a/cmd/clusterctl/client/clusterclass.go b/cmd/clusterctl/client/clusterclass.go index d0e431599a96..2b0b59b39305 100644 --- a/cmd/clusterctl/client/clusterclass.go +++ b/cmd/clusterctl/client/clusterclass.go @@ -34,7 +34,7 @@ import ( // addClusterClassIfMissing returns a Template that includes the base template and adds any cluster class definitions that // are references in the template. If the cluster class referenced already exists in the cluster it is not added to the // template. 
-func addClusterClassIfMissing(template Template, clusterClassClient repository.ClusterClassClient, clusterClient cluster.Client, targetNamespace string, listVariablesOnly bool) (Template, error) { +func addClusterClassIfMissing(ctx context.Context, template Template, clusterClassClient repository.ClusterClassClient, clusterClient cluster.Client, targetNamespace string, listVariablesOnly bool) (Template, error) { classes, err := clusterClassNamesFromTemplate(template) if err != nil { return nil, err @@ -44,7 +44,7 @@ func addClusterClassIfMissing(template Template, clusterClassClient repository.C return template, nil } - clusterClassesTemplate, err := fetchMissingClusterClassTemplates(clusterClassClient, clusterClient, classes, targetNamespace, listVariablesOnly) + clusterClassesTemplate, err := fetchMissingClusterClassTemplates(ctx, clusterClassClient, clusterClient, classes, targetNamespace, listVariablesOnly) if err != nil { return nil, err } @@ -87,7 +87,7 @@ func clusterClassNamesFromTemplate(template Template) ([]string, error) { // fetchMissingClusterClassTemplates returns a list of templates for ClusterClasses that do not yet exist // in the cluster. If the cluster is not initialized, all the ClusterClasses are added. -func fetchMissingClusterClassTemplates(clusterClassClient repository.ClusterClassClient, clusterClient cluster.Client, classes []string, targetNamespace string, listVariablesOnly bool) (Template, error) { +func fetchMissingClusterClassTemplates(ctx context.Context, clusterClassClient repository.ClusterClassClient, clusterClient cluster.Client, classes []string, targetNamespace string, listVariablesOnly bool) (Template, error) { // first check if the cluster is initialized. // If it is initialized: // For every ClusterClass check if it already exists in the cluster. 
@@ -100,7 +100,7 @@ func fetchMissingClusterClassTemplates(clusterClassClient repository.ClusterClas clusterInitialized := false var err error if err := clusterClient.Proxy().CheckClusterAvailable(); err == nil { - clusterInitialized, err = clusterClient.ProviderInventory().CheckCAPIInstalled() + clusterInitialized, err = clusterClient.ProviderInventory().CheckCAPIInstalled(ctx) if err != nil { return nil, errors.Wrap(err, "failed to check if the cluster is initialized") } @@ -118,7 +118,7 @@ func fetchMissingClusterClassTemplates(clusterClassClient repository.ClusterClas templates := []repository.Template{} for _, class := range classes { if clusterInitialized { - exists, err := clusterClassExists(c, class, targetNamespace) + exists, err := clusterClassExists(ctx, c, class, targetNamespace) if err != nil { return nil, err } @@ -128,7 +128,7 @@ func fetchMissingClusterClassTemplates(clusterClassClient repository.ClusterClas } // The cluster is either not initialized or the ClusterClass does not yet exist in the cluster. // Fetch the cluster class to install. - clusterClassTemplate, err := clusterClassClient.Get(class, targetNamespace, listVariablesOnly) + clusterClassTemplate, err := clusterClassClient.Get(ctx, class, targetNamespace, listVariablesOnly) if err != nil { return nil, errors.Wrapf(err, "failed to get the cluster class template for %q", class) } @@ -139,7 +139,7 @@ func fetchMissingClusterClassTemplates(clusterClassClient repository.ClusterClas // that we do not add a ClusterClass (and associated objects) who definition is unknown. 
if clusterInitialized { for _, obj := range clusterClassTemplate.Objs() { - if exists, err := objExists(c, obj); err != nil { + if exists, err := objExists(ctx, c, obj); err != nil { return nil, err } else if exists { return nil, fmt.Errorf("%s(%s) already exists in the cluster", obj.GetName(), obj.GetObjectKind().GroupVersionKind()) @@ -157,9 +157,9 @@ func fetchMissingClusterClassTemplates(clusterClassClient repository.ClusterClas return merged, nil } -func clusterClassExists(c client.Client, class, targetNamespace string) (bool, error) { +func clusterClassExists(ctx context.Context, c client.Client, class, targetNamespace string) (bool, error) { clusterClass := &clusterv1.ClusterClass{} - if err := c.Get(context.TODO(), client.ObjectKey{Name: class, Namespace: targetNamespace}, clusterClass); err != nil { + if err := c.Get(ctx, client.ObjectKey{Name: class, Namespace: targetNamespace}, clusterClass); err != nil { if apierrors.IsNotFound(err) { return false, nil } @@ -168,9 +168,9 @@ func clusterClassExists(c client.Client, class, targetNamespace string) (bool, e return true, nil } -func objExists(c client.Client, obj unstructured.Unstructured) (bool, error) { +func objExists(ctx context.Context, c client.Client, obj unstructured.Unstructured) (bool, error) { o := obj.DeepCopy() - if err := c.Get(context.TODO(), client.ObjectKeyFromObject(o), o); err != nil { + if err := c.Get(ctx, client.ObjectKeyFromObject(o), o); err != nil { if apierrors.IsNotFound(err) { return false, nil } diff --git a/cmd/clusterctl/client/clusterclass_test.go b/cmd/clusterctl/client/clusterclass_test.go index d762c3267848..581de70b6676 100644 --- a/cmd/clusterctl/client/clusterclass_test.go +++ b/cmd/clusterctl/client/clusterclass_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package client import ( + "context" "fmt" "testing" @@ -67,11 +68,13 @@ func TestClusterClassExists(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - config := newFakeConfig() + ctx := context.Background() + + config := newFakeConfig(ctx) client := newFakeCluster(cluster.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"}, config).WithObjs(tt.objs...) c, _ := client.Proxy().NewClient() - actual, err := clusterClassExists(c, tt.clusterClass, metav1.NamespaceDefault) + actual, err := clusterClassExists(ctx, c, tt.clusterClass, metav1.NamespaceDefault) g.Expect(err).ToNot(HaveOccurred()) g.Expect(actual).To(Equal(tt.want)) }) @@ -152,8 +155,10 @@ func TestAddClusterClassIfMissing(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - config1 := newFakeConfig().WithProvider(infraProviderConfig) - repository1 := newFakeRepository(infraProviderConfig, config1). + ctx := context.Background() + + config1 := newFakeConfig(ctx).WithProvider(infraProviderConfig) + repository1 := newFakeRepository(ctx, infraProviderConfig, config1). WithPaths("root", ""). WithDefaultVersion("v1.0.0"). WithFile("v1.0.0", "clusterclass-dev.yaml", tt.clusterClassTemplateContent) @@ -205,7 +210,7 @@ func TestAddClusterClassIfMissing(t *testing.T) { } g := NewWithT(t) - template, err := addClusterClassIfMissing(baseTemplate, clusterClassClient, cluster, tt.targetNamespace, tt.listVariablesOnly) + template, err := addClusterClassIfMissing(ctx, baseTemplate, clusterClassClient, cluster, tt.targetNamespace, tt.listVariablesOnly) if tt.wantError { g.Expect(err).To(HaveOccurred()) } else { diff --git a/cmd/clusterctl/client/common.go b/cmd/clusterctl/client/common.go index be299a7307c7..dfd1d42f5bf0 100644 --- a/cmd/clusterctl/client/common.go +++ b/cmd/clusterctl/client/common.go @@ -17,6 +17,7 @@ limitations under the License. 
package client import ( + "context" "strings" "github.com/pkg/errors" @@ -28,7 +29,7 @@ import ( // getComponentsByName is a utility method that returns components // for a given provider with options including targetNamespace. -func (c *clusterctlClient) getComponentsByName(provider string, providerType clusterctlv1.ProviderType, options repository.ComponentsOptions) (repository.Components, error) { +func (c *clusterctlClient) getComponentsByName(ctx context.Context, provider string, providerType clusterctlv1.ProviderType, options repository.ComponentsOptions) (repository.Components, error) { // Parse the abbreviated syntax for name[:version] name, version, err := parseProviderName(provider) if err != nil { @@ -47,12 +48,12 @@ func (c *clusterctlClient) getComponentsByName(provider string, providerType clu // namespace etc. // Currently we are not supporting custom yaml processors for the provider // components. So we revert to using the default SimpleYamlProcessor. - repositoryClientFactory, err := c.repositoryClientFactory(RepositoryClientFactoryInput{Provider: providerConfig}) + repositoryClientFactory, err := c.repositoryClientFactory(ctx, RepositoryClientFactoryInput{Provider: providerConfig}) if err != nil { return nil, err } - components, err := repositoryClientFactory.Components().Get(options) + components, err := repositoryClientFactory.Components().Get(ctx, options) if err != nil { return nil, err } diff --git a/cmd/clusterctl/client/config.go b/cmd/clusterctl/client/config.go index 8f2d631d2a1f..8e1b9eb6812f 100644 --- a/cmd/clusterctl/client/config.go +++ b/cmd/clusterctl/client/config.go @@ -17,6 +17,7 @@ limitations under the License. 
package client import ( + "context" "io" "strconv" @@ -45,8 +46,8 @@ func (c *clusterctlClient) GetProvidersConfig() ([]Provider, error) { return rr, nil } -func (c *clusterctlClient) GetProviderComponents(provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { - components, err := c.getComponentsByName(provider, providerType, repository.ComponentsOptions(options)) +func (c *clusterctlClient) GetProviderComponents(ctx context.Context, provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { + components, err := c.getComponentsByName(ctx, provider, providerType, repository.ComponentsOptions(options)) if err != nil { return nil, err } @@ -71,7 +72,7 @@ type ProcessYAMLOptions struct { SkipTemplateProcess bool } -func (c *clusterctlClient) ProcessYAML(options ProcessYAMLOptions) (YamlPrinter, error) { +func (c *clusterctlClient) ProcessYAML(ctx context.Context, options ProcessYAMLOptions) (YamlPrinter, error) { if options.ReaderSource != nil { // NOTE: Beware of potentially reading in large files all at once // since this is inefficient and increases memory utilziation. @@ -103,7 +104,7 @@ func (c *clusterctlClient) ProcessYAML(options ProcessYAMLOptions) (YamlPrinter, } if options.URLSource != nil { - return c.getTemplateFromURL(clstr, *options.URLSource, "", options.SkipTemplateProcess) + return c.getTemplateFromURL(ctx, clstr, *options.URLSource, "", options.SkipTemplateProcess) } return nil, errors.New("unable to read custom template. 
Please specify a template source") @@ -203,7 +204,7 @@ type ConfigMapSourceOptions struct { DataKey string } -func (c *clusterctlClient) GetClusterTemplate(options GetClusterTemplateOptions) (Template, error) { +func (c *clusterctlClient) GetClusterTemplate(ctx context.Context, options GetClusterTemplateOptions) (Template, error) { // Checks that no more than on source is set numsSource := options.numSources() if numsSource > 1 { @@ -249,24 +250,24 @@ func (c *clusterctlClient) GetClusterTemplate(options GetClusterTemplateOptions) // users to dry-run the command and take a look at what the cluster will look like; in both scenarios, it is required // to pass provider:version given that auto-discovery can't work without a provider inventory installed in a cluster. if options.Kubeconfig.Path != "" { - if err := clusterClient.ProviderInventory().CheckCAPIContract(cluster.AllowCAPINotInstalled{}); err != nil { + if err := clusterClient.ProviderInventory().CheckCAPIContract(ctx, cluster.AllowCAPINotInstalled{}); err != nil { return nil, err } } - return c.getTemplateFromRepository(clusterClient, options) + return c.getTemplateFromRepository(ctx, clusterClient, options) } if options.ConfigMapSource != nil { - return c.getTemplateFromConfigMap(clusterClient, *options.ConfigMapSource, options.TargetNamespace, options.ListVariablesOnly) + return c.getTemplateFromConfigMap(ctx, clusterClient, *options.ConfigMapSource, options.TargetNamespace, options.ListVariablesOnly) } if options.URLSource != nil { - return c.getTemplateFromURL(clusterClient, *options.URLSource, options.TargetNamespace, options.ListVariablesOnly) + return c.getTemplateFromURL(ctx, clusterClient, *options.URLSource, options.TargetNamespace, options.ListVariablesOnly) } return nil, errors.New("unable to read custom template. Please specify a template source") } // getTemplateFromRepository returns a workload cluster template from a provider repository. 
-func (c *clusterctlClient) getTemplateFromRepository(cluster cluster.Client, options GetClusterTemplateOptions) (Template, error) { +func (c *clusterctlClient) getTemplateFromRepository(ctx context.Context, cluster cluster.Client, options GetClusterTemplateOptions) (Template, error) { source := *options.ProviderRepositorySource targetNamespace := options.TargetNamespace listVariablesOnly := options.ListVariablesOnly @@ -280,12 +281,12 @@ func (c *clusterctlClient) getTemplateFromRepository(cluster cluster.Client, opt return nil, errors.Wrap(err, "management cluster not available. Cannot auto-discover default infrastructure provider. Please specify an infrastructure provider") } // ensure the custom resource definitions required by clusterctl are in place - if err := cluster.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { + if err := cluster.ProviderInventory().EnsureCustomResourceDefinitions(ctx); err != nil { return nil, errors.Wrapf(err, "provider custom resource definitions (CRDs) are not installed") } ensureCustomResourceDefinitions = true - defaultProviderName, err := cluster.ProviderInventory().GetDefaultProviderName(clusterctlv1.InfrastructureProviderType) + defaultProviderName, err := cluster.ProviderInventory().GetDefaultProviderName(ctx, clusterctlv1.InfrastructureProviderType) if err != nil { return nil, err } @@ -309,12 +310,12 @@ func (c *clusterctlClient) getTemplateFromRepository(cluster cluster.Client, opt } // ensure the custom resource definitions required by clusterctl are in place (if not already done) if !ensureCustomResourceDefinitions { - if err := cluster.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { + if err := cluster.ProviderInventory().EnsureCustomResourceDefinitions(ctx); err != nil { return nil, errors.Wrapf(err, "failed to identify the default version for the provider %q. 
Please specify a version", name) } } - inventoryVersion, err := cluster.ProviderInventory().GetProviderVersion(name, clusterctlv1.InfrastructureProviderType) + inventoryVersion, err := cluster.ProviderInventory().GetProviderVersion(ctx, name, clusterctlv1.InfrastructureProviderType) if err != nil { return nil, err } @@ -331,19 +332,19 @@ func (c *clusterctlClient) getTemplateFromRepository(cluster cluster.Client, opt return nil, err } - repo, err := c.repositoryClientFactory(RepositoryClientFactoryInput{Provider: providerConfig, Processor: processor}) + repo, err := c.repositoryClientFactory(ctx, RepositoryClientFactoryInput{Provider: providerConfig, Processor: processor}) if err != nil { return nil, err } - template, err := repo.Templates(version).Get(source.Flavor, targetNamespace, listVariablesOnly) + template, err := repo.Templates(version).Get(ctx, source.Flavor, targetNamespace, listVariablesOnly) if err != nil { return nil, err } clusterClassClient := repo.ClusterClasses(version) - template, err = addClusterClassIfMissing(template, clusterClassClient, cluster, targetNamespace, listVariablesOnly) + template, err = addClusterClassIfMissing(ctx, template, clusterClassClient, cluster, targetNamespace, listVariablesOnly) if err != nil { return nil, err } @@ -352,7 +353,7 @@ func (c *clusterctlClient) getTemplateFromRepository(cluster cluster.Client, opt } // getTemplateFromConfigMap returns a workload cluster template from a ConfigMap. -func (c *clusterctlClient) getTemplateFromConfigMap(cluster cluster.Client, source ConfigMapSourceOptions, targetNamespace string, listVariablesOnly bool) (Template, error) { +func (c *clusterctlClient) getTemplateFromConfigMap(ctx context.Context, cluster cluster.Client, source ConfigMapSourceOptions, targetNamespace string, listVariablesOnly bool) (Template, error) { // If the option specifying the configMapNamespace is empty, default it to the current namespace. 
if source.Namespace == "" { currentNamespace, err := cluster.Proxy().CurrentNamespace() @@ -367,12 +368,12 @@ func (c *clusterctlClient) getTemplateFromConfigMap(cluster cluster.Client, sour source.DataKey = DefaultCustomTemplateConfigMapKey } - return cluster.Template().GetFromConfigMap(source.Namespace, source.Name, source.DataKey, targetNamespace, listVariablesOnly) + return cluster.Template().GetFromConfigMap(ctx, source.Namespace, source.Name, source.DataKey, targetNamespace, listVariablesOnly) } // getTemplateFromURL returns a workload cluster template from an URL. -func (c *clusterctlClient) getTemplateFromURL(cluster cluster.Client, source URLSourceOptions, targetNamespace string, listVariablesOnly bool) (Template, error) { - return cluster.Template().GetFromURL(source.URL, targetNamespace, listVariablesOnly) +func (c *clusterctlClient) getTemplateFromURL(ctx context.Context, cluster cluster.Client, source URLSourceOptions, targetNamespace string, listVariablesOnly bool) (Template, error) { + return cluster.Template().GetFromURL(ctx, source.URL, targetNamespace, listVariablesOnly) } // templateOptionsToVariables injects some of the templateOptions to the configClient so they can be consumed as a variables from the template. diff --git a/cmd/clusterctl/client/config/client.go b/cmd/clusterctl/client/config/client.go index f689b402cdd1..50f10034cfee 100644 --- a/cmd/clusterctl/client/config/client.go +++ b/cmd/clusterctl/client/config/client.go @@ -17,6 +17,8 @@ limitations under the License. package config import ( + "context" + "github.com/pkg/errors" ) @@ -75,11 +77,11 @@ func InjectReader(reader Reader) Option { } // New returns a Client for interacting with the clusterctl configuration. -func New(path string, options ...Option) (Client, error) { - return newConfigClient(path, options...) +func New(ctx context.Context, path string, options ...Option) (Client, error) { + return newConfigClient(ctx, path, options...) 
} -func newConfigClient(path string, options ...Option) (*configClient, error) { +func newConfigClient(ctx context.Context, path string, options ...Option) (*configClient, error) { client := &configClient{} for _, o := range options { o(client) @@ -91,7 +93,7 @@ func newConfigClient(path string, options ...Option) (*configClient, error) { if client.reader, err = newViperReader(); err != nil { return nil, errors.Wrap(err, "failed to create the configuration reader") } - if err = client.reader.Init(path); err != nil { + if err = client.reader.Init(ctx, path); err != nil { return nil, errors.Wrap(err, "failed to initialize the configuration reader") } } @@ -102,7 +104,7 @@ func newConfigClient(path string, options ...Option) (*configClient, error) { // Reader define the behaviours of a configuration reader. type Reader interface { // Init allows to initialize the configuration reader. - Init(path string) error + Init(ctx context.Context, path string) error // Get returns a configuration value of type string. // In case the configuration value does not exists, it returns an error. diff --git a/cmd/clusterctl/client/config/reader_memory.go b/cmd/clusterctl/client/config/reader_memory.go index 05ddeb2980b4..1ad1c346cfeb 100644 --- a/cmd/clusterctl/client/config/reader_memory.go +++ b/cmd/clusterctl/client/config/reader_memory.go @@ -17,6 +17,8 @@ limitations under the License. package config import ( + "context" + "github.com/pkg/errors" "sigs.k8s.io/yaml" @@ -42,7 +44,7 @@ func NewMemoryReader() *MemoryReader { } // Init initialize the reader. 
-func (f *MemoryReader) Init(_ string) error { +func (f *MemoryReader) Init(_ context.Context, _ string) error { data, err := yaml.Marshal(f.providers) if err != nil { return err diff --git a/cmd/clusterctl/client/config/reader_memory_test.go b/cmd/clusterctl/client/config/reader_memory_test.go index ebff5a6c8ae8..6cd907c0f9cc 100644 --- a/cmd/clusterctl/client/config/reader_memory_test.go +++ b/cmd/clusterctl/client/config/reader_memory_test.go @@ -17,6 +17,7 @@ limitations under the License. package config import ( + "context" "testing" . "github.com/onsi/gomega" @@ -52,8 +53,11 @@ func TestMemoryReader(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + f := NewMemoryReader() - g.Expect(f.Init("")).To(Succeed()) + g.Expect(f.Init(ctx, "")).To(Succeed()) for _, p := range tt.providers { _, err := f.AddProvider(p.Name, p.Type, p.URL) g.Expect(err).ToNot(HaveOccurred()) diff --git a/cmd/clusterctl/client/config/reader_viper.go b/cmd/clusterctl/client/config/reader_viper.go index 85485b038f67..96b3ef78351c 100644 --- a/cmd/clusterctl/client/config/reader_viper.go +++ b/cmd/clusterctl/client/config/reader_viper.go @@ -75,7 +75,7 @@ func newViperReader(opts ...viperReaderOption) (Reader, error) { } // Init initialize the viperReader. 
-func (v *viperReader) Init(path string) error { +func (v *viperReader) Init(ctx context.Context, path string) error { log := logf.Log // Configure viper for reading environment variables as well, and more specifically: @@ -106,7 +106,7 @@ func (v *viperReader) Init(path string) error { } downloadConfigFile := filepath.Join(configDirectory, DownloadConfigFile) - err = downloadFile(url.String(), downloadConfigFile) + err = downloadFile(ctx, url.String(), downloadConfigFile) if err != nil { return err } @@ -141,9 +141,7 @@ func (v *viperReader) Init(path string) error { return nil } -func downloadFile(url string, filepath string) error { - ctx := context.TODO() - +func downloadFile(ctx context.Context, url string, filepath string) error { // Create the file out, err := os.Create(filepath) //nolint:gosec // No security issue: filepath is safe. if err != nil { diff --git a/cmd/clusterctl/client/config/reader_viper_test.go b/cmd/clusterctl/client/config/reader_viper_test.go index 00d11ce69b20..15dfd6a9c090 100644 --- a/cmd/clusterctl/client/config/reader_viper_test.go +++ b/cmd/clusterctl/client/config/reader_viper_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package config import ( + "context" "fmt" "net/http" "net/http/httptest" @@ -108,12 +109,15 @@ func Test_viperReader_Init(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { gg := NewWithT(t) + + ctx := context.Background() + v, _ := newViperReader(injectConfigPaths(tt.configDirs)) if tt.expectErr { - gg.Expect(v.Init(tt.configPath)).ToNot(Succeed()) + gg.Expect(v.Init(ctx, tt.configPath)).ToNot(Succeed()) return } - gg.Expect(v.Init(tt.configPath)).To(Succeed()) + gg.Expect(v.Init(ctx, tt.configPath)).To(Succeed()) }) } } @@ -168,9 +172,11 @@ func Test_viperReader_Get(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) + ctx := context.Background() + v, _ := newViperReader(injectConfigPaths([]string{dir})) - gs.Expect(v.Init(configFile)).To(Succeed()) + gs.Expect(v.Init(ctx, configFile)).To(Succeed()) got, err := v.Get(tt.args.key) if tt.wantErr { @@ -186,6 +192,9 @@ func Test_viperReader_Get(t *testing.T) { func Test_viperReader_GetWithoutDefaultConfig(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + dir, err := os.MkdirTemp("", "clusterctl") g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(dir) @@ -194,7 +203,7 @@ func Test_viperReader_GetWithoutDefaultConfig(t *testing.T) { v, err := newViperReader(injectConfigPaths([]string{dir})) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(v.Init("")).To(Succeed()) + g.Expect(v.Init(ctx, "")).To(Succeed()) got, err := v.Get("FOO_FOO") g.Expect(err).ToNot(HaveOccurred()) @@ -236,9 +245,11 @@ func Test_viperReader_Set(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) + ctx := context.Background() + v := &viperReader{} - gs.Expect(v.Init(configFile)).To(Succeed()) + gs.Expect(v.Init(ctx, configFile)).To(Succeed()) v.Set(tt.args.key, tt.args.value) diff --git a/cmd/clusterctl/client/config/variables_client_test.go b/cmd/clusterctl/client/config/variables_client_test.go index 8cb2d5baef1e..f51d06cac92d 100644 --- 
a/cmd/clusterctl/client/config/variables_client_test.go +++ b/cmd/clusterctl/client/config/variables_client_test.go @@ -61,7 +61,6 @@ func Test_variables_Get(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - p := &variablesClient{ reader: reader, } diff --git a/cmd/clusterctl/client/config_test.go b/cmd/clusterctl/client/config_test.go index 2fbc9b191b90..cf3fb111da71 100644 --- a/cmd/clusterctl/client/config_test.go +++ b/cmd/clusterctl/client/config_test.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "fmt" "os" "path/filepath" @@ -51,7 +52,7 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { { name: "Returns default providers", field: field{ - client: newFakeClient(newFakeConfig()), + client: newFakeClient(context.Background(), newFakeConfig(context.Background())), }, // note: these will be sorted by name by the Providers() call, so be sure they are in alphabetical order here too wantProviders: []string{ @@ -100,7 +101,7 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { { name: "Returns default providers and custom providers if defined", field: field{ - client: newFakeClient(newFakeConfig().WithProvider(customProviderConfig)), + client: newFakeClient(context.Background(), newFakeConfig(context.Background()).WithProvider(customProviderConfig)), }, // note: these will be sorted by name by the Providers() call, so be sure they are in alphabetical order here too wantProviders: []string{ @@ -170,15 +171,17 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { } func Test_clusterctlClient_GetProviderComponents(t *testing.T) { - config1 := newFakeConfig(). + ctx := context.Background() + + config1 := newFakeConfig(ctx). WithProvider(capiProviderConfig) - repository1 := newFakeRepository(capiProviderConfig, config1). + repository1 := newFakeRepository(ctx, capiProviderConfig, config1). WithPaths("root", "components.yaml"). 
WithDefaultVersion("v1.0.0"). WithFile("v1.0.0", "components.yaml", componentsYAML("ns1")) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithRepository(repository1) type args struct { @@ -220,10 +223,12 @@ func Test_clusterctlClient_GetProviderComponents(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + options := ComponentsOptions{ TargetNamespace: tt.args.targetNameSpace, } - got, err := client.GetProviderComponents(tt.args.provider, capiProviderConfig.Type(), options) + got, err := client.GetProviderComponents(ctx, tt.args.provider, capiProviderConfig.Type(), options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -239,13 +244,15 @@ func Test_clusterctlClient_GetProviderComponents(t *testing.T) { func Test_getComponentsByName_withEmptyVariables(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + // Create a fake config with a provider named P1 and a variable named foo. repository1Config := config.NewProvider("p1", "url", clusterctlv1.InfrastructureProviderType) - config1 := newFakeConfig(). + config1 := newFakeConfig(ctx). WithProvider(repository1Config) - repository1 := newFakeRepository(repository1Config, config1). + repository1 := newFakeRepository(ctx, repository1Config, config1). WithPaths("root", "components.yaml"). WithDefaultVersion("v1.0.0"). WithFile("v1.0.0", "components.yaml", componentsYAML("${FOO}")). @@ -260,7 +267,7 @@ func Test_getComponentsByName_withEmptyVariables(t *testing.T) { // Create a new fakeClient that allows to execute tests on the fake config, // the fake repositories and the fake cluster. - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithRepository(repository1). 
WithCluster(cluster1) @@ -268,7 +275,7 @@ func Test_getComponentsByName_withEmptyVariables(t *testing.T) { TargetNamespace: "ns1", SkipTemplateProcess: true, } - components, err := client.GetProviderComponents(repository1Config.Name(), repository1Config.Type(), options) + components, err := client.GetProviderComponents(ctx, repository1Config.Name(), repository1Config.Type(), options) g.Expect(err).ToNot(HaveOccurred()) g.Expect(components.Variables()).To(HaveLen(1)) g.Expect(components.Name()).To(Equal("p1")) @@ -412,7 +419,9 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - config := newFakeConfig(). + ctx := context.Background() + + config := newFakeConfig(ctx). WithVar("KUBERNETES_VERSION", "v3.4.5") // with this line we are simulating an env var c := &clusterctlClient{ @@ -435,7 +444,9 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { } func Test_clusterctlClient_templateOptionsToVariables_withExistingMachineCountVariables(t *testing.T) { - configClient := newFakeConfig(). + ctx := context.Background() + + configClient := newFakeConfig(ctx). WithVar("CONTROL_PLANE_MACHINE_COUNT", "3"). WithVar("WORKER_MACHINE_COUNT", "10") @@ -474,6 +485,8 @@ func Test_clusterctlClient_templateOptionsToVariables_withExistingMachineCountVa func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + rawTemplate := templateYAML("ns3", "${ CLUSTER_NAME }") // Template on a file @@ -499,10 +512,10 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { }, } - config1 := newFakeConfig(). + config1 := newFakeConfig(ctx). WithProvider(infraProviderConfig) - repository1 := newFakeRepository(infraProviderConfig, config1). + repository1 := newFakeRepository(ctx, infraProviderConfig, config1). WithPaths("root", "components"). WithDefaultVersion("v3.0.0"). 
WithFile("v3.0.0", "cluster-template.yaml", rawTemplate) @@ -512,7 +525,7 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { WithObjs(configMap). WithObjs(test.FakeCAPISetupObjects()...) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithCluster(cluster1). WithRepository(repository1) @@ -637,7 +650,7 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - got, err := client.GetClusterTemplate(tt.args.options) + got, err := client.GetClusterTemplate(ctx, tt.args.options) if tt.wantErr { gs.Expect(err).To(HaveOccurred()) return @@ -657,11 +670,13 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { func Test_clusterctlClient_GetClusterTemplate_withClusterClass(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + rawTemplate := mangedTopologyTemplateYAML("ns4", "${CLUSTER_NAME}", "dev") rawClusterClassTemplate := clusterClassYAML("ns4", "dev") - config1 := newFakeConfig().WithProvider(infraProviderConfig) + config1 := newFakeConfig(ctx).WithProvider(infraProviderConfig) - repository1 := newFakeRepository(infraProviderConfig, config1). + repository1 := newFakeRepository(ctx, infraProviderConfig, config1). WithPaths("root", "components"). WithDefaultVersion("v3.0.0"). WithFile("v3.0.0", "cluster-template-dev.yaml", rawTemplate). @@ -671,12 +686,12 @@ func Test_clusterctlClient_GetClusterTemplate_withClusterClass(t *testing.T) { WithProviderInventory(infraProviderConfig.Name(), infraProviderConfig.Type(), "v3.0.0", "ns4"). WithObjs(test.FakeCAPISetupObjects()...) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithCluster(cluster1). 
WithRepository(repository1) // Assert output - got, err := client.GetClusterTemplate(GetClusterTemplateOptions{ + got, err := client.GetClusterTemplate(ctx, GetClusterTemplateOptions{ Kubeconfig: Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"}, ClusterName: "test", TargetNamespace: "ns1", @@ -717,18 +732,18 @@ func Test_clusterctlClient_GetClusterTemplate_onEmptyCluster(t *testing.T) { }, } - config1 := newFakeConfig(). + config1 := newFakeConfig(ctx). WithProvider(infraProviderConfig) cluster1 := newFakeCluster(cluster.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"}, config1). WithObjs(configMap) - repository1 := newFakeRepository(infraProviderConfig, config1). + repository1 := newFakeRepository(ctx, infraProviderConfig, config1). WithPaths("root", "components"). WithDefaultVersion("v3.0.0"). WithFile("v3.0.0", "cluster-template.yaml", rawTemplate) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithCluster(cluster1). WithRepository(repository1) @@ -829,7 +844,7 @@ func Test_clusterctlClient_GetClusterTemplate_onEmptyCluster(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - got, err := client.GetClusterTemplate(tt.args.options) + got, err := client.GetClusterTemplate(ctx, tt.args.options) if tt.wantErr { gs.Expect(err).To(HaveOccurred()) return @@ -853,9 +868,9 @@ func newFakeClientWithoutCluster(configClient config.Client) *fakeClient { } var err error - fake.internalClient, err = newClusterctlClient("fake-config", + fake.internalClient, err = newClusterctlClient(context.Background(), "fake-config", InjectConfig(fake.configClient), - InjectRepositoryFactory(func(input RepositoryClientFactoryInput) (repository.Client, error) { + InjectRepositoryFactory(func(ctx context.Context, input RepositoryClientFactoryInput) (repository.Client, error) { if _, ok := fake.repositories[input.Provider.ManifestLabel()]; !ok { return nil, errors.Errorf("repository for kubeconfig %q does not exist", 
input.Provider.ManifestLabel()) } @@ -872,10 +887,12 @@ func newFakeClientWithoutCluster(configClient config.Client) *fakeClient { func Test_clusterctlClient_GetClusterTemplate_withoutCluster(t *testing.T) { rawTemplate := templateYAML("ns3", "${ CLUSTER_NAME }") - config1 := newFakeConfig(). + ctx := context.Background() + + config1 := newFakeConfig(ctx). WithProvider(infraProviderConfig) - repository1 := newFakeRepository(infraProviderConfig, config1). + repository1 := newFakeRepository(ctx, infraProviderConfig, config1). WithPaths("root", "components"). WithDefaultVersion("v3.0.0"). WithFile("v3.0.0", "cluster-template.yaml", rawTemplate) @@ -940,7 +957,7 @@ func Test_clusterctlClient_GetClusterTemplate_withoutCluster(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - got, err := client.GetClusterTemplate(tt.args.options) + got, err := client.GetClusterTemplate(ctx, tt.args.options) if tt.wantErr { gs.Expect(err).To(HaveOccurred()) return @@ -1037,13 +1054,13 @@ v3: default3`, for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - config1 := newFakeConfig(). + config1 := newFakeConfig(ctx). WithProvider(infraProviderConfig) cluster1 := newFakeCluster(cluster.Kubeconfig{}, config1) - client := newFakeClient(config1).WithCluster(cluster1) + client := newFakeClient(ctx, config1).WithCluster(cluster1) - printer, err := client.ProcessYAML(tt.options) + printer, err := client.ProcessYAML(ctx, tt.options) if tt.expectErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/delete.go b/cmd/clusterctl/client/delete.go index 3797abb3e9e2..f59f443544a6 100644 --- a/cmd/clusterctl/client/delete.go +++ b/cmd/clusterctl/client/delete.go @@ -17,6 +17,8 @@ limitations under the License. 
package client import ( + "context" + "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -65,24 +67,24 @@ type DeleteOptions struct { SkipInventory bool } -func (c *clusterctlClient) Delete(options DeleteOptions) error { +func (c *clusterctlClient) Delete(ctx context.Context, options DeleteOptions) error { clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return err } // Ensure this command only runs against management clusters with the current Cluster API contract. - if err := clusterClient.ProviderInventory().CheckCAPIContract(); err != nil { + if err := clusterClient.ProviderInventory().CheckCAPIContract(ctx); err != nil { return err } // Ensure the custom resource definitions required by clusterctl are in place. - if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { + if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(ctx); err != nil { return err } // Get the list of installed providers. 
- installedProviders, err := clusterClient.ProviderInventory().List() + installedProviders, err := clusterClient.ProviderInventory().List(ctx) if err != nil { return err } @@ -132,7 +134,7 @@ func (c *clusterctlClient) Delete(options DeleteOptions) error { for _, provider := range providers { // Try to detect the namespace where the provider lives - provider.Namespace, err = clusterClient.ProviderInventory().GetProviderNamespace(provider.ProviderName, provider.GetProviderType()) + provider.Namespace, err = clusterClient.ProviderInventory().GetProviderNamespace(ctx, provider.ProviderName, provider.GetProviderType()) if err != nil { return err } @@ -141,7 +143,7 @@ func (c *clusterctlClient) Delete(options DeleteOptions) error { } if provider.Version != "" { - version, err := clusterClient.ProviderInventory().GetProviderVersion(provider.ProviderName, provider.GetProviderType()) + version, err := clusterClient.ProviderInventory().GetProviderVersion(ctx, provider.ProviderName, provider.GetProviderType()) if err != nil { return err } @@ -156,7 +158,7 @@ func (c *clusterctlClient) Delete(options DeleteOptions) error { // Delete the selected providers. for _, provider := range providersToDelete { - if err := clusterClient.ProviderComponents().Delete(cluster.DeleteOptions{Provider: provider, IncludeNamespace: options.IncludeNamespace, IncludeCRDs: options.IncludeCRDs, SkipInventory: options.SkipInventory}); err != nil { + if err := clusterClient.ProviderComponents().Delete(ctx, cluster.DeleteOptions{Provider: provider, IncludeNamespace: options.IncludeNamespace, IncludeCRDs: options.IncludeCRDs, SkipInventory: options.SkipInventory}); err != nil { return err } } diff --git a/cmd/clusterctl/client/delete_test.go b/cmd/clusterctl/client/delete_test.go index 8a65f7aedc0d..52e9d4b03970 100644 --- a/cmd/clusterctl/client/delete_test.go +++ b/cmd/clusterctl/client/delete_test.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "testing" . 
"github.com/onsi/gomega" @@ -167,7 +168,9 @@ func Test_clusterctlClient_Delete(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.Delete(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.Delete(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -194,17 +197,19 @@ func Test_clusterctlClient_Delete(t *testing.T) { // clusterctl client for a management cluster with capi and bootstrap provider. func fakeClusterForDelete() *fakeClient { - config1 := newFakeConfig(). + ctx := context.Background() + + config1 := newFakeConfig(ctx). WithVar("var", "value"). WithProvider(capiProviderConfig). WithProvider(bootstrapProviderConfig). WithProvider(controlPlaneProviderConfig). WithProvider(infraProviderConfig) - repository1 := newFakeRepository(capiProviderConfig, config1) - repository2 := newFakeRepository(bootstrapProviderConfig, config1) - repository3 := newFakeRepository(controlPlaneProviderConfig, config1) - repository4 := newFakeRepository(infraProviderConfig, config1) + repository1 := newFakeRepository(ctx, capiProviderConfig, config1) + repository2 := newFakeRepository(ctx, bootstrapProviderConfig, config1) + repository3 := newFakeRepository(ctx, controlPlaneProviderConfig, config1) + repository4 := newFakeRepository(ctx, infraProviderConfig, config1) cluster1 := newFakeCluster(cluster.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"}, config1) cluster1.fakeProxy.WithProviderInventory(capiProviderConfig.Name(), capiProviderConfig.Type(), providerVersion, "capi-system") @@ -213,7 +218,7 @@ func fakeClusterForDelete() *fakeClient { cluster1.fakeProxy.WithProviderInventory(infraProviderConfig.Name(), infraProviderConfig.Type(), providerVersion, namespace) cluster1.fakeProxy.WithFakeCAPISetup() - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). 
// fake repository for capi, bootstrap, controlplane and infra provider (matching provider's config) WithRepository(repository1). WithRepository(repository2). diff --git a/cmd/clusterctl/client/describe.go b/cmd/clusterctl/client/describe.go index 2bdf1464537d..4ebd75dc5898 100644 --- a/cmd/clusterctl/client/describe.go +++ b/cmd/clusterctl/client/describe.go @@ -60,7 +60,7 @@ type DescribeClusterOptions struct { } // DescribeCluster returns the object tree representing the status of a Cluster API cluster. -func (c *clusterctlClient) DescribeCluster(options DescribeClusterOptions) (*tree.ObjectTree, error) { +func (c *clusterctlClient) DescribeCluster(ctx context.Context, options DescribeClusterOptions) (*tree.ObjectTree, error) { // gets access to the management cluster cluster, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { @@ -68,7 +68,7 @@ func (c *clusterctlClient) DescribeCluster(options DescribeClusterOptions) (*tre } // Ensure this command only runs against management clusters with the current Cluster API contract. - if err := cluster.ProviderInventory().CheckCAPIContract(); err != nil { + if err := cluster.ProviderInventory().CheckCAPIContract(ctx); err != nil { return nil, err } @@ -88,7 +88,7 @@ func (c *clusterctlClient) DescribeCluster(options DescribeClusterOptions) (*tre } // Gets the object tree representing the status of a Cluster API cluster. 
- return tree.Discovery(context.TODO(), client, options.Namespace, options.ClusterName, tree.DiscoverOptions{ + return tree.Discovery(ctx, client, options.Namespace, options.ClusterName, tree.DiscoverOptions{ ShowOtherConditions: options.ShowOtherConditions, ShowMachineSets: options.ShowMachineSets, ShowClusterResourceSets: options.ShowClusterResourceSets, diff --git a/cmd/clusterctl/client/generate_provider.go b/cmd/clusterctl/client/generate_provider.go index b9f24f8d8a31..a59c6aee0158 100644 --- a/cmd/clusterctl/client/generate_provider.go +++ b/cmd/clusterctl/client/generate_provider.go @@ -17,6 +17,8 @@ limitations under the License. package client import ( + "context" + "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/version" @@ -24,7 +26,7 @@ import ( clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" ) -func (c *clusterctlClient) GenerateProvider(provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { +func (c *clusterctlClient) GenerateProvider(ctx context.Context, provider string, providerType clusterctlv1.ProviderType, options ComponentsOptions) (Components, error) { providerName, providerVersion, err := parseProviderName(provider) if err != nil { return nil, err @@ -35,7 +37,7 @@ func (c *clusterctlClient) GenerateProvider(provider string, providerType cluste return nil, err } - providerRepositoryClient, err := c.repositoryClientFactory(RepositoryClientFactoryInput{Provider: configRepository}) + providerRepositoryClient, err := c.repositoryClientFactory(ctx, RepositoryClientFactoryInput{Provider: configRepository}) if err != nil { return nil, err } @@ -44,7 +46,7 @@ func (c *clusterctlClient) GenerateProvider(provider string, providerType cluste providerVersion = providerRepositoryClient.DefaultVersion() } - latestMetadata, err := providerRepositoryClient.Metadata(providerVersion).Get() + latestMetadata, err := providerRepositoryClient.Metadata(providerVersion).Get(ctx) if err != 
nil { return nil, err } @@ -63,5 +65,5 @@ func (c *clusterctlClient) GenerateProvider(provider string, providerType cluste return nil, errors.Errorf("current version of clusterctl is only compatible with %s providers, detected %s for provider %s", clusterv1.GroupVersion.Version, releaseSeries.Contract, providerName) } - return c.GetProviderComponents(provider, providerType, options) + return c.GetProviderComponents(ctx, provider, providerType, options) } diff --git a/cmd/clusterctl/client/get_kubeconfig.go b/cmd/clusterctl/client/get_kubeconfig.go index 7f33402d7d8f..9b8c8eb991ee 100644 --- a/cmd/clusterctl/client/get_kubeconfig.go +++ b/cmd/clusterctl/client/get_kubeconfig.go @@ -17,6 +17,8 @@ limitations under the License. package client import ( + "context" + "github.com/pkg/errors" ) @@ -33,7 +35,7 @@ type GetKubeconfigOptions struct { WorkloadClusterName string } -func (c *clusterctlClient) GetKubeconfig(options GetKubeconfigOptions) (string, error) { +func (c *clusterctlClient) GetKubeconfig(ctx context.Context, options GetKubeconfigOptions) (string, error) { // gets access to the management cluster clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { @@ -41,7 +43,7 @@ func (c *clusterctlClient) GetKubeconfig(options GetKubeconfigOptions) (string, } // Ensure this command only runs against management clusters with the current Cluster API contract. 
- if err := clusterClient.ProviderInventory().CheckCAPIContract(); err != nil { + if err := clusterClient.ProviderInventory().CheckCAPIContract(ctx); err != nil { return "", err } @@ -56,5 +58,5 @@ func (c *clusterctlClient) GetKubeconfig(options GetKubeconfigOptions) (string, options.Namespace = currentNamespace } - return clusterClient.WorkloadCluster().GetKubeconfig(options.WorkloadClusterName, options.Namespace) + return clusterClient.WorkloadCluster().GetKubeconfig(ctx, options.WorkloadClusterName, options.Namespace) } diff --git a/cmd/clusterctl/client/get_kubeconfig_test.go b/cmd/clusterctl/client/get_kubeconfig_test.go index c3700803adfd..e9c58d289909 100644 --- a/cmd/clusterctl/client/get_kubeconfig_test.go +++ b/cmd/clusterctl/client/get_kubeconfig_test.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "testing" . "github.com/onsi/gomega" @@ -26,13 +27,15 @@ import ( ) func Test_clusterctlClient_GetKubeconfig(t *testing.T) { - configClient := newFakeConfig() + ctx := context.Background() + + configClient := newFakeConfig(ctx) kubeconfig := cluster.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"} clusterClient := newFakeCluster(cluster.Kubeconfig{Path: "cluster1"}, configClient) // create a clusterctl client where the proxy returns an empty namespace clusterClient.fakeProxy = test.NewFakeProxy().WithNamespace("").WithFakeCAPISetup() - badClient := newFakeClient(configClient).WithCluster(clusterClient) + badClient := newFakeClient(ctx, configClient).WithCluster(clusterClient) tests := []struct { name string @@ -57,7 +60,7 @@ func Test_clusterctlClient_GetKubeconfig(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - config, err := tt.client.GetKubeconfig(tt.options) + config, err := tt.client.GetKubeconfig(ctx, tt.options) if tt.expectErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/init.go b/cmd/clusterctl/client/init.go index 20e30f0b9cdd..562f21ee737c 100644 
--- a/cmd/clusterctl/client/init.go +++ b/cmd/clusterctl/client/init.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "sort" "time" @@ -89,7 +90,7 @@ type InitOptions struct { } // Init initializes a management cluster by adding the requested list of providers. -func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) { +func (c *clusterctlClient) Init(ctx context.Context, options InitOptions) ([]Components, error) { log := logf.Log // Default WaitProviderTimeout as we cannot rely on defaulting in the CLI @@ -105,12 +106,12 @@ func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) { } // ensure the custom resource definitions required by clusterctl are in place - if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { + if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(ctx); err != nil { return nil, err } // Ensure this command only runs against v1beta1 management clusters - if err := clusterClient.ProviderInventory().CheckCAPIContract(cluster.AllowCAPINotInstalled{}); err != nil { + if err := clusterClient.ProviderInventory().CheckCAPIContract(ctx, cluster.AllowCAPINotInstalled{}); err != nil { return nil, err } @@ -118,11 +119,11 @@ func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) { // if not we consider this the first time init is executed, and thus we enforce the installation of a core provider, // a bootstrap provider and a control-plane provider (if not already explicitly requested by the user) log.Info("Fetching providers") - firstRun := c.addDefaultProviders(clusterClient, &options) + firstRun := c.addDefaultProviders(ctx, clusterClient, &options) // create an installer service, add the requested providers to the install queue and then perform validation // of the target state of the management cluster before starting the installation. 
- installer, err := c.setupInstaller(clusterClient, options) + installer, err := c.setupInstaller(ctx, clusterClient, options) if err != nil { return nil, err } @@ -132,7 +133,7 @@ func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) { // - All the providers must support the same API Version of Cluster API (contract) // - All provider CRDs that are referenced in core Cluster API CRDs must comply with the CRD naming scheme, // otherwise a warning is logged. - if err := installer.Validate(); err != nil { + if err := installer.Validate(ctx); err != nil { if !options.IgnoreValidationErrors { return nil, err } @@ -141,7 +142,7 @@ func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) { // Before installing the providers, ensure the cert-manager Webhook is in place. certManager := clusterClient.CertManager() - if err := certManager.EnsureInstalled(); err != nil { + if err := certManager.EnsureInstalled(ctx); err != nil { return nil, err } @@ -149,7 +150,7 @@ func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) { WaitProviders: options.WaitProviders, WaitProviderTimeout: options.WaitProviderTimeout, } - components, err := installer.Install(installOpts) + components, err := installer.Install(ctx, installOpts) if err != nil { return nil, err } @@ -174,7 +175,7 @@ func (c *clusterctlClient) Init(options InitOptions) ([]Components, error) { } // InitImages returns the list of images required for init. 
-func (c *clusterctlClient) InitImages(options InitOptions) ([]string, error) { +func (c *clusterctlClient) InitImages(ctx context.Context, options InitOptions) ([]string, error) { // gets access to the management cluster clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { @@ -182,14 +183,14 @@ func (c *clusterctlClient) InitImages(options InitOptions) ([]string, error) { } // Ensure this command only runs against empty management clusters or v1beta1 management clusters. - if err := clusterClient.ProviderInventory().CheckCAPIContract(cluster.AllowCAPINotInstalled{}); err != nil { + if err := clusterClient.ProviderInventory().CheckCAPIContract(ctx, cluster.AllowCAPINotInstalled{}); err != nil { return nil, err } // checks if the cluster already contains a Core provider. // if not we consider this the first time init is executed, and thus we enforce the installation of a core provider, // a bootstrap provider and a control-plane provider (if not already explicitly requested by the user) - c.addDefaultProviders(clusterClient, &options) + c.addDefaultProviders(ctx, clusterClient, &options) // skip variable parsing when listing images options.skipTemplateProcess = true @@ -198,14 +199,14 @@ func (c *clusterctlClient) InitImages(options InitOptions) ([]string, error) { // create an installer service, add the requested providers to the install queue and then perform validation // of the target state of the management cluster before starting the installation. - installer, err := c.setupInstaller(clusterClient, options) + installer, err := c.setupInstaller(ctx, clusterClient, options) if err != nil { return nil, err } // Gets the list of container images required for the cert-manager (if not already installed). 
certManager := clusterClient.CertManager() - images, err := certManager.Images() + images, err := certManager.Images(ctx) if err != nil { return nil, err } @@ -217,7 +218,7 @@ func (c *clusterctlClient) InitImages(options InitOptions) ([]string, error) { return images, nil } -func (c *clusterctlClient) setupInstaller(cluster cluster.Client, options InitOptions) (cluster.ProviderInstaller, error) { +func (c *clusterctlClient) setupInstaller(ctx context.Context, cluster cluster.Client, options InitOptions) (cluster.ProviderInstaller, error) { installer := cluster.ProviderInstaller() providerList := &clusterctlv1.ProviderList{} @@ -230,7 +231,7 @@ func (c *clusterctlClient) setupInstaller(cluster cluster.Client, options InitOp } if !options.allowMissingProviderCRD { - providerList, err := cluster.ProviderInventory().List() + providerList, err := cluster.ProviderInventory().List(ctx) if err != nil { return nil, err } @@ -239,44 +240,44 @@ func (c *clusterctlClient) setupInstaller(cluster cluster.Client, options InitOp } if options.CoreProvider != "" { - if err := c.addToInstaller(addOptions, clusterctlv1.CoreProviderType, options.CoreProvider); err != nil { + if err := c.addToInstaller(ctx, addOptions, clusterctlv1.CoreProviderType, options.CoreProvider); err != nil { return nil, err } } - if err := c.addToInstaller(addOptions, clusterctlv1.BootstrapProviderType, options.BootstrapProviders...); err != nil { + if err := c.addToInstaller(ctx, addOptions, clusterctlv1.BootstrapProviderType, options.BootstrapProviders...); err != nil { return nil, err } - if err := c.addToInstaller(addOptions, clusterctlv1.ControlPlaneProviderType, options.ControlPlaneProviders...); err != nil { + if err := c.addToInstaller(ctx, addOptions, clusterctlv1.ControlPlaneProviderType, options.ControlPlaneProviders...); err != nil { return nil, err } - if err := c.addToInstaller(addOptions, clusterctlv1.InfrastructureProviderType, options.InfrastructureProviders...); err != nil { + if err := 
c.addToInstaller(ctx, addOptions, clusterctlv1.InfrastructureProviderType, options.InfrastructureProviders...); err != nil { return nil, err } - if err := c.addToInstaller(addOptions, clusterctlv1.IPAMProviderType, options.IPAMProviders...); err != nil { + if err := c.addToInstaller(ctx, addOptions, clusterctlv1.IPAMProviderType, options.IPAMProviders...); err != nil { return nil, err } - if err := c.addToInstaller(addOptions, clusterctlv1.RuntimeExtensionProviderType, options.RuntimeExtensionProviders...); err != nil { + if err := c.addToInstaller(ctx, addOptions, clusterctlv1.RuntimeExtensionProviderType, options.RuntimeExtensionProviders...); err != nil { return nil, err } - if err := c.addToInstaller(addOptions, clusterctlv1.AddonProviderType, options.AddonProviders...); err != nil { + if err := c.addToInstaller(ctx, addOptions, clusterctlv1.AddonProviderType, options.AddonProviders...); err != nil { return nil, err } return installer, nil } -func (c *clusterctlClient) addDefaultProviders(cluster cluster.Client, options *InitOptions) bool { +func (c *clusterctlClient) addDefaultProviders(ctx context.Context, cluster cluster.Client, options *InitOptions) bool { firstRun := false // Check if there is already a core provider installed in the cluster // Nb. we are ignoring the error so this operation can support listing images even if there is no an existing management cluster; // in case there is no an existing management cluster, we assume there are no core providers installed in the cluster. - currentCoreProvider, _ := cluster.ProviderInventory().GetDefaultProviderName(clusterctlv1.CoreProviderType) + currentCoreProvider, _ := cluster.ProviderInventory().GetDefaultProviderName(ctx, clusterctlv1.CoreProviderType) // If there are no core providers installed in the cluster, consider this a first run and add default providers to the list // of providers to be installed. 
@@ -303,7 +304,7 @@ type addToInstallerOptions struct { } // addToInstaller adds the components to the install queue and checks that the actual provider type match the target group. -func (c *clusterctlClient) addToInstaller(options addToInstallerOptions, providerType clusterctlv1.ProviderType, providers ...string) error { +func (c *clusterctlClient) addToInstaller(ctx context.Context, options addToInstallerOptions, providerType clusterctlv1.ProviderType, providers ...string) error { for _, provider := range providers { // It is possible to opt-out from automatic installation of bootstrap/control-plane providers using '-' as a provider name (NoopProvider). if provider == NoopProvider { @@ -316,7 +317,7 @@ func (c *clusterctlClient) addToInstaller(options addToInstallerOptions, provide TargetNamespace: options.targetNamespace, SkipTemplateProcess: options.skipTemplateProcess, } - components, err := c.getComponentsByName(provider, providerType, componentsOptions) + components, err := c.getComponentsByName(ctx, provider, providerType, componentsOptions) if err != nil { return errors.Wrapf(err, "failed to get provider components for the %q provider", provider) } diff --git a/cmd/clusterctl/client/init_test.go b/cmd/clusterctl/client/init_test.go index 8103ad6554d5..3a98ee88b94a 100644 --- a/cmd/clusterctl/client/init_test.go +++ b/cmd/clusterctl/client/init_test.go @@ -170,7 +170,7 @@ func Test_clusterctlClient_InitImages(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := tt.field.client.InitImages(InitOptions{ + got, err := tt.field.client.InitImages(ctx, InitOptions{ Kubeconfig: Kubeconfig{Path: "kubeconfig", Context: tt.args.kubeconfigContext}, CoreProvider: tt.args.coreProvider, BootstrapProviders: tt.args.bootstrapProvider, @@ -196,7 +196,7 @@ func Test_clusterctlClient_InitImages(t *testing.T) { func Test_clusterctlClient_Init(t *testing.T) { // create a config variables client which does not have the value for // SOME_VARIABLE as 
expected in the infra components YAML - fconfig := newFakeConfig(). + fconfig := newFakeConfig(ctx). WithVar("ANOTHER_VARIABLE", "value"). WithProvider(capiProviderConfig). WithProvider(infraProviderConfig) @@ -538,10 +538,10 @@ func Test_clusterctlClient_Init(t *testing.T) { if tt.field.hasCRD { input := cluster.Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"} - g.Expect(tt.field.client.clusters[input].ProviderInventory().EnsureCustomResourceDefinitions()).To(Succeed()) + g.Expect(tt.field.client.clusters[input].ProviderInventory().EnsureCustomResourceDefinitions(ctx)).To(Succeed()) } - got, err := tt.field.client.Init(InitOptions{ + got, err := tt.field.client.Init(ctx, InitOptions{ Kubeconfig: Kubeconfig{Path: "kubeconfig", Context: "mgmt-context"}, CoreProvider: tt.args.coreProvider, BootstrapProviders: tt.args.bootstrapProvider, @@ -578,7 +578,7 @@ var ( func setupCluster(providers []Provider, certManagerClient cluster.CertManagerClient) (*fakeConfigClient, *fakeClient) { // create a config variables client which does not have the value for // SOME_VARIABLE as expected in the infra components YAML - cfg := newFakeConfig(). + cfg := newFakeConfig(ctx). WithVar("ANOTHER_VARIABLE", "value"). WithProvider(capiProviderConfig). WithProvider(infraProviderConfig) @@ -613,7 +613,7 @@ func fakeEmptyCluster() *fakeClient { } func fakeConfig(providers []config.Provider, variables map[string]string) *fakeConfigClient { - config := newFakeConfig() + config := newFakeConfig(ctx) for _, p := range providers { config = config.WithProvider(p) } @@ -635,7 +635,7 @@ func fakeCluster(config *fakeConfigClient, repos []*fakeRepositoryClient, certMa // fakeRepositories returns a base set of repositories for the different types // of providers. func fakeRepositories(config *fakeConfigClient, providers []Provider) []*fakeRepositoryClient { - repository1 := newFakeRepository(capiProviderConfig, config). + repository1 := newFakeRepository(ctx, capiProviderConfig, config). 
WithPaths("root", "components.yaml"). WithDefaultVersion("v1.0.0"). WithFile("v0.9.0", "components.yaml", componentsYAML("ns1")). @@ -658,7 +658,7 @@ func fakeRepositories(config *fakeConfigClient, providers []Provider) []*fakeRep {Major: 1, Minor: 1, Contract: test.CurrentCAPIContract}, }, }) - repository2 := newFakeRepository(bootstrapProviderConfig, config). + repository2 := newFakeRepository(ctx, bootstrapProviderConfig, config). WithPaths("root", "components.yaml"). WithDefaultVersion("v2.0.0"). WithFile("v0.9.0", "components.yaml", componentsYAML("ns1")). @@ -681,7 +681,7 @@ func fakeRepositories(config *fakeConfigClient, providers []Provider) []*fakeRep {Major: 2, Minor: 1, Contract: test.CurrentCAPIContract}, }, }) - repository3 := newFakeRepository(controlPlaneProviderConfig, config). + repository3 := newFakeRepository(ctx, controlPlaneProviderConfig, config). WithPaths("root", "components.yaml"). WithDefaultVersion("v2.0.0"). WithFile("v0.9.0", "components.yaml", componentsYAML("ns1")). @@ -704,7 +704,7 @@ func fakeRepositories(config *fakeConfigClient, providers []Provider) []*fakeRep {Major: 2, Minor: 1, Contract: test.CurrentCAPIContract}, }, }) - repository4 := newFakeRepository(infraProviderConfig, config). + repository4 := newFakeRepository(ctx, infraProviderConfig, config). WithPaths("root", "components.yaml"). WithDefaultVersion("v3.0.0"). WithFile("v0.9.0", "components.yaml", componentsYAML("ns1")). @@ -733,7 +733,7 @@ func fakeRepositories(config *fakeConfigClient, providers []Provider) []*fakeRep for _, provider := range providers { providerRepositories = append(providerRepositories, - newFakeRepository(provider, config). + newFakeRepository(ctx, provider, config). WithPaths("root", "components.yaml"). WithDefaultVersion("v2.0.0"). WithFile("v2.0.0", "components.yaml", componentsYAML("ns2")). 
@@ -748,7 +748,7 @@ func fakeRepositories(config *fakeConfigClient, providers []Provider) []*fakeRep } func fakeClusterCtlClient(config *fakeConfigClient, repos []*fakeRepositoryClient, clusters []*fakeClusterClient) *fakeClient { - client := newFakeClient(config) + client := newFakeClient(ctx, config) for _, r := range repos { client = client.WithRepository(r) } diff --git a/cmd/clusterctl/client/move.go b/cmd/clusterctl/client/move.go index 38617143237c..a8de5152801b 100644 --- a/cmd/clusterctl/client/move.go +++ b/cmd/clusterctl/client/move.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "os" "github.com/pkg/errors" @@ -52,7 +53,7 @@ type MoveOptions struct { DryRun bool } -func (c *clusterctlClient) Move(options MoveOptions) error { +func (c *clusterctlClient) Move(ctx context.Context, options MoveOptions) error { // Both backup and restore makes no sense. It's a complete move. if options.FromDirectory != "" && options.ToDirectory != "" { return errors.Errorf("can't set both FromDirectory and ToDirectory") @@ -66,17 +67,17 @@ func (c *clusterctlClient) Move(options MoveOptions) error { } if options.ToDirectory != "" { - return c.toDirectory(options) + return c.toDirectory(ctx, options) } else if options.FromDirectory != "" { - return c.fromDirectory(options) + return c.fromDirectory(ctx, options) } else { - return c.move(options) + return c.move(ctx, options) } } -func (c *clusterctlClient) move(options MoveOptions) error { +func (c *clusterctlClient) move(ctx context.Context, options MoveOptions) error { // Get the client for interacting with the source management cluster. 
- fromCluster, err := c.getClusterClient(options.FromKubeconfig) + fromCluster, err := c.getClusterClient(ctx, options.FromKubeconfig) if err != nil { return err } @@ -93,16 +94,16 @@ func (c *clusterctlClient) move(options MoveOptions) error { var toCluster cluster.Client if !options.DryRun { // Get the client for interacting with the target management cluster. - if toCluster, err = c.getClusterClient(options.ToKubeconfig); err != nil { + if toCluster, err = c.getClusterClient(ctx, options.ToKubeconfig); err != nil { return err } } - return fromCluster.ObjectMover().Move(options.Namespace, toCluster, options.DryRun, options.ExperimentalResourceMutators...) + return fromCluster.ObjectMover().Move(ctx, options.Namespace, toCluster, options.DryRun, options.ExperimentalResourceMutators...) } -func (c *clusterctlClient) fromDirectory(options MoveOptions) error { - toCluster, err := c.getClusterClient(options.ToKubeconfig) +func (c *clusterctlClient) fromDirectory(ctx context.Context, options MoveOptions) error { + toCluster, err := c.getClusterClient(ctx, options.ToKubeconfig) if err != nil { return err } @@ -111,11 +112,11 @@ func (c *clusterctlClient) fromDirectory(options MoveOptions) error { return err } - return toCluster.ObjectMover().FromDirectory(toCluster, options.FromDirectory) + return toCluster.ObjectMover().FromDirectory(ctx, toCluster, options.FromDirectory) } -func (c *clusterctlClient) toDirectory(options MoveOptions) error { - fromCluster, err := c.getClusterClient(options.FromKubeconfig) +func (c *clusterctlClient) toDirectory(ctx context.Context, options MoveOptions) error { + fromCluster, err := c.getClusterClient(ctx, options.FromKubeconfig) if err != nil { return err } @@ -133,22 +134,22 @@ func (c *clusterctlClient) toDirectory(options MoveOptions) error { return err } - return fromCluster.ObjectMover().ToDirectory(options.Namespace, options.ToDirectory) + return fromCluster.ObjectMover().ToDirectory(ctx, options.Namespace, options.ToDirectory) } 
-func (c *clusterctlClient) getClusterClient(kubeconfig Kubeconfig) (cluster.Client, error) { +func (c *clusterctlClient) getClusterClient(ctx context.Context, kubeconfig Kubeconfig) (cluster.Client, error) { cluster, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: kubeconfig}) if err != nil { return nil, err } // Ensure this command only runs against management clusters with the current Cluster API contract. - if err := cluster.ProviderInventory().CheckCAPIContract(); err != nil { + if err := cluster.ProviderInventory().CheckCAPIContract(ctx); err != nil { return nil, err } // Ensures the custom resource definitions required by clusterctl are in place. - if err := cluster.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { + if err := cluster.ProviderInventory().EnsureCustomResourceDefinitions(ctx); err != nil { return nil, err } return cluster, nil diff --git a/cmd/clusterctl/client/move_test.go b/cmd/clusterctl/client/move_test.go index 8bf911b451a8..83ad30d67a82 100644 --- a/cmd/clusterctl/client/move_test.go +++ b/cmd/clusterctl/client/move_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package client import ( + "context" "os" "testing" @@ -129,7 +130,9 @@ func Test_clusterctlClient_Move(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.Move(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.Move(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -192,7 +195,9 @@ func Test_clusterctlClient_ToDirectory(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.Move(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.Move(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -255,7 +260,9 @@ func Test_clusterctlClient_FromDirectory(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.Move(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.Move(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -266,10 +273,12 @@ func Test_clusterctlClient_FromDirectory(t *testing.T) { } func fakeClientForMove() *fakeClient { + ctx := context.Background() + core := config.NewProvider("cluster-api", "https://somewhere.com", clusterctlv1.CoreProviderType) infra := config.NewProvider("infra", "https://somewhere.com", clusterctlv1.InfrastructureProviderType) - config1 := newFakeConfig(). + config1 := newFakeConfig(ctx). WithProvider(core). WithProvider(infra) @@ -285,7 +294,7 @@ func fakeClientForMove() *fakeClient { WithProviderInventory(infra.Name(), infra.Type(), "v2.0.0", "infra-system"). WithObjs(test.FakeCAPISetupObjects()...) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithCluster(cluster1). 
WithCluster(cluster2) @@ -298,22 +307,22 @@ type fakeObjectMover struct { fromDirectoryErr error } -func (f *fakeObjectMover) Move(_ string, _ cluster.Client, _ bool, _ ...cluster.ResourceMutatorFunc) error { +func (f *fakeObjectMover) Move(_ context.Context, _ string, _ cluster.Client, _ bool, _ ...cluster.ResourceMutatorFunc) error { return f.moveErr } -func (f *fakeObjectMover) ToDirectory(_ string, _ string) error { +func (f *fakeObjectMover) ToDirectory(_ context.Context, _ string, _ string) error { return f.toDirectoryErr } -func (f *fakeObjectMover) Backup(_ string, _ string) error { +func (f *fakeObjectMover) Backup(_ context.Context, _ string, _ string) error { return f.toDirectoryErr } -func (f *fakeObjectMover) FromDirectory(_ cluster.Client, _ string) error { +func (f *fakeObjectMover) FromDirectory(_ context.Context, _ cluster.Client, _ string) error { return f.fromDirectoryErr } -func (f *fakeObjectMover) Restore(_ cluster.Client, _ string) error { +func (f *fakeObjectMover) Restore(_ context.Context, _ cluster.Client, _ string) error { return f.fromDirectoryErr } diff --git a/cmd/clusterctl/client/repository/client.go b/cmd/clusterctl/client/repository/client.go index 915936880ec0..b191b9b0fdb9 100644 --- a/cmd/clusterctl/client/repository/client.go +++ b/cmd/clusterctl/client/repository/client.go @@ -17,6 +17,7 @@ limitations under the License. package repository import ( + "context" "net/url" "strings" @@ -39,7 +40,7 @@ type Client interface { DefaultVersion() string // GetVersions return the list of versions that are available in a provider repository - GetVersions() ([]string, error) + GetVersions(ctx context.Context) ([]string, error) // Components provide access to YAML file for creating provider components. 
Components() ComponentsClient @@ -71,8 +72,8 @@ func (c *repositoryClient) DefaultVersion() string { return c.repository.DefaultVersion() } -func (c *repositoryClient) GetVersions() ([]string, error) { - return c.repository.GetVersions() +func (c *repositoryClient) GetVersions(ctx context.Context) ([]string, error) { + return c.repository.GetVersions(ctx) } func (c *repositoryClient) Components() ComponentsClient { @@ -115,11 +116,11 @@ func InjectYamlProcessor(p yaml.Processor) Option { } // New returns a Client. -func New(provider config.Provider, configClient config.Client, options ...Option) (Client, error) { - return newRepositoryClient(provider, configClient, options...) +func New(ctx context.Context, provider config.Provider, configClient config.Client, options ...Option) (Client, error) { + return newRepositoryClient(ctx, provider, configClient, options...) } -func newRepositoryClient(provider config.Provider, configClient config.Client, options ...Option) (*repositoryClient, error) { +func newRepositoryClient(ctx context.Context, provider config.Provider, configClient config.Client, options ...Option) (*repositoryClient, error) { client := &repositoryClient{ Provider: provider, configClient: configClient, @@ -131,7 +132,7 @@ func newRepositoryClient(provider config.Provider, configClient config.Client, o // if there is an injected repository, use it, otherwise use a default one if client.repository == nil { - r, err := repositoryFactory(provider, configClient.Variables()) + r, err := repositoryFactory(ctx, provider, configClient.Variables()) if err != nil { return nil, errors.Wrapf(err, "failed to get repository client for the %s with name %s", provider.Type(), provider.Name()) } @@ -160,14 +161,14 @@ type Repository interface { ComponentsPath() string // GetFile return a file for a given provider version. 
- GetFile(version string, path string) ([]byte, error) + GetFile(ctx context.Context, version string, path string) ([]byte, error) // GetVersions return the list of versions that are available in a provider repository - GetVersions() ([]string, error) + GetVersions(ctx context.Context) ([]string, error) } // repositoryFactory returns the repository implementation corresponding to the provider URL. -func repositoryFactory(providerConfig config.Provider, configVariablesClient config.VariablesClient) (Repository, error) { +func repositoryFactory(ctx context.Context, providerConfig config.Provider, configVariablesClient config.VariablesClient) (Repository, error) { // parse the repository url rURL, err := url.Parse(providerConfig.URL()) if err != nil { @@ -177,7 +178,7 @@ func repositoryFactory(providerConfig config.Provider, configVariablesClient con if rURL.Scheme == httpsScheme { // if the url is a GitHub repository if rURL.Host == githubDomain { - repo, err := NewGitHubRepository(providerConfig, configVariablesClient) + repo, err := NewGitHubRepository(ctx, providerConfig, configVariablesClient) if err != nil { return nil, errors.Wrap(err, "error creating the GitHub repository client") } @@ -198,7 +199,7 @@ func repositoryFactory(providerConfig config.Provider, configVariablesClient con // if the url is a local filesystem repository if rURL.Scheme == "file" || rURL.Scheme == "" { - repo, err := newLocalRepository(providerConfig, configVariablesClient) + repo, err := newLocalRepository(ctx, providerConfig, configVariablesClient) if err != nil { return nil, errors.Wrap(err, "error creating the local filesystem repository client") } diff --git a/cmd/clusterctl/client/repository/client_test.go b/cmd/clusterctl/client/repository/client_test.go index 69867e965e92..5de7d91d85b9 100644 --- a/cmd/clusterctl/client/repository/client_test.go +++ b/cmd/clusterctl/client/repository/client_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package repository import ( + "context" "os" "testing" @@ -31,13 +32,15 @@ import ( func Test_newRepositoryClient_LocalFileSystemRepository(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + tmpDir := createTempDir(t) defer os.RemoveAll(tmpDir) dst1 := createLocalTestProviderFile(t, tmpDir, "bootstrap-foo/v1.0.0/bootstrap-components.yaml", "") dst2 := createLocalTestProviderFile(t, tmpDir, "bootstrap-bar/v2.0.0/bootstrap-components.yaml", "") - configClient, err := config.New("", config.InjectReader(test.NewFakeReader())) + configClient, err := config.New(ctx, "", config.InjectReader(test.NewFakeReader())) g.Expect(err).ToNot(HaveOccurred()) type fields struct { @@ -81,7 +84,9 @@ func Test_newRepositoryClient_LocalFileSystemRepository(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) - repoClient, err := newRepositoryClient(tt.fields.provider, configClient) + ctx := context.Background() + + repoClient, err := newRepositoryClient(ctx, tt.fields.provider, configClient) gs.Expect(err).ToNot(HaveOccurred()) gs.Expect(repoClient.repository).To(BeAssignableToTypeOf(tt.expected)) @@ -126,13 +131,17 @@ func Test_newRepositoryClient_YamlProcessor(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + configProvider := config.NewProvider("fakeProvider", "", clusterctlv1.CoreProviderType) - configClient, err := config.New("", config.InjectReader(test.NewFakeReader())) + configClient, err := config.New(ctx, "", config.InjectReader(test.NewFakeReader())) g.Expect(err).ToNot(HaveOccurred()) tt.opts = append(tt.opts, InjectRepository(NewMemoryRepository())) repoClient, err := newRepositoryClient( + ctx, configProvider, configClient, tt.opts..., diff --git a/cmd/clusterctl/client/repository/clusterclass_client.go b/cmd/clusterctl/client/repository/clusterclass_client.go index afd0d8d60ebb..dbd595d0fc5e 100644 --- a/cmd/clusterctl/client/repository/clusterclass_client.go +++ 
b/cmd/clusterctl/client/repository/clusterclass_client.go @@ -17,6 +17,8 @@ limitations under the License. package repository import ( + "context" + "github.com/pkg/errors" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" @@ -27,7 +29,7 @@ import ( // ClusterClassClient has methods to work with cluster class templates hosted on a provider repository. // Templates are yaml files to be used for creating a guest cluster. type ClusterClassClient interface { - Get(name, targetNamespace string, skipTemplateProcess bool) (Template, error) + Get(ctx context.Context, name, targetNamespace string, skipTemplateProcess bool) (Template, error) } type clusterClassClient struct { @@ -57,7 +59,7 @@ func newClusterClassClient(input ClusterClassClientInput) *clusterClassClient { } } -func (cc *clusterClassClient) Get(name, targetNamespace string, skipTemplateProcess bool) (Template, error) { +func (cc *clusterClassClient) Get(ctx context.Context, name, targetNamespace string, skipTemplateProcess bool) (Template, error) { log := logf.Log if targetNamespace == "" { @@ -80,7 +82,7 @@ func (cc *clusterClassClient) Get(name, targetNamespace string, skipTemplateProc if rawArtifact == nil { log.V(5).Info("Fetching", "File", filename, "Provider", cc.provider.Name(), "Type", cc.provider.Type(), "Version", version) - rawArtifact, err = cc.repository.GetFile(version, filename) + rawArtifact, err = cc.repository.GetFile(ctx, version, filename) if err != nil { return nil, errors.Wrapf(err, "failed to read %q from provider's repository %q", filename, cc.provider.ManifestLabel()) } diff --git a/cmd/clusterctl/client/repository/clusterclass_client_test.go b/cmd/clusterctl/client/repository/clusterclass_client_test.go index ce770edab8a8..4ba742ce5626 100644 --- a/cmd/clusterctl/client/repository/clusterclass_client_test.go +++ b/cmd/clusterctl/client/repository/clusterclass_client_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package repository import ( + "context" "fmt" "testing" @@ -161,6 +162,8 @@ func Test_ClusterClassClient_Get(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + f := newClusterClassClient( ClusterClassClientInput{ version: tt.fields.version, @@ -170,7 +173,7 @@ func Test_ClusterClassClient_Get(t *testing.T) { processor: tt.fields.processor, }, ) - got, err := f.Get(tt.args.name, tt.args.targetNamespace, tt.args.listVariablesOnly) + got, err := f.Get(ctx, tt.args.name, tt.args.targetNamespace, tt.args.listVariablesOnly) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/repository/components_client.go b/cmd/clusterctl/client/repository/components_client.go index 107e4cd5547d..ca002a23dcbc 100644 --- a/cmd/clusterctl/client/repository/components_client.go +++ b/cmd/clusterctl/client/repository/components_client.go @@ -17,6 +17,8 @@ limitations under the License. package repository import ( + "context" + "github.com/pkg/errors" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" @@ -27,8 +29,8 @@ import ( // ComponentsClient has methods to work with yaml file for generating provider components. // Assets are yaml files to be used for deploying a provider into a management cluster. type ComponentsClient interface { - Raw(options ComponentsOptions) ([]byte, error) - Get(options ComponentsOptions) (Components, error) + Raw(ctx context.Context, options ComponentsOptions) ([]byte, error) + Get(ctx context.Context, options ComponentsOptions) (Components, error) } // componentsClient implements ComponentsClient. @@ -53,20 +55,20 @@ func newComponentsClient(provider config.Provider, repository Repository, config } // Raw returns the components from a repository. 
-func (f *componentsClient) Raw(options ComponentsOptions) ([]byte, error) { - return f.getRawBytes(&options) +func (f *componentsClient) Raw(ctx context.Context, options ComponentsOptions) ([]byte, error) { + return f.getRawBytes(ctx, &options) } // Get returns the components from a repository. -func (f *componentsClient) Get(options ComponentsOptions) (Components, error) { - file, err := f.getRawBytes(&options) +func (f *componentsClient) Get(ctx context.Context, options ComponentsOptions) (Components, error) { + file, err := f.getRawBytes(ctx, &options) if err != nil { return nil, err } return NewComponents(ComponentsInput{f.provider, f.configClient, f.processor, file, options}) } -func (f *componentsClient) getRawBytes(options *ComponentsOptions) ([]byte, error) { +func (f *componentsClient) getRawBytes(ctx context.Context, options *ComponentsOptions) ([]byte, error) { log := logf.Log // If the request does not target a specific version, read from the default repository version that is derived from the repository URL, e.g. latest. @@ -90,7 +92,7 @@ func (f *componentsClient) getRawBytes(options *ComponentsOptions) ([]byte, erro if file == nil { log.V(5).Info("Fetching", "File", path, "Provider", f.provider.Name(), "Type", f.provider.Type(), "Version", options.Version) - file, err = f.repository.GetFile(options.Version, path) + file, err = f.repository.GetFile(ctx, options.Version, path) if err != nil { return nil, errors.Wrapf(err, "failed to read %q from provider's repository %q", path, f.provider.ManifestLabel()) } diff --git a/cmd/clusterctl/client/repository/components_client_test.go b/cmd/clusterctl/client/repository/components_client_test.go index b502ec0b1290..48c476ea358f 100644 --- a/cmd/clusterctl/client/repository/components_client_test.go +++ b/cmd/clusterctl/client/repository/components_client_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package repository import ( + "context" "fmt" "testing" @@ -66,7 +67,7 @@ func Test_componentsClient_Get(t *testing.T) { p1 := config.NewProvider("p1", "", clusterctlv1.BootstrapProviderType) - configClient, err := config.New("", config.InjectReader(test.NewFakeReader().WithVar(variableName, variableValue))) + configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader().WithVar(variableName, variableValue))) g.Expect(err).ToNot(HaveOccurred()) type fields struct { @@ -259,6 +260,8 @@ func Test_componentsClient_Get(t *testing.T) { t.Run(tt.name, func(t *testing.T) { gs := NewWithT(t) + ctx := context.Background() + options := ComponentsOptions{ Version: tt.args.version, TargetNamespace: tt.args.targetNamespace, @@ -268,7 +271,7 @@ func Test_componentsClient_Get(t *testing.T) { if tt.fields.processor != nil { f.processor = tt.fields.processor } - got, err := f.Get(options) + got, err := f.Get(ctx, options) if tt.wantErr { gs.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/repository/metadata_client.go b/cmd/clusterctl/client/repository/metadata_client.go index 6a75e0580e4b..25d95098d91c 100644 --- a/cmd/clusterctl/client/repository/metadata_client.go +++ b/cmd/clusterctl/client/repository/metadata_client.go @@ -17,6 +17,8 @@ limitations under the License. package repository import ( + "context" + "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -33,7 +35,7 @@ const metadataFile = "metadata.yaml" // Metadata are yaml files providing additional information about provider's assets like e.g the version compatibility Matrix. type MetadataClient interface { // Get returns the provider's metadata. - Get() (*clusterctlv1.Metadata, error) + Get(ctx context.Context) (*clusterctlv1.Metadata, error) } // metadataClient implements MetadataClient. 
@@ -57,7 +59,7 @@ func newMetadataClient(provider config.Provider, version string, repository Repo } } -func (f *metadataClient) Get() (*clusterctlv1.Metadata, error) { +func (f *metadataClient) Get(ctx context.Context) (*clusterctlv1.Metadata, error) { log := logf.Log // gets the metadata file from the repository @@ -74,7 +76,7 @@ func (f *metadataClient) Get() (*clusterctlv1.Metadata, error) { } if file == nil { log.V(5).Info("Fetching", "File", metadataFile, "Provider", f.provider.Name(), "Type", f.provider.Type(), "Version", version) - file, err = f.repository.GetFile(version, metadataFile) + file, err = f.repository.GetFile(ctx, version, metadataFile) if err != nil { return nil, errors.Wrapf(err, "failed to read %q from the repository for provider %q", metadataFile, f.provider.ManifestLabel()) } diff --git a/cmd/clusterctl/client/repository/metadata_client_test.go b/cmd/clusterctl/client/repository/metadata_client_test.go index 7fd178877022..84ceb260aa34 100644 --- a/cmd/clusterctl/client/repository/metadata_client_test.go +++ b/cmd/clusterctl/client/repository/metadata_client_test.go @@ -17,6 +17,7 @@ limitations under the License. package repository import ( + "context" "testing" . 
"github.com/onsi/gomega" @@ -121,7 +122,7 @@ func Test_metadataClient_Get(t *testing.T) { version: tt.fields.version, repository: tt.fields.repository, } - got, err := f.Get() + got, err := f.Get(context.Background()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/repository/overrides_test.go b/cmd/clusterctl/client/repository/overrides_test.go index 523139edc30d..b3fc1ebb149c 100644 --- a/cmd/clusterctl/client/repository/overrides_test.go +++ b/cmd/clusterctl/client/repository/overrides_test.go @@ -99,6 +99,7 @@ func TestOverrides(t *testing.T) { func TestGetLocalOverrides(t *testing.T) { t.Run("returns contents of file successfully", func(t *testing.T) { g := NewWithT(t) + tmpDir := createTempDir(t) defer os.RemoveAll(tmpDir) diff --git a/cmd/clusterctl/client/repository/repository_github.go b/cmd/clusterctl/client/repository/repository_github.go index ab51f41fb3ab..66f389ac1d67 100644 --- a/cmd/clusterctl/client/repository/repository_github.go +++ b/cmd/clusterctl/client/repository/repository_github.go @@ -101,7 +101,7 @@ func (g *gitHubRepository) DefaultVersion() string { } // GetVersions returns the list of versions that are available in a provider repository. -func (g *gitHubRepository) GetVersions() ([]string, error) { +func (g *gitHubRepository) GetVersions(ctx context.Context) ([]string, error) { log := logf.Log cacheID := fmt.Sprintf("%s/%s", g.owner, g.repository) @@ -120,7 +120,7 @@ func (g *gitHubRepository) GetVersions() ([]string, error) { gomodulePath := path.Join(githubDomain, g.owner, g.repository) var parsedVersions semver.Versions - parsedVersions, err = goProxyClient.GetVersions(context.TODO(), gomodulePath) + parsedVersions, err = goProxyClient.GetVersions(ctx, gomodulePath) // Log the error before fallback to github repository client happens. 
if err != nil { @@ -134,7 +134,7 @@ func (g *gitHubRepository) GetVersions() ([]string, error) { // Fallback to github repository client if goProxyClient is nil or an error occurred. if goProxyClient == nil || err != nil { - versions, err = g.getVersions() + versions, err = g.getVersions(ctx) if err != nil { return nil, errors.Wrapf(err, "failed to get repository versions") } @@ -155,8 +155,8 @@ func (g *gitHubRepository) ComponentsPath() string { } // GetFile returns a file for a given provider version. -func (g *gitHubRepository) GetFile(version, path string) ([]byte, error) { - release, err := g.getReleaseByTag(version) +func (g *gitHubRepository) GetFile(ctx context.Context, version, path string) ([]byte, error) { + release, err := g.getReleaseByTag(ctx, version) if err != nil { if errors.Is(err, errNotFound) { // If it was ErrNotFound, then there is no release yet for the resolved tag. @@ -167,7 +167,7 @@ func (g *gitHubRepository) GetFile(version, path string) ([]byte, error) { } // Download files from the release. - files, err := g.downloadFilesFromRelease(release, path) + files, err := g.downloadFilesFromRelease(ctx, release, path) if err != nil { return nil, errors.Wrapf(err, "failed to download files from GitHub release %s", version) } @@ -176,7 +176,7 @@ func (g *gitHubRepository) GetFile(version, path string) ([]byte, error) { } // NewGitHubRepository returns a gitHubRepository implementation. 
-func NewGitHubRepository(providerConfig config.Provider, configVariablesClient config.VariablesClient, opts ...githubRepositoryOption) (Repository, error) { +func NewGitHubRepository(ctx context.Context, providerConfig config.Provider, configVariablesClient config.VariablesClient, opts ...githubRepositoryOption) (Repository, error) { if configVariablesClient == nil { return nil, errors.New("invalid arguments: configVariablesClient can't be nil") } @@ -228,11 +228,11 @@ func NewGitHubRepository(providerConfig config.Provider, configVariablesClient c } if token, err := configVariablesClient.Get(config.GitHubTokenVariable); err == nil { - repo.setClientToken(token) + repo.setClientToken(ctx, token) } if defaultVersion == githubLatestReleaseLabel { - repo.defaultVersion, err = latestContractRelease(repo, clusterv1.GroupVersion.Version) + repo.defaultVersion, err = latestContractRelease(ctx, repo, clusterv1.GroupVersion.Version) if err != nil { return nil, errors.Wrap(err, "failed to get latest release") } @@ -275,22 +275,22 @@ func (g *gitHubRepository) getGoproxyClient() (*goproxy.Client, error) { } // setClientToken sets authenticatingHTTPClient field of gitHubRepository struct. -func (g *gitHubRepository) setClientToken(token string) { +func (g *gitHubRepository) setClientToken(ctx context.Context, token string) { ts := oauth2.StaticTokenSource( &oauth2.Token{AccessToken: token}, ) - g.authenticatingHTTPClient = oauth2.NewClient(context.TODO(), ts) + g.authenticatingHTTPClient = oauth2.NewClient(ctx, ts) } // getVersions returns all the release versions for a github repository. -func (g *gitHubRepository) getVersions() ([]string, error) { +func (g *gitHubRepository) getVersions(ctx context.Context) ([]string, error) { client := g.getClient() // Get all the releases. // NB. 
currently Github API does not support result ordering, so it not possible to limit results var allReleases []*github.RepositoryRelease var retryError error - _ = wait.PollUntilContextTimeout(context.TODO(), retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) { + _ = wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) { var listReleasesErr error // Get the first page of GitHub releases. releases, response, listReleasesErr := client.Repositories.ListReleases(ctx, g.owner, g.repository, &github.ListOptions{PerPage: githubListReleasesPerPageLimit}) @@ -343,7 +343,7 @@ func (g *gitHubRepository) getVersions() ([]string, error) { } // getReleaseByTag returns the github repository release with a specific tag name. -func (g *gitHubRepository) getReleaseByTag(tag string) (*github.RepositoryRelease, error) { +func (g *gitHubRepository) getReleaseByTag(ctx context.Context, tag string) (*github.RepositoryRelease, error) { cacheID := fmt.Sprintf("%s/%s:%s", g.owner, g.repository, tag) if release, ok := cacheReleases[cacheID]; ok { return release, nil @@ -353,7 +353,7 @@ func (g *gitHubRepository) getReleaseByTag(tag string) (*github.RepositoryReleas var release *github.RepositoryRelease var retryError error - _ = wait.PollUntilContextTimeout(context.TODO(), retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) { + _ = wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) { var getReleasesErr error release, _, getReleasesErr = client.Repositories.GetReleaseByTag(ctx, g.owner, g.repository, tag) if getReleasesErr != nil { @@ -380,9 +380,7 @@ func (g *gitHubRepository) getReleaseByTag(tag string) (*github.RepositoryReleas } // downloadFilesFromRelease download a file from release. 
-func (g *gitHubRepository) downloadFilesFromRelease(release *github.RepositoryRelease, fileName string) ([]byte, error) { - ctx := context.TODO() - +func (g *gitHubRepository) downloadFilesFromRelease(ctx context.Context, release *github.RepositoryRelease, fileName string) ([]byte, error) { cacheID := fmt.Sprintf("%s/%s:%s:%s", g.owner, g.repository, *release.TagName, fileName) if content, ok := cacheFiles[cacheID]; ok { return content, nil diff --git a/cmd/clusterctl/client/repository/repository_github_test.go b/cmd/clusterctl/client/repository/repository_github_test.go index 68f902c7e29c..cf4781e43135 100644 --- a/cmd/clusterctl/client/repository/repository_github_test.go +++ b/cmd/clusterctl/client/repository/repository_github_test.go @@ -17,6 +17,7 @@ limitations under the License. package repository import ( + "context" "fmt" "net/http" "net/http/httptest" @@ -114,12 +115,15 @@ func Test_gitHubRepository_GetVersions(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + resetCaches() - gRepo, err := NewGitHubRepository(tt.providerConfig, configVariablesClient, injectGithubClient(client), injectGoproxyClient(clientGoproxy)) + gRepo, err := NewGitHubRepository(ctx, tt.providerConfig, configVariablesClient, injectGithubClient(client), injectGoproxyClient(clientGoproxy)) g.Expect(err).ToNot(HaveOccurred()) - got, err := gRepo.GetVersions() + got, err := gRepo.GetVersions(ctx) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -214,7 +218,7 @@ func Test_githubRepository_newGitHubRepository(t *testing.T) { g := NewWithT(t) resetCaches() - gitHub, err := NewGitHubRepository(tt.field.providerConfig, tt.field.variableClient) + gitHub, err := NewGitHubRepository(context.Background(), tt.field.providerConfig, tt.field.variableClient) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -316,10 +320,10 @@ func Test_githubRepository_getFile(t *testing.T) { g := NewWithT(t) 
resetCaches() - gitHub, err := NewGitHubRepository(providerConfig, configVariablesClient, injectGithubClient(client)) + gitHub, err := NewGitHubRepository(context.Background(), providerConfig, configVariablesClient, injectGithubClient(client)) g.Expect(err).ToNot(HaveOccurred()) - got, err := gitHub.GetFile(tt.release, tt.fileName) + got, err := gitHub.GetFile(context.Background(), tt.release, tt.fileName) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -392,12 +396,15 @@ func Test_gitHubRepository_getVersions(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + resetCaches() - gitHub, err := NewGitHubRepository(tt.field.providerConfig, configVariablesClient, injectGithubClient(client)) + gitHub, err := NewGitHubRepository(ctx, tt.field.providerConfig, configVariablesClient, injectGithubClient(client)) g.Expect(err).ToNot(HaveOccurred()) - got, err := gitHub.(*gitHubRepository).getVersions() + got, err := gitHub.(*gitHubRepository).getVersions(ctx) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -504,10 +511,10 @@ func Test_gitHubRepository_getLatestContractRelease(t *testing.T) { g := NewWithT(t) resetCaches() - gRepo, err := NewGitHubRepository(tt.field.providerConfig, configVariablesClient, injectGithubClient(client), injectGoproxyClient(clientGoproxy)) + gRepo, err := NewGitHubRepository(context.Background(), tt.field.providerConfig, configVariablesClient, injectGithubClient(client), injectGoproxyClient(clientGoproxy)) g.Expect(err).ToNot(HaveOccurred()) - got, err := latestContractRelease(gRepo, tt.contract) + got, err := latestContractRelease(context.Background(), gRepo, tt.contract) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -591,12 +598,15 @@ func Test_gitHubRepository_getLatestRelease(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + resetCaches() - gRepo, err := 
NewGitHubRepository(tt.field.providerConfig, configVariablesClient, injectGoproxyClient(clientGoproxy)) + gRepo, err := NewGitHubRepository(ctx, tt.field.providerConfig, configVariablesClient, injectGoproxyClient(clientGoproxy)) g.Expect(err).ToNot(HaveOccurred()) - got, err := latestRelease(gRepo) + got, err := latestRelease(ctx, gRepo) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -673,12 +683,15 @@ func Test_gitHubRepository_getLatestPatchRelease(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + resetCaches() - gRepo, err := NewGitHubRepository(tt.field.providerConfig, configVariablesClient, injectGoproxyClient(clientGoproxy)) + gRepo, err := NewGitHubRepository(ctx, tt.field.providerConfig, configVariablesClient, injectGoproxyClient(clientGoproxy)) g.Expect(err).ToNot(HaveOccurred()) - got, err := latestPatchRelease(gRepo, tt.major, tt.minor) + got, err := latestPatchRelease(ctx, gRepo, tt.major, tt.minor) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -734,12 +747,15 @@ func Test_gitHubRepository_getReleaseByTag(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + resetCaches() - gRepo, err := NewGitHubRepository(providerConfig, configVariablesClient, injectGithubClient(client)) + gRepo, err := NewGitHubRepository(ctx, providerConfig, configVariablesClient, injectGithubClient(client)) g.Expect(err).ToNot(HaveOccurred()) - got, err := gRepo.(*gitHubRepository).getReleaseByTag(tt.args.tag) + got, err := gRepo.(*gitHubRepository).getReleaseByTag(ctx, tt.args.tag) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -872,10 +888,10 @@ func Test_gitHubRepository_downloadFilesFromRelease(t *testing.T) { g := NewWithT(t) resetCaches() - gRepo, err := NewGitHubRepository(tt.providerConfig, configVariablesClient, injectGithubClient(client)) + gRepo, err := 
NewGitHubRepository(context.Background(), tt.providerConfig, configVariablesClient, injectGithubClient(client)) g.Expect(err).ToNot(HaveOccurred()) - got, err := gRepo.(*gitHubRepository).downloadFilesFromRelease(tt.args.release, tt.args.fileName) + got, err := gRepo.(*gitHubRepository).downloadFilesFromRelease(context.Background(), tt.args.release, tt.args.fileName) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/repository/repository_gitlab.go b/cmd/clusterctl/client/repository/repository_gitlab.go index 45265b56e721..9657e357d080 100644 --- a/cmd/clusterctl/client/repository/repository_gitlab.go +++ b/cmd/clusterctl/client/repository/repository_gitlab.go @@ -117,7 +117,7 @@ func (g *gitLabRepository) DefaultVersion() string { } // GetVersions returns the list of versions that are available in a provider repository. -func (g *gitLabRepository) GetVersions() ([]string, error) { +func (g *gitLabRepository) GetVersions(_ context.Context) ([]string, error) { // FIXME Get versions from GitLab API return []string{g.defaultVersion}, nil } @@ -133,8 +133,7 @@ func (g *gitLabRepository) ComponentsPath() string { } // GetFile returns a file for a given provider version. -func (g *gitLabRepository) GetFile(version, path string) ([]byte, error) { - ctx := context.TODO() +func (g *gitLabRepository) GetFile(ctx context.Context, version, path string) ([]byte, error) { url := fmt.Sprintf( "https://%s/api/v4/projects/%s/packages/generic/%s/%s/%s", g.host, diff --git a/cmd/clusterctl/client/repository/repository_gitlab_test.go b/cmd/clusterctl/client/repository/repository_gitlab_test.go index b8f4e0bc7aac..59b0a46b9d81 100644 --- a/cmd/clusterctl/client/repository/repository_gitlab_test.go +++ b/cmd/clusterctl/client/repository/repository_gitlab_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package repository import ( + "context" "fmt" "net/http" "net/http/httptest" @@ -195,7 +196,7 @@ func Test_gitLabRepository_getFile(t *testing.T) { gitLab.(*gitLabRepository).httpClient = client g.Expect(err).ToNot(HaveOccurred()) - got, err := gitLab.GetFile(tt.version, tt.fileName) + got, err := gitLab.GetFile(context.Background(), tt.version, tt.fileName) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/repository/repository_local.go b/cmd/clusterctl/client/repository/repository_local.go index 906bdb2c52ab..16ced216f888 100644 --- a/cmd/clusterctl/client/repository/repository_local.go +++ b/cmd/clusterctl/client/repository/repository_local.go @@ -17,6 +17,7 @@ limitations under the License. package repository import ( + "context" "net/url" "os" "path/filepath" @@ -82,11 +83,11 @@ func (r *localRepository) ComponentsPath() string { } // GetFile returns a file for a given provider version. -func (r *localRepository) GetFile(version, fileName string) ([]byte, error) { +func (r *localRepository) GetFile(ctx context.Context, version, fileName string) ([]byte, error) { var err error if version == latestVersionTag { - version, err = latestRelease(r) + version, err = latestRelease(ctx, r) if err != nil { return nil, errors.Wrapf(err, "failed to get the latest release") } @@ -111,7 +112,7 @@ func (r *localRepository) GetFile(version, fileName string) ([]byte, error) { } // GetVersions returns the list of versions that are available for a local repository. -func (r *localRepository) GetVersions() ([]string, error) { +func (r *localRepository) GetVersions(_ context.Context) ([]string, error) { // get all the sub-directories under {basepath}/{provider-id}/ releasesPath := filepath.Join(r.basepath, r.providerLabel) files, err := os.ReadDir(releasesPath) @@ -135,7 +136,7 @@ func (r *localRepository) GetVersions() ([]string, error) { } // newLocalRepository returns a new localRepository. 
-func newLocalRepository(providerConfig config.Provider, configVariablesClient config.VariablesClient) (*localRepository, error) { +func newLocalRepository(ctx context.Context, providerConfig config.Provider, configVariablesClient config.VariablesClient) (*localRepository, error) { url, err := url.Parse(providerConfig.URL()) if err != nil { return nil, errors.Wrap(err, "invalid url") @@ -189,7 +190,7 @@ func newLocalRepository(providerConfig config.Provider, configVariablesClient co } if defaultVersion == latestVersionTag { - repo.defaultVersion, err = latestContractRelease(repo, clusterv1.GroupVersion.Version) + repo.defaultVersion, err = latestContractRelease(ctx, repo, clusterv1.GroupVersion.Version) if err != nil { return nil, errors.Wrap(err, "failed to get latest version") } diff --git a/cmd/clusterctl/client/repository/repository_local_test.go b/cmd/clusterctl/client/repository/repository_local_test.go index a728e3e8c5aa..89d3040a9b64 100644 --- a/cmd/clusterctl/client/repository/repository_local_test.go +++ b/cmd/clusterctl/client/repository/repository_local_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package repository import ( + "context" "os" "path/filepath" "testing" @@ -108,7 +109,7 @@ func Test_localRepository_newLocalRepository(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := newLocalRepository(tt.fields.provider, tt.fields.configVariablesClient) + got, err := newLocalRepository(context.Background(), tt.fields.provider, tt.fields.configVariablesClient) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -165,7 +166,7 @@ func Test_localRepository_newLocalRepository_Latest(t *testing.T) { p2URLLatestAbs := filepath.Join(tmpDir, p2URLLatest) p2 := config.NewProvider("foo", p2URLLatestAbs, clusterctlv1.BootstrapProviderType) - got, err := newLocalRepository(p2, test.NewFakeVariableClient()) + got, err := newLocalRepository(context.Background(), p2, test.NewFakeVariableClient()) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got.basepath).To(Equal(tmpDir)) @@ -295,10 +296,10 @@ func Test_localRepository_GetFile(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - r, err := newLocalRepository(tt.fields.provider, tt.fields.configVariablesClient) + r, err := newLocalRepository(context.Background(), tt.fields.provider, tt.fields.configVariablesClient) g.Expect(err).ToNot(HaveOccurred()) - got, err := r.GetFile(tt.args.version, tt.args.fileName) + got, err := r.GetFile(context.Background(), tt.args.version, tt.args.fileName) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -370,10 +371,12 @@ func Test_localRepository_GetVersions(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - r, err := newLocalRepository(tt.fields.provider, tt.fields.configVariablesClient) + ctx := context.Background() + + r, err := newLocalRepository(ctx, tt.fields.provider, tt.fields.configVariablesClient) g.Expect(err).ToNot(HaveOccurred()) - got, err := r.GetVersions() + got, err := r.GetVersions(ctx) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git 
a/cmd/clusterctl/client/repository/repository_memory.go b/cmd/clusterctl/client/repository/repository_memory.go index 2d340a7e7c1f..e1549ebb6b87 100644 --- a/cmd/clusterctl/client/repository/repository_memory.go +++ b/cmd/clusterctl/client/repository/repository_memory.go @@ -17,6 +17,7 @@ limitations under the License. package repository import ( + "context" "fmt" "github.com/pkg/errors" @@ -73,13 +74,13 @@ func (f *MemoryRepository) ComponentsPath() string { // GetFile returns a file for a given provider version. // NOTE: If the provided version is missing, the default version is used. -func (f *MemoryRepository) GetFile(version string, path string) ([]byte, error) { +func (f *MemoryRepository) GetFile(ctx context.Context, version string, path string) ([]byte, error) { if version == "" { version = f.DefaultVersion() } if version == latestVersionTag { var err error - version, err = latestContractRelease(f, clusterv1.GroupVersion.Version) + version, err = latestContractRelease(ctx, f, clusterv1.GroupVersion.Version) if err != nil { return nil, err } @@ -97,7 +98,7 @@ func (f *MemoryRepository) GetFile(version string, path string) ([]byte, error) } // GetVersions returns the list of versions that are available. -func (f *MemoryRepository) GetVersions() ([]string, error) { +func (f *MemoryRepository) GetVersions(_ context.Context) ([]string, error) { v := make([]string, 0, len(f.versions)) for k := range f.versions { v = append(v, k) diff --git a/cmd/clusterctl/client/repository/repository_memory_test.go b/cmd/clusterctl/client/repository/repository_memory_test.go index da03eba48a13..baa7828262eb 100644 --- a/cmd/clusterctl/client/repository/repository_memory_test.go +++ b/cmd/clusterctl/client/repository/repository_memory_test.go @@ -17,6 +17,7 @@ limitations under the License. package repository import ( + "context" "testing" . 
"github.com/onsi/gomega" @@ -124,14 +125,16 @@ releaseSeries: t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + r := tt.repo g.Expect(r.RootPath()).To(Equal("")) - g.Expect(r.GetFile(r.DefaultVersion(), r.ComponentsPath())).To(Equal(tt.want.defaultVersion)) - g.Expect(r.GetFile("", r.ComponentsPath())).To(Equal(tt.want.defaultVersion)) - g.Expect(r.GetFile("latest", r.ComponentsPath())).To(Equal(tt.want.latestVersion)) + g.Expect(r.GetFile(ctx, r.DefaultVersion(), r.ComponentsPath())).To(Equal(tt.want.defaultVersion)) + g.Expect(r.GetFile(ctx, "", r.ComponentsPath())).To(Equal(tt.want.defaultVersion)) + g.Expect(r.GetFile(ctx, "latest", r.ComponentsPath())).To(Equal(tt.want.latestVersion)) - got, err := r.GetVersions() + got, err := r.GetVersions(ctx) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(ConsistOf(tt.want.versions)) }) diff --git a/cmd/clusterctl/client/repository/repository_versions.go b/cmd/clusterctl/client/repository/repository_versions.go index 7fe9349ffc7a..3f2c5497d2ce 100644 --- a/cmd/clusterctl/client/repository/repository_versions.go +++ b/cmd/clusterctl/client/repository/repository_versions.go @@ -17,6 +17,8 @@ limitations under the License. package repository import ( + "context" + "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -32,14 +34,14 @@ const ( // latestContractRelease returns the latest patch release for a repository for the current API contract, according to // semantic version order of the release tag name. 
-func latestContractRelease(repo Repository, contract string) (string, error) { - latest, err := latestRelease(repo) +func latestContractRelease(ctx context.Context, repo Repository, contract string) (string, error) { + latest, err := latestRelease(ctx, repo) if err != nil { return latest, err } // Attempt to check if the latest release satisfies the API Contract // This is a best-effort attempt to find the latest release for an older API contract if it's not the latest release. - file, err := repo.GetFile(latest, metadataFile) + file, err := repo.GetFile(ctx, latest, metadataFile) // If an error occurs, we just return the latest release. if err != nil { if errors.Is(err, errNotFound) { @@ -69,20 +71,20 @@ func latestContractRelease(repo Repository, contract string) (string, error) { // If the Major or Minor version of the latest release doesn't match the release series for the current contract, // return the latest patch release of the desired Major/Minor version. if sv.Major() != releaseSeries.Major || sv.Minor() != releaseSeries.Minor { - return latestPatchRelease(repo, &releaseSeries.Major, &releaseSeries.Minor) + return latestPatchRelease(ctx, repo, &releaseSeries.Major, &releaseSeries.Minor) } return latest, nil } // latestRelease returns the latest release for a repository, according to // semantic version order of the release tag name. -func latestRelease(repo Repository) (string, error) { - return latestPatchRelease(repo, nil, nil) +func latestRelease(ctx context.Context, repo Repository) (string, error) { + return latestPatchRelease(ctx, repo, nil, nil) } // latestPatchRelease returns the latest patch release for a given Major and Minor version. 
-func latestPatchRelease(repo Repository, major, minor *uint) (string, error) { - versions, err := repo.GetVersions() +func latestPatchRelease(ctx context.Context, repo Repository, major, minor *uint) (string, error) { + versions, err := repo.GetVersions(ctx) if err != nil { return "", errors.Wrapf(err, "failed to get repository versions") } diff --git a/cmd/clusterctl/client/repository/template_client.go b/cmd/clusterctl/client/repository/template_client.go index 26b50c60e74e..a427c47086a1 100644 --- a/cmd/clusterctl/client/repository/template_client.go +++ b/cmd/clusterctl/client/repository/template_client.go @@ -17,6 +17,8 @@ limitations under the License. package repository import ( + "context" + "github.com/pkg/errors" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" @@ -27,7 +29,7 @@ import ( // TemplateClient has methods to work with cluster templates hosted on a provider repository. // Templates are yaml files to be used for creating a guest cluster. type TemplateClient interface { - Get(flavor, targetNamespace string, listVariablesOnly bool) (Template, error) + Get(ctx context.Context, flavor, targetNamespace string, listVariablesOnly bool) (Template, error) } // templateClient implements TemplateClient. @@ -66,7 +68,7 @@ func newTemplateClient(input TemplateClientInput) *templateClient { // Get return the template for the flavor specified. // In case the template does not exists, an error is returned. // Get assumes the following naming convention for templates: cluster-template[-].yaml. 
-func (c *templateClient) Get(flavor, targetNamespace string, skipTemplateProcess bool) (Template, error) { +func (c *templateClient) Get(ctx context.Context, flavor, targetNamespace string, skipTemplateProcess bool) (Template, error) { log := logf.Log if targetNamespace == "" { @@ -89,7 +91,7 @@ func (c *templateClient) Get(flavor, targetNamespace string, skipTemplateProcess if rawArtifact == nil { log.V(5).Info("Fetching", "File", name, "Provider", c.provider.Name(), "Type", c.provider.Type(), "Version", version) - rawArtifact, err = c.repository.GetFile(version, name) + rawArtifact, err = c.repository.GetFile(ctx, version, name) if err != nil { return nil, errors.Wrapf(err, "failed to read %q from provider's repository %q", name, c.provider.ManifestLabel()) } diff --git a/cmd/clusterctl/client/repository/template_client_test.go b/cmd/clusterctl/client/repository/template_client_test.go index 84ca38d520a9..34bd6fa2f87a 100644 --- a/cmd/clusterctl/client/repository/template_client_test.go +++ b/cmd/clusterctl/client/repository/template_client_test.go @@ -17,6 +17,7 @@ limitations under the License. package repository import ( + "context" "fmt" "testing" @@ -193,7 +194,7 @@ func Test_templates_Get(t *testing.T) { processor: tt.fields.processor, }, ) - got, err := f.Get(tt.args.flavor, tt.args.targetNamespace, tt.args.listVariablesOnly) + got, err := f.Get(context.Background(), tt.args.flavor, tt.args.targetNamespace, tt.args.listVariablesOnly) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/rollout.go b/cmd/clusterctl/client/rollout.go index c69ef2edab4b..d30dc3665294 100644 --- a/cmd/clusterctl/client/rollout.go +++ b/cmd/clusterctl/client/rollout.go @@ -17,6 +17,7 @@ limitations under the License. 
package client import ( + "context" "fmt" "strings" @@ -85,7 +86,7 @@ type RolloutUndoOptions struct { ToRevision int64 } -func (c *clusterctlClient) RolloutRestart(options RolloutRestartOptions) error { +func (c *clusterctlClient) RolloutRestart(ctx context.Context, options RolloutRestartOptions) error { clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return err @@ -95,14 +96,14 @@ func (c *clusterctlClient) RolloutRestart(options RolloutRestartOptions) error { return err } for _, ref := range objRefs { - if err := c.alphaClient.Rollout().ObjectRestarter(clusterClient.Proxy(), ref); err != nil { + if err := c.alphaClient.Rollout().ObjectRestarter(ctx, clusterClient.Proxy(), ref); err != nil { return err } } return nil } -func (c *clusterctlClient) RolloutPause(options RolloutPauseOptions) error { +func (c *clusterctlClient) RolloutPause(ctx context.Context, options RolloutPauseOptions) error { clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return err @@ -112,14 +113,14 @@ func (c *clusterctlClient) RolloutPause(options RolloutPauseOptions) error { return err } for _, ref := range objRefs { - if err := c.alphaClient.Rollout().ObjectPauser(clusterClient.Proxy(), ref); err != nil { + if err := c.alphaClient.Rollout().ObjectPauser(ctx, clusterClient.Proxy(), ref); err != nil { return err } } return nil } -func (c *clusterctlClient) RolloutResume(options RolloutResumeOptions) error { +func (c *clusterctlClient) RolloutResume(ctx context.Context, options RolloutResumeOptions) error { clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return err @@ -129,14 +130,14 @@ func (c *clusterctlClient) RolloutResume(options RolloutResumeOptions) error { return err } for _, ref := range objRefs { - if err := c.alphaClient.Rollout().ObjectResumer(clusterClient.Proxy(), 
ref); err != nil { + if err := c.alphaClient.Rollout().ObjectResumer(ctx, clusterClient.Proxy(), ref); err != nil { return err } } return nil } -func (c *clusterctlClient) RolloutUndo(options RolloutUndoOptions) error { +func (c *clusterctlClient) RolloutUndo(ctx context.Context, options RolloutUndoOptions) error { clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return err @@ -146,7 +147,7 @@ func (c *clusterctlClient) RolloutUndo(options RolloutUndoOptions) error { return err } for _, ref := range objRefs { - if err := c.alphaClient.Rollout().ObjectRollbacker(clusterClient.Proxy(), ref, options.ToRevision); err != nil { + if err := c.alphaClient.Rollout().ObjectRollbacker(ctx, clusterClient.Proxy(), ref, options.ToRevision); err != nil { return err } } diff --git a/cmd/clusterctl/client/rollout_test.go b/cmd/clusterctl/client/rollout_test.go index c2fe26d199ca..01f36f23e538 100644 --- a/cmd/clusterctl/client/rollout_test.go +++ b/cmd/clusterctl/client/rollout_test.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "testing" . "github.com/onsi/gomega" @@ -51,7 +52,10 @@ func fakeClientForRollout() *fakeClient { Name: "md-2", }, } - config1 := newFakeConfig(). + + ctx := context.Background() + + config1 := newFakeConfig(ctx). WithProvider(core). WithProvider(infra) @@ -61,7 +65,7 @@ func fakeClientForRollout() *fakeClient { WithObjs(md1). WithObjs(md2) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). 
WithCluster(cluster1) return client @@ -169,7 +173,9 @@ func Test_clusterctlClient_RolloutRestart(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.RolloutRestart(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.RolloutRestart(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -252,7 +258,9 @@ func Test_clusterctlClient_RolloutPause(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.RolloutPause(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.RolloutPause(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -335,7 +343,9 @@ func Test_clusterctlClient_RolloutResume(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.RolloutResume(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.RolloutResume(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/topology.go b/cmd/clusterctl/client/topology.go index 8e65bb3547db..648f7a92aa89 100644 --- a/cmd/clusterctl/client/topology.go +++ b/cmd/clusterctl/client/topology.go @@ -17,6 +17,8 @@ limitations under the License. package client import ( + "context" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" @@ -46,13 +48,13 @@ type TopologyPlanOutput = cluster.TopologyPlanOutput // TopologyPlan performs a dry run execution of the topology reconciler using the given inputs. // It returns a summary of the changes observed during the execution. 
-func (c *clusterctlClient) TopologyPlan(options TopologyPlanOptions) (*TopologyPlanOutput, error) { +func (c *clusterctlClient) TopologyPlan(ctx context.Context, options TopologyPlanOptions) (*TopologyPlanOutput, error) { clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { return nil, err } - out, err := clusterClient.Topology().Plan(&cluster.TopologyPlanInput{ + out, err := clusterClient.Topology().Plan(ctx, &cluster.TopologyPlanInput{ Objs: options.Objs, TargetClusterName: options.Cluster, TargetNamespace: options.Namespace, diff --git a/cmd/clusterctl/client/upgrade.go b/cmd/clusterctl/client/upgrade.go index 53e6c875267c..f7140e5338fa 100644 --- a/cmd/clusterctl/client/upgrade.go +++ b/cmd/clusterctl/client/upgrade.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "strings" "time" @@ -38,7 +39,7 @@ type PlanUpgradeOptions struct { Kubeconfig Kubeconfig } -func (c *clusterctlClient) PlanCertManagerUpgrade(options PlanUpgradeOptions) (CertManagerUpgradePlan, error) { +func (c *clusterctlClient) PlanCertManagerUpgrade(ctx context.Context, options PlanUpgradeOptions) (CertManagerUpgradePlan, error) { // Get the client for interacting with the management cluster. cluster, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { @@ -46,11 +47,11 @@ func (c *clusterctlClient) PlanCertManagerUpgrade(options PlanUpgradeOptions) (C } certManager := cluster.CertManager() - plan, err := certManager.PlanUpgrade() + plan, err := certManager.PlanUpgrade(ctx) return CertManagerUpgradePlan(plan), err } -func (c *clusterctlClient) PlanUpgrade(options PlanUpgradeOptions) ([]UpgradePlan, error) { +func (c *clusterctlClient) PlanUpgrade(ctx context.Context, options PlanUpgradeOptions) ([]UpgradePlan, error) { // Get the client for interacting with the management cluster. 
clusterClient, err := c.clusterClientFactory(ClusterClientFactoryInput{Kubeconfig: options.Kubeconfig}) if err != nil { @@ -61,6 +62,7 @@ func (c *clusterctlClient) PlanUpgrade(options PlanUpgradeOptions) ([]UpgradePla // NOTE: given that v1beta1 (current) and v1alpha4 (previous) does not have breaking changes, we support also upgrades from v1alpha3 to v1beta1; // this is an exception and support for skipping releases should be removed in future releases. if err := clusterClient.ProviderInventory().CheckCAPIContract( + ctx, cluster.AllowCAPIContract{Contract: clusterv1alpha3.GroupVersion.Version}, cluster.AllowCAPIContract{Contract: clusterv1alpha4.GroupVersion.Version}, ); err != nil { @@ -68,11 +70,11 @@ func (c *clusterctlClient) PlanUpgrade(options PlanUpgradeOptions) ([]UpgradePla } // Ensures the custom resource definitions required by clusterctl are in place. - if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { + if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(ctx); err != nil { return nil, err } - upgradePlans, err := clusterClient.ProviderUpgrader().Plan() + upgradePlans, err := clusterClient.ProviderUpgrader().Plan(ctx) if err != nil { return nil, err } @@ -131,7 +133,7 @@ type ApplyUpgradeOptions struct { WaitProviderTimeout time.Duration } -func (c *clusterctlClient) ApplyUpgrade(options ApplyUpgradeOptions) error { +func (c *clusterctlClient) ApplyUpgrade(ctx context.Context, options ApplyUpgradeOptions) error { if options.Contract != "" && options.Contract != clusterv1.GroupVersion.Version { return errors.Errorf("current version of clusterctl could only upgrade to %s contract, requested %s", clusterv1.GroupVersion.Version, options.Contract) } @@ -152,6 +154,7 @@ func (c *clusterctlClient) ApplyUpgrade(options ApplyUpgradeOptions) error { // NOTE: given that v1beta1 (current) and v1alpha4 (previous) does not have breaking changes, we support also upgrades from v1alpha3 to v1beta1; // 
this is an exception and support for skipping releases should be removed in future releases. if err := clusterClient.ProviderInventory().CheckCAPIContract( + ctx, cluster.AllowCAPIContract{Contract: clusterv1alpha3.GroupVersion.Version}, cluster.AllowCAPIContract{Contract: clusterv1alpha4.GroupVersion.Version}, ); err != nil { @@ -159,7 +162,7 @@ func (c *clusterctlClient) ApplyUpgrade(options ApplyUpgradeOptions) error { } // Ensures the custom resource definitions required by clusterctl are in place. - if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(); err != nil { + if err := clusterClient.ProviderInventory().EnsureCustomResourceDefinitions(ctx); err != nil { return err } @@ -168,7 +171,7 @@ func (c *clusterctlClient) ApplyUpgrade(options ApplyUpgradeOptions) error { // conversion web-hooks around Issuer/Certificate kinds, so installing an older versions of providers // should continue to work with the latest cert-manager. certManager := clusterClient.CertManager() - if err := certManager.EnsureLatestVersion(); err != nil { + if err := certManager.EnsureLatestVersion(ctx); err != nil { return err } @@ -192,47 +195,47 @@ func (c *clusterctlClient) ApplyUpgrade(options ApplyUpgradeOptions) error { upgradeItems := []cluster.UpgradeItem{} if options.CoreProvider != "" { - upgradeItems, err = addUpgradeItems(clusterClient, upgradeItems, clusterctlv1.CoreProviderType, options.CoreProvider) + upgradeItems, err = addUpgradeItems(ctx, clusterClient, upgradeItems, clusterctlv1.CoreProviderType, options.CoreProvider) if err != nil { return err } } - upgradeItems, err = addUpgradeItems(clusterClient, upgradeItems, clusterctlv1.BootstrapProviderType, options.BootstrapProviders...) + upgradeItems, err = addUpgradeItems(ctx, clusterClient, upgradeItems, clusterctlv1.BootstrapProviderType, options.BootstrapProviders...) 
if err != nil { return err } - upgradeItems, err = addUpgradeItems(clusterClient, upgradeItems, clusterctlv1.ControlPlaneProviderType, options.ControlPlaneProviders...) + upgradeItems, err = addUpgradeItems(ctx, clusterClient, upgradeItems, clusterctlv1.ControlPlaneProviderType, options.ControlPlaneProviders...) if err != nil { return err } - upgradeItems, err = addUpgradeItems(clusterClient, upgradeItems, clusterctlv1.InfrastructureProviderType, options.InfrastructureProviders...) + upgradeItems, err = addUpgradeItems(ctx, clusterClient, upgradeItems, clusterctlv1.InfrastructureProviderType, options.InfrastructureProviders...) if err != nil { return err } - upgradeItems, err = addUpgradeItems(clusterClient, upgradeItems, clusterctlv1.IPAMProviderType, options.IPAMProviders...) + upgradeItems, err = addUpgradeItems(ctx, clusterClient, upgradeItems, clusterctlv1.IPAMProviderType, options.IPAMProviders...) if err != nil { return err } - upgradeItems, err = addUpgradeItems(clusterClient, upgradeItems, clusterctlv1.RuntimeExtensionProviderType, options.RuntimeExtensionProviders...) + upgradeItems, err = addUpgradeItems(ctx, clusterClient, upgradeItems, clusterctlv1.RuntimeExtensionProviderType, options.RuntimeExtensionProviders...) if err != nil { return err } - upgradeItems, err = addUpgradeItems(clusterClient, upgradeItems, clusterctlv1.AddonProviderType, options.AddonProviders...) + upgradeItems, err = addUpgradeItems(ctx, clusterClient, upgradeItems, clusterctlv1.AddonProviderType, options.AddonProviders...) if err != nil { return err } // Execute the upgrade using the custom upgrade items - return clusterClient.ProviderUpgrader().ApplyCustomPlan(opts, upgradeItems...) + return clusterClient.ProviderUpgrader().ApplyCustomPlan(ctx, opts, upgradeItems...) } // Otherwise we are upgrading a whole management cluster according to a clusterctl generated upgrade plan. 
- return clusterClient.ProviderUpgrader().ApplyPlan(opts, options.Contract) + return clusterClient.ProviderUpgrader().ApplyPlan(ctx, opts, options.Contract) } -func addUpgradeItems(clusterClient cluster.Client, upgradeItems []cluster.UpgradeItem, providerType clusterctlv1.ProviderType, providers ...string) ([]cluster.UpgradeItem, error) { +func addUpgradeItems(ctx context.Context, clusterClient cluster.Client, upgradeItems []cluster.UpgradeItem, providerType clusterctlv1.ProviderType, providers ...string) ([]cluster.UpgradeItem, error) { for _, upgradeReference := range providers { - providerUpgradeItem, err := parseUpgradeItem(clusterClient, upgradeReference, providerType) + providerUpgradeItem, err := parseUpgradeItem(ctx, clusterClient, upgradeReference, providerType) if err != nil { return nil, err } @@ -244,7 +247,7 @@ func addUpgradeItems(clusterClient cluster.Client, upgradeItems []cluster.Upgrad return upgradeItems, nil } -func parseUpgradeItem(clusterClient cluster.Client, ref string, providerType clusterctlv1.ProviderType) (*cluster.UpgradeItem, error) { +func parseUpgradeItem(ctx context.Context, clusterClient cluster.Client, ref string, providerType clusterctlv1.ProviderType) (*cluster.UpgradeItem, error) { // TODO(oscr) Remove when explicit namespaces for providers is removed // ref format is old format: namespace/provider:version if strings.Contains(ref, "/") { @@ -252,7 +255,7 @@ func parseUpgradeItem(clusterClient cluster.Client, ref string, providerType clu } // ref format is: provider:version - return parseUpgradeItemWithoutNamespace(clusterClient, ref, providerType) + return parseUpgradeItemWithoutNamespace(ctx, clusterClient, ref, providerType) } func parseUpgradeItemWithNamespace(ref string, providerType clusterctlv1.ProviderType) (*cluster.UpgradeItem, error) { @@ -288,7 +291,7 @@ func parseUpgradeItemWithNamespace(ref string, providerType clusterctlv1.Provide }, nil } -func parseUpgradeItemWithoutNamespace(clusterClient cluster.Client, ref 
string, providerType clusterctlv1.ProviderType) (*cluster.UpgradeItem, error) { +func parseUpgradeItemWithoutNamespace(ctx context.Context, clusterClient cluster.Client, ref string, providerType clusterctlv1.ProviderType) (*cluster.UpgradeItem, error) { if !strings.Contains(ref, ":") { return nil, errors.Errorf(upgradeItemProviderNameError, ref) } @@ -298,7 +301,7 @@ func parseUpgradeItemWithoutNamespace(clusterClient cluster.Client, ref string, return nil, errors.Wrapf(err, upgradeItemProviderNameError, ref) } - namespace, err := clusterClient.ProviderInventory().GetProviderNamespace(name, providerType) + namespace, err := clusterClient.ProviderInventory().GetProviderNamespace(ctx, name, providerType) if err != nil { return nil, errors.Errorf("unable to find default namespace for provider %q", ref) } diff --git a/cmd/clusterctl/client/upgrade_test.go b/cmd/clusterctl/client/upgrade_test.go index 22eaa7a0d924..7d0ddbe44405 100644 --- a/cmd/clusterctl/client/upgrade_test.go +++ b/cmd/clusterctl/client/upgrade_test.go @@ -17,6 +17,7 @@ limitations under the License. package client import ( + "context" "sort" "testing" @@ -35,13 +36,15 @@ func Test_clusterctlClient_PlanCertUpgrade(t *testing.T) { // create a fake config with a provider named P1 and a variable named var repository1Config := config.NewProvider("p1", "url", clusterctlv1.CoreProviderType) - config1 := newFakeConfig(). + ctx := context.Background() + + config1 := newFakeConfig(ctx). WithVar("var", "value"). WithProvider(repository1Config) // create a fake repository with some YAML files in it (usually matching // the list of providers defined in the config) - repository1 := newFakeRepository(repository1Config, config1). + repository1 := newFakeRepository(ctx, repository1Config, config1). WithPaths("root", "components"). WithDefaultVersion("v1.0"). 
WithFile("v1.0", "components.yaml", []byte("content")) @@ -56,7 +59,7 @@ func Test_clusterctlClient_PlanCertUpgrade(t *testing.T) { cluster1 := newFakeCluster(cluster.Kubeconfig{Path: "cluster1"}, config1). WithCertManagerClient(newFakeCertManagerClient(nil, nil).WithCertManagerPlan(certManagerPlan)) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithRepository(repository1). WithCluster(cluster1) @@ -75,10 +78,13 @@ func Test_clusterctlClient_PlanCertUpgrade(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + options := PlanUpgradeOptions{ Kubeconfig: Kubeconfig{Path: "cluster1"}, } - actualPlan, err := tt.client.PlanCertManagerUpgrade(options) + actualPlan, err := tt.client.PlanCertManagerUpgrade(ctx, options) if tt.expectErr { g.Expect(err).To(HaveOccurred()) g.Expect(actualPlan).To(BeComparableTo(CertManagerUpgradePlan{})) @@ -133,7 +139,9 @@ func Test_clusterctlClient_PlanUpgrade(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - _, err := tt.fields.client.PlanUpgrade(tt.args.options) + ctx := context.Background() + + _, err := tt.fields.client.PlanUpgrade(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -274,7 +282,9 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - err := tt.fields.client.ApplyUpgrade(tt.args.options) + ctx := context.Background() + + err := tt.fields.client.ApplyUpgrade(ctx, tt.args.options) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -309,11 +319,13 @@ func fakeClientForUpgrade() *fakeClient { core := config.NewProvider("cluster-api", "https://somewhere.com", clusterctlv1.CoreProviderType) infra := config.NewProvider("infra", "https://somewhere.com", clusterctlv1.InfrastructureProviderType) - config1 := newFakeConfig(). + ctx := context.Background() + + config1 := newFakeConfig(ctx). WithProvider(core). 
WithProvider(infra) - repository1 := newFakeRepository(core, config1). + repository1 := newFakeRepository(ctx, core, config1). WithPaths("root", "components.yaml"). WithDefaultVersion("v1.0.1"). WithFile("v1.0.1", "components.yaml", componentsYAML("ns2")). @@ -323,7 +335,7 @@ func fakeClientForUpgrade() *fakeClient { {Major: 1, Minor: 0, Contract: test.CurrentCAPIContract}, }, }) - repository2 := newFakeRepository(infra, config1). + repository2 := newFakeRepository(ctx, infra, config1). WithPaths("root", "components.yaml"). WithDefaultVersion("v2.0.0"). WithFile("v2.0.1", "components.yaml", componentsYAML("ns2")). @@ -341,7 +353,7 @@ func fakeClientForUpgrade() *fakeClient { WithProviderInventory(infra.Name(), infra.Type(), "v2.0.0", "infra-system"). WithObjs(test.FakeCAPISetupObjects()...) - client := newFakeClient(config1). + client := newFakeClient(ctx, config1). WithRepository(repository1). WithRepository(repository2). WithCluster(cluster1) @@ -376,7 +388,9 @@ func Test_parseUpgradeItem(t *testing.T) { provider string } - configClient := newFakeConfig() + ctx := context.Background() + + configClient := newFakeConfig(ctx) clusterClient := newFakeCluster(cluster.Kubeconfig{Path: "cluster1"}, configClient) clusterClient.WithProviderInventory("best-provider", clusterctlv1.CoreProviderType, "v1.0.0", "best-provider-system") @@ -469,7 +483,9 @@ func Test_parseUpgradeItem(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := parseUpgradeItem(clusterClient, tt.args.provider, clusterctlv1.CoreProviderType) + ctx := context.Background() + + got, err := parseUpgradeItem(ctx, clusterClient, tt.args.provider, clusterctlv1.CoreProviderType) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/cmd/completion.go b/cmd/clusterctl/cmd/completion.go index 3501781d104d..11812b0ad96d 100644 --- a/cmd/clusterctl/cmd/completion.go +++ b/cmd/clusterctl/cmd/completion.go @@ -18,6 +18,7 @@ package cmd import ( "bytes" + 
"context" "fmt" "io" "os" @@ -153,7 +154,7 @@ func runCompletionZsh(out io.Writer, cmd *cobra.Command) error { func contextCompletionFunc(kubeconfigFlag *pflag.Flag) func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - configClient, err := config.New(cfgFile) + configClient, err := config.New(context.Background(), cfgFile) if err != nil { return completionError(err) } @@ -170,7 +171,9 @@ func contextCompletionFunc(kubeconfigFlag *pflag.Flag) func(cmd *cobra.Command, func resourceNameCompletionFunc(kubeconfigFlag, contextFlag, namespaceFlag *pflag.Flag, groupVersion, kind string) func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - configClient, err := config.New(cfgFile) + ctx := context.Background() + + configClient, err := config.New(ctx, cfgFile) if err != nil { return completionError(err) } @@ -189,7 +192,7 @@ func resourceNameCompletionFunc(kubeconfigFlag, contextFlag, namespaceFlag *pfla } } - comps, err := clusterClient.Proxy().GetResourceNames(groupVersion, kind, []client.ListOption{client.InNamespace(namespace)}, toComplete) + comps, err := clusterClient.Proxy().GetResourceNames(ctx, groupVersion, kind, []client.ListOption{client.InNamespace(namespace)}, toComplete) if err != nil { return completionError(err) } diff --git a/cmd/clusterctl/cmd/config_repositories.go b/cmd/clusterctl/cmd/config_repositories.go index 0531e50130c7..8e780c390fcd 100644 --- a/cmd/clusterctl/cmd/config_repositories.go +++ b/cmd/clusterctl/cmd/config_repositories.go @@ -17,6 +17,7 @@ limitations under the License. 
package cmd import ( + "context" "fmt" "io" "os" @@ -85,7 +86,9 @@ func runGetRepositories(cfgFile string, out io.Writer) error { return errors.New("unable to print to nil output writer") } - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } diff --git a/cmd/clusterctl/cmd/delete.go b/cmd/clusterctl/cmd/delete.go index 9d512fc8a999..99e135a26910 100644 --- a/cmd/clusterctl/cmd/delete.go +++ b/cmd/clusterctl/cmd/delete.go @@ -17,6 +17,8 @@ limitations under the License. package cmd import ( + "context" + "github.com/pkg/errors" "github.com/spf13/cobra" @@ -120,7 +122,9 @@ func init() { } func runDelete() error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -141,7 +145,7 @@ func runDelete() error { return errors.New("At least one of --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension, --addon should be specified or the --all flag should be set") } - return c.Delete(client.DeleteOptions{ + return c.Delete(ctx, client.DeleteOptions{ Kubeconfig: client.Kubeconfig{Path: dd.kubeconfig, Context: dd.kubeconfigContext}, IncludeNamespace: dd.includeNamespace, IncludeCRDs: dd.includeCRDs, diff --git a/cmd/clusterctl/cmd/describe_cluster.go b/cmd/clusterctl/cmd/describe_cluster.go index b73cdf3e75d0..fe11d2135697 100644 --- a/cmd/clusterctl/cmd/describe_cluster.go +++ b/cmd/clusterctl/cmd/describe_cluster.go @@ -17,6 +17,7 @@ limitations under the License. 
package cmd import ( + "context" "fmt" "os" "sort" @@ -146,12 +147,14 @@ func init() { } func runDescribeCluster(cmd *cobra.Command, name string) error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } - tree, err := c.DescribeCluster(client.DescribeClusterOptions{ + tree, err := c.DescribeCluster(ctx, client.DescribeClusterOptions{ Kubeconfig: client.Kubeconfig{Path: dc.kubeconfig, Context: dc.kubeconfigContext}, Namespace: dc.namespace, ClusterName: name, diff --git a/cmd/clusterctl/cmd/generate_cluster.go b/cmd/clusterctl/cmd/generate_cluster.go index 976780273dbf..976d6b9ee9db 100644 --- a/cmd/clusterctl/cmd/generate_cluster.go +++ b/cmd/clusterctl/cmd/generate_cluster.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "fmt" "github.com/pkg/errors" @@ -145,7 +146,9 @@ func init() { } func runGenerateClusterTemplate(cmd *cobra.Command, name string) error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -186,7 +189,7 @@ func runGenerateClusterTemplate(cmd *cobra.Command, name string) error { } } - template, err := c.GetClusterTemplate(templateOptions) + template, err := c.GetClusterTemplate(ctx, templateOptions) if err != nil { return err } diff --git a/cmd/clusterctl/cmd/generate_provider.go b/cmd/clusterctl/cmd/generate_provider.go index 89fba0590bad..bd105d8d89bc 100644 --- a/cmd/clusterctl/cmd/generate_provider.go +++ b/cmd/clusterctl/cmd/generate_provider.go @@ -17,6 +17,8 @@ limitations under the License. 
package cmd import ( + "context" + "github.com/pkg/errors" "github.com/spf13/cobra" @@ -105,11 +107,13 @@ func init() { } func runGenerateProviderComponents() error { + ctx := context.Background() + providerName, providerType, err := parseProvider() if err != nil { return err } - c, err := client.New(cfgFile) + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -119,7 +123,7 @@ func runGenerateProviderComponents() error { SkipTemplateProcess: gpo.raw || gpo.textOutput, } - components, err := c.GenerateProvider(providerName, providerType, options) + components, err := c.GenerateProvider(ctx, providerName, providerType, options) if err != nil { return err } diff --git a/cmd/clusterctl/cmd/generate_yaml.go b/cmd/clusterctl/cmd/generate_yaml.go index cf09b38e023b..ff978acbb491 100644 --- a/cmd/clusterctl/cmd/generate_yaml.go +++ b/cmd/clusterctl/cmd/generate_yaml.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "fmt" "io" "os" @@ -79,7 +80,9 @@ func init() { } func generateYAML(r io.Reader, w io.Writer) error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -97,7 +100,7 @@ func generateYAML(r io.Reader, w io.Writer) error { } } } - printer, err := c.ProcessYAML(options) + printer, err := c.ProcessYAML(ctx, options) if err != nil { return err } diff --git a/cmd/clusterctl/cmd/get_kubeconfig.go b/cmd/clusterctl/cmd/get_kubeconfig.go index e2e95fda048a..dfd35569ac3d 100644 --- a/cmd/clusterctl/cmd/get_kubeconfig.go +++ b/cmd/clusterctl/cmd/get_kubeconfig.go @@ -17,6 +17,7 @@ limitations under the License. 
package cmd import ( + "context" "fmt" "github.com/pkg/errors" @@ -79,7 +80,9 @@ func init() { } func runGetKubeconfig(workloadClusterName string) error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -90,7 +93,7 @@ func runGetKubeconfig(workloadClusterName string) error { Namespace: gk.namespace, } - out, err := c.GetKubeconfig(options) + out, err := c.GetKubeconfig(ctx, options) if err != nil { return err } diff --git a/cmd/clusterctl/cmd/init.go b/cmd/clusterctl/cmd/init.go index aac13344bead..e93b7c9014d5 100644 --- a/cmd/clusterctl/cmd/init.go +++ b/cmd/clusterctl/cmd/init.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "time" "github.com/spf13/cobra" @@ -120,7 +121,9 @@ func init() { } func runInit() error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -141,7 +144,7 @@ func runInit() error { IgnoreValidationErrors: !initOpts.validate, } - if _, err := c.Init(options); err != nil { + if _, err := c.Init(ctx, options); err != nil { return err } return nil diff --git a/cmd/clusterctl/cmd/init_list_images.go b/cmd/clusterctl/cmd/init_list_images.go index 12cf787ebf8e..9b198acff8f2 100644 --- a/cmd/clusterctl/cmd/init_list_images.go +++ b/cmd/clusterctl/cmd/init_list_images.go @@ -17,6 +17,7 @@ limitations under the License. 
package cmd import ( + "context" "fmt" "github.com/spf13/cobra" @@ -48,7 +49,9 @@ var initListImagesCmd = &cobra.Command{ } func runInitListImages() error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -65,7 +68,7 @@ func runInitListImages() error { LogUsageInstructions: false, } - images, err := c.InitImages(options) + images, err := c.InitImages(ctx, options) if err != nil { return err } diff --git a/cmd/clusterctl/cmd/move.go b/cmd/clusterctl/cmd/move.go index c75557e0aa10..c8316b3762f6 100644 --- a/cmd/clusterctl/cmd/move.go +++ b/cmd/clusterctl/cmd/move.go @@ -17,6 +17,8 @@ limitations under the License. package cmd import ( + "context" + "github.com/pkg/errors" "github.com/spf13/cobra" @@ -87,6 +89,8 @@ func init() { } func runMove() error { + ctx := context.Background() + if mo.toDirectory == "" && mo.fromDirectory == "" && mo.toKubeconfig == "" && @@ -94,12 +98,12 @@ func runMove() error { return errors.New("please specify a target cluster using the --to-kubeconfig flag when not using --dry-run, --to-directory or --from-directory") } - c, err := client.New(cfgFile) + c, err := client.New(ctx, cfgFile) if err != nil { return err } - return c.Move(client.MoveOptions{ + return c.Move(ctx, client.MoveOptions{ FromKubeconfig: client.Kubeconfig{Path: mo.fromKubeconfig, Context: mo.fromKubeconfigContext}, ToKubeconfig: client.Kubeconfig{Path: mo.toKubeconfig, Context: mo.toKubeconfigContext}, FromDirectory: mo.fromDirectory, diff --git a/cmd/clusterctl/cmd/rollout/pause.go b/cmd/clusterctl/cmd/rollout/pause.go index 8e29ed200fec..7b921c747def 100644 --- a/cmd/clusterctl/cmd/rollout/pause.go +++ b/cmd/clusterctl/cmd/rollout/pause.go @@ -18,6 +18,8 @@ limitations under the License. 
package rollout import ( + "context" + "github.com/spf13/cobra" "k8s.io/kubectl/pkg/util/templates" @@ -72,12 +74,14 @@ func NewCmdRolloutPause(cfgFile string) *cobra.Command { func runPause(cfgFile string, args []string) error { pauseOpt.resources = args - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } - return c.RolloutPause(client.RolloutPauseOptions{ + return c.RolloutPause(ctx, client.RolloutPauseOptions{ Kubeconfig: client.Kubeconfig{Path: pauseOpt.kubeconfig, Context: pauseOpt.kubeconfigContext}, Namespace: pauseOpt.namespace, Resources: pauseOpt.resources, diff --git a/cmd/clusterctl/cmd/rollout/restart.go b/cmd/clusterctl/cmd/rollout/restart.go index 7f46fac317fd..b582c6c46411 100644 --- a/cmd/clusterctl/cmd/rollout/restart.go +++ b/cmd/clusterctl/cmd/rollout/restart.go @@ -17,6 +17,8 @@ limitations under the License. package rollout import ( + "context" + "github.com/spf13/cobra" "k8s.io/kubectl/pkg/util/templates" @@ -71,12 +73,14 @@ func NewCmdRolloutRestart(cfgFile string) *cobra.Command { func runRestart(cfgFile string, _ *cobra.Command, args []string) error { restartOpt.resources = args - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } - return c.RolloutRestart(client.RolloutRestartOptions{ + return c.RolloutRestart(ctx, client.RolloutRestartOptions{ Kubeconfig: client.Kubeconfig{Path: restartOpt.kubeconfig, Context: restartOpt.kubeconfigContext}, Namespace: restartOpt.namespace, Resources: restartOpt.resources, diff --git a/cmd/clusterctl/cmd/rollout/resume.go b/cmd/clusterctl/cmd/rollout/resume.go index 07dc4bc1fa5f..fec6af6e116d 100644 --- a/cmd/clusterctl/cmd/rollout/resume.go +++ b/cmd/clusterctl/cmd/rollout/resume.go @@ -17,6 +17,8 @@ limitations under the License. 
package rollout import ( + "context" + "github.com/spf13/cobra" "k8s.io/kubectl/pkg/util/templates" @@ -71,12 +73,14 @@ func NewCmdRolloutResume(cfgFile string) *cobra.Command { func runResume(cfgFile string, args []string) error { resumeOpt.resources = args - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } - return c.RolloutResume(client.RolloutResumeOptions{ + return c.RolloutResume(ctx, client.RolloutResumeOptions{ Kubeconfig: client.Kubeconfig{Path: resumeOpt.kubeconfig, Context: resumeOpt.kubeconfigContext}, Namespace: resumeOpt.namespace, Resources: resumeOpt.resources, diff --git a/cmd/clusterctl/cmd/rollout/undo.go b/cmd/clusterctl/cmd/rollout/undo.go index cfa6603009b4..5e018207212b 100644 --- a/cmd/clusterctl/cmd/rollout/undo.go +++ b/cmd/clusterctl/cmd/rollout/undo.go @@ -17,6 +17,8 @@ limitations under the License. package rollout import ( + "context" + "github.com/spf13/cobra" "k8s.io/kubectl/pkg/util/templates" @@ -71,12 +73,14 @@ func NewCmdRolloutUndo(cfgFile string) *cobra.Command { func runUndo(cfgFile string, args []string) error { undoOpt.resources = args - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } - return c.RolloutUndo(client.RolloutUndoOptions{ + return c.RolloutUndo(ctx, client.RolloutUndoOptions{ Kubeconfig: client.Kubeconfig{Path: undoOpt.kubeconfig, Context: undoOpt.kubeconfigContext}, Namespace: undoOpt.namespace, Resources: undoOpt.resources, diff --git a/cmd/clusterctl/cmd/root.go b/cmd/clusterctl/cmd/root.go index dd7bbc30f959..55f8bdc52d92 100644 --- a/cmd/clusterctl/cmd/root.go +++ b/cmd/clusterctl/cmd/root.go @@ -17,6 +17,7 @@ limitations under the License. 
package cmd import ( + "context" "flag" "fmt" "os" @@ -59,9 +60,11 @@ var RootCmd = &cobra.Command{ Get started with Cluster API using clusterctl to create a management cluster, install providers, and create templates for your workload cluster.`), PersistentPostRunE: func(cmd *cobra.Command, args []string) error { + ctx := context.Background() + // Check if clusterctl needs an upgrade "AFTER" running each command // and sub-command. - configClient, err := config.New(cfgFile) + configClient, err := config.New(ctx, cfgFile) if err != nil { return err } @@ -70,11 +73,11 @@ var RootCmd = &cobra.Command{ // version check is disabled. Return early. return nil } - checker, err := newVersionChecker(configClient.Variables()) + checker, err := newVersionChecker(ctx, configClient.Variables()) if err != nil { return err } - output, err := checker.Check() + output, err := checker.Check(ctx) if err != nil { return errors.Wrap(err, "unable to verify clusterctl version") } @@ -148,9 +151,11 @@ func init() { } func initConfig() { + ctx := context.Background() + // check if the CLUSTERCTL_LOG_LEVEL was set via env var or in the config file if *verbosity == 0 { - configClient, err := config.New(cfgFile) + configClient, err := config.New(ctx, cfgFile) if err == nil { v, err := configClient.Variables().Get("CLUSTERCTL_LOG_LEVEL") if err == nil && v != "" { diff --git a/cmd/clusterctl/cmd/topology_plan.go b/cmd/clusterctl/cmd/topology_plan.go index 8a8ede41bfe5..cec05594628d 100644 --- a/cmd/clusterctl/cmd/topology_plan.go +++ b/cmd/clusterctl/cmd/topology_plan.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "fmt" "io" "os" @@ -109,7 +110,9 @@ func init() { } func runTopologyPlan() error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -127,7 +130,7 @@ func runTopologyPlan() error { objs = append(objs, objects...) 
} - out, err := c.TopologyPlan(client.TopologyPlanOptions{ + out, err := c.TopologyPlan(ctx, client.TopologyPlanOptions{ Kubeconfig: client.Kubeconfig{Path: tp.kubeconfig, Context: tp.kubeconfigContext}, Objs: convertToPtrSlice(objs), Cluster: tp.cluster, diff --git a/cmd/clusterctl/cmd/upgrade_apply.go b/cmd/clusterctl/cmd/upgrade_apply.go index fa0fdafa3d65..137a10ef56fa 100644 --- a/cmd/clusterctl/cmd/upgrade_apply.go +++ b/cmd/clusterctl/cmd/upgrade_apply.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "time" "github.com/pkg/errors" @@ -95,7 +96,9 @@ func init() { } func runUpgradeApply() error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } @@ -115,7 +118,7 @@ func runUpgradeApply() error { return errors.New("The --contract flag can't be used in combination with --core, --bootstrap, --control-plane, --infrastructure, --ipam, --extension, --addon") } - return c.ApplyUpgrade(client.ApplyUpgradeOptions{ + return c.ApplyUpgrade(ctx, client.ApplyUpgradeOptions{ Kubeconfig: client.Kubeconfig{Path: ua.kubeconfig, Context: ua.kubeconfigContext}, Contract: ua.contract, CoreProvider: ua.coreProvider, diff --git a/cmd/clusterctl/cmd/upgrade_plan.go b/cmd/clusterctl/cmd/upgrade_plan.go index 00f711e8a53c..52b05cf8ca79 100644 --- a/cmd/clusterctl/cmd/upgrade_plan.go +++ b/cmd/clusterctl/cmd/upgrade_plan.go @@ -17,6 +17,7 @@ limitations under the License. 
package cmd import ( + "context" "fmt" "os" "text/tabwriter" @@ -65,12 +66,14 @@ func init() { } func runUpgradePlan() error { - c, err := client.New(cfgFile) + ctx := context.Background() + + c, err := client.New(ctx, cfgFile) if err != nil { return err } - certManUpgradePlan, err := c.PlanCertManagerUpgrade(client.PlanUpgradeOptions{ + certManUpgradePlan, err := c.PlanCertManagerUpgrade(ctx, client.PlanUpgradeOptions{ Kubeconfig: client.Kubeconfig{Path: up.kubeconfig, Context: up.kubeconfigContext}, }) if err != nil { @@ -84,7 +87,7 @@ func runUpgradePlan() error { } } - upgradePlans, err := c.PlanUpgrade(client.PlanUpgradeOptions{ + upgradePlans, err := c.PlanUpgrade(ctx, client.PlanUpgradeOptions{ Kubeconfig: client.Kubeconfig{Path: up.kubeconfig, Context: up.kubeconfigContext}, }) diff --git a/cmd/clusterctl/cmd/version_checker.go b/cmd/clusterctl/cmd/version_checker.go index f0334225cac7..de8f11ba2c12 100644 --- a/cmd/clusterctl/cmd/version_checker.go +++ b/cmd/clusterctl/cmd/version_checker.go @@ -51,14 +51,14 @@ type versionChecker struct { // newVersionChecker returns a versionChecker. Its behavior has been inspired // by https://github.com/cli/cli. -func newVersionChecker(vc config.VariablesClient) (*versionChecker, error) { +func newVersionChecker(ctx context.Context, vc config.VariablesClient) (*versionChecker, error) { var client *github.Client token, err := vc.Get("GITHUB_TOKEN") if err == nil { ts := oauth2.StaticTokenSource( &oauth2.Token{AccessToken: token}, ) - tc := oauth2.NewClient(context.TODO(), ts) + tc := oauth2.NewClient(ctx, ts) client = github.NewClient(tc) } else { client = github.NewClient(nil) @@ -94,14 +94,14 @@ type VersionState struct { // release from github at most once during a 24 hour period and caches the // state by default in $XDG_CONFIG_HOME/cluster-api/state.yaml. If the clusterctl // version is the same or greater it returns nothing. 
-func (v *versionChecker) Check() (string, error) { +func (v *versionChecker) Check(ctx context.Context) (string, error) { log := logf.Log cliVer, err := semver.ParseTolerant(v.cliVersion().GitVersion) if err != nil { return "", errors.Wrap(err, "unable to semver parse clusterctl GitVersion") } - release, err := v.getLatestRelease() + release, err := v.getLatestRelease(ctx) if err != nil { return "", err } @@ -137,7 +137,7 @@ New clusterctl version available: v%s -> v%s return "", nil } -func (v *versionChecker) getLatestRelease() (*ReleaseInfo, error) { +func (v *versionChecker) getLatestRelease(ctx context.Context) (*ReleaseInfo, error) { log := logf.Log vs, err := readStateFile(v.versionFilePath) if err != nil { @@ -146,7 +146,7 @@ func (v *versionChecker) getLatestRelease() (*ReleaseInfo, error) { // if there is no release info in the state file, pull latest release from github if vs == nil { - release, _, err := v.githubClient.Repositories.GetLatestRelease(context.TODO(), "kubernetes-sigs", "cluster-api") + release, _, err := v.githubClient.Repositories.GetLatestRelease(ctx, "kubernetes-sigs", "cluster-api") if err != nil { log.V(1).Info("⚠️ Unable to get latest github release for clusterctl") // failing silently here so we don't error out in air-gapped diff --git a/cmd/clusterctl/cmd/version_checker_test.go b/cmd/clusterctl/cmd/version_checker_test.go index aa78ab54f537..9c6302ebb728 100644 --- a/cmd/clusterctl/cmd/version_checker_test.go +++ b/cmd/clusterctl/cmd/version_checker_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package cmd import ( + "context" "fmt" "net/http" "os" @@ -35,7 +36,9 @@ import ( func TestVersionChecker_newVersionChecker(t *testing.T) { g := NewWithT(t) - versionChecker, err := newVersionChecker(test.NewFakeVariableClient()) + ctx := context.Background() + + versionChecker, err := newVersionChecker(ctx, test.NewFakeVariableClient()) g.Expect(err).ToNot(HaveOccurred()) @@ -233,6 +236,9 @@ https://github.com/foo/bar/releases/v0.3.8-alpha.1 for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + tmpVersionFile, cleanDir := generateTempVersionFilePath(g) defer cleanDir() @@ -246,14 +252,14 @@ https://github.com/foo/bar/releases/v0.3.8-alpha.1 }, ) defer cleanup() - versionChecker, err := newVersionChecker(test.NewFakeVariableClient()) + versionChecker, err := newVersionChecker(ctx, test.NewFakeVariableClient()) g.Expect(err).ToNot(HaveOccurred()) versionChecker.cliVersion = tt.cliVersion versionChecker.githubClient = fakeGithubClient versionChecker.versionFilePath = tmpVersionFile - output, err := versionChecker.Check() + output, err := versionChecker.Check(ctx) if tt.expectErr { g.Expect(err).To(HaveOccurred()) @@ -267,6 +273,9 @@ https://github.com/foo/bar/releases/v0.3.8-alpha.1 func TestVersionChecker_WriteStateFile(t *testing.T) { g := NewWithT(t) + + ctx := context.Background() + fakeGithubClient, mux, cleanup := test.NewFakeGitHub() mux.HandleFunc( "/repos/kubernetes-sigs/cluster-api/releases/latest", @@ -280,12 +289,12 @@ func TestVersionChecker_WriteStateFile(t *testing.T) { tmpVersionFile, cleanDir := generateTempVersionFilePath(g) defer cleanDir() - versionChecker, err := newVersionChecker(test.NewFakeVariableClient()) + versionChecker, err := newVersionChecker(ctx, test.NewFakeVariableClient()) g.Expect(err).ToNot(HaveOccurred()) versionChecker.versionFilePath = tmpVersionFile versionChecker.githubClient = fakeGithubClient - release, err := versionChecker.getLatestRelease() + release, err := 
versionChecker.getLatestRelease(ctx) g.Expect(err).ToNot(HaveOccurred()) // ensure that the state file has been created @@ -300,6 +309,8 @@ func TestVersionChecker_WriteStateFile(t *testing.T) { func TestVersionChecker_ReadFromStateFile(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + tmpVersionFile, cleanDir := generateTempVersionFilePath(g) defer cleanDir() @@ -312,14 +323,14 @@ func TestVersionChecker_ReadFromStateFile(t *testing.T) { }, ) defer cleanup1() - versionChecker, err := newVersionChecker(test.NewFakeVariableClient()) + versionChecker, err := newVersionChecker(ctx, test.NewFakeVariableClient()) g.Expect(err).ToNot(HaveOccurred()) versionChecker.versionFilePath = tmpVersionFile versionChecker.githubClient = fakeGithubClient1 // this call to getLatestRelease will pull from our fakeGithubClient1 and // store the information including timestamp into the state file. - _, err = versionChecker.getLatestRelease() + _, err = versionChecker.getLatestRelease(ctx) g.Expect(err).ToNot(HaveOccurred()) // override the github client with response to a new version v0.3.99 @@ -337,7 +348,7 @@ func TestVersionChecker_ReadFromStateFile(t *testing.T) { // now instead of making another call to github, we want to read from the // file. This will avoid unnecessary calls to github. 
- release, err := versionChecker.getLatestRelease() + release, err := versionChecker.getLatestRelease(ctx) g.Expect(err).ToNot(HaveOccurred()) g.Expect(release.Version).To(Equal("v0.3.8")) g.Expect(release.URL).To(Equal("https://github.com/foo/bar/releases/v0.3.8")) @@ -347,6 +358,8 @@ func TestVersionChecker_ReadFromStateFile(t *testing.T) { func TestVersionChecker_ReadFromStateFileWithin24Hrs(t *testing.T) { g := NewWithT(t) + ctx := context.Background() + tmpVersionFile, cleanDir := generateTempVersionFilePath(g) defer cleanDir() @@ -369,17 +382,17 @@ func TestVersionChecker_ReadFromStateFileWithin24Hrs(t *testing.T) { }, ) defer cleanup1() - versionChecker, err := newVersionChecker(test.NewFakeVariableClient()) + versionChecker, err := newVersionChecker(ctx, test.NewFakeVariableClient()) g.Expect(err).ToNot(HaveOccurred()) versionChecker.versionFilePath = tmpVersionFile versionChecker.githubClient = fakeGithubClient1 - _, err = versionChecker.getLatestRelease() + _, err = versionChecker.getLatestRelease(ctx) g.Expect(err).ToNot(HaveOccurred()) // Since the state file is more that 24 hours old we want to retrieve the // latest release from github. - release, err := versionChecker.getLatestRelease() + release, err := versionChecker.getLatestRelease(ctx) g.Expect(err).ToNot(HaveOccurred()) g.Expect(release.Version).To(Equal("v0.3.10")) g.Expect(release.URL).To(Equal("https://github.com/foo/bar/releases/v0.3.10")) diff --git a/cmd/clusterctl/internal/test/fake_processor.go b/cmd/clusterctl/internal/test/fake_processor.go index 92a5dd229949..5b50b8aa0d48 100644 --- a/cmd/clusterctl/internal/test/fake_processor.go +++ b/cmd/clusterctl/internal/test/fake_processor.go @@ -16,7 +16,9 @@ limitations under the License. 
package test -import "fmt" +import ( + "fmt" +) type FakeProcessor struct { errGetVariables error diff --git a/cmd/clusterctl/internal/test/fake_proxy.go b/cmd/clusterctl/internal/test/fake_proxy.go index f1524f477c8c..3fb9864f6168 100644 --- a/cmd/clusterctl/internal/test/fake_proxy.go +++ b/cmd/clusterctl/internal/test/fake_proxy.go @@ -17,6 +17,7 @@ limitations under the License. package test import ( + "context" "errors" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -96,7 +97,7 @@ func (f *FakeProxy) CheckClusterAvailable() error { } // ListResources returns all the resources known by the FakeProxy. -func (f *FakeProxy) ListResources(labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error) { +func (f *FakeProxy) ListResources(_ context.Context, labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error) { var ret []unstructured.Unstructured //nolint:prealloc for _, o := range f.objs { u := unstructured.Unstructured{} @@ -142,7 +143,7 @@ func (f *FakeProxy) GetContexts(_ string) ([]string, error) { return nil, nil } -func (f *FakeProxy) GetResourceNames(_, _ string, _ []client.ListOption, _ string) ([]string, error) { +func (f *FakeProxy) GetResourceNames(_ context.Context, _, _ string, _ []client.ListOption, _ string) ([]string, error) { return nil, nil } diff --git a/cmd/clusterctl/internal/test/fake_reader.go b/cmd/clusterctl/internal/test/fake_reader.go index 3fd8ea4c9f05..d34fa6821fc2 100644 --- a/cmd/clusterctl/internal/test/fake_reader.go +++ b/cmd/clusterctl/internal/test/fake_reader.go @@ -17,6 +17,8 @@ limitations under the License. 
package test import ( + "context" + "github.com/pkg/errors" "sigs.k8s.io/yaml" @@ -55,7 +57,7 @@ type imageMeta struct { Tag string `json:"tag,omitempty"` } -func (f *FakeReader) Init(_ string) error { +func (f *FakeReader) Init(_ context.Context, _ string) error { f.initialized = true return nil } diff --git a/docs/book/src/developer/providers/migrations/v1.5-to-v1.6.md b/docs/book/src/developer/providers/migrations/v1.5-to-v1.6.md index 45809200b5a6..950c4ed0a30e 100644 --- a/docs/book/src/developer/providers/migrations/v1.5-to-v1.6.md +++ b/docs/book/src/developer/providers/migrations/v1.5-to-v1.6.md @@ -29,7 +29,7 @@ maintainers of providers and consumers of our Go API. - `ProviderID` type and all related methods/construct have been removed. Please see this [PR](https://github.com/kubernetes-sigs/cluster-api/pull/8577) for a reference. ### API Changes - +- Several public functions in `cmd/clusterctl/` now require `context.Context` as the first parameter. ### Other - `clusterctl move` can be blocked temporarily by a provider when an object to be moved is annotated with `clusterctl.cluster.x-k8s.io/block-move`. diff --git a/hack/tools/internal/tilt-prepare/main.go b/hack/tools/internal/tilt-prepare/main.go index bdd45d13c9b1..277fa4710a71 100644 --- a/hack/tools/internal/tilt-prepare/main.go +++ b/hack/tools/internal/tilt-prepare/main.go @@ -548,14 +548,14 @@ func preLoadImageTask(image string) taskFunction { // certManagerTask generates a task for installing cert-manager if not already present. 
func certManagerTask() taskFunction { return func(ctx context.Context, prefix string, errCh chan error) { - config, err := config.New("") + config, err := config.New(ctx, "") if err != nil { errCh <- errors.Wrapf(err, "[%s] failed create clusterctl config", prefix) return } cluster := cluster.New(cluster.Kubeconfig{}, config) - if err := cluster.CertManager().EnsureInstalled(); err != nil { + if err := cluster.CertManager().EnsureInstalled(ctx); err != nil { errCh <- errors.Wrapf(err, "[%s] failed to install cert-manger", prefix) } } diff --git a/test/framework/autoscaler_helpers.go b/test/framework/autoscaler_helpers.go index 8601ee3f723c..8af2725d715a 100644 --- a/test/framework/autoscaler_helpers.go +++ b/test/framework/autoscaler_helpers.go @@ -220,11 +220,13 @@ type ProcessYAMLInput struct { } func ProcessYAML(input *ProcessYAMLInput) ([]byte, error) { + ctx := context.Background() + for n, v := range input.Env { _ = os.Setenv(n, v) } - c, err := clusterctlclient.New(input.ClusterctlConfigPath) + c, err := clusterctlclient.New(ctx, input.ClusterctlConfigPath) if err != nil { return nil, err } @@ -234,7 +236,7 @@ func ProcessYAML(input *ProcessYAMLInput) ([]byte, error) { }, } - printer, err := c.ProcessYAML(options) + printer, err := c.ProcessYAML(ctx, options) if err != nil { return nil, err } diff --git a/test/framework/clusterctl/client.go b/test/framework/clusterctl/client.go index 2fd4993dee37..4c11de9cc50a 100644 --- a/test/framework/clusterctl/client.go +++ b/test/framework/clusterctl/client.go @@ -61,7 +61,7 @@ type InitInput struct { } // Init calls clusterctl init with the list of providers defined in the local repository. 
-func Init(_ context.Context, input InitInput) { +func Init(ctx context.Context, input InitInput) { args := calculateClusterCtlInitArgs(input) log.Logf("clusterctl %s", strings.Join(args, " ")) @@ -81,10 +81,10 @@ func Init(_ context.Context, input InitInput) { WaitProviders: true, } - clusterctlClient, log := getClusterctlClientWithLogger(input.ClusterctlConfigPath, "clusterctl-init.log", input.LogFolder) + clusterctlClient, log := getClusterctlClientWithLogger(ctx, input.ClusterctlConfigPath, "clusterctl-init.log", input.LogFolder) defer log.Close() - _, err := clusterctlClient.Init(initOpt) + _, err := clusterctlClient.Init(ctx, initOpt) Expect(err).ToNot(HaveOccurred(), "failed to run clusterctl init") } @@ -209,10 +209,10 @@ func Upgrade(ctx context.Context, input UpgradeInput) { WaitProviders: true, } - clusterctlClient, log := getClusterctlClientWithLogger(input.ClusterctlConfigPath, "clusterctl-upgrade.log", input.LogFolder) + clusterctlClient, log := getClusterctlClientWithLogger(ctx, input.ClusterctlConfigPath, "clusterctl-upgrade.log", input.LogFolder) defer log.Close() - err := clusterctlClient.ApplyUpgrade(upgradeOpt) + err := clusterctlClient.ApplyUpgrade(ctx, upgradeOpt) Expect(err).ToNot(HaveOccurred(), "failed to run clusterctl upgrade") } @@ -224,7 +224,7 @@ type DeleteInput struct { } // Delete calls clusterctl delete --all. 
-func Delete(_ context.Context, input DeleteInput) { +func Delete(ctx context.Context, input DeleteInput) { log.Logf("clusterctl delete --all") deleteOpts := clusterctlclient.DeleteOptions{ @@ -235,10 +235,10 @@ func Delete(_ context.Context, input DeleteInput) { DeleteAll: true, } - clusterctlClient, log := getClusterctlClientWithLogger(input.ClusterctlConfigPath, "clusterctl-delete.log", input.LogFolder) + clusterctlClient, log := getClusterctlClientWithLogger(ctx, input.ClusterctlConfigPath, "clusterctl-delete.log", input.LogFolder) defer log.Close() - err := clusterctlClient.Delete(deleteOpts) + err := clusterctlClient.Delete(ctx, deleteOpts) Expect(err).ToNot(HaveOccurred(), "failed to run clusterctl upgrade") } @@ -294,10 +294,10 @@ func ConfigCluster(ctx context.Context, input ConfigClusterInput) []byte { input.ClusterctlConfigPath = outputPath } - clusterctlClient, log := getClusterctlClientWithLogger(input.ClusterctlConfigPath, fmt.Sprintf("%s-cluster-template.yaml", input.ClusterName), input.LogFolder) + clusterctlClient, log := getClusterctlClientWithLogger(ctx, input.ClusterctlConfigPath, fmt.Sprintf("%s-cluster-template.yaml", input.ClusterName), input.LogFolder) defer log.Close() - template, err := clusterctlClient.GetClusterTemplate(templateOptions) + template, err := clusterctlClient.GetClusterTemplate(ctx, templateOptions) Expect(err).ToNot(HaveOccurred(), "Failed to run clusterctl config cluster") yaml, err := template.Yaml() @@ -403,7 +403,7 @@ func Move(ctx context.Context, input MoveInput) { input.Namespace, ) - clusterctlClient, log := getClusterctlClientWithLogger(input.ClusterctlConfigPath, "clusterctl-move.log", logDir) + clusterctlClient, log := getClusterctlClientWithLogger(ctx, input.ClusterctlConfigPath, "clusterctl-move.log", logDir) defer log.Close() options := clusterctlclient.MoveOptions{ FromKubeconfig: clusterctlclient.Kubeconfig{Path: input.FromKubeconfigPath, Context: ""}, @@ -411,17 +411,17 @@ func Move(ctx context.Context, 
input MoveInput) { Namespace: input.Namespace, } - Expect(clusterctlClient.Move(options)).To(Succeed(), "Failed to run clusterctl move") + Expect(clusterctlClient.Move(ctx, options)).To(Succeed(), "Failed to run clusterctl move") } -func getClusterctlClientWithLogger(configPath, logName, logFolder string) (clusterctlclient.Client, *logger.LogFile) { +func getClusterctlClientWithLogger(ctx context.Context, configPath, logName, logFolder string) (clusterctlclient.Client, *logger.LogFile) { log := logger.OpenLogFile(logger.OpenLogFileInput{ LogFolder: logFolder, Name: logName, }) clusterctllog.SetLogger(log.Logger()) - c, err := clusterctlclient.New(configPath) + c, err := clusterctlclient.New(ctx, configPath) Expect(err).ToNot(HaveOccurred(), "Failed to create the clusterctl client library") return c, log } diff --git a/test/framework/ownerreference_helpers.go b/test/framework/ownerreference_helpers.go index e763fcc295c0..eccee11106af 100644 --- a/test/framework/ownerreference_helpers.go +++ b/test/framework/ownerreference_helpers.go @@ -84,7 +84,9 @@ func AssertOwnerReferences(namespace, kubeconfigPath string, assertFuncs ...map[ } Eventually(func() error { allErrs := []error{} - graph, err := clusterctlcluster.GetOwnerGraph(namespace, kubeconfigPath) + ctx := context.Background() + + graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, kubeconfigPath) Expect(err).ToNot(HaveOccurred()) for _, v := range graph { if _, ok := allAssertFuncs[v.Object.Kind]; !ok { @@ -352,7 +354,7 @@ func forceClusterClassReconcile(ctx context.Context, cli client.Client, clusterK } func removeOwnerReferences(ctx context.Context, proxy ClusterProxy, namespace string) { - graph, err := clusterctlcluster.GetOwnerGraph(namespace, proxy.GetKubeconfigPath()) + graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath()) Expect(err).ToNot(HaveOccurred()) for _, object := range graph { ref := object.Object