diff --git a/cmd/csi-provisioner/csi-provisioner.go b/cmd/csi-provisioner/csi-provisioner.go
index 9ae14d45c8..8b14a80da6 100644
--- a/cmd/csi-provisioner/csi-provisioner.go
+++ b/cmd/csi-provisioner/csi-provisioner.go
@@ -74,7 +74,7 @@ var (
 	workerThreads    = flag.Uint("worker-threads", 100, "Number of provisioner worker threads, in other words nr. of simultaneous CSI calls.")
 	finalizerThreads = flag.Uint("cloning-protection-threads", 1, "Number of simultaneously running threads, handling cloning finalizer removal")
 	capacityThreads  = flag.Uint("capacity-threads", 1, "Number of simultaneously running threads, handling CSIStorageCapacity objects")
-	operationTimeout = flag.Duration("timeout", 10*time.Second, "Timeout for waiting for creation or deletion of a volume")
+	operationTimeout = flag.Duration("timeout", 10*time.Second, "Timeout for waiting for volume operation (creation, deletion, capacity queries)")

 	enableLeaderElection = flag.Bool("leader-election", false, "Enables leader election. If leader election is enabled, additional RBAC rules are required. Please refer to the Kubernetes CSI documentation for instructions on setting up these RBAC rules.")
@@ -475,6 +475,7 @@ func main() {
 			factoryForNamespace.Storage().V1beta1().CSIStorageCapacities(),
 			*capacityPollInterval,
 			*capacityImmediateBinding,
+			*operationTimeout,
 		)
 		legacyregistry.CustomMustRegister(capacityController)
diff --git a/pkg/capacity/capacity.go b/pkg/capacity/capacity.go
index c1874a8b26..dc5debcebd 100644
--- a/pkg/capacity/capacity.go
+++ b/pkg/capacity/capacity.go
@@ -91,6 +91,7 @@ type Controller struct {
 	cInformer        storageinformersv1beta1.CSIStorageCapacityInformer
 	pollPeriod       time.Duration
 	immediateBinding bool
+	timeout          time.Duration

 	// capacities contains one entry for each object that is
 	// supposed to exist. Entries that exist on the API server
@@ -164,6 +165,7 @@ func NewCentralCapacityController(
 	cInformer storageinformersv1beta1.CSIStorageCapacityInformer,
 	pollPeriod time.Duration,
 	immediateBinding bool,
+	timeout time.Duration,
 ) *Controller {
 	c := &Controller{
 		csiController:    csiController,
@@ -178,6 +180,7 @@ func NewCentralCapacityController(
 		cInformer:        cInformer,
 		pollPeriod:       pollPeriod,
 		immediateBinding: immediateBinding,
+		timeout:          timeout,
 		capacities:       map[workItem]*storagev1beta1.CSIStorageCapacity{},
 	}
@@ -596,7 +599,9 @@ func (c *Controller) syncCapacity(ctx context.Context, item workItem) error {
 			Segments: item.segment.GetLabelMap(),
 		}
 	}
-	resp, err := c.csiController.GetCapacity(ctx, req)
+	syncCtx, cancel := context.WithTimeout(ctx, c.timeout)
+	defer cancel()
+	resp, err := c.csiController.GetCapacity(syncCtx, req)
 	if err != nil {
 		return fmt.Errorf("CSI GetCapacity for %+v: %v", item, err)
 	}
diff --git a/pkg/capacity/capacity_test.go b/pkg/capacity/capacity_test.go
index 3d6fd0f600..513c415539 100644
--- a/pkg/capacity/capacity_test.go
+++ b/pkg/capacity/capacity_test.go
@@ -58,6 +58,7 @@ func init() {
 }

 const (
+	timeout        = 10 * time.Second
 	driverName     = "test-driver"
 	ownerNamespace = "testns"
 	csiscRev       = "CSISC-REV-"
@@ -1361,6 +1362,7 @@ func fakeController(ctx context.Context, client *fakeclientset.Clientset, owner
 		cInformer,
 		1000*time.Hour, // Not used, but even if it was, we wouldn't want automatic capacity polling while the test runs...
 		immediateBinding,
+		timeout,
 	)

 	// This ensures that the informers are running and up-to-date.
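
Note on the syncCapacity hunk above: it relies on the standard Go pattern of deriving a per-call child context with context.WithTimeout and always cancelling it, so each GetCapacity call is bounded by the configured timeout while the parent ctx (and therefore controller shutdown behavior) is left untouched. Below is a minimal standalone sketch of that pattern, not code from the provisioner; the names queryWithTimeout and slowQuery are illustrative only.

package main

import (
	"context"
	"fmt"
	"time"
)

// queryWithTimeout derives a child context with a deadline from the parent
// context, runs query with it, and always cancels the child context so its
// timer is released even when query returns before the deadline.
func queryWithTimeout(ctx context.Context, timeout time.Duration, query func(context.Context) (int64, error)) (int64, error) {
	callCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	return query(callCtx)
}

func main() {
	// A simulated backend call that honors context cancellation.
	slowQuery := func(ctx context.Context) (int64, error) {
		select {
		case <-time.After(2 * time.Second):
			return 42, nil
		case <-ctx.Done():
			return 0, ctx.Err()
		}
	}

	if _, err := queryWithTimeout(context.Background(), 50*time.Millisecond, slowQuery); err != nil {
		fmt.Println("query failed:", err) // prints: query failed: context deadline exceeded
	}
}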