Skip to content

Commit

Permalink
chore(bors): merge pull request openebs#784
Browse files Browse the repository at this point in the history
784: fix(volume/resize): handle overflow during volume shrink attempt r=dsharma-dc a=dsharma-dc

This fixes a case where an attempt to shrink a volume, with a capacity limit set on the cluster, panicked with the error below:

> stderr:
thread 'tokio-runtime-worker' panicked at control-plane/agents/src/bin/core/volume/service.rs:611:24:
attempt to subtract with overflow
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace

Co-authored-by: Diwakar Sharma <diwakar.sharma@datacore.com>
  • Loading branch information
mayastor-bors and dsharma-dc committed Mar 19, 2024
2 parents a160da4 + 0e39475 commit 1ee5592
Show file tree
Hide file tree
Showing 2 changed files with 31 additions and 5 deletions.
29 changes: 25 additions & 4 deletions control-plane/agents/src/bin/core/tests/volume/resize.rs
Original file line number Diff line number Diff line change
Expand Up @@ -279,22 +279,38 @@ async fn resize_with_cluster_capacity_limit() {

let vol_cli = cluster.grpc_client().volume();

// resize exceeding the capacity limit
// resize(shrink) within a valid capacity limit
grpc_resize_volume_with_limit(
&vol_cli,
Some(EXPANDED_SIZE),
Some(ReplyErrorKind::NotAcceptable {}),
false,
)
.await;
// resize(expand) exceeding the capacity limit
grpc_resize_volume_with_limit(
&vol_cli,
Some(EXPANDED_SIZE - CAPACITY_LIMIT_DIFF),
Some(ReplyErrorKind::CapacityLimitExceeded {}),
true,
)
.await;

// resize within the capacity limit
grpc_resize_volume_with_limit(&vol_cli, Some(EXPANDED_SIZE + CAPACITY_LIMIT_DIFF), None).await;
// resize(expand) within the capacity limit
grpc_resize_volume_with_limit(
&vol_cli,
Some(EXPANDED_SIZE + CAPACITY_LIMIT_DIFF),
None,
true,
)
.await;
// resize a new volume, but reduce the limit set previously. The limit balance
// calculations are expected to work based on reduced limit value now.
grpc_resize_volume_with_limit(
&vol_cli,
Some(EXPANDED_SIZE + CAPACITY_LIMIT_DIFF / 2),
None,
true,
)
.await;
}
Expand Down Expand Up @@ -368,8 +384,13 @@ async fn grpc_resize_volume_with_limit(
volume_client: &dyn VolumeOperations,
capacity: Option<u64>,
expected_error: Option<ReplyErrorKind>,
expand: bool,
) {
let vol_uuid = Uuid::new_v4();
let new_size = match expand {
true => EXPANDED_SIZE,
false => SIZE / 2,
};

let volume = volume_client
.create(
Expand All @@ -389,7 +410,7 @@ async fn grpc_resize_volume_with_limit(
.resize(
&ResizeVolume {
uuid: volume.uuid().clone(),
requested_size: EXPANDED_SIZE,
requested_size: new_size,
cluster_capacity_limit: capacity,
},
None,
Expand Down
7 changes: 6 additions & 1 deletion control-plane/agents/src/bin/core/volume/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -608,7 +608,12 @@ impl Service {
return volume.resize(&self.registry, request).await;
};

let required = request.requested_size - volume.as_ref().size;
// If requested size is less than volume's current size(attempt to shrink volume),
// then required becomes zero because we won't need to borrow anything from capacity_limit.
let required = request
.requested_size
.checked_sub(volume.as_ref().size)
.unwrap_or_default();
*self.capacity_limit_borrow.write() += required;
// If there is a defined system wide capacity limit, ensure we don't breach that.
let current = *self.capacity_limit_borrow.read();
Expand Down

0 comments on commit 1ee5592

Please sign in to comment.