CEPH-83602685: Tier-2 test to verify deletion of object and pool snaps
Signed-off-by: Harsh Kumar <hakumar@redhat.com>
harshkumarRH committed Nov 20, 2024
1 parent 2e768c5 commit a6032a8
Showing 8 changed files with 387 additions and 13 deletions.
53 changes: 51 additions & 2 deletions ceph/rados/core_workflows.py
@@ -4300,9 +4300,9 @@ def log_cluster_health(self):
"""
log.debug("Printing cluster health and status")
health_detail, _ = self.node.shell(args=["ceph health detail"])
log.info(f"\n****\n Cluster health detail: \n {health_detail} \n ****")
log.info(f"\n****\n Cluster health detail: \n {health_detail} \n****")
log.info(
f"\n****\n Cluster status: \n {self.run_ceph_command(cmd='ceph -s', client_exec=True)} \n ****"
f"\n****\n Cluster status: \n {self.client.exec_command(cmd='ceph -s', sudo=True)[0]} \n****"
)
return health_detail
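Note on the change above: `run_ceph_command` returns parsed output (a dict, as used by the helpers further below), while `self.client.exec_command` presumably returns a (stdout, stderr) tuple, hence the trailing `[0]` so that the plain-text `ceph -s` status gets logged. A minimal sketch of the assumed return shapes, illustrative only:

    # assumed shapes, not verified against the full class definition
    out, err = self.client.exec_command(cmd="ceph -s", sudo=True)    # raw text status
    status = self.run_ceph_command(cmd="ceph -s", client_exec=True)  # parsed output (dict)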

@@ -4578,3 +4578,52 @@ def check_crash_status(self):
log.info(out)
return True
return False

def list_obj_snaps(self, pool_name: str, obj_name: str):
"""
Module to fetch the list of snaps for an object
Args:
pool_name: name of the pool where obj is present
obj_name: name of the object
Returns:
o/p of rados listsnaps -p <pool_name> <obj_name>
Example:
# rados listsnaps -p test-snap obj-1
obj-1:
cloneid snaps size overlap
2 1,2 4194304 []
head - 4194304
"""
_cmd = f"rados listsnaps -p {pool_name} {obj_name}"
return self.run_ceph_command(cmd=_cmd, client_exec=True)

def list_pool_snaps(self, pool_name: str):
"""
Module to list pool snapshots
Args:
pool_name: name of the pool
Returns:
o/p of rados lssnap -p <pool_name>
Example:
# rados lssnap -p test-snap
2 snap-2 2024.05.29 10:45:30
3 snap-3 2024.05.29 10:58:17
2 snaps
"""
_cmd = f"rados lssnap -p {pool_name}"
return self.client.exec_command(cmd=_cmd, sudo=True)[0]

def get_rados_df(self, pool_name: str = None):
"""
Module to fetch rados df output
returns detail for only the given pool if pool_name is provided
Args:
pool_name(optional): name of the pool
Returns:
complete rados df o/p if no pool name is provided
rados df o/p for the pool whose name is provided
"""
_cmd = f"rados df -p {pool_name}" if pool_name else "rados df"
out = self.run_ceph_command(cmd=_cmd, client_exec=True)

return out["pools"][0] if pool_name else out
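A minimal usage sketch for the three new helpers, assuming a `rados_obj` instance of this class and a pool `test-snap` holding an object `obj-1` (names are illustrative):

    # snapshots an object participates in (presumably parsed `rados listsnaps` output)
    obj_snaps = rados_obj.list_obj_snaps(pool_name="test-snap", obj_name="obj-1")

    # pool-level snapshots (raw stdout of `rados lssnap -p test-snap`)
    pool_snaps = rados_obj.list_pool_snaps(pool_name="test-snap")

    # `rados df` stats restricted to one pool, vs the full cluster-wide output
    pool_df = rados_obj.get_rados_df(pool_name="test-snap")
    full_df = rados_obj.get_rados_df()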
14 changes: 7 additions & 7 deletions ceph/rados/pool_workflows.py
@@ -363,9 +363,9 @@ def create_pool_snap(self, pool_name: str, count: int = 1):
"""
# Checking if snapshots can be created on the supplied pool
cmd = "ceph osd dump"
pool_status = self.rados_obj.run_ceph_command(cmd=cmd, timeout=800)
for detail in pool_status["pools"]:
cmd = "ceph osd pool ls detail"
pool_detail = self.rados_obj.run_ceph_command(cmd=cmd, timeout=800)
for detail in pool_detail:
if detail["pool_name"] != pool_name:
continue
if "selfmanaged_snaps" in detail["flags_names"]:
@@ -408,9 +408,9 @@ def get_snap_names(self, pool_name: str) -> list:
Returns: list of the snaps created
"""
cmd = "ceph osd dump"
pool_status = self.rados_obj.run_ceph_command(cmd=cmd, timeout=800)
for detail in pool_status["pools"]:
cmd = "ceph osd pool ls detail"
pool_detail = self.rados_obj.run_ceph_command(cmd=cmd, timeout=800)
for detail in pool_detail:
if detail["pool_name"] == pool_name:
snap_list = [snap["name"] for snap in detail["pool_snaps"]]
log.debug(f"snapshots on pool : {snap_list}")
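The swap from `ceph osd dump` to `ceph osd pool ls detail` in both methods also changes the shape of the parsed result, which is why the loops now iterate the output directly instead of its "pools" key. Roughly, under the assumed JSON shapes:

    # ceph osd dump            -> {"pools": [{"pool_name": ..., ...}, ...], ...}
    # ceph osd pool ls detail  -> [{"pool_name": ..., "flags_names": ..., "pool_snaps": [...], ...}, ...]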
@@ -426,7 +426,7 @@ def delete_pool_snap(self, pool_name: str, snap_name: str = None) -> bool:
"""
if snap_name:
delete_list = list(snap_name)
delete_list = snap_name.split()
else:
delete_list = self.get_snap_names(pool_name=pool_name)
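The replaced `list(snap_name)` call was the bug being fixed here: calling `list()` on a string splits it into individual characters, so each character would have been treated as a snapshot name, whereas `.split()` keeps whole whitespace-separated names. For illustration:

    list("snap-1")           # ['s', 'n', 'a', 'p', '-', '1']
    "snap-1".split()         # ['snap-1']
    "snap-1 snap-2".split()  # ['snap-1', 'snap-2']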

18 changes: 17 additions & 1 deletion suites/pacific/rados/tier-2_rados_test-brownfield.yaml
@@ -23,7 +23,7 @@ tests:
verbose: true
args:
rhcs-version: 5.3
release: z5
release: z5 # deploying old build to verify obj snap deletion
mon-ip: node1
orphan-initial-daemons: true
registry-url: registry.redhat.io
@@ -145,13 +145,29 @@
polarion-id: CEPH-83574439
abort-on-fail: false

- test:
name: "issue repro: obj snap and pool snap deletion"
module: test_pool_snap.py
desc: obj snap deletion when pool snapshot is deleted on unfixed build
polarion-id: CEPH-83602685
config:
issue_reproduction: true

- test:
name: Upgrade ceph cluster
desc: Upgrade cluster to latest version and check health warn
module: test_upgrade_warn.py
polarion-id: CEPH-83574934
abort-on-fail: true

- test:
name: "verify fix: obj snap and pool snap deletion"
module: test_pool_snap.py
desc: obj snap deletion when pool snapshot is deleted on fixed build
polarion-id: CEPH-83602685
config:
verify_fix: true
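These two entries, together with the matching "issue repro" test before the upgrade, split the check across the upgrade: reproduce the snapshot-deletion behaviour on the old build, then re-run the check on the fixed build. The test module itself is not shown in this view; purely as a hypothetical outline of how such config flags might be consumed:

    # hypothetical outline only, not the actual tests/rados/test_pool_snap.py
    if config.get("issue_reproduction"):
        pass  # exercise pool/object snap deletion on the unfixed build
    elif config.get("verify_fix"):
        pass  # repeat the deletion workflow and assert the fixed behaviour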

# Running basic rbd and rgw tests after upgrade
- test:
name: rbd-io
18 changes: 17 additions & 1 deletion suites/quincy/rados/tier-2_rados_test-brownfield.yaml
@@ -23,7 +23,7 @@ tests:
verbose: true
args:
rhcs-version: 5.3
release: rc
release: z6 # deploying old build to verify obj snap deletion
mon-ip: node1
orphan-initial-daemons: true
registry-url: registry.redhat.io
@@ -145,13 +145,29 @@
polarion-id: CEPH-83574439
abort-on-fail: false

- test:
name: "issue repro: obj snap and pool snap deletion"
module: test_pool_snap.py
desc: obj snap deletion when pool snapshot is deleted on unfixed build
polarion-id: CEPH-83602685
config:
issue_reproduction: true

- test:
name: Upgrade ceph cluster
desc: Upgrade cluster to latest version and check health warn
module: test_upgrade_warn.py
polarion-id: CEPH-83574934
abort-on-fail: true

- test:
name: "verify fix: obj snap and pool snap deletion"
module: test_pool_snap.py
desc: obj snap deletion when pool snapshot is deleted on fixed build
polarion-id: CEPH-83602685
config:
verify_fix: true

# Running basic rbd and rgw tests after upgrade
- test:
name: rbd-io
1 change: 1 addition & 0 deletions suites/quincy/rados/tier-2_rados_test_bluestore.yaml
@@ -135,6 +135,7 @@ tests:
desc: Verify tuning of BlueStore cache size for HDDs and SSDs

# commented until CBT bluefs-stats o/p becomes verbose
# RFE: https://bugzilla.redhat.com/show_bug.cgi?id=2326891
# - test:
# name: Bluefs DB utilization
# desc: DB utilization is under check - bluefs files are not inflated
18 changes: 17 additions & 1 deletion suites/reef/rados/tier-2_rados_test-brownfield.yaml
@@ -23,7 +23,7 @@ tests:
verbose: true
args:
rhcs-version: 6.1
release: rc
release: z6 # deploying old build to verify obj snap deletion
mon-ip: node1
orphan-initial-daemons: true
registry-url: registry.redhat.io
@@ -145,6 +145,14 @@
polarion-id: CEPH-83574439
abort-on-fail: false

- test:
name: "issue repro-obj snap and pool snap deletion"
module: test_pool_snap.py
desc: obj snap deletion when pool snapshot is deleted on unfixed build
polarion-id: CEPH-83602685
config:
issue_reproduction: true

- test:
name: Upgrade cluster to latest 7.x ceph version
desc: Upgrade cluster to latest version
@@ -159,6 +167,14 @@
destroy-cluster: false
abort-on-fail: true

- test:
name: "verify fix-obj snap and pool snap deletion"
module: test_pool_snap.py
desc: obj snap deletion when pool snapshot is deleted on fixed build
polarion-id: CEPH-83602685
config:
verify_fix: true

# Running basic rbd and rgw tests after upgrade
- test:
name: rbd-io
20 changes: 19 additions & 1 deletion suites/squid/rados/tier-2_rados_test-brownfield.yaml
@@ -23,7 +23,7 @@ tests:
verbose: true
args:
rhcs-version: 7.1
release: rc
release: z1 # deploying old build to verify obj snap deletion
mon-ip: node1
orphan-initial-daemons: true
registry-url: registry.redhat.io
@@ -145,6 +145,15 @@
polarion-id: CEPH-83574439
abort-on-fail: false

# commented until fix merged in Squid: #2326892
# - test:
# name: "issue repro-obj snap and pool snap deletion"
# module: test_pool_snap.py
# desc: obj snap deletion when pool snapshot is deleted on unfixed build
# polarion-id: CEPH-83602685
# config:
# issue_reproduction: true

- test:
name: Upgrade cluster to latest 8.x ceph version
desc: Upgrade cluster to latest version
@@ -159,6 +168,15 @@
destroy-cluster: false
abort-on-fail: true

# commented until fix merged in Squid: #2326892
# - test:
# name: "verify fix-obj snap and pool snap deletion"
# module: test_pool_snap.py
# desc: obj snap deletion when pool snapshot is deleted on fixed build
# polarion-id: CEPH-83602685
# config:
# verify_fix: true

# Running basic rbd and rgw tests after upgrade
- test:
name: rbd-io