From 0f1ff5dc269949f17961e4b61b8fc0000fdfc81b Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Thu, 28 Dec 2023 14:53:15 +0200
Subject: [PATCH] feature(adaptive_timeouts): collect number of db nodes

Since in some cases we will want to use that number as a scale for
selecting timeouts, we'll start collecting it now.
---
 sdcm/utils/adaptive_timeouts/__init__.py | 9 +++++++++
 unit_tests/test_adaptive_timeouts.py     | 3 ++-
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/sdcm/utils/adaptive_timeouts/__init__.py b/sdcm/utils/adaptive_timeouts/__init__.py
index 9645680529..00e6557f03 100644
--- a/sdcm/utils/adaptive_timeouts/__init__.py
+++ b/sdcm/utils/adaptive_timeouts/__init__.py
@@ -74,6 +74,14 @@ class Operations(Enum):
                                             ("timeout", "service_level_for_test_step"))
 
 
+class TestInfoServices:  # pylint: disable=too-few-public-methods
+    @staticmethod
+    def get(node: "BaseNode") -> dict:
+        return dict(
+            n_db_nodes=len(node.parent_cluster.nodes),
+        )
+
+
 @contextmanager
 def adaptive_timeout(operation: Operations, node: "BaseNode", stats_storage: AdaptiveTimeoutStore = ESAdaptiveTimeoutStore(), **kwargs):
     """
@@ -87,6 +95,7 @@ def adaptive_timeout(operation: Operations, node: "BaseNode", stats_storage: Ada
             assert arg in kwargs, f"Argument '{arg}' is required for operation {operation.name}"
             args[arg] = kwargs[arg]
     timeout, load_metrics = operation.value[1](node_info_service=NodeLoadInfoServices().get(node), **args)
+    load_metrics = load_metrics | TestInfoServices.get(node)
     start_time = time.time()
     timeout_occurred = False
     try:
diff --git a/unit_tests/test_adaptive_timeouts.py b/unit_tests/test_adaptive_timeouts.py
index d929d83a2d..94443faa0b 100644
--- a/unit_tests/test_adaptive_timeouts.py
+++ b/unit_tests/test_adaptive_timeouts.py
@@ -23,7 +23,7 @@
 from sdcm.remote import RemoteCmdRunnerBase
 from sdcm.utils.adaptive_timeouts.load_info_store import AdaptiveTimeoutStore
 from sdcm.utils.adaptive_timeouts import Operations, adaptive_timeout
-from unit_tests.lib.fake_remoter import FakeRemoter
+from unit_tests.test_cluster import DummyDbCluster
 
 LOGGER = logging.getLogger(__name__)
 
@@ -34,6 +34,7 @@ def __init__(self, name: str, remoter):
         self.name = name
         self.remoter = remoter
         self.scylla_version_detailed = "2042.1.12-0.20220620.e23889f17"
+        self.parent_cluster = DummyDbCluster(nodes=[self], params={'n_db_nodes': 1})
 
 
 class MemoryAdaptiveTimeoutStore(AdaptiveTimeoutStore):
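
A minimal, self-contained sketch of the behaviour this patch adds: the new
n_db_nodes value is merged into the collected load metrics via the Python 3.9+
dict union operator. FakeCluster, FakeNode and the sample metric keys below are
illustrative stand-ins for the SCT objects, not the real classes.

# Illustrative stand-ins for SCT's cluster/node objects (hypothetical names).
from dataclasses import dataclass, field


@dataclass
class FakeCluster:
    nodes: list = field(default_factory=list)


@dataclass
class FakeNode:
    parent_cluster: FakeCluster


class TestInfoServices:  # mirrors the class added by this patch
    @staticmethod
    def get(node: FakeNode) -> dict:
        return dict(
            n_db_nodes=len(node.parent_cluster.nodes),
        )


if __name__ == "__main__":
    cluster = FakeCluster()
    node = FakeNode(parent_cluster=cluster)
    cluster.nodes.extend([node, object(), object()])  # pretend a 3-node cluster

    # Sample keys are made up; the real ones come from NodeLoadInfoServices.
    load_metrics = {"cpu_load": 12.5, "shards": 8}
    load_metrics = load_metrics | TestInfoServices.get(node)  # dict union (Python 3.9+)
    print(load_metrics)  # {'cpu_load': 12.5, 'shards': 8, 'n_db_nodes': 3}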