diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py
index 392c4d4c0b50..1349776cdf11 100644
--- a/test_runner/fixtures/neon_fixtures.py
+++ b/test_runner/fixtures/neon_fixtures.py
@@ -78,19 +78,6 @@
 DEFAULT_BRANCH_NAME: str = "main"
 
 BASE_PORT: int = 15000
-WORKER_PORT_NUM: int = 1000
-
-
-def pytest_configure(config: Config):
-    """
-    Check that we do not overflow available ports range.
-    """
-
-    numprocesses = config.getoption("numprocesses")
-    if (
-        numprocesses is not None and BASE_PORT + numprocesses * WORKER_PORT_NUM > 32768
-    ):  # do not use ephemeral ports
-        raise Exception("Too many workers configured. Cannot distribute ports for services.")
 
 
 @pytest.fixture(scope="session")
@@ -192,6 +179,11 @@ def myfixture(...)
     return scope
 
 
+@pytest.fixture(scope="session")
+def worker_port_num():
+    return (32768 - BASE_PORT) // int(os.environ.get("PYTEST_XDIST_WORKER_COUNT", "1"))
+
+
 @pytest.fixture(scope="session")
 def worker_seq_no(worker_id: str) -> int:
     # worker_id is a pytest-xdist fixture
@@ -204,10 +196,10 @@ def worker_seq_no(worker_id: str) -> int:
 
 
 @pytest.fixture(scope="session")
-def worker_base_port(worker_seq_no: int) -> int:
-    # so we divide ports in ranges of 100 ports
+def worker_base_port(worker_seq_no: int, worker_port_num: int) -> int:
+    # so we divide ports in ranges of worker_port_num ports
     # so workers have disjoint set of ports for services
-    return BASE_PORT + worker_seq_no * WORKER_PORT_NUM
+    return BASE_PORT + worker_seq_no * worker_port_num
 
 
 def get_dir_size(path: str) -> int:
@@ -292,8 +284,8 @@ def _replace_port_str(self, value: str) -> str:
 
 
 @pytest.fixture(scope="session")
-def port_distributor(worker_base_port: int) -> PortDistributor:
-    return PortDistributor(base_port=worker_base_port, port_number=WORKER_PORT_NUM)
+def port_distributor(worker_base_port: int, worker_port_num: int) -> PortDistributor:
+    return PortDistributor(base_port=worker_base_port, port_number=worker_port_num)
 
 
 @pytest.fixture(scope="session")
diff --git a/test_runner/regress/test_duplicate_layers.py b/test_runner/regress/test_duplicate_layers.py
index c1832a2063ad..7f76a8e04287 100644
--- a/test_runner/regress/test_duplicate_layers.py
+++ b/test_runner/regress/test_duplicate_layers.py
@@ -33,4 +33,4 @@ def test_duplicate_layers(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin):
     time.sleep(10)  # let compaction to be performed
     assert env.pageserver.log_contains("compact-level0-phase1-return-same")
 
-    pg_bin.run_capture(["pgbench", "-P1", "-N", "-c5", "-T500", "-Mprepared", connstr])
+    pg_bin.run_capture(["pgbench", "-P1", "-N", "-c5", "-T200", "-Mprepared", connstr])
diff --git a/test_runner/regress/test_wal_acceptor_async.py b/test_runner/regress/test_wal_acceptor_async.py
index bb8ee8f52c57..cfc131a3aa21 100644
--- a/test_runner/regress/test_wal_acceptor_async.py
+++ b/test_runner/regress/test_wal_acceptor_async.py
@@ -245,7 +245,7 @@ def test_restarts_frequent_checkpoints(neon_env_builder: NeonEnvBuilder):
     # we try to simulate large (flush_lsn - truncate_lsn) lag, to test that WAL segments
     # are not removed before broadcasted to all safekeepers, with the help of replication slot
     asyncio.run(
-        run_restarts_under_load(env, endpoint, env.safekeepers, period_time=15, iterations=5)
+        run_restarts_under_load(env, endpoint, env.safekeepers, period_time=15, iterations=4)
     )
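For context, a minimal standalone sketch (not part of the diff) of the port-partitioning arithmetic the new fixtures implement. It uses the same `PYTEST_XDIST_WORKER_COUNT` environment variable as the diff (pytest-xdist sets it for each worker); the constant name `EPHEMERAL_FLOOR` and the `__main__` demo are illustrative only:

```python
import os

BASE_PORT = 15000
EPHEMERAL_FLOOR = 32768  # hypothetical name: stay below the ephemeral port range

def worker_port_num() -> int:
    # Split [BASE_PORT, 32768) evenly across all xdist workers,
    # instead of a fixed 1000-port slice per worker.
    return (EPHEMERAL_FLOOR - BASE_PORT) // int(os.environ.get("PYTEST_XDIST_WORKER_COUNT", "1"))

def worker_base_port(worker_seq_no: int) -> int:
    # Each worker gets a disjoint slice of worker_port_num() ports.
    return BASE_PORT + worker_seq_no * worker_port_num()

if __name__ == "__main__":
    os.environ["PYTEST_XDIST_WORKER_COUNT"] = "16"
    n = 16
    # With 16 workers each slice is (32768 - 15000) // 16 = 1110 ports,
    # and the highest slice tops out at 15000 + 16 * 1110 = 32760 < 32768.
    assert worker_base_port(n - 1) + worker_port_num() <= EPHEMERAL_FLOOR
    print([worker_base_port(i) for i in range(n)])
```

Because integer division rounds down, the last worker's slice always ends at or below 32768 regardless of the worker count, which is why the removed `pytest_configure` overflow check becomes unnecessary; the trade-off is that each worker's slice shrinks as more workers are added.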