diff --git a/.github/actions/install-aiida-core/action.yml b/.github/actions/install-aiida-core/action.yml
index 700e565034..f51cf4d299 100644
--- a/.github/actions/install-aiida-core/action.yml
+++ b/.github/actions/install-aiida-core/action.yml
@@ -28,7 +28,7 @@ runs:
   - name: Install uv installer
     run: curl --proto '=https' --tlsv1.2 -LsSf https://${{ env.UV_URL }} | sh
     env:
-      UV_VERSION: 0.1.44
+      UV_VERSION: 0.2.5
       UV_URL: github.com/astral-sh/uv/releases/download/$UV_VERSION/uv-installer.sh
     shell: bash
diff --git a/.github/workflows/ci-code.yml b/.github/workflows/ci-code.yml
index bec312c60f..bde141594b 100644
--- a/.github/workflows/ci-code.yml
+++ b/.github/workflows/ci-code.yml
@@ -1,4 +1,4 @@
-name: continuous-integration-code
+name: ci-code
 
 on:
   push:
@@ -13,6 +13,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
 
+env:
+  FORCE_COLOR: 1
+
 jobs:
 
   check-requirements:
@@ -99,7 +102,9 @@ jobs:
       env:
         AIIDA_TEST_PROFILE: test_aiida
         AIIDA_WARN_v3: 1
-      run: pytest --cov aiida --verbose tests -m 'not nightly'
+      # Python 3.12 has a performance regression when running with code coverage,
+      # so run code coverage only for Python 3.9.
+      run: pytest -v tests -m 'not nightly' ${{ matrix.python-version == '3.9' && '--cov aiida' || '' }}
 
     - name: Upload coverage report
       if: matrix.python-version == 3.9 && github.repository == 'aiidateam/aiida-core'
@@ -110,15 +115,48 @@ jobs:
         file: ./coverage.xml
         fail_ci_if_error: false  # don't fail job, if coverage upload fails
 
-  verdi:
+  tests-presto:
 
+    needs: [check-requirements]
     runs-on: ubuntu-latest
-    timeout-minutes: 15
+    timeout-minutes: 20
 
-    strategy:
-      fail-fast: false
-      matrix:
-        python-version: ['3.9', '3.12']
+    steps:
+    - uses: actions/checkout@v4
+
+    - name: Install graphviz
+      run: sudo apt update && sudo apt install graphviz
+
+    - name: Install aiida-core
+      uses: ./.github/actions/install-aiida-core
+      with:
+        python-version: '3.11'
+
+    - name: Setup SSH on localhost
+      run: .github/workflows/setup_ssh.sh
+
+    - name: Run test suite
+      env:
+        AIIDA_WARN_v3: 0
+      run: pytest -m 'presto' --cov aiida
+
+    - name: Upload coverage report
+      if: github.repository == 'aiidateam/aiida-core'
+      uses: codecov/codecov-action@v4
+      with:
+        token: ${{ secrets.CODECOV_TOKEN }}
+        name: aiida-pytests-presto
+        flags: presto
+        file: ./coverage.xml
+        fail_ci_if_error: false  # don't fail the job if the coverage upload fails
+
+
+  verdi:
+
+    needs: [check-requirements]
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
 
     steps:
     - uses: actions/checkout@v4
@@ -126,10 +164,9 @@ jobs:
     - name: Install aiida-core
       uses: ./.github/actions/install-aiida-core
       with:
-        python-version: ${{ matrix.python-version }}
-        from-requirements: 'false'
+        python-version: '3.12'
 
-    - name: Run verdi
+    - name: Run verdi tests
      run: |
        verdi devel check-load-time
        verdi devel check-undesired-imports
diff --git a/.github/workflows/ci-style.yml b/.github/workflows/ci-style.yml
index 064359bca5..83a0e2bd87 100644
--- a/.github/workflows/ci-style.yml
+++ b/.github/workflows/ci-style.yml
@@ -1,4 +1,4 @@
-name: continuous-integration-style
+name: ci-style
 
 on:
   push:
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index ad77fd9161..c7ea8cb787 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -19,6 +19,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
 
+env:
+  FORCE_COLOR: 1
+
 jobs:
 
   nightly-tests:
@@ -91,7 +94,7 @@ jobs:
   rabbitmq-tests:
 
     runs-on: ubuntu-latest
-    timeout-minutes: 30
+    timeout-minutes: 10
 
     strategy:
       fail-fast: false
@@ -132,6 +135,12 @@
     - name: Install system dependencies
       run: sudo apt update && sudo apt install postgresql
 
+    - name: Setup SSH on localhost
+      run: .github/workflows/setup_ssh.sh
+
+    - name: Suppress RabbitMQ version warning
+      run: verdi config set warnings.rabbitmq_version False
+
     - name: Run tests
       id: tests
      env:
diff --git a/.github/workflows/setup.sh b/.github/workflows/setup.sh
index 174e09b598..aaa0228d36 100755
--- a/.github/workflows/setup.sh
+++ b/.github/workflows/setup.sh
@@ -1,12 +1,8 @@
 #!/usr/bin/env bash
 set -ev
 
-ssh-keygen -q -t rsa -b 4096 -N "" -f "${HOME}/.ssh/id_rsa"
-ssh-keygen -y -f "${HOME}/.ssh/id_rsa" >> "${HOME}/.ssh/authorized_keys"
-ssh-keyscan -H localhost >> "${HOME}/.ssh/known_hosts"
-
-# The permissions on the GitHub runner are 777 which will cause SSH to refuse the keys and cause authentication to fail
-chmod 755 "${HOME}"
+# Setup SSH on localhost
+${GITHUB_WORKSPACE}/.github/workflows/setup_ssh.sh
 
 # Replace the placeholders in configuration files with actual values
 CONFIG="${GITHUB_WORKSPACE}/.github/config"
diff --git a/.github/workflows/setup_ssh.sh b/.github/workflows/setup_ssh.sh
new file mode 100755
index 0000000000..a244f1e470
--- /dev/null
+++ b/.github/workflows/setup_ssh.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+set -ev
+
+ssh-keygen -q -t rsa -b 4096 -N "" -f "${HOME}/.ssh/id_rsa"
+ssh-keygen -y -f "${HOME}/.ssh/id_rsa" >> "${HOME}/.ssh/authorized_keys"
+ssh-keyscan -H localhost >> "${HOME}/.ssh/known_hosts"
+
+# The home directory on the GitHub runner has 777 permissions, which will cause SSH to refuse the keys and make authentication fail
+chmod 755 "${HOME}"
diff --git a/.github/workflows/verdi.sh b/.github/workflows/verdi.sh
index 11bf8d2c0b..1aaac1a0a4 100755
--- a/.github/workflows/verdi.sh
+++ b/.github/workflows/verdi.sh
@@ -1,14 +1,13 @@
 #!/usr/bin/env bash
 
-# Test the loading time of `verdi`. This is and attempt to catch changes to the imports in `aiida.cmdline` that will
-# indirectly load the `aiida.orm` module which will trigger loading of the backend environment. This slows down `verdi`
-# significantly, making tab-completion unusable.
+# Test the loading time of `verdi`. This is an attempt to catch changes to the imports in `aiida.cmdline` that
+# would slow down `verdi` invocations and make tab-completion unusable.
 VERDI=`which verdi`
 
-# Typically, the loading time of `verdi` should be around ~0.2 seconds. When loading the database environment this
-# tends to go towards ~0.8 seconds. Since these timings are obviously machine and environment dependent, typically these
-# types of tests are fragile. But with a load limit of more than twice the ideal loading time, if exceeded, should give
-# a reasonably sure indication that the loading of `verdi` is unacceptably slowed down.
+# Typically, the loading time of `verdi` should be around ~0.2 seconds.
+# Tests of this kind are typically fragile, but a load limit of more than twice
+# the ideal loading time, if exceeded, gives a reasonably sure indication
+# that the loading of `verdi` is unacceptably slow.
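+# For example, a LOAD_LIMIT of 0.4 is twice the ~0.2 s baseline loading time.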
 LOAD_LIMIT=0.4
 MAX_NUMBER_ATTEMPTS=5
diff --git a/docs/source/nitpick-exceptions b/docs/source/nitpick-exceptions
index d3fdc420f0..3e6bd2b5bb 100644
--- a/docs/source/nitpick-exceptions
+++ b/docs/source/nitpick-exceptions
@@ -192,8 +192,11 @@ py:class _asyncio.Future
 
 py:class tqdm.std.tqdm
 
+py:class _pytest.tmpdir.TempPathFactory
+py:class pytest.tmpdir.TempPathFactory
 py:class pytest.TempPathFactory
 py:class PGTest
+py:class pgtest.pgtest.PGTest
 
 py:class IPython.core.magic.Magics
 
@@ -207,8 +210,6 @@ py:class flask_restful.Resource
 py:class flask.app.Flask
 py:class Flask
 
-py:class pytest.tmpdir.TempPathFactory
-
 py:class scoped_session
 py:class sqlalchemy.orm.decl_api.SqliteModel
 py:class sqlalchemy.orm.decl_api.Base
diff --git a/pyproject.toml b/pyproject.toml
index 6f105cb447..85b5795a99 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -351,8 +351,11 @@ ignore_errors = true
 module = 'plumpy'
 
 [tool.pytest.ini_options]
-addopts = '--benchmark-skip --durations=50 --strict-config --strict-markers -ra --cov-report xml --cov-append '
+addopts = '--benchmark-skip --durations=50 --durations-min=1 --strict-config --strict-markers -ra --cov-report xml --cov-append '
 filterwarnings = [
+    'ignore:.*and will be removed in NumPy 2.0.*:DeprecationWarning:ase:',
+    'ignore:datetime.datetime.utcfromtimestamp:DeprecationWarning:pytz|dateutil|tqdm|aio_pika:',
+    'ignore:datetime.datetime.utcnow:DeprecationWarning:aio_pika|pytz|pgtest:',
     'ignore::DeprecationWarning:babel:',
     'ignore::DeprecationWarning:frozendict:',
     'ignore::DeprecationWarning:sqlalchemy:',
@@ -360,9 +363,12 @@ filterwarnings = [
     'ignore::DeprecationWarning:pymatgen:',
     'ignore::DeprecationWarning:jsonbackend:',
     'ignore::DeprecationWarning:pkg_resources:',
+    'ignore:ast.* is deprecated.*:DeprecationWarning:docstring_parser:',
+    'ignore::SyntaxWarning:CifFile:',
     'ignore::pytest.PytestCollectionWarning',
     'ignore:Creating AiiDA configuration folder.*:UserWarning',
     'ignore:Object of type .* not in session, .* operation along .* will not proceed:sqlalchemy.exc.SAWarning',
+    'ignore:Identity map already had an identity for .* inside of an event handler within the flush?:sqlalchemy.exc.SAWarning',
     'ignore:The `aiida.orm.nodes.data.upf` module is deprecated.*:aiida.common.warnings.AiidaDeprecationWarning',
     'ignore:The `Code` class is deprecated.*:aiida.common.warnings.AiidaDeprecationWarning',
     'default::ResourceWarning'
@@ -370,6 +376,8 @@ filterwarnings = [
 markers = [
     'nightly: long running tests that should rarely be affected and so only run nightly',
     'requires_rmq: requires a connection (on port 5672) to RabbitMQ',
+    'requires_psql: requires a connection to PostgreSQL DB',
+    'presto: automatic marker meaning "not requires_rmq and not requires_psql"',
     'sphinx: set parameters for the sphinx `app` fixture'
 ]
 minversion = '7.0'
diff --git a/requirements/requirements-py-3.10.txt b/requirements/requirements-py-3.10.txt
index bc28d7eb39..f8c0903e2d 100644
--- a/requirements/requirements-py-3.10.txt
+++ b/requirements/requirements-py-3.10.txt
@@ -199,7 +199,6 @@ tomli==2.0.1
 tornado==6.3.2
 tqdm==4.65.0
 traitlets==5.9.0
-trogon==0.5.0
 typing-extensions==4.6.3
 tzdata==2023.3
 uc-micro-py==1.0.2
diff --git a/requirements/requirements-py-3.11.txt b/requirements/requirements-py-3.11.txt
index dfa35673a8..1de3c788d7 100644
--- a/requirements/requirements-py-3.11.txt
+++ b/requirements/requirements-py-3.11.txt
@@ -197,7 +197,6 @@ tinycss2==1.2.1
 tornado==6.3.2
 tqdm==4.65.0
 traitlets==5.9.0
-trogon==0.5.0
 typing-extensions==4.6.3
 tzdata==2023.3
 uc-micro-py==1.0.2
diff --git a/requirements/requirements-py-3.9.txt b/requirements/requirements-py-3.9.txt
index d59b8e2f1d..69ffaf4f80 100644
--- a/requirements/requirements-py-3.9.txt
+++ b/requirements/requirements-py-3.9.txt
@@ -201,7 +201,6 @@ tomli==2.0.1
 tornado==6.3.2
 tqdm==4.65.0
 traitlets==5.9.0
-trogon==0.5.0
 typing-extensions==4.6.3
 tzdata==2023.3
 uc-micro-py==1.0.2
diff --git a/src/aiida/storage/psql_dos/migrations/utils/integrity.py b/src/aiida/storage/psql_dos/migrations/utils/integrity.py
index 5004ca1c82..32fb195200 100644
--- a/src/aiida/storage/psql_dos/migrations/utils/integrity.py
+++ b/src/aiida/storage/psql_dos/migrations/utils/integrity.py
@@ -161,7 +161,7 @@ def write_database_integrity_violation(results, headers, reason_message, action_
     :param reason_message: a human readable message detailing the reason of the integrity violation
     :param action_message: an optional human readable message detailing a performed action, if any
     """
-    from datetime import datetime
+    from datetime import datetime, timezone
     from tempfile import NamedTemporaryFile
 
     from tabulate import tabulate
@@ -183,7 +183,7 @@ def write_database_integrity_violation(results, headers, reason_message, action_
             )
         )
 
-        handle.write(f'# {datetime.utcnow().isoformat()}\n')
+        handle.write(f'# {datetime.now(timezone.utc).isoformat()}\n')
         handle.write(f'# Violation reason: {reason_message}\n')
         handle.write(f'# Performed action: {action_message}\n')
         handle.write('\n')
diff --git a/src/aiida/tools/pytest_fixtures/__init__.py b/src/aiida/tools/pytest_fixtures/__init__.py
index 6181183d13..1b2c38e285 100644
--- a/src/aiida/tools/pytest_fixtures/__init__.py
+++ b/src/aiida/tools/pytest_fixtures/__init__.py
@@ -25,7 +25,7 @@
     aiida_localhost,
     ssh_key,
 )
-from .storage import config_psql_dos, postgres_cluster
+from .storage import config_psql_dos, config_sqlite_dos, postgres_cluster
 
 __all__ = (
     'aiida_code_installed',
@@ -44,6 +44,7 @@
     'aiida_profile_tmp',
     'aiida_profile',
     'config_psql_dos',
+    'config_sqlite_dos',
     'daemon_client',
     'entry_points',
     'postgres_cluster',
diff --git a/src/aiida/tools/pytest_fixtures/storage.py b/src/aiida/tools/pytest_fixtures/storage.py
index d76c9b4452..2f13cf25a1 100644
--- a/src/aiida/tools/pytest_fixtures/storage.py
+++ b/src/aiida/tools/pytest_fixtures/storage.py
@@ -2,32 +2,43 @@
 
 from __future__ import annotations
 
+import pathlib
 import typing as t
+from uuid import uuid4
 
 import pytest
+from pgtest.pgtest import PGTest
 
 if t.TYPE_CHECKING:
     from pgtest.pgtest import PGTest
 
 
-@pytest.fixture(scope='session')
-def postgres_cluster():
-    """Create a temporary and isolated PostgreSQL cluster using ``pgtest`` and cleanup after the yield.
+class PostgresCluster:
+    def __init__(self):
+        # We initialize the cluster lazily
+        self.cluster = None
 
-    :param database_name: Name of the database.
-    :param database_username: Username to use for authentication.
-    :param database_password: Password to use for authentication.
-    :returns: Dictionary with parameters to connect to the PostgreSQL cluster.
- """ - from uuid import uuid4 + def _create(self): + try: + self.cluster = PGTest() + except OSError as e: + raise RuntimeError('Could not initialize PostgreSQL cluster') from e - from pgtest.pgtest import PGTest + def _close(self): + if self.cluster is not None: + self.cluster.close() def create_database( - database_name: str | None = None, database_username: str | None = None, database_password: str | None = None + self, + database_name: str | None = None, + database_username: str | None = None, + database_password: str | None = None, ) -> dict[str, str]: from aiida.manage.external.postgres import Postgres + if self.cluster is None: + self._create() + postgres_config = { 'database_engine': 'postgresql_psycopg2', 'database_name': database_name or str(uuid4()), @@ -35,7 +46,7 @@ def create_database( 'database_password': database_password or 'guest', } - postgres = Postgres(interactive=False, quiet=True, dbinfo=cluster.dsn) # type: ignore[union-attr] + postgres = Postgres(interactive=False, quiet=True, dbinfo=self.cluster.dsn) # type: ignore[union-attr] if not postgres.dbuser_exists(postgres_config['database_username']): postgres.create_dbuser( postgres_config['database_username'], postgres_config['database_password'], 'CREATEDB' @@ -47,14 +58,21 @@ def create_database( return postgres_config - cluster = None - try: - cluster = PGTest() - cluster.create_database = create_database - yield cluster - finally: - if cluster is not None: - cluster.close() + +# TODO: Update docstring accordingly +@pytest.fixture(scope='session') +def postgres_cluster(): + """Create a temporary and isolated PostgreSQL cluster using ``pgtest`` and cleanup after the yield. + + :param database_name: Name of the database. + :param database_username: Username to use for authentication. + :param database_password: Password to use for authentication. + :returns: Dictionary with parameters to connect to the PostgreSQL cluster. + """ + + cluster = PostgresCluster() + yield cluster + cluster._close() @pytest.fixture(scope='session') @@ -85,3 +103,21 @@ def factory( return storage_config return factory + + +@pytest.fixture(scope='session') +def config_sqlite_dos( + tmp_path_factory: pytest.TempPathFactory, +) -> t.Callable[[str | pathlib.Path | None], dict[str, t.Any]]: + """Return a profile configuration for the :class:`~aiida.storage.sqlite_dos.backend.SqliteDosStorage`. + + The factory has the following signature to allow further configuring the database that is created: + + :param filepath: Optional path to the sqlite database file. + :returns: The dictionary with the storage configuration for the ``core.sqlite_dos`` storage plugin. + """ + + def factory(filepath: str | pathlib.Path | None = None) -> dict[str, t.Any]: + return {'filepath': str(filepath or tmp_path_factory.mktemp('test_sqlite_dos_storage'))} + + return factory diff --git a/tests/brokers/test_rabbitmq.py b/tests/brokers/test_rabbitmq.py index 0e828fa12c..fc27a3eaf6 100644 --- a/tests/brokers/test_rabbitmq.py +++ b/tests/brokers/test_rabbitmq.py @@ -19,6 +19,8 @@ from kiwipy.rmq import RmqThreadCommunicator from packaging.version import parse +pytestmark = pytest.mark.requires_rmq + @pytest.mark.parametrize( ('version', 'supported'), @@ -67,7 +69,6 @@ def test_get_rmq_url(args, kwargs, expected): utils.get_rmq_url(*args, **kwargs) -@pytest.mark.requires_rmq @pytest.mark.parametrize('url', ('amqp://guest:guest@127.0.0.1:5672',)) def test_communicator(url): """Test the instantiation of a ``kiwipy.rmq.RmqThreadCommunicator``. 
@@ -77,19 +78,16 @@ def test_communicator(url):
     RmqThreadCommunicator.connect(connection_params={'url': url})
 
 
-@pytest.mark.requires_rmq
 def test_add_rpc_subscriber(communicator):
     """Test ``add_rpc_subscriber``."""
     communicator.add_rpc_subscriber(None)
 
 
-@pytest.mark.requires_rmq
 def test_add_broadcast_subscriber(communicator):
     """Test ``add_broadcast_subscriber``."""
     communicator.add_broadcast_subscriber(None)
 
 
-@pytest.mark.requires_rmq
 @pytest.mark.usefixtures('aiida_profile_clean')
 def test_duplicate_subscriber_identifier(aiida_code_installed, started_daemon_client, submit_and_await):
     """Test that a ``DuplicateSubscriberError`` in ``ProcessLauncher._continue`` does not except the process.
@@ -148,7 +146,6 @@ def rabbitmq_client(aiida_profile):
     )
 
 
-@pytest.mark.requires_rmq
 class TestRabbitmqManagementClient:
     """Tests for the :class:`aiida.brokers.rabbitmq.client.RabbitmqManagementClient`."""
 
diff --git a/tests/cmdline/commands/test_calcjob.py b/tests/cmdline/commands/test_calcjob.py
index c20d991c46..0b42ac0096 100644
--- a/tests/cmdline/commands/test_calcjob.py
+++ b/tests/cmdline/commands/test_calcjob.py
@@ -241,7 +241,7 @@ def test_calcjob_outputcat(self):
         retrieved.base.repository._repository.put_object_from_filelike(io.BytesIO(b'5\n'), 'aiida.out')
         retrieved.base.repository._update_repository_metadata()
 
-    def test_calcjob_cleanworkdir(self):
+    def test_calcjob_cleanworkdir_basic(self, pytestconfig):
         """Test verdi calcjob cleanworkdir"""
         # Specifying no filtering options and no explicit calcjobs should exit with non-zero status
         options = []
@@ -258,14 +258,21 @@ def test_calcjob_cleanworkdir(self):
         result = self.cli_runner.invoke(command.calcjob_cleanworkdir, options)
         assert result.exception is None, result.output
 
+        # The flag should have been set
+        assert self.result_job.outputs.remote_folder.base.extras.get('cleaned') is True
+
+        # TODO: This currently fails with the sqlite backend,
+        # since the filtering relies on the `has_key` filter which is not implemented in SQLite.
+        # https://github.com/aiidateam/aiida-core/issues/6256
+        marker_opt = pytestconfig.getoption('-m')
+        if 'not requires_psql' in marker_opt or 'presto' in marker_opt:
+            pytest.xfail('Known sqlite backend failure')
 
         # Do it again should fail as the calcjob has been cleaned
         options = ['-f', str(self.result_job.uuid)]
         result = self.cli_runner.invoke(command.calcjob_cleanworkdir, options)
         assert result.exception is not None, result.output
 
-        # The flag should have been set
-        assert self.result_job.outputs.remote_folder.base.extras.get('cleaned') is True
-
+    def test_calcjob_cleanworkdir_advanced(self):
         # Check applying both p and o filters
         for flag_p in ['-p', '--past-days']:
             for flag_o in ['-o', '--older-than']:
diff --git a/tests/cmdline/commands/test_daemon.py b/tests/cmdline/commands/test_daemon.py
index 88b4038ad3..8bf5a9cbff 100644
--- a/tests/cmdline/commands/test_daemon.py
+++ b/tests/cmdline/commands/test_daemon.py
@@ -16,6 +16,8 @@
 from aiida.cmdline.commands import cmd_daemon
 from aiida.engine.daemon.client import DaemonClient
 
+pytestmark = pytest.mark.requires_rmq
+
 
 def format_local_time(timestamp, format_str='%Y-%m-%d %H:%M:%S'):
     """Format a datetime object or UNIX timestamp in a human readable format
@@ -26,9 +28,9 @@ def format_local_time(timestamp, format_str='%Y-%m-%d %H:%M:%S'):
     :param timestamp: a datetime object or a float representing a UNIX timestamp
     :param format_str: optional string format to pass to strftime
     """
-    from datetime import datetime
+    from datetime import datetime, timezone
 
-    return datetime.utcfromtimestamp(timestamp).strftime(format_str)
+    return datetime.fromtimestamp(timestamp, timezone.utc).strftime(format_str)
 
 
 def test_daemon_start(run_cli_command, stopped_daemon_client):
diff --git a/tests/cmdline/commands/test_devel.py b/tests/cmdline/commands/test_devel.py
index 7dc2fdff0a..f164813625 100644
--- a/tests/cmdline/commands/test_devel.py
+++ b/tests/cmdline/commands/test_devel.py
@@ -15,6 +15,7 @@
 from aiida.orm import Node, ProcessNode, QueryBuilder
 
 
+@pytest.mark.requires_psql
 def test_run_sql(run_cli_command):
     """Test ``verdi devel run-sql``."""
     options = ['SELECT COUNT(*) FROM db_dbnode;']
diff --git a/tests/cmdline/commands/test_presto.py b/tests/cmdline/commands/test_presto.py
index c8503e0577..13760a53b7 100644
--- a/tests/cmdline/commands/test_presto.py
+++ b/tests/cmdline/commands/test_presto.py
@@ -28,14 +28,12 @@ def get_profile_names(self):
 
 
 @pytest.mark.usefixtures('empty_config')
-@pytest.mark.parametrize('with_broker', (True, False))
-def test_presto(run_cli_command, with_broker, monkeypatch):
-    """Test ``verdi presto`` with and without a broker present."""
+def test_presto_without_rmq(run_cli_command, monkeypatch):
+    """Test ``verdi presto`` without RabbitMQ."""
     from aiida.brokers.rabbitmq import defaults
 
-    if not with_broker:
-        # Patch the RabbitMQ detection function to pretend it could not find the service
-        monkeypatch.setattr(defaults, 'detect_rabbitmq_config', lambda: None)
+    # Patch the RabbitMQ detection function to pretend it could not find the service
+    monkeypatch.setattr(defaults, 'detect_rabbitmq_config', lambda: None)
 
     result = run_cli_command(verdi_presto, ['--non-interactive'])
     assert 'Created new profile `presto`.' in result.output
@@ -44,12 +42,24 @@
         assert profile.name == 'presto'
         localhost = Computer.collection.get(label='localhost')
         assert localhost.is_configured
-        if with_broker:
-            assert profile.process_control_backend == 'core.rabbitmq'
-        else:
-            assert profile.process_control_backend is None
+        assert profile.process_control_backend is None
+
+
+@pytest.mark.requires_rmq
+@pytest.mark.usefixtures('empty_config')
+def test_presto_with_rmq(run_cli_command):
+    """Test ``verdi presto`` with RabbitMQ available."""
+    result = run_cli_command(verdi_presto, ['--non-interactive'])
+    assert 'Created new profile `presto`.' in result.output
+
+    with profile_context('presto', allow_switch=True) as profile:
+        assert profile.name == 'presto'
+        localhost = Computer.collection.get(label='localhost')
+        assert localhost.is_configured
+        assert profile.process_control_backend == 'core.rabbitmq'
+
+
+@pytest.mark.requires_psql
 @pytest.mark.usefixtures('empty_config')
 def test_presto_use_postgres(run_cli_command, manager):
     """Test the ``verdi presto`` with the ``--use-postgres`` flag."""
diff --git a/tests/cmdline/commands/test_process.py b/tests/cmdline/commands/test_process.py
index 73c9ac7084..162a471db2 100644
--- a/tests/cmdline/commands/test_process.py
+++ b/tests/cmdline/commands/test_process.py
@@ -371,6 +371,7 @@ def test_process_dump(self, run_cli_command, tmp_path, generate_workchain_multip
 
 @pytest.mark.usefixtures('aiida_profile_clean')
 @pytest.mark.parametrize('numprocesses, percentage', ((0, 100), (1, 90)))
+@pytest.mark.requires_rmq
 def test_list_worker_slot_warning(run_cli_command, monkeypatch, numprocesses, percentage):
     """Test that the if the number of used worker process slots exceeds a threshold, that the warning message is
     displayed to the user when running `verdi process list`
diff --git a/tests/cmdline/commands/test_profile.py b/tests/cmdline/commands/test_profile.py
index 4c382e732c..b0a83cdc00 100644
--- a/tests/cmdline/commands/test_profile.py
+++ b/tests/cmdline/commands/test_profile.py
@@ -15,6 +15,10 @@
 from aiida.tools.archive.create import create_archive
 from pgtest.pgtest import PGTest
 
+# NOTE: Most of these tests would work with sqlite_dos,
+# but would require generalizing a bunch of fixtures ('profile_factory' et al.) in tests/conftest.py
+pytestmark = pytest.mark.requires_psql
+
 
 @pytest.fixture(scope='module')
 def pg_test_cluster():
diff --git a/tests/cmdline/commands/test_rabbitmq.py b/tests/cmdline/commands/test_rabbitmq.py
index 4d9c0ac449..990fb50767 100644
--- a/tests/cmdline/commands/test_rabbitmq.py
+++ b/tests/cmdline/commands/test_rabbitmq.py
@@ -16,6 +16,7 @@
 from plumpy.process_comms import RemoteProcessThreadController
 
 
+@pytest.mark.requires_rmq
 def test_queues_list(run_cli_command):
     """Test the ``queues list``"""
     result = run_cli_command(cmd_rabbitmq.cmd_queues_list)
diff --git a/tests/cmdline/commands/test_setup.py b/tests/cmdline/commands/test_setup.py
index 5b5f7d126e..9a12e3fb88 100644
--- a/tests/cmdline/commands/test_setup.py
+++ b/tests/cmdline/commands/test_setup.py
@@ -19,6 +19,8 @@
 from aiida.manage.external.postgres import Postgres
 from pgtest.pgtest import PGTest
 
+pytestmark = pytest.mark.requires_psql
+
 
 @pytest.fixture(scope='class')
 def pg_test_cluster():
diff --git a/tests/cmdline/commands/test_status.py b/tests/cmdline/commands/test_status.py
index 1d49f0c569..d02aff07d2 100644
--- a/tests/cmdline/commands/test_status.py
+++ b/tests/cmdline/commands/test_status.py
@@ -26,7 +26,7 @@ def test_status(run_cli_command):
     assert 'The daemon is not running' in result.output
     assert result.exit_code is ExitCode.SUCCESS.value
 
-    for string in ['config', 'profile', 'postgres', 'broker', 'daemon']:
+    for string in ['config', 'profile', 'storage', 'broker', 'daemon']:
         assert string in result.output
 
     assert __version__ in result.output
@@ -48,10 +48,11 @@ def test_status_no_rmq(run_cli_command):
     assert 'rabbitmq' not in result.output
     assert result.exit_code is ExitCode.SUCCESS.value
 
-    for string in ['config', 'profile', 'postgres', 'daemon']:
+    for string in ['config', 'profile', 'storage', 'daemon']:
         assert string in result.output
 
 
+@pytest.mark.requires_psql
 def test_storage_unable_to_connect(run_cli_command):
     """Test `verdi status` when there is an unknown error while connecting to the storage."""
     profile = get_profile()
diff --git a/tests/common/test_hashing.py b/tests/common/test_hashing.py
index 2ef50253f4..d51c543195 100644
--- a/tests/common/test_hashing.py
+++ b/tests/common/test_hashing.py
@@ -12,7 +12,7 @@
 import hashlib
 import itertools
 import uuid
-from datetime import datetime
+from datetime import datetime, timezone
 from decimal import Decimal
 
 import numpy as np
@@ -150,7 +150,7 @@ def test_datetime(self):
             == '714138f1114daa5fdc74c3483260742952b71b568d634c6093bb838afad76646'
         )
         assert (
-            make_hash(datetime.utcfromtimestamp(0))
+            make_hash(datetime.fromtimestamp(0, timezone.utc))
             == 'b4d97d9d486937775bcc25a5cba073f048348c3cd93d4460174a4f72a6feb285'
        )
 
@@ -167,7 +167,7 @@
     def test_datetime_precision_hashing(self):
         dt_prec = DatetimePrecision(datetime(2018, 8, 18, 8, 18), 10)
         assert make_hash(dt_prec) == '837ab70b3b7bd04c1718834a0394a2230d81242c442e4aa088abeab15622df37'
-        dt_prec_utc = DatetimePrecision(datetime.utcfromtimestamp(0), 0)
+        dt_prec_utc = DatetimePrecision(datetime.fromtimestamp(0, timezone.utc), 0)
         assert make_hash(dt_prec_utc) == '8c756ee99eaf9655bb00166839b9d40aa44eac97684b28f6e3c07d4331ae644e'
 
     def test_numpy_types(self):
diff --git a/tests/conftest.py b/tests/conftest.py
index 55bf01a185..d21dd553e0 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -21,6 +21,7 @@
 import types
 import typing as t
 import warnings
+from pathlib import Path
 
 import click
 import pytest
@@ -34,16 +35,61 @@
 pytest_plugins = ['aiida.tools.pytest_fixtures', 'sphinx.testing.fixtures']
 
 
+def pytest_collection_modifyitems(items):
+    """Automatically add markers to certain tests.
+
+    Most notably, we add the 'presto' marker to all tests that
+    are not marked with either 'requires_rmq' or 'requires_psql'.
+    """
+    filepath_psqldos = Path(__file__).parent / 'storage' / 'psql_dos'
+    filepath_django = Path(__file__).parent / 'storage' / 'psql_dos' / 'migrations' / 'django_branch'
+    filepath_sqla = Path(__file__).parent / 'storage' / 'psql_dos' / 'migrations' / 'sqlalchemy_branch'
+
+    for item in items:
+        filepath_item = Path(item.fspath)
+
+        # Add the 'nightly' marker to all tests in 'storage/psql_dos/migrations/*_branch'
+        if filepath_item.is_relative_to(filepath_django) or filepath_item.is_relative_to(filepath_sqla):
+            item.add_marker('nightly')
+
+        # Add 'requires_rmq' to all tests that depend on the 'daemon_client' fixture or fixtures derived from it
+        if 'daemon_client' in item.fixturenames:
+            item.add_marker('requires_rmq')
+
+        # All tests in 'storage/psql_dos' require PostgreSQL
+        if filepath_item.is_relative_to(filepath_psqldos):
+            item.add_marker('requires_psql')
+
+        # Add the 'presto' marker to all tests that require neither PostgreSQL nor RabbitMQ services.
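+        # Such tests can run with the lightweight 'core.sqlite_dos' storage and no broker (see 'aiida_profile' below).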
+        markers = [marker.name for marker in item.iter_markers()]
+        if 'requires_rmq' not in markers and 'requires_psql' not in markers and 'nightly' not in markers:
+            item.add_marker('presto')
+
+
 @pytest.fixture(scope='session')
-def aiida_profile(aiida_config, aiida_profile_factory, config_psql_dos):
+def aiida_profile(pytestconfig, aiida_config, aiida_profile_factory, config_psql_dos, config_sqlite_dos):
     """Create and load a profile, by default with ``core.psql_dos`` storage and RabbitMQ as the broker.
 
     This overrides the ``aiida_profile`` fixture provided by ``aiida-core`` which runs with ``core.sqlite_dos`` and
     without broker. However, tests in this package make use of the daemon, which requires a broker, and the tests
     should be run against the main storage backend, which is ``core.psql_dos``.
     """
+    marker_opts = pytestconfig.getoption('-m')
+
+    # By default we use the RabbitMQ broker and psql_dos storage
+    broker = 'core.rabbitmq'
+    if 'not requires_rmq' in marker_opts or 'presto' in marker_opts:
+        broker = None
+
+    if 'not requires_psql' in marker_opts or 'presto' in marker_opts:
+        storage = 'core.sqlite_dos'
+        config = config_sqlite_dos()
+    else:
+        storage = 'core.psql_dos'
+        config = config_psql_dos()
+
     with aiida_profile_factory(
-        aiida_config, storage_backend='core.psql_dos', storage_config=config_psql_dos(), broker_backend='core.rabbitmq'
+        aiida_config, storage_backend=storage, storage_config=config, broker_backend=broker
     ) as profile:
         yield profile
diff --git a/tests/engine/daemon/test_client.py b/tests/engine/daemon/test_client.py
index 29b6fdefdc..80a66a8632 100644
--- a/tests/engine/daemon/test_client.py
+++ b/tests/engine/daemon/test_client.py
@@ -19,6 +19,8 @@
     get_daemon_client,
 )
 
+pytestmark = pytest.mark.requires_rmq
+
 
 def test_ipc_socket_file_length_limit():
     """The maximum length of socket filepaths is often limited by the operating system.
diff --git a/tests/engine/processes/calcjobs/test_calc_job.py b/tests/engine/processes/calcjobs/test_calc_job.py
index 52e3602d73..4583d9ea18 100644
--- a/tests/engine/processes/calcjobs/test_calc_job.py
+++ b/tests/engine/processes/calcjobs/test_calc_job.py
@@ -1292,6 +1292,7 @@ def _parse_submit_output(self, *args):
     assert node.exit_status == 418
 
 
+@pytest.mark.requires_rmq
 def test_restart_after_daemon_reset(get_calcjob_builder, daemon_client, submit_and_await):
     """Test that a job can be restarted when it is launched and the daemon is restarted.
 
diff --git a/tests/engine/test_process_function.py b/tests/engine/test_process_function.py
index 8880f70169..9f1cfadbc8 100644
--- a/tests/engine/test_process_function.py
+++ b/tests/engine/test_process_function.py
@@ -429,8 +429,8 @@ def test_function_default_label():
     assert node.description == CUSTOM_DESCRIPTION
 
 
-def test_launchers():
-    """Verify that the various launchers are working."""
+def test_run_launchers():
+    """Verify that the various non-daemon launchers are working."""
     result = run(function_return_true)
     assert result
 
@@ -439,6 +439,10 @@
     assert result == get_true_node()
     assert isinstance(node, orm.CalcFunctionNode)
 
+
+@pytest.mark.requires_rmq
+def test_submit_launchers():
+    """Verify that submitting to the daemon works."""
     # Process function can be submitted and will be run by a daemon worker as long as the function is importable
     # Note that the actual running is not tested here but is done so in `.github/system_tests/test_daemon.py`.
     node = submit(add_multiply, x=orm.Int(1), y=orm.Int(2), z=orm.Int(3))
diff --git a/tests/manage/external/test_postgres.py b/tests/manage/external/test_postgres.py
index 94785231d6..be829ae4c9 100644
--- a/tests/manage/external/test_postgres.py
+++ b/tests/manage/external/test_postgres.py
@@ -10,9 +10,11 @@
 
 from unittest import TestCase
 
+import pytest
 from aiida.manage.external.postgres import Postgres
 
 
+@pytest.mark.requires_psql
 class PostgresTest(TestCase):
     """Test the public API provided by the `Postgres` class"""
 
diff --git a/tests/orm/nodes/test_node.py b/tests/orm/nodes/test_node.py
index 049dde985a..6b757d995a 100644
--- a/tests/orm/nodes/test_node.py
+++ b/tests/orm/nodes/test_node.py
@@ -837,6 +837,11 @@ def test_tab_completable_properties(self):
 class TestNodeDelete:
     """Tests for deleting nodes."""
 
+    # TODO: Why is this failing for SQLite??
+    # sqlalchemy.orm.exc.ObjectDeletedError: Instance '' has been deleted,
+    # or its row is otherwise not present.
+    # https://github.com/aiidateam/aiida-core/issues/6436
+    @pytest.mark.requires_psql
     def test_delete_through_backend(self):
         """Test deletion works correctly through the backend."""
         backend = get_manager().get_profile_storage()
diff --git a/tests/orm/test_querybuilder.py b/tests/orm/test_querybuilder.py
index 7e05a5a2ec..e39f20a7b9 100644
--- a/tests/orm/test_querybuilder.py
+++ b/tests/orm/test_querybuilder.py
@@ -24,6 +24,7 @@
 class TestBasic:
     @pytest.mark.usefixtures('aiida_profile_clean')
+    @pytest.mark.requires_psql
     def test_date_filters_support(self):
         """Verify that `datetime.date` is supported in filters."""
         from aiida.common import timezone
@@ -723,18 +724,21 @@ def test_str(self):
         qb = orm.QueryBuilder().append(orm.Data, project=['id', 'uuid']).order_by({orm.Data: 'id'})
         self.regress_str(str(qb))
 
+    @pytest.mark.requires_psql
     def test_as_sql(self):
         """Test ``qb.as_sql(inline=False)`` returns the correct string."""
         qb = orm.QueryBuilder()
         qb.append(orm.Node, project=['uuid'], filters={'extras.tag4': 'appl_pecoal'})
         self.regress_str(qb.as_sql(inline=False))
 
+    @pytest.mark.requires_psql
     def test_as_sql_inline(self):
         """Test ``qb.as_sql(inline=True)`` returns the correct string."""
         qb = orm.QueryBuilder()
         qb.append(orm.Node, project=['uuid'], filters={'extras.tag4': 'appl_pecoal'})
         self.regress_str(qb.as_sql(inline=True))
 
+    @pytest.mark.requires_psql
     def test_as_sql_literal_quote(self):
         """Test that literal values can be rendered."""
         qb = orm.QueryBuilder()
@@ -801,6 +805,7 @@ def test_round_trip_append(self):
     assert sorted([uuid for (uuid,) in qb.all()]) == sorted([uuid for (uuid,) in qb_new.all()])
 
 
+@pytest.mark.requires_psql
 def test_analyze_query():
     """Test the query plan is correctly generated."""
     qb = orm.QueryBuilder()
@@ -843,6 +848,7 @@ def test_empty_filters(self):
 
 
 class TestAttributes:
+    @pytest.mark.requires_psql
     @pytest.mark.usefixtures('aiida_profile_clean')
     def test_attribute_existence(self):
         # I'm storing a value under key whatever:
@@ -866,6 +872,7 @@ def test_attribute_existence(self):
         res_query = {str(_[0]) for _ in qb.all()}
         assert res_query == res_uuids
 
+    @pytest.mark.requires_psql
     def test_attribute_type(self):
         key = 'value_test_attr_type'
         n_int, n_float, n_str, n_str2, n_bool, n_arr = [orm.Data() for _ in range(6)]
@@ -1477,6 +1484,9 @@ def test_iterall_with_mutation(self):
         for pk in pks:
             assert orm.load_node(pk).base.extras.get('key') == 'value'
 
+    # TODO: This test seems to hang (or takes a looong time), specifically in
+    # pydantic/_internal/_core_utils.py:400
+    @pytest.mark.requires_psql
     @pytest.mark.usefixtures('aiida_profile_clean')
     def test_iterall_with_store(self):
         """Test that nodes can be stored while being iterated using ``QueryBuilder.iterall``.
@@ -1499,6 +1509,8 @@ def test_iterall_with_store(self):
         for pk, pk_clone in zip(pks, sorted(pk_clones)):
             assert orm.load_node(pk) == orm.load_node(pk_clone)
 
+    # TODO: This test seems to hang (or takes a looong time)
+    @pytest.mark.requires_psql
     @pytest.mark.usefixtures('aiida_profile_clean')
     def test_iterall_with_store_group(self):
         """Test that nodes can be stored and added to groups while being iterated using ``QueryBuilder.iterall``.
@@ -1563,6 +1575,9 @@ class TestManager:
     def init_db(self, backend):
         self.backend = backend
 
+    # This fails with SQLite with:
+    # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) no such function: date_trunc
+    @pytest.mark.requires_psql
     def test_statistics(self):
         """Test if the statistics query works properly.
 
@@ -1599,6 +1614,9 @@ def store_and_add(n, statistics):
 
         assert new_db_statistics == expected_db_statistics
 
+    # This fails with SQLite with:
+    # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) no such function: date_trunc
+    @pytest.mark.requires_psql
     def test_statistics_default_class(self):
         """Test if the statistics query works properly.
 
diff --git a/tests/restapi/test_statistics.py b/tests/restapi/test_statistics.py
index d0d5c3f408..e059de70ac 100644
--- a/tests/restapi/test_statistics.py
+++ b/tests/restapi/test_statistics.py
@@ -33,7 +33,10 @@ def linearize_namespace(tree_namespace, linear_namespace=None):
     return linear_namespace
 
 
+# This test does not work with SQLite since it uses the `statistics` endpoint,
+# which uses `date_trunc` under the hood, which is not implemented in SQLite.
 @pytest.mark.usefixtures('populate_restapi_database')
+@pytest.mark.requires_psql
 def test_count_consistency(restapi_server, server_url):
     """Test the consistency in values between full_type_count and statistics"""
     server = restapi_server()
diff --git a/tests/restapi/test_threaded_restapi.py b/tests/restapi/test_threaded_restapi.py
index 48624c9d78..bad1a8a76f 100644
--- a/tests/restapi/test_threaded_restapi.py
+++ b/tests/restapi/test_threaded_restapi.py
@@ -21,6 +21,14 @@
 NO_OF_REQUESTS = 100
 
 
+# This fails with the SQLite backend:
+# ERROR tests/restapi/test_threaded_restapi.py::test_run_threaded_server - assert 30.0 == 1
+# where 30.0 is the `timeout` of the connection pool of the engine
+# Engine(sqlite:////tmp/.../database.sqlite), obtained via the storage backend's `.bind`.
+@pytest.mark.requires_psql
 @pytest.mark.usefixtures('restrict_db_connections')
 def test_run_threaded_server(restapi_server, server_url, aiida_localhost):
     """Run AiiDA REST API threaded in a separate thread and perform many sequential requests.
diff --git a/tests/storage/psql_dos/conftest.py b/tests/storage/psql_dos/conftest.py
index 8bcad4d061..16136b8df9 100644
--- a/tests/storage/psql_dos/conftest.py
+++ b/tests/storage/psql_dos/conftest.py
@@ -14,8 +14,13 @@
 from aiida.manage.configuration import get_config
 
 try:
-    STORAGE_BACKEND_ENTRY_POINT = get_config().get_profile(os.environ.get('AIIDA_TEST_PROFILE', None)).storage_backend
+    if test_profile := os.environ.get('AIIDA_TEST_PROFILE'):
+        STORAGE_BACKEND_ENTRY_POINT = get_config().get_profile(test_profile).storage_backend
+    # TODO: The else branch is wrong
+    else:
+        STORAGE_BACKEND_ENTRY_POINT = 'core.psql_dos'
 except MissingConfigurationError:
-    # Case when ``pytest`` is invoked without existing config, in which case it will rely on the automatic test
-    # profile creation which currently always uses ``core.psql_dos`` for the storage backend
+    # Case when ``pytest`` is invoked without an existing config, in which case it will rely on the automatic
+    # test profile creation. Note that this no longer always uses ``core.psql_dos`` for the storage backend.
     STORAGE_BACKEND_ENTRY_POINT = 'core.psql_dos'
diff --git a/tests/storage/psql_dos/migrations/conftest.py b/tests/storage/psql_dos/migrations/conftest.py
index f28e179305..088b2b73b1 100644
--- a/tests/storage/psql_dos/migrations/conftest.py
+++ b/tests/storage/psql_dos/migrations/conftest.py
@@ -8,7 +8,6 @@
 ###########################################################################
 """Tests for the migration engine (Alembic) as well as for the AiiDA migrations for SQLAlchemy."""
 
-from pathlib import Path
 from uuid import uuid4
 
 import pytest
@@ -19,18 +18,6 @@
 from sqlalchemy import text
 
 
-def pytest_collection_modifyitems(config, items):
-    """Dynamically add the ``nightly`` marker to all tests in ``django_branch`` and ``sqlalchemy_branch`` modules."""
-    filepath_django = Path(__file__).parent / 'django_branch'
-    filepath_sqla = Path(__file__).parent / 'sqlalchemy_branch'
-
-    for item in items:
-        filepath_item = Path(item.fspath)
-
-        if filepath_item.is_relative_to(filepath_django) or filepath_item.is_relative_to(filepath_sqla):
-            item.add_marker(getattr(pytest.mark, 'nightly'))
-
-
 @pytest.fixture(scope='session')
 def empty_pg_cluster():
     """Create an empty PostgreSQL cluster, for the duration of the session."""
diff --git a/tests/tools/archive/test_schema.py b/tests/tools/archive/test_schema.py
index 55458509fb..b117a9553a 100644
--- a/tests/tools/archive/test_schema.py
+++ b/tests/tools/archive/test_schema.py
@@ -10,6 +10,7 @@
 
 from contextlib import suppress
 
+import pytest
 import yaml
 from aiida import get_profile
 from aiida.storage.psql_dos.utils import create_sqlalchemy_engine
@@ -23,6 +24,7 @@
 from tests.utils.archives import get_archive_file
 
 
+@pytest.mark.requires_psql
 def test_psql_sync_init(tmp_path):
     """Test the schema is in-sync with the ``psql_dos`` backend, when initialising a new archive."""
     # note, directly using the global profile's engine here left connections open
@@ -38,6 +40,7 @@ def test_psql_sync_init(tmp_path):
     raise AssertionError(f'Schema is not in-sync with the psql backend:\n{yaml.safe_dump(diffs)}')
 
 
+@pytest.mark.requires_psql
 def test_psql_sync_migrate(tmp_path):
     """Test the schema is in-sync with the ``psql_dos`` backend, when migrating an old archive to the latest version."""
     # note, directly using the global profile's engine here left connections open
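
Usage sketch (illustrative, not part of the diff): with the `presto` and `requires_*` markers from `pyproject.toml` and the marker-aware `aiida_profile` fixture in `tests/conftest.py`, a quick local run that needs neither PostgreSQL nor RabbitMQ becomes

    pytest -m presto

while CI keeps running the full suite, e.g.

    pytest -v tests -m 'not nightly'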