diff --git a/tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/validation.yaml b/tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/validation.yaml new file mode 100644 index 0000000000..ed7ced1d05 --- /dev/null +++ b/tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/validation.yaml @@ -0,0 +1,15 @@ +- name: Check supported distros + hosts: centos-manager*,windows-agent* + any_errors_fatal: true + tasks: + + - name: Check if the host distribution is compatible + include_role: + name: host_checker + tasks_from: check_supported_distro + vars: + os: "{{ 'Windows' if ansible_os_family == 'Windows' else 'Linux' }}" + dist: "{{ ansible_distribution if ansible_os_family != 'Windows' else 'Windows' }}" + manager_distros: ['CentOS'] + agent_distros: ['Windows'] + diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index 3b4e495dcd..3ff4ceab83 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -23,7 +23,12 @@ def validate_environments(request): Step 1: Collect the data related to the selected tests that will be executed. Step 2: Generate a playbook containing cross-checks for selected tests. Step 3: Run the generated playbook. - Step 4: Execute test-specific validations (if any). It will run one validation for each selected test set. + Step 4: Generate a test-specific playbook to validate the environment required by that test, then execute that + playbook. This will run one validation for each selected test set. + To add specific validation tasks to a test it is only necessary to add a new jinja2 template inside the + `playbooks` folder in the test suite. E.g: + test_basic_cases/test_fim/test_fim_linux/data/playbooks/validation.j2 + (See end_to_end/data/validation_templates for a guide to create the file) Args: request (fixture): Gives access to the requesting test context. 
@@ -40,7 +45,7 @@ def validate_environments(request): if not inventory_path: raise ValueError('Inventory not specified') - # -------------------------- Step 1: Prepare the necessary data ---------------- + #--------------------------------------- Step 1: Prepare the necessary data ---------------------------------------- # Get the path of the tests from collected items. collected_paths = [item.fspath for item in collected_items] # Remove duplicates caused by the existence of 2 or more test cases @@ -63,9 +68,9 @@ def validate_environments(request): # Get the largest number of manager/agent instances num_of_managers = max(manager_instances) num_of_agents = max(agent_instances) - # -------------------------- End of Step 1 ------------------------------------- + #-------------------------------------------------- End of Step 1 -------------------------------------------------- - # ---- Step 2: Run the playbook to generate the general validation playbook ---- + #---------------------- Step 2: Run the playbook to generate the general validation playbook ----------------------- gen_parameters = { 'playbook': playbook_generator, 'inventory': inventory_path, 'extravars': { @@ -74,9 +79,9 @@ def validate_environments(request): } } ansible_runner.run(**gen_parameters) - # -------------------------- End of Step 2 ------------------------------------- + #-------------------------------------------------- End of Step 2 -------------------------------------------------- - # -------------------- Step 3: Run the general validation playbook ------------- + #----------------------------------- Step 3: Run the general validation playbook ----------------------------------- parameters = { 'playbook': general_playbook, 'inventory': inventory_path, @@ -89,9 +94,9 @@ def validate_environments(request): if general_validation_runner.status == 'failed': raise Exception(f"The general validations have failed. 
Please check that the environments meet the expected " 'requirements.') - # -------------------------- End of Step 3 ------------------------------------- + #-------------------------------------------------- End of Step 3 -------------------------------------------------- - # ---------------- Step 4: Execute test-specific validations (if any) ---------- + #------------------------------------ Step 4: Execute test-specific validations ------------------------------------ playbook_generator = os.path.join(suite_path, 'data', 'validation_playbooks', 'generate_test_specific_play.yaml') playbook_template = os.path.join(suite_path, 'data', 'validation_templates', 'test_specific_validation.j2') @@ -136,7 +141,7 @@ def validate_environments(request): if validation_runner.status == 'failed': raise Exception(f"The validation phase of {test_suite_name} has failed. Please check that the environments " 'meet the expected requirements.') - # -------------------------- End of Step 4 ------------------------------------- + #-------------------------------------------------- End of Step 4 -------------------------------------------------- @pytest.fixture(scope='function') diff --git a/tests/end_to_end/roles/host_checker/tasks/check_connection.yaml b/tests/end_to_end/roles/host_checker/tasks/check_connection.yaml deleted file mode 100644 index b9926b1aa3..0000000000 --- a/tests/end_to_end/roles/host_checker/tasks/check_connection.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# REQUIRED VARIABLES -# ------------------- -# (String) os: Target operating system - -- name: Try connection (Linux) - ping: - when: os == 'Linux' - ignore_errors: true - register: result - -- name: Check if host is reachable (Linux) - set_fact: - failed: true - errors: "{{ inventory_hostname }} is unreachable." 
- when: '"ping" not in result.keys() and os == "Linux"' - -- name: Try connection (Windows) - win_ping: - when: os == 'Windows' - ignore_errors: true - register: result - -- name: Check if host is reachable (Windows) - set_fact: - failed: true - errors: "{{ inventory_hostname }} is unreachable." - when: '"ping" not in result.keys() and os == "Windows"' diff --git a/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml b/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml index cc73eb9122..982342f859 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml @@ -15,8 +15,13 @@ delegate_to: localhost when: (os == 'Linux' and 'server' in wazuh_info.stdout) +- debug: var=test_result + when: test_result is failed + +- debug: var=test_result.stderr + - name: Check the connection between Controller node and Wazuh Indexer set_fact: - failed: true - errors: "{{ errors }}\nAnsible Controller node cannot connect correctly with Wazuh Indexer." - when: (test_result is failed and test_result.stdout is defined and 'refused' in test_result.stdout) + check_result: 'true' + errors: "{{ errors }}Ansible Controller node cannot connect correctly with Wazuh Indexer.\n" + when: (test_result is failed and test_result.stderr is defined and 'refused' in test_result.stderr) diff --git a/tests/end_to_end/roles/host_checker/tasks/check_filebeat_indexer.yaml b/tests/end_to_end/roles/host_checker/tasks/check_filebeat_indexer.yaml index 1e4adbcd12..5a4981a8e2 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_filebeat_indexer.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_filebeat_indexer.yaml @@ -16,6 +16,6 @@ - name: Check the connection between Filebeat and Wazuh Indexer set_fact: - failed: true - errors: "{{ errors }}\nFilebeat cannot connect correctly with Wazuh Indexer." 
+ check_result: 'true' + errors: "{{ errors }}Filebeat cannot connect correctly with Wazuh Indexer.\n" when: (os == 'Linux' and 'server' in wazuh_info.stdout and 'ERROR' in test_result.stdout) diff --git a/tests/end_to_end/roles/host_checker/tasks/check_os.yaml b/tests/end_to_end/roles/host_checker/tasks/check_os.yaml index 129caefccf..028e9b57fe 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_os.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_os.yaml @@ -4,12 +4,12 @@ - name: Check OS (Linux) set_fact: - failed: true - errors: "{{ errors }}\nThe {{ ansible_system }} OS was not expected, but the {{ os }} OS." + check_result: 'true' + errors: "{{ errors }}The {{ ansible_system }} OS was not expected, but the {{ os }} OS.\n" when: (os != ansible_system and os == "Linux") - name: Check OS (Windows) set_fact: - failed: true - errors: "{{ errors }}\nThe {{ ansible_os_family }} OS was not expected, but the {{ os }} OS." + check_result: 'true' + errors: "{{ errors }}The {{ ansible_os_family }} OS was not expected, but the {{ os }} OS.\n" when: (os != ansible_os_family and os == "Windows") diff --git a/tests/end_to_end/roles/host_checker/tasks/check_python.yaml b/tests/end_to_end/roles/host_checker/tasks/check_python.yaml index 9a30ecf5bf..452ae7e773 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_python.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_python.yaml @@ -4,8 +4,8 @@ - name: Check default Python version (Linux) set_fact: - failed: true - errors: "{{ errors }}\nPython version is less than 3. Current version: {{ ansible_python_version }}" + check_result: 'true' + errors: "{{ errors }}Python version is less than 3. Current version: {{ ansible_python_version }}\n" when: (os == "Linux" and ansible_python['version']['major'] < 3) - name: Get Python version (Windows) @@ -15,6 +15,6 @@ - name: Check default Python version (Linux) set_fact: - failed: true - errors: "{{ errors }}\nPython version is less than 3. 
Current version: {{ version.stdout }}" + check_result: 'true' + errors: "{{ errors }}Python version is less than 3. Current version: {{ version.stdout }}\n" when: (os == "Windows" and version.stdout.split(" ")[1].split(".")[0] | int < 3) diff --git a/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml b/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml index ec0bff2209..d8abbf11e7 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml @@ -10,9 +10,9 @@ tasks_from: get_installation_type - fail: - msg: "{{ dist }} is not supported by this test: {{ manager_distros }}" + msg: "{{ dist }} is not supported by this test: {{ manager_distros }}\n" when: (os == 'Linux' and 'server' in wazuh_info.stdout and dist not in manager_distros) - fail: - msg: "{{ dist }} is not supported by this test: {{ agent_distros }}" + msg: "{{ dist }} is not supported by this test: {{ agent_distros }}\n" when: (os == 'Linux' and 'agent' in wazuh_info.stdout and dist not in agent_distros) diff --git a/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml b/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml new file mode 100644 index 0000000000..a6a69c3167 --- /dev/null +++ b/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml @@ -0,0 +1,32 @@ +# REQUIRED VARIABLES +# ------------------- +# (String) os: Target operating system + +- name: Get Wazuh installation + include_role: + name: service_controller + tasks_from: get_installation_type + +- name: Populate services facts + service_facts: + when: os == 'Linux' + +- name: Check the status of Wazuh components (Manager) + set_fact: + check_result: 'true' + errors: "{{ errors }}{{ ansible_facts.services[item] }} is not running.\n" + when: (os == 'Linux' and 'server' in wazuh_info.stdout and ansible_facts.services[item].state != 'running') + with_items: + - 
'wazuh-manager.service' + - 'wazuh-indexer.service' + - 'filebeat.service' + +- set_fact: + service: 'wazuh-agent.service' + when: (os == 'Linux' and 'agent' in wazuh_info.stdout) + +- name: Check the status of Wazuh Agent + set_fact: + check_result: 'true' + errors: "{{ errors }}{{ ansible_facts.services[service].name }} is not running.\n" + when: (os == 'Linux' and 'agent' in wazuh_info.stdout and ansible_facts.services[service].state != 'running') diff --git a/tests/end_to_end/roles/host_checker/tasks/main.yaml b/tests/end_to_end/roles/host_checker/tasks/main.yaml index 4bb3e9ec30..71794d7eb4 100644 --- a/tests/end_to_end/roles/host_checker/tasks/main.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/main.yaml @@ -1,30 +1,30 @@ # -------- Task to identify whether the validation step fails or not. -------- - name: Set flag and informative variable set_fact: - failed: false - errors: null + check_result: 'false' + errors: '' # ---------------------------------------------------------------------------- # -------- Checks ------------------------------------------------------------ -- name: Check host connection - include_tasks: check_connection.yaml - - name: Check Python import_tasks: check_python.yaml - name: Check OS import_tasks: check_os.yaml +- name: Check the status of Wazuh components + import_tasks: check_wazuh_components.yaml + - name: Check the connection between Filebeat and Wazuh Indexer import_tasks: check_filebeat_indexer.yaml - name: Check the connection between Controller node and Wazuh Indexer import_tasks: check_controller_indexer.yaml # ---------------------------------------------------------------------------- - +- debug: var=errors # -------- Task to identify whether the validation step fails or not. 
-------- - name: Verify if any check have failed fail: msg: "Some validations were fail:\n'{{ errors }}'" - when: failed == true + when: (check_result == 'true' or errors != '') # ----------------------------------------------------------------------------