From 8b8f33e7cc5da5af841e46c8459f08837757e9c1 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Mon, 15 Aug 2022 19:31:58 -0300 Subject: [PATCH 01/27] feat: add validation phase. #3142 --- tests/end_to_end/conftest.py | 108 ++++++++++++++++++ tests/end_to_end/data/environment.json | 58 ++++++++++ .../data/generate_general_play.yaml | 10 ++ tests/end_to_end/data/validation_template.j2 | 12 ++ .../host_checker/tasks/check_connection.yaml | 27 +++++ .../roles/host_checker/tasks/check_os.yaml | 15 +++ .../host_checker/tasks/check_python.yaml | 20 ++++ .../roles/host_checker/tasks/main.yaml | 24 ++++ 8 files changed, 274 insertions(+) create mode 100644 tests/end_to_end/data/environment.json create mode 100644 tests/end_to_end/data/generate_general_play.yaml create mode 100644 tests/end_to_end/data/validation_template.j2 create mode 100644 tests/end_to_end/roles/host_checker/tasks/check_connection.yaml create mode 100644 tests/end_to_end/roles/host_checker/tasks/check_os.yaml create mode 100644 tests/end_to_end/roles/host_checker/tasks/check_python.yaml create mode 100644 tests/end_to_end/roles/host_checker/tasks/main.yaml diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index c1d6fe70c5..b18f65a104 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -4,6 +4,7 @@ import os import ansible_runner import pytest +import json from tempfile import gettempdir from wazuh_testing.tools.file import remove_file @@ -11,6 +12,104 @@ alerts_json = os.path.join(gettempdir(), 'alerts.json') +suite_path = os.path.dirname(os.path.realpath(__file__)) + + +@pytest.fixture(scope='session', autouse=True) +def validate_environments(request): + """Fixture with session scope to validate the environments before run the E2E tests. + + This phase is divided in 4 steps: + Step 1: Collect the data related with the selected tests that will be executed. + Step 2: Generate a playbook with general validations containing cross-checks for selected tests. + Step 3: Run the generated playbook. + Step 4: Execute test-specific validations (if any). It will run one validation for each selected test set. + + Args: + request (fixture): Gives access to the requesting test context. + """ + collected_items = request.session.items + roles_path = request.config.getoption('--roles-path') + inventory_path = request.config.getoption('--inventory_path') + environment_file = os.path.join(suite_path, 'data', 'environment.json') + environment_metadata = json.load(open(environment_file)) + playbook_generator = os.path.join(suite_path, 'data', 'generate_general_play.yaml') + playbook_template = os.path.join(suite_path, 'data', 'validation_template.j2') + general_playbook = os.path.join(suite_path, 'data', 'general_validation.yaml') + + if not inventory_path: + raise ValueError('Inventory not specified') + + # -------------------------- Step 1: Prepare the necessary data ---------------- + # Get the path of the tests from collected items. 
+ collected_paths = [item.fspath for item in collected_items] + # Remove duplicates caused by the existence of 2 or more test cases + collected_paths = list(dict.fromkeys(collected_paths)) + test_suites_paths = [] + manager_instances = [] + agent_instances = [] + + for path in collected_paths: + # Remove the name of the file from the path + path = str(path).rsplit('/', 1)[0] + # Add the test suite path + test_suites_paths.append(path) + # Get the test suite name + test_suite_name = path.split('/')[-1:][0] + # Save the test environment metadata in lists + manager_instances.append(environment_metadata[test_suite_name]['managers']) + agent_instances.append(environment_metadata[test_suite_name]['agents']) + + # Get the largest number of manager/agent instances + num_of_managers = max(manager_instances) + num_of_agents = max(agent_instances) + # -------------------------- End of Step 1 ------------------------------------- + + # ---- Step 2: Run the playbook to generate the general validation playbook ---- + parameters = { + 'playbook': playbook_generator, 'inventory': inventory_path, + 'extravars': { + 'template_path': playbook_template, 'dest_path': general_playbook, + 'num_of_managers': num_of_managers, 'num_of_agents': num_of_agents + } + } + ansible_runner.run(**parameters) + # -------------------------- End of Step 2 ------------------------------------- + + # -------------------- Step 3: Run the general validation playbook ------------- + parameters = { + 'playbook': general_playbook, + 'inventory': inventory_path, + 'envvars': {'ANSIBLE_ROLES_PATH': roles_path} + } + general_validation_runner = ansible_runner.run(**parameters) + # Remove the generated playbook + remove_file(general_playbook) + # If the general validations have failed, then abort the execution finishing with an error. Else, continue. + if general_validation_runner.status == 'failed': + raise Exception(f"The general validations have failed. Please check that the environments meet the expected " \ + 'requirements.') + # -------------------------- End of Step 3 ------------------------------------- + + # Step 4: Execute test-specific validations (if any) + for path in test_suites_paths: + validation_playbook = os.path.join(path, 'data', 'playbooks', 'validation.yaml') + + if os.path.exists(validation_playbook): + # Set Ansible parameters + parameters = { + 'playbook': validation_playbook, + 'inventory': inventory_path, + 'envvars': {'ANSIBLE_ROLES_PATH': roles_path} + } + # Run the validations of the test suite. + validation_runner = ansible_runner.run(**parameters) + + # If the validation phase has failed, then abort the execution finishing with an error. Else, continue. + if validation_runner.status == 'failed': + raise Exception(f"The validation phase of {{ path }} has failed. 
Please check that the environments " \ + 'meet the expected requirements.') + # -------------------------- End of Step 4 ------------------------------------- @pytest.fixture(scope='function') @@ -126,3 +225,12 @@ def pytest_addoption(parser): type=str, help='Inventory path', ) + + parser.addoption( + '--roles-path', + action='store', + metavar='ROLES_PATH', + default=os.path.join(suite_path, 'roles'), + type=str, + help='Ansible roles path.', + ) diff --git a/tests/end_to_end/data/environment.json b/tests/end_to_end/data/environment.json new file mode 100644 index 0000000000..bdb65b1183 --- /dev/null +++ b/tests/end_to_end/data/environment.json @@ -0,0 +1,58 @@ +{ + "test_audit": { + "managers": 1, + "agents": 0 + }, + "test_aws_infrastructure_monitoring": { + "managers": 1, + "agents": 0 + }, + "test_brute_force": { + "managers": 1, + "agents": 1 + }, + "test_detecting_suspicious_binaries": { + "managers": 1, + "agents": 0 + }, + "test_docker_monitoring": { + "managers": 1, + "agents": 0 + }, + "test_fim": { + "managers": 1, + "agents": 2 + }, + "test_netcat": { + "managers": 1, + "agents": 1 + }, + "test_osquery_integration": { + "managers": 1, + "agents": 0 + }, + "test_shellshock_attack_detection": { + "managers": 1, + "agents": 0 + }, + "test_sql_injection": { + "managers": 1, + "agents": 1 + }, + "test_virustotal": { + "managers": 1, + "agents": 1 + }, + "test_vulnerability_detector": { + "managers": 1, + "agents": 2 + }, + "test_windows_defender": { + "managers": 1, + "agents": 1 + }, + "test_yara_integration": { + "managers": 1, + "agents": 0 + } +} \ No newline at end of file diff --git a/tests/end_to_end/data/generate_general_play.yaml b/tests/end_to_end/data/generate_general_play.yaml new file mode 100644 index 0000000000..d025f3da65 --- /dev/null +++ b/tests/end_to_end/data/generate_general_play.yaml @@ -0,0 +1,10 @@ +--- +- hosts: localhost + vars: + num_of_managers: "{{ num_of_managers }}" + num_of_agents: "{{ num_of_agents }}" + tasks: + - name: Generate a general validation playbook + template: + src: "{{ template_path }}" + dest: "{{ dest_path }}" diff --git a/tests/end_to_end/data/validation_template.j2 b/tests/end_to_end/data/validation_template.j2 new file mode 100644 index 0000000000..c6d0d6e590 --- /dev/null +++ b/tests/end_to_end/data/validation_template.j2 @@ -0,0 +1,12 @@ +{% if num_of_agents == 0 %} +- hosts: managers +{% elif num_of_managers == 0 %} +- hosts: agents +{% else %} +- hosts: managers:agents +{% endif %} + any_errors_fatal: true + roles: + - role: host_checker + vars: + os: "{% raw %}{{ 'Windows' if ansible_os_family == 'Windows' else 'Linux' }}{% endraw %}" diff --git a/tests/end_to_end/roles/host_checker/tasks/check_connection.yaml b/tests/end_to_end/roles/host_checker/tasks/check_connection.yaml new file mode 100644 index 0000000000..0774e4147b --- /dev/null +++ b/tests/end_to_end/roles/host_checker/tasks/check_connection.yaml @@ -0,0 +1,27 @@ +# REQUIRED VARIABLES +#------------------- +# (String) os: Target operating system + +- name: Try connection (Linux) + ping: + when: os == 'Linux' + ignore_errors: true + register: result + +- name: Check if host is reachable (Linux) + set_fact: + failed: true + errors: "{{ inventory_hostname }} is unreachable." + when: '"ping" not in result.keys() and os == "Linux"' + +- name: Try connection (Windows) + win_ping: + when: os == 'Windows' + ignore_errors: true + register: result + +- name: Check if host is reachable (Windows) + set_fact: + failed: true + errors: "{{ inventory_hostname }} is unreachable." 
+    when: '"ping" not in result.keys() and os == "Windows"'
diff --git a/tests/end_to_end/roles/host_checker/tasks/check_os.yaml b/tests/end_to_end/roles/host_checker/tasks/check_os.yaml
new file mode 100644
index 0000000000..0575103567
--- /dev/null
+++ b/tests/end_to_end/roles/host_checker/tasks/check_os.yaml
@@ -0,0 +1,15 @@
+# REQUIRED VARIABLES
+#-------------------
+# (String) os: Target operating system
+
+- name: Check OS (Linux)
+  set_fact:
+    failed: true
+    errors: "{{ errors }}\nThe {{ ansible_system }} OS was not expected, but the {{ os }} OS."
+  when: (os != ansible_system and os == "Linux")
+
+- name: Check OS (Windows)
+  set_fact:
+    failed: true
+    errors: "{{ errors }}\nThe {{ ansible_os_family }} OS was not expected, but the {{ os }} OS."
+  when: (os != ansible_os_family and os == "Windows")
diff --git a/tests/end_to_end/roles/host_checker/tasks/check_python.yaml b/tests/end_to_end/roles/host_checker/tasks/check_python.yaml
new file mode 100644
index 0000000000..1809eb3d27
--- /dev/null
+++ b/tests/end_to_end/roles/host_checker/tasks/check_python.yaml
@@ -0,0 +1,20 @@
+# REQUIRED VARIABLES
+#-------------------
+# (String) os: Target operating system
+
+- name: Check default Python version (Linux)
+  set_fact:
+    failed: true
+    errors: "{{ errors }}\nPython version is less than 3. Current version: {{ ansible_python_version }}"
+  when: (os == "Linux" and ansible_python['version']['major'] < 3)
+
+- name: Get Python version (Windows)
+  win_shell: python -V
+  register: version
+  when: os == 'Windows'
+
+- name: Check default Python version (Windows)
+  set_fact:
+    failed: true
+    errors: "{{ errors }}\nPython version is less than 3. Current version: {{ version.stdout }}"
+  when: (os == "Windows" and version.stdout.split(" ")[1].split(".")[0] | int < 3)
diff --git a/tests/end_to_end/roles/host_checker/tasks/main.yaml b/tests/end_to_end/roles/host_checker/tasks/main.yaml
new file mode 100644
index 0000000000..f4c4281f2e
--- /dev/null
+++ b/tests/end_to_end/roles/host_checker/tasks/main.yaml
@@ -0,0 +1,24 @@
+# -------- Task to identify whether the validation step fails or not. --------
+- name: Set flag and informative variable
+  set_fact:
+    failed: false
+    errors: null
+# ----------------------------------------------------------------------------
+
+# -------- Checks ------------------------------------------------------------
+- name: Check host connection
+  include_tasks: check_connection.yaml
+
+- name: Check Python
+  import_tasks: check_python.yaml
+
+- name: Check OS
+  import_tasks: check_os.yaml
+# ----------------------------------------------------------------------------
+
+# -------- Task to identify whether the validation step fails or not. --------
+- name: Verify if any check has failed
+  fail:
+    msg: "Some validations failed:\n'{{ errors }}'"
+  when: failed == true
+# ----------------------------------------------------------------------------

From 5fb7fdea534419eadedc639a7b7c2f3b650d1330 Mon Sep 17 00:00:00 2001
From: Mauro Malara
Date: Mon, 15 Aug 2022 19:33:26 -0300
Subject: [PATCH 02/27] fix: test_fim fixed. #2830

---
 .../end_to_end/test_fim/data/playbooks/generate_events.yaml | 4 ++--
 tests/end_to_end/test_fim/test_fim.py                       | 5 +++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/tests/end_to_end/test_fim/data/playbooks/generate_events.yaml b/tests/end_to_end/test_fim/data/playbooks/generate_events.yaml
index 4ddc7c0196..200187088f 100644
--- a/tests/end_to_end/test_fim/data/playbooks/generate_events.yaml
+++ b/tests/end_to_end/test_fim/data/playbooks/generate_events.yaml
@@ -18,13 +18,13 @@
         path: "{{ path }}"
         state: "{{ state }}"
         mode: "{{ mode }}"
-      when: os == "Linux"
+      when: ansible_system == os
 
     - name: "{{ event_description }}"
      win_file:
        path: "{{ path }}"
        state: "{{ state }}"
-      when: os == ansible_facts['os_family']
+      when: ansible_facts['os_family'] == os
 
 - name: Get alerts
   hosts: wazuh-manager
diff --git a/tests/end_to_end/test_fim/test_fim.py b/tests/end_to_end/test_fim/test_fim.py
index d46bb6dd33..6adaee3e48 100644
--- a/tests/end_to_end/test_fim/test_fim.py
+++ b/tests/end_to_end/test_fim/test_fim.py
@@ -28,13 +28,14 @@ def test_fim(configure_environment, metadata, get_dashboard_credentials, generat
     rule_level = metadata['rule.level']
     rule_description = metadata['rule.description']
     syscheck_path = metadata['extra']['syscheck.path']
+    timestamp = r'\d+\-\d+\-\w+\:\d+\:\d+\.\d+[+|-]\d+'
 
-    expected_alert_json = fr'\{{"timestamp":"(\d+\-\d+\-\w+\:\d+\:\d+\.\d+\+\d+)","rule":{{"level":{rule_level},' \
+    expected_alert_json = fr'.*\{{"timestamp":"({timestamp})","rule":{{"level":{rule_level},' \
                           fr'"description":"{rule_description}","id":"{rule_id}".*"syscheck":{{"path":' \
                           fr'"{syscheck_path}".*\}}'
 
     expected_indexed_alert = fr'.*"path": "{syscheck_path}".*"rule":.*"level": {rule_level},.*"description": ' \
-                             fr'"{rule_description}".*"timestamp": "(\d+\-\d+\-\w+\:\d+\:\d+\.\d+\+\d+)".*'
+                             fr'"{rule_description}".*"timestamp": "({timestamp})".*'
 
     # Check that alert has been raised and save timestamp
     raised_alert = evm.check_event(callback=expected_alert_json, file_to_monitor=alerts_json,

From e907f8b769503ae684be1f2faf730f73cfc91bc8 Mon Sep 17 00:00:00 2001
From: Mauro Malara
Date: Mon, 15 Aug 2022 19:41:42 -0300
Subject: [PATCH 03/27] fix: grammatical errors corrected. #3142

---
 tests/end_to_end/conftest.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py
index b18f65a104..5f612f6dd7 100644
--- a/tests/end_to_end/conftest.py
+++ b/tests/end_to_end/conftest.py
@@ -19,9 +19,9 @@
 def validate_environments(request):
     """Fixture with session scope to validate the environments before run the E2E tests.
 
-    This phase is divided in 4 steps:
-        Step 1: Collect the data related with the selected tests that will be executed.
-        Step 2: Generate a playbook with general validations containing cross-checks for selected tests.
+    This phase is divided into 4 steps:
+        Step 1: Collect the data related to the selected tests that will be executed.
+        Step 2: Generate a playbook containing cross-checks for selected tests.
         Step 3: Run the generated playbook.
         Step 4: Execute test-specific validations (if any). It will run one validation for each selected test set.
 

From f84dc10a45d7b37e84182cbd8f55552c8ab12d5a Mon Sep 17 00:00:00 2001
From: Mauro Malara
Date: Tue, 16 Aug 2022 16:11:11 -0300
Subject: [PATCH 04/27] feat and fix: several tasks and changes. #3142

Tasks to check filebeat-indexer and controller-indexer connections.
New role related to Wazuh services added.
Some changes related to linter corrections.
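[Editor's note, not part of the patches] PATCH 02 above widens the alert
timestamp pattern so that UTC offsets of either sign match. A minimal sanity
check of that pattern; the sample timestamps are illustrative, not taken from
real alerts:

    import re

    # Pattern introduced in PATCH 02. The character class [+|-] accepts
    # '+', '-' and, as a harmless quirk, '|'.
    timestamp = r'\d+\-\d+\-\w+\:\d+\:\d+\.\d+[+|-]\d+'

    # One positive-offset and one negative-offset sample; both must match now.
    for sample in ('2022-08-15T19:33:26.123+0000', '2022-08-15T19:33:26.123-0300'):
        assert re.search(timestamp, sample), sample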
--- tests/end_to_end/conftest.py | 4 ++-- .../data/generate_general_play.yaml | 1 - tests/end_to_end/data/validation_template.j2 | 7 +++--- .../host_checker/tasks/check_connection.yaml | 4 ++-- .../tasks/check_controller_indexer.yaml | 22 +++++++++++++++++++ .../tasks/check_filebeat_indexer.yaml | 21 ++++++++++++++++++ .../roles/host_checker/tasks/check_os.yaml | 4 ++-- .../host_checker/tasks/check_python.yaml | 4 ++-- .../roles/host_checker/tasks/main.yaml | 6 +++++ .../tasks/get_installation_type.yaml | 9 ++++++++ .../data/playbooks/generate_events.yaml | 4 ++-- 11 files changed, 72 insertions(+), 14 deletions(-) create mode 100644 tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml create mode 100644 tests/end_to_end/roles/host_checker/tasks/check_filebeat_indexer.yaml create mode 100644 tests/end_to_end/roles/service_controller/tasks/get_installation_type.yaml diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index 5f612f6dd7..fa975e4f5f 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -87,7 +87,7 @@ def validate_environments(request): remove_file(general_playbook) # If the general validations have failed, then abort the execution finishing with an error. Else, continue. if general_validation_runner.status == 'failed': - raise Exception(f"The general validations have failed. Please check that the environments meet the expected " \ + raise Exception(f"The general validations have failed. Please check that the environments meet the expected " 'requirements.') # -------------------------- End of Step 3 ------------------------------------- @@ -107,7 +107,7 @@ def validate_environments(request): # If the validation phase has failed, then abort the execution finishing with an error. Else, continue. if validation_runner.status == 'failed': - raise Exception(f"The validation phase of {{ path }} has failed. Please check that the environments " \ + raise Exception(f"The validation phase of {{ path }} has failed. 
Please check that the environments " 'meet the expected requirements.') # -------------------------- End of Step 4 ------------------------------------- diff --git a/tests/end_to_end/data/generate_general_play.yaml b/tests/end_to_end/data/generate_general_play.yaml index d025f3da65..ada390fb1e 100644 --- a/tests/end_to_end/data/generate_general_play.yaml +++ b/tests/end_to_end/data/generate_general_play.yaml @@ -1,4 +1,3 @@ ---- - hosts: localhost vars: num_of_managers: "{{ num_of_managers }}" diff --git a/tests/end_to_end/data/validation_template.j2 b/tests/end_to_end/data/validation_template.j2 index c6d0d6e590..fe2d3829c0 100644 --- a/tests/end_to_end/data/validation_template.j2 +++ b/tests/end_to_end/data/validation_template.j2 @@ -1,9 +1,10 @@ +- name: General validation phase {% if num_of_agents == 0 %} -- hosts: managers + hosts: managers {% elif num_of_managers == 0 %} -- hosts: agents + hosts: agents {% else %} -- hosts: managers:agents + hosts: managers:agents {% endif %} any_errors_fatal: true roles: diff --git a/tests/end_to_end/roles/host_checker/tasks/check_connection.yaml b/tests/end_to_end/roles/host_checker/tasks/check_connection.yaml index 0774e4147b..b9926b1aa3 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_connection.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_connection.yaml @@ -1,6 +1,6 @@ # REQUIRED VARIABLES -#------------------- -# (String) os: Target operating system +# ------------------- +# (String) os: Target operating system - name: Try connection (Linux) ping: diff --git a/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml b/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml new file mode 100644 index 0000000000..cc73eb9122 --- /dev/null +++ b/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml @@ -0,0 +1,22 @@ +# REQUIRED VARIABLES +# ------------------- +# (String) os: Target operating system + +- name: Get Wazuh installation + include_role: + name: service_controller + tasks_from: get_installation_type + +- name: Test connection with host + shell: nc -v -4 {{ inventory_hostname }} 9200 + timeout: 3 + ignore_errors: true + register: test_result + delegate_to: localhost + when: (os == 'Linux' and 'server' in wazuh_info.stdout) + +- name: Check the connection between Controller node and Wazuh Indexer + set_fact: + failed: true + errors: "{{ errors }}\nAnsible Controller node cannot connect correctly with Wazuh Indexer." + when: (test_result is failed and test_result.stdout is defined and 'refused' in test_result.stdout) diff --git a/tests/end_to_end/roles/host_checker/tasks/check_filebeat_indexer.yaml b/tests/end_to_end/roles/host_checker/tasks/check_filebeat_indexer.yaml new file mode 100644 index 0000000000..1e4adbcd12 --- /dev/null +++ b/tests/end_to_end/roles/host_checker/tasks/check_filebeat_indexer.yaml @@ -0,0 +1,21 @@ +# REQUIRED VARIABLES +# ------------------- +# (String) os: Target operating system + +- name: Get Wazuh installation + include_role: + name: service_controller + tasks_from: get_installation_type + +- name: Run filebeat test + become: true + shell: filebeat test output + register: test_result + ignore_errors: true + when: (os == 'Linux' and 'server' in wazuh_info.stdout) + +- name: Check the connection between Filebeat and Wazuh Indexer + set_fact: + failed: true + errors: "{{ errors }}\nFilebeat cannot connect correctly with Wazuh Indexer." 
+ when: (os == 'Linux' and 'server' in wazuh_info.stdout and 'ERROR' in test_result.stdout) diff --git a/tests/end_to_end/roles/host_checker/tasks/check_os.yaml b/tests/end_to_end/roles/host_checker/tasks/check_os.yaml index 0575103567..129caefccf 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_os.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_os.yaml @@ -1,6 +1,6 @@ # REQUIRED VARIABLES -#------------------- -# (String) os: Target operating system +# ------------------- +# (String) os: Target operating system - name: Check OS (Linux) set_fact: diff --git a/tests/end_to_end/roles/host_checker/tasks/check_python.yaml b/tests/end_to_end/roles/host_checker/tasks/check_python.yaml index 1809eb3d27..9a30ecf5bf 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_python.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_python.yaml @@ -1,6 +1,6 @@ # REQUIRED VARIABLES -#------------------- -# (String) os: Target operating system +# ------------------- +# (String) os: Target operating system - name: Check default Python version (Linux) set_fact: diff --git a/tests/end_to_end/roles/host_checker/tasks/main.yaml b/tests/end_to_end/roles/host_checker/tasks/main.yaml index f4c4281f2e..4bb3e9ec30 100644 --- a/tests/end_to_end/roles/host_checker/tasks/main.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/main.yaml @@ -14,6 +14,12 @@ - name: Check OS import_tasks: check_os.yaml + +- name: Check the connection between Filebeat and Wazuh Indexer + import_tasks: check_filebeat_indexer.yaml + +- name: Check the connection between Controller node and Wazuh Indexer + import_tasks: check_controller_indexer.yaml # ---------------------------------------------------------------------------- # -------- Task to identify whether the validation step fails or not. -------- diff --git a/tests/end_to_end/roles/service_controller/tasks/get_installation_type.yaml b/tests/end_to_end/roles/service_controller/tasks/get_installation_type.yaml new file mode 100644 index 0000000000..7396a6aea7 --- /dev/null +++ b/tests/end_to_end/roles/service_controller/tasks/get_installation_type.yaml @@ -0,0 +1,9 @@ +# REQUIRED VARIABLES +# ------------------- +# (String) os: Target operating system + +- name: Get installation type + become: true + shell: /var/ossec/bin/wazuh-control info + register: wazuh_info + when: os == 'Linux' diff --git a/tests/end_to_end/test_fim/data/playbooks/generate_events.yaml b/tests/end_to_end/test_fim/data/playbooks/generate_events.yaml index 200187088f..a2a1ac2005 100644 --- a/tests/end_to_end/test_fim/data/playbooks/generate_events.yaml +++ b/tests/end_to_end/test_fim/data/playbooks/generate_events.yaml @@ -4,7 +4,7 @@ - name: Truncate file shell: echo "" > /var/ossec/logs/alerts/alerts.json - become: True + become: true - name: Generate events hosts: agents @@ -13,7 +13,7 @@ tasks: - name: "{{ event_description }}" - become: True + become: true file: path: "{{ path }}" state: "{{ state }}" From 583d11c706b70ffc57e23e7e1ab3736ed9fc9bcf Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Wed, 17 Aug 2022 15:06:36 -0300 Subject: [PATCH 05/27] feat: the task in each test to validate the supported OS was added. 
#3142 --- tests/end_to_end/conftest.py | 69 +++++++++++++------ tests/end_to_end/data/env_requirements.json | 62 +++++++++++++++++ tests/end_to_end/data/environment.json | 58 ---------------- .../generate_general_play.yaml | 0 .../generate_test_specific_play.yaml | 10 +++ .../general_validation.j2} | 0 .../test_specific_validation.j2 | 19 +++++ .../tasks/check_supported_distro.yaml | 24 +++++++ 8 files changed, 162 insertions(+), 80 deletions(-) create mode 100644 tests/end_to_end/data/env_requirements.json delete mode 100644 tests/end_to_end/data/environment.json rename tests/end_to_end/data/{ => validation_playbooks}/generate_general_play.yaml (100%) create mode 100644 tests/end_to_end/data/validation_playbooks/generate_test_specific_play.yaml rename tests/end_to_end/data/{validation_template.j2 => validation_templates/general_validation.j2} (100%) create mode 100644 tests/end_to_end/data/validation_templates/test_specific_validation.j2 create mode 100644 tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index fa975e4f5f..fb1b54aa42 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -31,11 +31,11 @@ def validate_environments(request): collected_items = request.session.items roles_path = request.config.getoption('--roles-path') inventory_path = request.config.getoption('--inventory_path') - environment_file = os.path.join(suite_path, 'data', 'environment.json') + environment_file = os.path.join(suite_path, 'data', 'env_requirements.json') environment_metadata = json.load(open(environment_file)) - playbook_generator = os.path.join(suite_path, 'data', 'generate_general_play.yaml') - playbook_template = os.path.join(suite_path, 'data', 'validation_template.j2') - general_playbook = os.path.join(suite_path, 'data', 'general_validation.yaml') + playbook_generator = os.path.join(suite_path, 'data', 'validation_playbooks', 'generate_general_play.yaml') + playbook_template = os.path.join(suite_path, 'data', 'validation_templates', 'general_validation.j2') + general_playbook = os.path.join(suite_path, 'data', 'validation_playbooks', 'general_validation.yaml') if not inventory_path: raise ValueError('Inventory not specified') @@ -56,9 +56,9 @@ def validate_environments(request): test_suites_paths.append(path) # Get the test suite name test_suite_name = path.split('/')[-1:][0] - # Save the test environment metadata in lists - manager_instances.append(environment_metadata[test_suite_name]['managers']) - agent_instances.append(environment_metadata[test_suite_name]['agents']) + # Save the test environment metadata + manager_instances.append(environment_metadata[test_suite_name]['manager']['instances']) + agent_instances.append(environment_metadata[test_suite_name]['agent']['instances']) # Get the largest number of manager/agent instances num_of_managers = max(manager_instances) @@ -66,14 +66,14 @@ def validate_environments(request): # -------------------------- End of Step 1 ------------------------------------- # ---- Step 2: Run the playbook to generate the general validation playbook ---- - parameters = { + gen_parameters = { 'playbook': playbook_generator, 'inventory': inventory_path, 'extravars': { 'template_path': playbook_template, 'dest_path': general_playbook, 'num_of_managers': num_of_managers, 'num_of_agents': num_of_agents } } - ansible_runner.run(**parameters) + ansible_runner.run(**gen_parameters) # -------------------------- End of Step 2 
------------------------------------- # -------------------- Step 3: Run the general validation playbook ------------- @@ -91,24 +91,49 @@ def validate_environments(request): 'requirements.') # -------------------------- End of Step 3 ------------------------------------- - # Step 4: Execute test-specific validations (if any) + # ---------------- Step 4: Execute test-specific validations (if any) ---------- + playbook_generator = os.path.join(suite_path, 'data', 'validation_playbooks', 'generate_test_specific_play.yaml') + playbook_template = os.path.join(suite_path, 'data', 'validation_templates', 'test_specific_validation.j2') + for path in test_suites_paths: + validation_template = os.path.join(path, 'data', 'playbooks', 'validation.j2') + validation_template = validation_template if os.path.exists(validation_template) else '' + # Define the path where the resulting playbook will be stored validation_playbook = os.path.join(path, 'data', 'playbooks', 'validation.yaml') - if os.path.exists(validation_playbook): - # Set Ansible parameters - parameters = { - 'playbook': validation_playbook, - 'inventory': inventory_path, - 'envvars': {'ANSIBLE_ROLES_PATH': roles_path} + # Get distros by instances type + test_suite_name = path.split('/')[-1:][0] + target_hosts = [] + distros = {"manager": [], "agent": []} + for key in environment_metadata[test_suite_name]: + if environment_metadata[test_suite_name][key]['instances'] > 0: + # Save manager/agent distros for the current test + distros[key] = environment_metadata[test_suite_name][key]['distros'] + # Add the target host to the list (following the standard host name: "-*") + target_hosts.extend([distro.lower() + f"-{key}*" for distro in distros[key]]) + + # Generate test_specific validation playbook + gen_parameters = { + 'playbook': playbook_generator, 'inventory': inventory_path, 'envvars': {'ANSIBLE_ROLES_PATH': roles_path}, + 'extravars': { + 'template_path': playbook_template, 'dest_path': validation_playbook, + 'num_of_managers': num_of_managers, 'num_of_agents': num_of_agents, + 'validation_template': validation_template, 'target_hosts': ','.join(target_hosts), + 'manager_distros': distros['manager'], 'agent_distros': distros['agent'] } - # Run the validations of the test suite. - validation_runner = ansible_runner.run(**parameters) + } + ansible_runner.run(**gen_parameters) + + # Run test_specific validation playbook + parameters = { + 'playbook': validation_playbook, 'inventory': inventory_path, 'envvars': {'ANSIBLE_ROLES_PATH': roles_path} + } + validation_runner = ansible_runner.run(**parameters) - # If the validation phase has failed, then abort the execution finishing with an error. Else, continue. - if validation_runner.status == 'failed': - raise Exception(f"The validation phase of {{ path }} has failed. Please check that the environments " - 'meet the expected requirements.') + # If the validation phase has failed, then abort the execution finishing with an error. Else, continue. + if validation_runner.status == 'failed': + raise Exception(f"The validation phase of {{ path }} has failed. 
Please check that the environments " + 'meet the expected requirements.') # -------------------------- End of Step 4 ------------------------------------- diff --git a/tests/end_to_end/data/env_requirements.json b/tests/end_to_end/data/env_requirements.json new file mode 100644 index 0000000000..2325494b9d --- /dev/null +++ b/tests/end_to_end/data/env_requirements.json @@ -0,0 +1,62 @@ +{ + "test_audit": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 0, "distros": [""]} + }, + "test_aws_infrastructure_monitoring": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 0, "distros": [""]} + }, + "test_brute_force": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 2, "distros": ["CentOS", "Windows"]} + }, + "test_detecting_suspicious_binaries": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 0, "distros": [""]} + }, + "test_docker_monitoring": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 0, "distros": [""]} + }, + "test_emotet": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 1, "distros": ["Windows"]} + }, + "test_fim": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 2, "distros": ["CentOS", "Windows"]} + }, + "test_netcat": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 1, "distros": [""]} + }, + "test_osquery_integration": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 0, "distros": [""]} + }, + "test_shellshock_attack_detection": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 0, "distros": [""]} + }, + "test_sql_injection": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 1, "distros": [""]} + }, + "test_virustotal": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 1, "distros": [""]} + }, + "test_vulnerability_detector": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 2, "distros": [""]} + }, + "test_windows_defender": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 1, "distros": [""]} + }, + "test_yara_integration": { + "manager": {"instances": 1, "distros": ["CentOS"]}, + "agent": {"instances": 0, "distros": [""]} + } +} \ No newline at end of file diff --git a/tests/end_to_end/data/environment.json b/tests/end_to_end/data/environment.json deleted file mode 100644 index bdb65b1183..0000000000 --- a/tests/end_to_end/data/environment.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "test_audit": { - "managers": 1, - "agents": 0 - }, - "test_aws_infrastructure_monitoring": { - "managers": 1, - "agents": 0 - }, - "test_brute_force": { - "managers": 1, - "agents": 1 - }, - "test_detecting_suspicious_binaries": { - "managers": 1, - "agents": 0 - }, - "test_docker_monitoring": { - "managers": 1, - "agents": 0 - }, - "test_fim": { - "managers": 1, - "agents": 2 - }, - "test_netcat": { - "managers": 1, - "agents": 1 - }, - "test_osquery_integration": { - "managers": 1, - "agents": 0 - }, - "test_shellshock_attack_detection": { - "managers": 1, - "agents": 0 - }, - "test_sql_injection": { - "managers": 1, - "agents": 1 - }, - "test_virustotal": { - "managers": 1, - "agents": 1 - }, - "test_vulnerability_detector": { - "managers": 1, - "agents": 2 - }, - "test_windows_defender": { - "managers": 1, - "agents": 1 - }, - "test_yara_integration": { - 
"managers": 1, - "agents": 0 - } -} \ No newline at end of file diff --git a/tests/end_to_end/data/generate_general_play.yaml b/tests/end_to_end/data/validation_playbooks/generate_general_play.yaml similarity index 100% rename from tests/end_to_end/data/generate_general_play.yaml rename to tests/end_to_end/data/validation_playbooks/generate_general_play.yaml diff --git a/tests/end_to_end/data/validation_playbooks/generate_test_specific_play.yaml b/tests/end_to_end/data/validation_playbooks/generate_test_specific_play.yaml new file mode 100644 index 0000000000..53cc6e8ac4 --- /dev/null +++ b/tests/end_to_end/data/validation_playbooks/generate_test_specific_play.yaml @@ -0,0 +1,10 @@ +- hosts: localhost + vars: + validation_template: "{{ validation_template }}" + num_of_managers: "{{ num_of_managers }}" + num_of_agents: "{{ num_of_agents }}" + tasks: + - name: Generate a test specific validation playbook + template: + src: "{{ template_path }}" + dest: "{{ dest_path }}" diff --git a/tests/end_to_end/data/validation_template.j2 b/tests/end_to_end/data/validation_templates/general_validation.j2 similarity index 100% rename from tests/end_to_end/data/validation_template.j2 rename to tests/end_to_end/data/validation_templates/general_validation.j2 diff --git a/tests/end_to_end/data/validation_templates/test_specific_validation.j2 b/tests/end_to_end/data/validation_templates/test_specific_validation.j2 new file mode 100644 index 0000000000..d39b93c04c --- /dev/null +++ b/tests/end_to_end/data/validation_templates/test_specific_validation.j2 @@ -0,0 +1,19 @@ +- name: Check supported distros + hosts: {{ target_hosts }} + any_errors_fatal: true + tasks: + + - name: Check if the host distribution is compatible + include_role: + name: host_checker + tasks_from: check_supported_distro + vars: + os: "{% raw %}{{ 'Windows' if ansible_os_family == 'Windows' else 'Linux' }}{% endraw %}" + dist: "{% raw %}{{ ansible_distribution if ansible_os_family != 'Windows' else 'Windows' }}{% endraw %}" + manager_distros: {{ manager_distros }} + agent_distros: {{ agent_distros }} + +{# Include the test-specific validation tasks if exist #} +{% if validation_template != '' %} +{% include validation_template %} +{% endif %} diff --git a/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml b/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml new file mode 100644 index 0000000000..dafecaae0f --- /dev/null +++ b/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml @@ -0,0 +1,24 @@ +# REQUIRED VARIABLES +# ------------------- +# (String) os: Target operating system +# (String) dist: Target distribution +# (String) supported_dist: List of ditros supported by the current test + +- name: Get Wazuh installation + include_role: + name: service_controller + tasks_from: get_installation_type + +- debug: var=wazuh_info.stdout + +- debug: var=manager_distros + +- debug: var=agent_distros + +- fail: + msg: "{{ dist }} is not supported by this test: {{ manager_distros }}" + when: ('server' in wazuh_info.stdout and dist not in manager_distros) + +- fail: + msg: "{{ dist }} is not supported by this test: {{ manager_distros }}" + when: ('agent' in wazuh_info.stdout and dist not in manager_distros) From 605842ea6458c0b258c01735e59263f66df5c8e1 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Thu, 18 Aug 2022 09:13:32 -0300 Subject: [PATCH 06/27] fix(#3142): check fixed and some other changes were made. 
--- tests/end_to_end/data/env_requirements.json | 256 +++++++++++++++--- .../tasks/check_supported_distro.yaml | 12 +- .../test_fim/data/playbooks/validation.yaml | 15 + 3 files changed, 241 insertions(+), 42 deletions(-) create mode 100644 tests/end_to_end/test_fim/data/playbooks/validation.yaml diff --git a/tests/end_to_end/data/env_requirements.json b/tests/end_to_end/data/env_requirements.json index 2325494b9d..10bc1694ac 100644 --- a/tests/end_to_end/data/env_requirements.json +++ b/tests/end_to_end/data/env_requirements.json @@ -1,62 +1,252 @@ { "test_audit": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - "agent": {"instances": 0, "distros": [""]} + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 0, + "distros": [] + } }, "test_aws_infrastructure_monitoring": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - "agent": {"instances": 0, "distros": [""]} + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 0, + "distros": [] + } }, "test_brute_force": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - "agent": {"instances": 2, "distros": ["CentOS", "Windows"]} + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 2, + "distros": [ + "CentOS", + "Windows" + ] + } }, "test_detecting_suspicious_binaries": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - "agent": {"instances": 0, "distros": [""]} + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 0, + "distros": [] + } }, "test_docker_monitoring": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - "agent": {"instances": 0, "distros": [""]} + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 0, + "distros": [] + } }, "test_emotet": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - "agent": {"instances": 1, "distros": ["Windows"]} + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 1, + "distros": [ + "Windows" + ] + } }, "test_fim": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - "agent": {"instances": 2, "distros": ["CentOS", "Windows"]} + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 2, + "distros": [ + "CentOS", + "Windows" + ] + } + }, + "test_ip_reputation": { + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 1, + "distros": [ + "Windows" + ] + } }, "test_netcat": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - "agent": {"instances": 1, "distros": [""]} + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 1, + "distros": [ + "CentOS" + ] + } }, "test_osquery_integration": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - "agent": {"instances": 0, "distros": [""]} + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 0, + "distros": [] + } }, "test_shellshock_attack_detection": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - "agent": {"instances": 0, "distros": [""]} + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 0, + "distros": [] + } + }, + "test_slack_integration": { + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 0, + "distros": [] + } }, "test_sql_injection": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - 
"agent": {"instances": 1, "distros": [""]} + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 1, + "distros": [ + "CentOS" + ] + } + }, + "test_suricata_integration": { + "manager": { + "instances": 1, + "distros": [ + "Ubuntu" + ] + }, + "agent": { + "instances": 0, + "distros": [] + } + }, + "test_virustotal_integration": { + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 1, + "distros": [ + "CentOS" + ] + } }, - "test_virustotal": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - "agent": {"instances": 1, "distros": [""]} + "test_vulnerability_detector_linux": { + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 1, + "distros": [ + "Ubuntu" + ] + } }, - "test_vulnerability_detector": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - "agent": {"instances": 2, "distros": [""]} + "test_vulnerability_detector_windows": { + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 1, + "distros": [ + "Windows" + ] + } }, "test_windows_defender": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - "agent": {"instances": 1, "distros": [""]} + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 1, + "distros": [ + "Windows" + ] + } }, "test_yara_integration": { - "manager": {"instances": 1, "distros": ["CentOS"]}, - "agent": {"instances": 0, "distros": [""]} + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 0, + "distros": [] + } } -} \ No newline at end of file +} diff --git a/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml b/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml index dafecaae0f..ec0bff2209 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml @@ -9,16 +9,10 @@ name: service_controller tasks_from: get_installation_type -- debug: var=wazuh_info.stdout - -- debug: var=manager_distros - -- debug: var=agent_distros - - fail: msg: "{{ dist }} is not supported by this test: {{ manager_distros }}" - when: ('server' in wazuh_info.stdout and dist not in manager_distros) + when: (os == 'Linux' and 'server' in wazuh_info.stdout and dist not in manager_distros) - fail: - msg: "{{ dist }} is not supported by this test: {{ manager_distros }}" - when: ('agent' in wazuh_info.stdout and dist not in manager_distros) + msg: "{{ dist }} is not supported by this test: {{ agent_distros }}" + when: (os == 'Linux' and 'agent' in wazuh_info.stdout and dist not in agent_distros) diff --git a/tests/end_to_end/test_fim/data/playbooks/validation.yaml b/tests/end_to_end/test_fim/data/playbooks/validation.yaml new file mode 100644 index 0000000000..3d8e1158b0 --- /dev/null +++ b/tests/end_to_end/test_fim/data/playbooks/validation.yaml @@ -0,0 +1,15 @@ +- name: Check supported distros + hosts: centos-manager*,centos-agent*,windows-agent* + any_errors_fatal: true + tasks: + + - name: Check if the host distribution is compatible + include_role: + name: host_checker + tasks_from: check_supported_distro + vars: + os: "{{ 'Windows' if ansible_os_family == 'Windows' else 'Linux' }}" + dist: "{{ ansible_distribution if ansible_os_family != 'Windows' else 'Windows' }}" + manager_distros: ['CentOS'] + agent_distros: ['CentOS', 'Windows'] + From 642e3bde77f70e0052f110bf6a27b96f2e97fbe3 Mon Sep 17 
00:00:00 2001 From: Mauro Malara Date: Thu, 18 Aug 2022 10:46:58 -0300 Subject: [PATCH 07/27] fix(#3142): test_fim_windows fixed. The alert timestamp was corrected to allow for negative and positive offsets. Timeout for a task has been removed because it already had an implicit timeout. --- .../test_fim_windows/data/playbooks/configuration.yaml | 1 - .../test_fim/test_fim_windows/test_fim_windows.py | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/configuration.yaml b/tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/configuration.yaml index a04a1e298e..44b5cda65e 100644 --- a/tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/configuration.yaml +++ b/tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/configuration.yaml @@ -28,4 +28,3 @@ win_wait_for: path: C:\Program Files (x86)\ossec-agent\ossec.log search_regex: File integrity monitoring real-time Whodata engine started. - timeout: 20 diff --git a/tests/end_to_end/basic_cases/test_fim/test_fim_windows/test_fim_windows.py b/tests/end_to_end/basic_cases/test_fim/test_fim_windows/test_fim_windows.py index 249e09b2a7..186e109298 100644 --- a/tests/end_to_end/basic_cases/test_fim/test_fim_windows/test_fim_windows.py +++ b/tests/end_to_end/basic_cases/test_fim/test_fim_windows/test_fim_windows.py @@ -28,13 +28,14 @@ def test_fim_windows(configure_environment, metadata, get_dashboard_credentials, rule_level = metadata['rule.level'] rule_description = metadata['rule.description'] syscheck_path = metadata['extra']['syscheck.path'] + timestamp = r'\d+\-\d+\-\w+\:\d+\:\d+\.\d+[+|-]\d+' - expected_alert_json = fr'\{{"timestamp":"(\d+\-\d+\-\w+\:\d+\:\d+\.\d+\+\d+)","rule":{{"level":{rule_level},' \ + expected_alert_json = fr'\{{"timestamp":"({timestamp})","rule":{{"level":{rule_level},' \ fr'"description":"{rule_description}","id":"{rule_id}".*"syscheck":{{"path":' \ fr'"{syscheck_path}".*\}}' expected_indexed_alert = fr'.*"path": "{syscheck_path}".*"rule":.*"level": {rule_level},.*"description": ' \ - fr'"{rule_description}".*"timestamp": "(\d+\-\d+\-\w+\:\d+\:\d+\.\d+\+\d+)".*' + fr'"{rule_description}".*"timestamp": "({timestamp})".*' # Check that alert has been raised and save timestamp raised_alert = evm.check_event(callback=expected_alert_json, file_to_monitor=alerts_json, From 880d931fe2520f3ba288bd73c4faf2787aea7f77 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Thu, 18 Aug 2022 10:52:02 -0300 Subject: [PATCH 08/27] fix(#3142): the generation of test-specific validation was fixed. Now this phase deletes the generated file at the end of the execution. --- tests/end_to_end/conftest.py | 4 +++- tests/end_to_end/data/env_requirements.json | 19 ++++++++++++++++--- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index fb1b54aa42..3b4e495dcd 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -129,10 +129,12 @@ def validate_environments(request): 'playbook': validation_playbook, 'inventory': inventory_path, 'envvars': {'ANSIBLE_ROLES_PATH': roles_path} } validation_runner = ansible_runner.run(**parameters) + # Remove the generated playbook + remove_file(validation_playbook) # If the validation phase has failed, then abort the execution finishing with an error. Else, continue. if validation_runner.status == 'failed': - raise Exception(f"The validation phase of {{ path }} has failed. 
Please check that the environments " + raise Exception(f"The validation phase of {test_suite_name} has failed. Please check that the environments " 'meet the expected requirements.') # -------------------------- End of Step 4 ------------------------------------- diff --git a/tests/end_to_end/data/env_requirements.json b/tests/end_to_end/data/env_requirements.json index 10bc1694ac..991a844c32 100644 --- a/tests/end_to_end/data/env_requirements.json +++ b/tests/end_to_end/data/env_requirements.json @@ -76,7 +76,7 @@ ] } }, - "test_fim": { + "test_fim_linux": { "manager": { "instances": 1, "distros": [ @@ -84,9 +84,22 @@ ] }, "agent": { - "instances": 2, + "instances": 1, + "distros": [ + "CentOS" + ] + } + }, + "test_fim_windows": { + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 1, "distros": [ - "CentOS", "Windows" ] } From 49aceb0865f89457e9e53eaa14d1d7382142c6b5 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Fri, 19 Aug 2022 08:29:27 -0300 Subject: [PATCH 09/27] fix(#3142): several fixes were applied. Debug tasks removed. New task to check Wazuh components added. The error when setting a variable used to search for failures was fixed. --- .../data/playbooks/validation.yaml | 15 +++++++++ tests/end_to_end/conftest.py | 23 +++++++------ .../host_checker/tasks/check_connection.yaml | 27 ---------------- .../tasks/check_controller_indexer.yaml | 11 +++++-- .../tasks/check_filebeat_indexer.yaml | 4 +-- .../roles/host_checker/tasks/check_os.yaml | 8 ++--- .../host_checker/tasks/check_python.yaml | 8 ++--- .../tasks/check_supported_distro.yaml | 4 +-- .../tasks/check_wazuh_components.yaml | 32 +++++++++++++++++++ .../roles/host_checker/tasks/main.yaml | 14 ++++---- 10 files changed, 88 insertions(+), 58 deletions(-) create mode 100644 tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/validation.yaml delete mode 100644 tests/end_to_end/roles/host_checker/tasks/check_connection.yaml create mode 100644 tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml diff --git a/tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/validation.yaml b/tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/validation.yaml new file mode 100644 index 0000000000..ed7ced1d05 --- /dev/null +++ b/tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/validation.yaml @@ -0,0 +1,15 @@ +- name: Check supported distros + hosts: centos-manager*,windows-agent* + any_errors_fatal: true + tasks: + + - name: Check if the host distribution is compatible + include_role: + name: host_checker + tasks_from: check_supported_distro + vars: + os: "{{ 'Windows' if ansible_os_family == 'Windows' else 'Linux' }}" + dist: "{{ ansible_distribution if ansible_os_family != 'Windows' else 'Windows' }}" + manager_distros: ['CentOS'] + agent_distros: ['Windows'] + diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index 3b4e495dcd..3ff4ceab83 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -23,7 +23,12 @@ def validate_environments(request): Step 1: Collect the data related to the selected tests that will be executed. Step 2: Generate a playbook containing cross-checks for selected tests. Step 3: Run the generated playbook. - Step 4: Execute test-specific validations (if any). It will run one validation for each selected test set. + Step 4: Generate a test-specific playbook to validate the environment required by that test, then execute that + playbook. 
This will run one validation for each selected test set. + To add specific validation tasks to a test its only necessary to add a new jinja2 template inside the + `playbooks` folder in the test suite. E.g: + test_basic_cases/test_fim/test_fim_linux/data/playbooks/validation.j2 + (See end_to_end/data/validation_templates for a guide to create the file) Args: request (fixture): Gives access to the requesting test context. @@ -40,7 +45,7 @@ def validate_environments(request): if not inventory_path: raise ValueError('Inventory not specified') - # -------------------------- Step 1: Prepare the necessary data ---------------- + #--------------------------------------- Step 1: Prepare the necessary data ---------------------------------------- # Get the path of the tests from collected items. collected_paths = [item.fspath for item in collected_items] # Remove duplicates caused by the existence of 2 or more test cases @@ -63,9 +68,9 @@ def validate_environments(request): # Get the largest number of manager/agent instances num_of_managers = max(manager_instances) num_of_agents = max(agent_instances) - # -------------------------- End of Step 1 ------------------------------------- + #-------------------------------------------------- End of Step 1 -------------------------------------------------- - # ---- Step 2: Run the playbook to generate the general validation playbook ---- + #---------------------- Step 2: Run the playbook to generate the general validation playbook ----------------------- gen_parameters = { 'playbook': playbook_generator, 'inventory': inventory_path, 'extravars': { @@ -74,9 +79,9 @@ def validate_environments(request): } } ansible_runner.run(**gen_parameters) - # -------------------------- End of Step 2 ------------------------------------- + #-------------------------------------------------- End of Step 2 -------------------------------------------------- - # -------------------- Step 3: Run the general validation playbook ------------- + #----------------------------------- Step 3: Run the general validation playbook ----------------------------------- parameters = { 'playbook': general_playbook, 'inventory': inventory_path, @@ -89,9 +94,9 @@ def validate_environments(request): if general_validation_runner.status == 'failed': raise Exception(f"The general validations have failed. Please check that the environments meet the expected " 'requirements.') - # -------------------------- End of Step 3 ------------------------------------- + #-------------------------------------------------- End of Step 3 -------------------------------------------------- - # ---------------- Step 4: Execute test-specific validations (if any) ---------- + #------------------------------------ Step 4: Execute test-specific validations ------------------------------------ playbook_generator = os.path.join(suite_path, 'data', 'validation_playbooks', 'generate_test_specific_play.yaml') playbook_template = os.path.join(suite_path, 'data', 'validation_templates', 'test_specific_validation.j2') @@ -136,7 +141,7 @@ def validate_environments(request): if validation_runner.status == 'failed': raise Exception(f"The validation phase of {test_suite_name} has failed. 
Please check that the environments " 'meet the expected requirements.') - # -------------------------- End of Step 4 ------------------------------------- + #-------------------------------------------------- End of Step 4 -------------------------------------------------- @pytest.fixture(scope='function') diff --git a/tests/end_to_end/roles/host_checker/tasks/check_connection.yaml b/tests/end_to_end/roles/host_checker/tasks/check_connection.yaml deleted file mode 100644 index b9926b1aa3..0000000000 --- a/tests/end_to_end/roles/host_checker/tasks/check_connection.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# REQUIRED VARIABLES -# ------------------- -# (String) os: Target operating system - -- name: Try connection (Linux) - ping: - when: os == 'Linux' - ignore_errors: true - register: result - -- name: Check if host is reachable (Linux) - set_fact: - failed: true - errors: "{{ inventory_hostname }} is unreachable." - when: '"ping" not in result.keys() and os == "Linux"' - -- name: Try connection (Windows) - win_ping: - when: os == 'Windows' - ignore_errors: true - register: result - -- name: Check if host is reachable (Windows) - set_fact: - failed: true - errors: "{{ inventory_hostname }} is unreachable." - when: '"ping" not in result.keys() and os == "Windows"' diff --git a/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml b/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml index cc73eb9122..982342f859 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml @@ -15,8 +15,13 @@ delegate_to: localhost when: (os == 'Linux' and 'server' in wazuh_info.stdout) +- debug: var=test_result + when: test_result is failed + +- debug: var=test_result.stderr + - name: Check the connection between Controller node and Wazuh Indexer set_fact: - failed: true - errors: "{{ errors }}\nAnsible Controller node cannot connect correctly with Wazuh Indexer." - when: (test_result is failed and test_result.stdout is defined and 'refused' in test_result.stdout) + check_result: 'true' + errors: "{{ errors }}Ansible Controller node cannot connect correctly with Wazuh Indexer.\n" + when: (test_result is failed and test_result.stdout is defined and 'refused' in test_result.stderr) diff --git a/tests/end_to_end/roles/host_checker/tasks/check_filebeat_indexer.yaml b/tests/end_to_end/roles/host_checker/tasks/check_filebeat_indexer.yaml index 1e4adbcd12..5a4981a8e2 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_filebeat_indexer.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_filebeat_indexer.yaml @@ -16,6 +16,6 @@ - name: Check the connection between Filebeat and Wazuh Indexer set_fact: - failed: true - errors: "{{ errors }}\nFilebeat cannot connect correctly with Wazuh Indexer." + check_result: 'true' + errors: "{{ errors }}Filebeat cannot connect correctly with Wazuh Indexer.\n" when: (os == 'Linux' and 'server' in wazuh_info.stdout and 'ERROR' in test_result.stdout) diff --git a/tests/end_to_end/roles/host_checker/tasks/check_os.yaml b/tests/end_to_end/roles/host_checker/tasks/check_os.yaml index 129caefccf..028e9b57fe 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_os.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_os.yaml @@ -4,12 +4,12 @@ - name: Check OS (Linux) set_fact: - failed: true - errors: "{{ errors }}\nThe {{ ansible_system }} OS was not expected, but the {{ os }} OS." 
+ check_result: 'true' + errors: "{{ errors }}The {{ ansible_system }} OS was not expected, but the {{ os }} OS.\n" when: (os != ansible_system and os == "Linux") - name: Check OS (Windows) set_fact: - failed: true - errors: "{{ errors }}\nThe {{ ansible_os_family }} OS was not expected, but the {{ os }} OS." + check_result: 'true' + errors: "{{ errors }}The {{ ansible_os_family }} OS was not expected, but the {{ os }} OS.\n" when: (os != ansible_os_family and os == "Windows") diff --git a/tests/end_to_end/roles/host_checker/tasks/check_python.yaml b/tests/end_to_end/roles/host_checker/tasks/check_python.yaml index 9a30ecf5bf..452ae7e773 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_python.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_python.yaml @@ -4,8 +4,8 @@ - name: Check default Python version (Linux) set_fact: - failed: true - errors: "{{ errors }}\nPython version is less than 3. Current version: {{ ansible_python_version }}" + check_result: 'true' + errors: "{{ errors }}Python version is less than 3. Current version: {{ ansible_python_version }}\n" when: (os == "Linux" and ansible_python['version']['major'] < 3) - name: Get Python version (Windows) @@ -15,6 +15,6 @@ - name: Check default Python version (Linux) set_fact: - failed: true - errors: "{{ errors }}\nPython version is less than 3. Current version: {{ version.stdout }}" + check_result: 'true' + errors: "{{ errors }}Python version is less than 3. Current version: {{ version.stdout }}\n" when: (os == "Windows" and version.stdout.split(" ")[1].split(".")[0] | int < 3) diff --git a/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml b/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml index ec0bff2209..d8abbf11e7 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml @@ -10,9 +10,9 @@ tasks_from: get_installation_type - fail: - msg: "{{ dist }} is not supported by this test: {{ manager_distros }}" + msg: "{{ dist }} is not supported by this test: {{ manager_distros }}\n" when: (os == 'Linux' and 'server' in wazuh_info.stdout and dist not in manager_distros) - fail: - msg: "{{ dist }} is not supported by this test: {{ agent_distros }}" + msg: "{{ dist }} is not supported by this test: {{ agent_distros }}\n" when: (os == 'Linux' and 'agent' in wazuh_info.stdout and dist not in agent_distros) diff --git a/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml b/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml new file mode 100644 index 0000000000..a6a69c3167 --- /dev/null +++ b/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml @@ -0,0 +1,32 @@ +# REQUIRED VARIABLES +# ------------------- +# (String) os: Target operating system + +- name: Get Wazuh installation + include_role: + name: service_controller + tasks_from: get_installation_type + +- name: Populate services facts + service_facts: + when: os == 'Linux' + +- name: Check the status of Wazuh components (Manager) + set_fact: + check_result: 'true' + errors: "{{ errors }}{{ ansible_facts.services[item] }} is not running.\n" + when: (os == 'Linux' and 'server' in wazuh_info.stdout and ansible_facts.services[item].state != 'running') + with_items: + - 'wazuh-manager.service' + - 'wazuh-indexer.service' + - 'filebeat.service' + +- set_fact: + service: 'wazuh-agent.service' + when: (os == 'Linux' and 'agent' in wazuh_info.stdout) + +- name: Check the status of 
Wazuh Agent + set_fact: + check_result: 'true' + errors: "{{ errors }}{{ ansible_facts.services[service].name }} is not running.\n" + when: (os == 'Linux' and 'agent' in wazuh_info.stdout and ansible_facts.services[service].state != 'running') diff --git a/tests/end_to_end/roles/host_checker/tasks/main.yaml b/tests/end_to_end/roles/host_checker/tasks/main.yaml index 4bb3e9ec30..71794d7eb4 100644 --- a/tests/end_to_end/roles/host_checker/tasks/main.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/main.yaml @@ -1,30 +1,30 @@ # -------- Task to identify whether the validation step fails or not. -------- - name: Set flag and informative variable set_fact: - failed: false - errors: null + check_result: 'false' + errors: '' # ---------------------------------------------------------------------------- # -------- Checks ------------------------------------------------------------ -- name: Check host connection - include_tasks: check_connection.yaml - - name: Check Python import_tasks: check_python.yaml - name: Check OS import_tasks: check_os.yaml +- name: Check the status of Wazuh components + import_tasks: check_wazuh_components.yaml + - name: Check the connection between Filebeat and Wazuh Indexer import_tasks: check_filebeat_indexer.yaml - name: Check the connection between Controller node and Wazuh Indexer import_tasks: check_controller_indexer.yaml # ---------------------------------------------------------------------------- - +- debug: var=errors # -------- Task to identify whether the validation step fails or not. -------- - name: Verify if any check have failed fail: msg: "Some validations were fail:\n'{{ errors }}'" - when: failed == true + when: (check_result == 'true' or errors != '') # ---------------------------------------------------------------------------- From 59ba41eed0a969d3ceafeb3f4bdec34b4739de15 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Fri, 19 Aug 2022 08:39:42 -0300 Subject: [PATCH 10/27] docs(#3142): grammatical errors fixed. --- tests/end_to_end/conftest.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index 3ff4ceab83..8fc5eb4b81 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -24,9 +24,8 @@ def validate_environments(request): Step 2: Generate a playbook containing cross-checks for selected tests. Step 3: Run the generated playbook. Step 4: Generate a test-specific playbook to validate the environment required by that test, then execute that - playbook. This will run one validation for each selected test set. - To add specific validation tasks to a test its only necessary to add a new jinja2 template inside the - `playbooks` folder in the test suite. E.g: + playbook. This will run one validation for each selected test set. To add specific validation tasks to a + test,a new jinja2 template must be added inside the `playbooks` folder in the test suite. E.g: test_basic_cases/test_fim/test_fim_linux/data/playbooks/validation.j2 (See end_to_end/data/validation_templates for a guide to create the file) From 2c4f175a7a9bb520419ed6ef046dbadc407f5d47 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Fri, 19 Aug 2022 09:48:15 -0300 Subject: [PATCH 11/27] style(#3142): linter corrections applied. 
--- .../data/playbooks/validation.yaml | 15 --------------- tests/end_to_end/conftest.py | 16 ++++++++-------- .../tasks/check_wazuh_components.yaml | 8 ++++---- 3 files changed, 12 insertions(+), 27 deletions(-) delete mode 100644 tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/validation.yaml diff --git a/tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/validation.yaml b/tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/validation.yaml deleted file mode 100644 index ed7ced1d05..0000000000 --- a/tests/end_to_end/basic_cases/test_fim/test_fim_windows/data/playbooks/validation.yaml +++ /dev/null @@ -1,15 +0,0 @@ -- name: Check supported distros - hosts: centos-manager*,windows-agent* - any_errors_fatal: true - tasks: - - - name: Check if the host distribution is compatible - include_role: - name: host_checker - tasks_from: check_supported_distro - vars: - os: "{{ 'Windows' if ansible_os_family == 'Windows' else 'Linux' }}" - dist: "{{ ansible_distribution if ansible_os_family != 'Windows' else 'Windows' }}" - manager_distros: ['CentOS'] - agent_distros: ['Windows'] - diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index 8fc5eb4b81..5b3fd363f2 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -44,7 +44,7 @@ def validate_environments(request): if not inventory_path: raise ValueError('Inventory not specified') - #--------------------------------------- Step 1: Prepare the necessary data ---------------------------------------- + # --------------------------------------- Step 1: Prepare the necessary data --------------------------------------- # Get the path of the tests from collected items. collected_paths = [item.fspath for item in collected_items] # Remove duplicates caused by the existence of 2 or more test cases @@ -67,9 +67,9 @@ def validate_environments(request): # Get the largest number of manager/agent instances num_of_managers = max(manager_instances) num_of_agents = max(agent_instances) - #-------------------------------------------------- End of Step 1 -------------------------------------------------- + # -------------------------------------------------- End of Step 1 ------------------------------------------------- - #---------------------- Step 2: Run the playbook to generate the general validation playbook ----------------------- + # ---------------------- Step 2: Run the playbook to generate the general validation playbook ---------------------- gen_parameters = { 'playbook': playbook_generator, 'inventory': inventory_path, 'extravars': { @@ -78,9 +78,9 @@ def validate_environments(request): } } ansible_runner.run(**gen_parameters) - #-------------------------------------------------- End of Step 2 -------------------------------------------------- + # -------------------------------------------------- End of Step 2 ------------------------------------------------- - #----------------------------------- Step 3: Run the general validation playbook ----------------------------------- + # ----------------------------------- Step 3: Run the general validation playbook ---------------------------------- parameters = { 'playbook': general_playbook, 'inventory': inventory_path, @@ -93,9 +93,9 @@ def validate_environments(request): if general_validation_runner.status == 'failed': raise Exception(f"The general validations have failed. 
Please check that the environments meet the expected " 'requirements.') - #-------------------------------------------------- End of Step 3 -------------------------------------------------- + # -------------------------------------------------- End of Step 3 ------------------------------------------------- - #------------------------------------ Step 4: Execute test-specific validations ------------------------------------ + # ------------------------------------ Step 4: Execute test-specific validations ----------------------------------- playbook_generator = os.path.join(suite_path, 'data', 'validation_playbooks', 'generate_test_specific_play.yaml') playbook_template = os.path.join(suite_path, 'data', 'validation_templates', 'test_specific_validation.j2') @@ -140,7 +140,7 @@ def validate_environments(request): if validation_runner.status == 'failed': raise Exception(f"The validation phase of {test_suite_name} has failed. Please check that the environments " 'meet the expected requirements.') - #-------------------------------------------------- End of Step 4 -------------------------------------------------- + # -------------------------------------------------- End of Step 4 ------------------------------------------------- @pytest.fixture(scope='function') diff --git a/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml b/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml index a6a69c3167..c0e33ef6ae 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml @@ -17,12 +17,12 @@ errors: "{{ errors }}{{ ansible_facts.services[item] }} is not running.\n" when: (os == 'Linux' and 'server' in wazuh_info.stdout and ansible_facts.services[item].state != 'running') with_items: - - 'wazuh-manager.service' - - 'wazuh-indexer.service' - - 'filebeat.service' + - wazuh-manager.service + - wazuh-indexer.service + - filebeat.service - set_fact: - service: 'wazuh-agent.service' + service: wazuh-agent.service when: (os == 'Linux' and 'agent' in wazuh_info.stdout) - name: Check the status of Wazuh Agent From e6d11cd5e878157b6852beee1b62a426145bcae4 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Fri, 19 Aug 2022 10:35:56 -0300 Subject: [PATCH 12/27] fix(#3142): debug task deleted. --- tests/end_to_end/roles/host_checker/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/end_to_end/roles/host_checker/tasks/main.yaml b/tests/end_to_end/roles/host_checker/tasks/main.yaml index 71794d7eb4..644a41f2b7 100644 --- a/tests/end_to_end/roles/host_checker/tasks/main.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/main.yaml @@ -21,7 +21,7 @@ - name: Check the connection between Controller node and Wazuh Indexer import_tasks: check_controller_indexer.yaml # ---------------------------------------------------------------------------- -- debug: var=errors + # -------- Task to identify whether the validation step fails or not. -------- - name: Verify if any check have failed fail: From 0d81f1b1949ea38b057a879d7df67267326c175a Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Fri, 19 Aug 2022 12:15:00 -0300 Subject: [PATCH 13/27] refactor(#3142): collection of errors and some fixes. 
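

The collection pattern, in outline: ansible-runner reports the hosts that failed in `stats['failures']`, and the `host_checker` role now stores its error summary in a cacheable `phase_results` fact that can be read back with `get_fact_cache()`. A rough sketch of the idea (the playbook and inventory paths are illustrative placeholders, not the real ones):

```python
import ansible_runner

# Run a validation playbook; both paths below are placeholders.
runner = ansible_runner.run(playbook='general_validation.yaml', inventory='inventory.yml')

if runner.status == 'failed':
    # Hosts that reported at least one failed task.
    hosts_with_errors = [host for host in runner.stats['failures']]
    # Read back the 'phase_results' fact cached by the host_checker role.
    errors = ''.join(runner.get_fact_cache(host).get('phase_results', '') for host in hosts_with_errors)
    raise Exception(f"The general validations have failed. Result:\n{errors}")
```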
--- tests/end_to_end/conftest.py | 9 ++++++++- tests/end_to_end/data/env_requirements.json | 17 +++++++++++++++-- .../tasks/check_controller_indexer.yaml | 5 ----- .../roles/host_checker/tasks/main.yaml | 4 ++++ 4 files changed, 27 insertions(+), 8 deletions(-) diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index 5b3fd363f2..2324b0083c 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -91,8 +91,15 @@ def validate_environments(request): remove_file(general_playbook) # If the general validations have failed, then abort the execution finishing with an error. Else, continue. if general_validation_runner.status == 'failed': + # Collect inventory_hostnames with errors + hosts_with_errors = [key for key in general_validation_runner.stats['failures']] + # Collect list of errors + errors = [] + errors.extend([general_validation_runner.get_fact_cache(host)['phase_results'] for host in hosts_with_errors]) + errors = ''.join(errors) + # Raise the exception with errors details raise Exception(f"The general validations have failed. Please check that the environments meet the expected " - 'requirements.') + f"requirements. Result:\n{errors}") # -------------------------------------------------- End of Step 3 ------------------------------------------------- # ------------------------------------ Step 4: Execute test-specific validations ----------------------------------- diff --git a/tests/end_to_end/data/env_requirements.json b/tests/end_to_end/data/env_requirements.json index 991a844c32..57c7d09292 100644 --- a/tests/end_to_end/data/env_requirements.json +++ b/tests/end_to_end/data/env_requirements.json @@ -23,7 +23,21 @@ "distros": [] } }, - "test_brute_force": { + "test_brute_force_ssh": { + "manager": { + "instances": 1, + "distros": [ + "CentOS" + ] + }, + "agent": { + "instances": 1, + "distros": [ + "CentOS" + ] + } + }, + "test_brute_force_rdp": { "manager": { "instances": 1, "distros": [ @@ -33,7 +47,6 @@ "agent": { "instances": 2, "distros": [ - "CentOS", "Windows" ] } diff --git a/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml b/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml index 982342f859..ced42bf37c 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml @@ -15,11 +15,6 @@ delegate_to: localhost when: (os == 'Linux' and 'server' in wazuh_info.stdout) -- debug: var=test_result - when: test_result is failed - -- debug: var=test_result.stderr - - name: Check the connection between Controller node and Wazuh Indexer set_fact: check_result: 'true' diff --git a/tests/end_to_end/roles/host_checker/tasks/main.yaml b/tests/end_to_end/roles/host_checker/tasks/main.yaml index 644a41f2b7..20933d8e4e 100644 --- a/tests/end_to_end/roles/host_checker/tasks/main.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/main.yaml @@ -23,6 +23,10 @@ # ---------------------------------------------------------------------------- # -------- Task to identify whether the validation step fails or not. -------- +- set_fact: + phase_results: "{{ errors }}" + cacheable: yes + - name: Verify if any check have failed fail: msg: "Some validations were fail:\n'{{ errors }}'" From 0322d2845a4d91fef3294d1ae1197cc7a6f248c7 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Fri, 19 Aug 2022 12:46:12 -0300 Subject: [PATCH 14/27] fix(#3142): minor fixes applied. 
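

For reference, the message fix is needed because `service_facts` registers every unit under `ansible_facts.services` as a mapping, so interpolating the whole entry printed the raw dictionary instead of the service name. Approximate shape of one entry (values are illustrative, not taken from a real run):

```yaml
# ansible_facts.services['wazuh-manager.service'] after running service_facts:
wazuh-manager.service:
  name: wazuh-manager.service   # the field the error message should show
  state: running
  status: enabled
  source: systemd
```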
--- .../roles/host_checker/tasks/check_controller_indexer.yaml | 2 +- .../roles/host_checker/tasks/check_wazuh_components.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml b/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml index ced42bf37c..991f6ef1a9 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml @@ -7,7 +7,7 @@ name: service_controller tasks_from: get_installation_type -- name: Test connection with host +- name: Test connection with Wazuh Indexer shell: nc -v -4 {{ inventory_hostname }} 9200 timeout: 3 ignore_errors: true diff --git a/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml b/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml index c0e33ef6ae..88ead12940 100644 --- a/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml +++ b/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml @@ -14,7 +14,7 @@ - name: Check the status of Wazuh components (Manager) set_fact: check_result: 'true' - errors: "{{ errors }}{{ ansible_facts.services[item] }} is not running.\n" + errors: "{{ errors }}{{ ansible_facts.services[item].name }} is not running.\n" when: (os == 'Linux' and 'server' in wazuh_info.stdout and ansible_facts.services[item].state != 'running') with_items: - wazuh-manager.service From bcbfab2e8ec228e7e28f8b010aad57bbb12b1f48 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Fri, 19 Aug 2022 17:59:19 -0300 Subject: [PATCH 15/27] refactor(#3142): check distro and OS unified. --- tests/end_to_end/conftest.py | 92 +++++++++---------- .../generate_general_play.yaml | 4 +- .../generate_test_specific_play.yaml | 10 -- .../general_validation.j2 | 9 +- .../test_specific_validation.j2 | 19 ---- .../roles/host_checker/tasks/check_os.yaml | 9 +- .../tasks/check_supported_distro.yaml | 18 ---- 7 files changed, 50 insertions(+), 111 deletions(-) delete mode 100644 tests/end_to_end/data/validation_playbooks/generate_test_specific_play.yaml delete mode 100644 tests/end_to_end/data/validation_templates/test_specific_validation.j2 delete mode 100644 tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index 2324b0083c..b1aad716f4 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -23,11 +23,7 @@ def validate_environments(request): Step 1: Collect the data related to the selected tests that will be executed. Step 2: Generate a playbook containing cross-checks for selected tests. Step 3: Run the generated playbook. - Step 4: Generate a test-specific playbook to validate the environment required by that test, then execute that - playbook. This will run one validation for each selected test set. To add specific validation tasks to a - test,a new jinja2 template must be added inside the `playbooks` folder in the test suite. E.g: - test_basic_cases/test_fim/test_fim_linux/data/playbooks/validation.j2 - (See end_to_end/data/validation_templates for a guide to create the file) + Step 4: Execute a test-specific playbook (if any). This will run one validation for each selected test set. Args: request (fixture): Gives access to the requesting test context. 
@@ -45,13 +41,26 @@ def validate_environments(request): raise ValueError('Inventory not specified') # --------------------------------------- Step 1: Prepare the necessary data --------------------------------------- + test_suites_paths = [] + manager_instances = [] + agent_instances = [] + target_hosts = [] + target_distros = [] + distros_by = {'manager': [], 'agent': []} + + def set_target_and_distros(suite): + for key in environment_metadata[test_suite_name]: + if environment_metadata[test_suite_name][key]['instances'] > 0: + # Save manager/agent distros + distros_by[key] = environment_metadata[test_suite_name][key]['distros'] + target_distros.extend(environment_metadata[test_suite_name][key]['distros']) + # Add the target host to the list (following the standard host name: "-*") + target_hosts.extend([distro.lower() + f"-{key}" for distro in distros_by[key]]) + # Get the path of the tests from collected items. collected_paths = [item.fspath for item in collected_items] # Remove duplicates caused by the existence of 2 or more test cases collected_paths = list(dict.fromkeys(collected_paths)) - test_suites_paths = [] - manager_instances = [] - agent_instances = [] for path in collected_paths: # Remove the name of the file from the path @@ -63,18 +72,22 @@ def validate_environments(request): # Save the test environment metadata manager_instances.append(environment_metadata[test_suite_name]['manager']['instances']) agent_instances.append(environment_metadata[test_suite_name]['agent']['instances']) + set_target_and_distros(test_suite_name) + + # Remove duplicates + target_distros = list(dict.fromkeys(target_distros)) + target_hosts = list(dict.fromkeys(target_hosts)) - # Get the largest number of manager/agent instances - num_of_managers = max(manager_instances) - num_of_agents = max(agent_instances) # -------------------------------------------------- End of Step 1 ------------------------------------------------- # ---------------------- Step 2: Run the playbook to generate the general validation playbook ---------------------- gen_parameters = { 'playbook': playbook_generator, 'inventory': inventory_path, 'extravars': { - 'template_path': playbook_template, 'dest_path': general_playbook, - 'num_of_managers': num_of_managers, 'num_of_agents': num_of_agents + 'template_path': playbook_template, + 'dest_path': general_playbook, + 'target_hosts': ','.join(target_hosts), + 'distros': target_distros } } ansible_runner.run(**gen_parameters) @@ -102,51 +115,28 @@ def validate_environments(request): f"requirements. 
Result:\n{errors}") # -------------------------------------------------- End of Step 3 ------------------------------------------------- - # ------------------------------------ Step 4: Execute test-specific validations ----------------------------------- - playbook_generator = os.path.join(suite_path, 'data', 'validation_playbooks', 'generate_test_specific_play.yaml') - playbook_template = os.path.join(suite_path, 'data', 'validation_templates', 'test_specific_validation.j2') - + # -------------------------------- Step 4: Execute test-specific validations (if any) ------------------------------ for path in test_suites_paths: - validation_template = os.path.join(path, 'data', 'playbooks', 'validation.j2') - validation_template = validation_template if os.path.exists(validation_template) else '' - # Define the path where the resulting playbook will be stored validation_playbook = os.path.join(path, 'data', 'playbooks', 'validation.yaml') - # Get distros by instances type test_suite_name = path.split('/')[-1:][0] target_hosts = [] - distros = {"manager": [], "agent": []} - for key in environment_metadata[test_suite_name]: - if environment_metadata[test_suite_name][key]['instances'] > 0: - # Save manager/agent distros for the current test - distros[key] = environment_metadata[test_suite_name][key]['distros'] - # Add the target host to the list (following the standard host name: "-*") - target_hosts.extend([distro.lower() + f"-{key}*" for distro in distros[key]]) - - # Generate test_specific validation playbook - gen_parameters = { - 'playbook': playbook_generator, 'inventory': inventory_path, 'envvars': {'ANSIBLE_ROLES_PATH': roles_path}, - 'extravars': { - 'template_path': playbook_template, 'dest_path': validation_playbook, - 'num_of_managers': num_of_managers, 'num_of_agents': num_of_agents, - 'validation_template': validation_template, 'target_hosts': ','.join(target_hosts), - 'manager_distros': distros['manager'], 'agent_distros': distros['agent'] + distros_by = {"manager": [], "agent": []} + set_target_and_distros(test_suite_name) + + # Run test-specific validation playbook (if any) + if os.path.exists(validation_playbook): + parameters = { + 'playbook': validation_playbook, 'inventory': inventory_path, + 'envvars': {'ANSIBLE_ROLES_PATH': roles_path}, + 'extravars': {'target_hosts': ','.join(target_hosts)} } - } - ansible_runner.run(**gen_parameters) + validation_runner = ansible_runner.run(**parameters) - # Run test_specific validation playbook - parameters = { - 'playbook': validation_playbook, 'inventory': inventory_path, 'envvars': {'ANSIBLE_ROLES_PATH': roles_path} - } - validation_runner = ansible_runner.run(**parameters) - # Remove the generated playbook - remove_file(validation_playbook) - - # If the validation phase has failed, then abort the execution finishing with an error. Else, continue. - if validation_runner.status == 'failed': - raise Exception(f"The validation phase of {test_suite_name} has failed. Please check that the environments " - 'meet the expected requirements.') + # If the validation phase has failed, then abort the execution finishing with an error. Else, continue. + if validation_runner.status == 'failed': + raise Exception(f"The validation phase of {test_suite_name} has failed. 
Please check that the "
                                'environments meet the expected requirements.')
    # -------------------------------------------------- End of Step 4 -------------------------------------------------


@pytest.fixture(scope='function')
diff --git a/tests/end_to_end/data/validation_playbooks/generate_general_play.yaml b/tests/end_to_end/data/validation_playbooks/generate_general_play.yaml
index ada390fb1e..214fcabd0e 100644
--- a/tests/end_to_end/data/validation_playbooks/generate_general_play.yaml
+++ b/tests/end_to_end/data/validation_playbooks/generate_general_play.yaml
@@ -1,7 +1,7 @@
 - hosts: localhost
   vars:
-    num_of_managers: "{{ num_of_managers }}"
-    num_of_agents: "{{ num_of_agents }}"
+    target_hosts: "{{ target_hosts }}"
+    distros: "{{ distros }}"
   tasks:
     - name: Generate a general validation playbook
       template:
diff --git a/tests/end_to_end/data/validation_playbooks/generate_test_specific_play.yaml b/tests/end_to_end/data/validation_playbooks/generate_test_specific_play.yaml
deleted file mode 100644
index 53cc6e8ac4..0000000000
--- a/tests/end_to_end/data/validation_playbooks/generate_test_specific_play.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-- hosts: localhost
-  vars:
-    validation_template: "{{ validation_template }}"
-    num_of_managers: "{{ num_of_managers }}"
-    num_of_agents: "{{ num_of_agents }}"
-  tasks:
-    - name: Generate a test specific validation playbook
-      template:
-        src: "{{ template_path }}"
-        dest: "{{ dest_path }}"
diff --git a/tests/end_to_end/data/validation_templates/general_validation.j2 b/tests/end_to_end/data/validation_templates/general_validation.j2
index fe2d3829c0..27c54dc219 100644
--- a/tests/end_to_end/data/validation_templates/general_validation.j2
+++ b/tests/end_to_end/data/validation_templates/general_validation.j2
@@ -1,13 +1,8 @@
 - name: General validation phase
-{% if num_of_agents == 0 %}
-  hosts: managers
-{% elif num_of_managers == 0 %}
-  hosts: agents
-{% else %}
-  hosts: managers:agents
-{% endif %}
+  hosts: {{ target_hosts }}
   any_errors_fatal: true
   roles:
     - role: host_checker
       vars:
         os: "{% raw %}{{ 'Windows' if ansible_os_family == 'Windows' else 'Linux' }}{% endraw %}"
+        supported_distros: {{ distros }}
diff --git a/tests/end_to_end/data/validation_templates/test_specific_validation.j2 b/tests/end_to_end/data/validation_templates/test_specific_validation.j2
deleted file mode 100644
index d39b93c04c..0000000000
--- a/tests/end_to_end/data/validation_templates/test_specific_validation.j2
+++ /dev/null
@@ -1,19 +0,0 @@
-- name: Check supported distros
-  hosts: {{ target_hosts }}
-  any_errors_fatal: true
-  tasks:
-
-    - name: Check if the host distribution is compatible
-      include_role:
-        name: host_checker
-        tasks_from: check_supported_distro
-      vars:
-        os: "{% raw %}{{ 'Windows' if ansible_os_family == 'Windows' else 'Linux' }}{% endraw %}"
-        dist: "{% raw %}{{ ansible_distribution if ansible_os_family != 'Windows' else 'Windows' }}{% endraw %}"
-        manager_distros: {{ manager_distros }}
-        agent_distros: {{ agent_distros }}
-
-{# Include the test-specific validation tasks if exist #}
-{% if validation_template != '' %}
-{% include validation_template %}
-{% endif %}
diff --git a/tests/end_to_end/roles/host_checker/tasks/check_os.yaml b/tests/end_to_end/roles/host_checker/tasks/check_os.yaml
index 028e9b57fe..fdaaeb9d21 100644
--- a/tests/end_to_end/roles/host_checker/tasks/check_os.yaml
+++ b/tests/end_to_end/roles/host_checker/tasks/check_os.yaml
@@ -1,15 +1,16 @@
 # REQUIRED VARIABLES
 # -------------------
 # (String) os: Target operating system
+# (String) supported_distros: List of distros
supported by the current test - name: Check OS (Linux) set_fact: check_result: 'true' - errors: "{{ errors }}The {{ ansible_system }} OS was not expected, but the {{ os }} OS.\n" - when: (os != ansible_system and os == "Linux") + errors: "{{ errors }}The {{ ansible_distribution }} distro isn't supported for the selected tests currently.\n" + when: (ansible_distribution not in supported_distros and os == "Linux") - name: Check OS (Windows) set_fact: check_result: 'true' - errors: "{{ errors }}The {{ ansible_os_family }} OS was not expected, but the {{ os }} OS.\n" - when: (os != ansible_os_family and os == "Windows") + errors: "{{ errors }}The {{ os }} OS isn't supported for the selected tests currently.\n" + when: (os == "Windows" and os not in supported_distros) diff --git a/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml b/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml deleted file mode 100644 index d8abbf11e7..0000000000 --- a/tests/end_to_end/roles/host_checker/tasks/check_supported_distro.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# REQUIRED VARIABLES -# ------------------- -# (String) os: Target operating system -# (String) dist: Target distribution -# (String) supported_dist: List of ditros supported by the current test - -- name: Get Wazuh installation - include_role: - name: service_controller - tasks_from: get_installation_type - -- fail: - msg: "{{ dist }} is not supported by this test: {{ manager_distros }}\n" - when: (os == 'Linux' and 'server' in wazuh_info.stdout and dist not in manager_distros) - -- fail: - msg: "{{ dist }} is not supported by this test: {{ agent_distros }}\n" - when: (os == 'Linux' and 'agent' in wazuh_info.stdout and dist not in agent_distros) From bc8a6716fa31e99320420eaaf92e04c092b14027 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Mon, 22 Aug 2022 08:06:01 -0300 Subject: [PATCH 16/27] fix(#3142): unnecessary function and some code have been removed. --- tests/end_to_end/conftest.py | 32 +++++++++----------------------- 1 file changed, 9 insertions(+), 23 deletions(-) diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index b1aad716f4..5e44be613b 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -42,21 +42,10 @@ def validate_environments(request): # --------------------------------------- Step 1: Prepare the necessary data --------------------------------------- test_suites_paths = [] - manager_instances = [] - agent_instances = [] target_hosts = [] target_distros = [] distros_by = {'manager': [], 'agent': []} - def set_target_and_distros(suite): - for key in environment_metadata[test_suite_name]: - if environment_metadata[test_suite_name][key]['instances'] > 0: - # Save manager/agent distros - distros_by[key] = environment_metadata[test_suite_name][key]['distros'] - target_distros.extend(environment_metadata[test_suite_name][key]['distros']) - # Add the target host to the list (following the standard host name: "-*") - target_hosts.extend([distro.lower() + f"-{key}" for distro in distros_by[key]]) - # Get the path of the tests from collected items. 
collected_paths = [item.fspath for item in collected_items] # Remove duplicates caused by the existence of 2 or more test cases @@ -69,15 +58,17 @@ def set_target_and_distros(suite): test_suites_paths.append(path) # Get the test suite name test_suite_name = path.split('/')[-1:][0] - # Save the test environment metadata - manager_instances.append(environment_metadata[test_suite_name]['manager']['instances']) - agent_instances.append(environment_metadata[test_suite_name]['agent']['instances']) - set_target_and_distros(test_suite_name) - + # Set target hosts and distros + for key in environment_metadata[test_suite_name]: + if environment_metadata[test_suite_name][key]['instances'] > 0: + # Save manager/agent distros + distros_by[key] = environment_metadata[test_suite_name][key]['distros'] + target_distros.extend(environment_metadata[test_suite_name][key]['distros']) + # Add the target host to the list (following the standard host name: "-*") + target_hosts.extend([distro.lower() + f"-{key}" for distro in distros_by[key]]) # Remove duplicates target_distros = list(dict.fromkeys(target_distros)) target_hosts = list(dict.fromkeys(target_hosts)) - # -------------------------------------------------- End of Step 1 ------------------------------------------------- # ---------------------- Step 2: Run the playbook to generate the general validation playbook ---------------------- @@ -119,17 +110,12 @@ def set_target_and_distros(suite): for path in test_suites_paths: validation_playbook = os.path.join(path, 'data', 'playbooks', 'validation.yaml') - test_suite_name = path.split('/')[-1:][0] - target_hosts = [] - distros_by = {"manager": [], "agent": []} - set_target_and_distros(test_suite_name) - # Run test-specific validation playbook (if any) if os.path.exists(validation_playbook): parameters = { 'playbook': validation_playbook, 'inventory': inventory_path, 'envvars': {'ANSIBLE_ROLES_PATH': roles_path}, - 'extravars': {'target_hosts': ','.join(target_hosts)} + 'extravars': {'target_hosts': ','.join(target_hosts), 'distros': target_distros} } validation_runner = ansible_runner.run(**parameters) From 646f97977e570d31c33a47c6ccaa818c6b1a2075 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Mon, 22 Aug 2022 09:35:20 -0300 Subject: [PATCH 17/27] refactor(#3142): some changes were made on env_requirements. --- tests/end_to_end/data/env_requirements.json | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/end_to_end/data/env_requirements.json b/tests/end_to_end/data/env_requirements.json index 57c7d09292..ad9baf33cc 100644 --- a/tests/end_to_end/data/env_requirements.json +++ b/tests/end_to_end/data/env_requirements.json @@ -131,7 +131,7 @@ ] } }, - "test_netcat": { + "test_unauthorized_processes_detection": { "manager": { "instances": 1, "distros": [ @@ -199,12 +199,14 @@ "manager": { "instances": 1, "distros": [ - "Ubuntu" + "CentOS" ] }, "agent": { - "instances": 0, - "distros": [] + "instances": 1, + "distros": [ + "Ubuntu" + ] } }, "test_virustotal_integration": { From 50608300b557c4e54350eba3b64fb59b239f054a Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Mon, 22 Aug 2022 11:08:29 -0300 Subject: [PATCH 18/27] fix(#3142): hostname replaced by ip address in netcat command. 
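

`inventory_hostname` is only the alias a host has inside the Ansible inventory, so it is not guaranteed to resolve from the controller; `hostvars[inventory_hostname]['ansible_host']` returns the address declared for that host, which is what `nc` needs. A minimal sketch of the relationship (alias and address are illustrative):

```yaml
# Illustrative inventory entry: 'centos-manager' is only an alias, while
# 'ansible_host' holds the address that is actually reachable.
all:
  hosts:
    centos-manager:
      ansible_host: 192.168.0.10
```

With such an entry, `{{ hostvars['centos-manager']['ansible_host'] }}` renders `192.168.0.10`, whereas the bare alias would only work if DNS could resolve it.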
---
 .../roles/host_checker/tasks/check_controller_indexer.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml b/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml
index 991f6ef1a9..ddb7e71675 100644
--- a/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml
+++ b/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml
@@ -8,7 +8,7 @@
     tasks_from: get_installation_type
 
 - name: Test connection with Wazuh Indexer
-  shell: nc -v -4 {{ inventory_hostname }} 9200
+  shell: nc -v -4 {{ hostvars[inventory_hostname]['ansible_host'] }} 9200
   timeout: 3
   ignore_errors: true
   register: test_result

From 8d1c71dd799b953e3165f500fe62483678882895 Mon Sep 17 00:00:00 2001
From: Mauro Malara
Date: Mon, 22 Aug 2022 12:19:40 -0300
Subject: [PATCH 19/27] fix(#3142): stdout validation replaced with stderr.

---
 .../roles/host_checker/tasks/check_controller_indexer.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml b/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml
index ddb7e71675..686d837ae5 100644
--- a/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml
+++ b/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml
@@ -19,4 +19,4 @@
   set_fact:
     check_result: 'true'
     errors: "{{ errors }}Ansible Controller node cannot connect correctly with Wazuh Indexer.\n"
-  when: (test_result is failed and test_result.stdout is defined and 'refused' in test_result.stderr)
+  when: (test_result is failed and test_result.stderr is defined and 'refused' in test_result.stderr)

From 99eaafc4879d121ddde78e3b3093d126dbe08882 Mon Sep 17 00:00:00 2001
From: Mauro Malara
Date: Tue, 23 Aug 2022 01:04:15 -0300
Subject: [PATCH 20/27] fix(#3142): replace the inventory_hostname with IP.

---
 .../test_brute_force_rdp/data/playbooks/generate_events.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/end_to_end/test_basic_cases/test_brute_force/test_brute_force_rdp/data/playbooks/generate_events.yaml b/tests/end_to_end/test_basic_cases/test_brute_force/test_brute_force_rdp/data/playbooks/generate_events.yaml
index 450c09a4e6..a83d9b8653 100644
--- a/tests/end_to_end/test_basic_cases/test_brute_force/test_brute_force_rdp/data/playbooks/generate_events.yaml
+++ b/tests/end_to_end/test_basic_cases/test_brute_force/test_brute_force_rdp/data/playbooks/generate_events.yaml
@@ -11,7 +11,7 @@
   tasks:
 
     - name: Attempt a RDP brute force attack
-      shell: hydra -l {{ item }} -p invalid_password rdp://wazuh-windows
+      shell: hydra -l {{ item }} -p invalid_password rdp://{{ hostvars['windows-agent']['ansible_host'] }}
       loop:
         - test_user
         - test_user

From e4d4b48246cfd570db00d8692fcb04ae26a71ada Mon Sep 17 00:00:00 2001
From: Mauro Malara
Date: Wed, 24 Aug 2022 08:40:31 -0300
Subject: [PATCH 21/27] refactor(#3142): split phase into 2 fixtures with
 different scopes.

This change avoids stopping the whole execution when a single test-specific
validation fails.
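
In outline, a session-scoped fixture runs once for the whole pytest run, while a module-scoped fixture runs once per collected test module, so a failing test-specific validation aborts only its own module. A minimal sketch of the scoping split (bodies elided; only the fixture names and scopes come from this change):

```python
import pytest

@pytest.fixture(scope='session', autouse=True)
def validate_environments(request):
    # Runs once per session: general cross-checks for all selected tests.
    ...

@pytest.fixture(scope='module', autouse=True)
def run_specific_validations(request):
    # Runs once per test module: a failure here aborts only that module.
    ...
```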
---
 tests/end_to_end/README.md | 34 ++++++++++++++++++++++++++
 tests/end_to_end/conftest.py | 80 ++++++++++++-------
 .../host_checker/tasks/check_controller_indexer.yaml | 0
 .../host_checker/tasks/check_filebeat_indexer.yaml | 0
 .../host_checker/tasks/check_os.yaml | 0
 .../host_checker/tasks/check_python.yaml | 0
 .../host_checker/tasks/check_wazuh_components.yaml | 0
 .../host_checker/tasks/main.yaml | 0
 .../tasks/get_installation_type.yaml | 0
 9 files changed, 83 insertions(+), 31 deletions(-)
 rename tests/end_to_end/{roles => data/ansible_roles}/host_checker/tasks/check_controller_indexer.yaml (100%)
 rename tests/end_to_end/{roles => data/ansible_roles}/host_checker/tasks/check_filebeat_indexer.yaml (100%)
 rename tests/end_to_end/{roles => data/ansible_roles}/host_checker/tasks/check_os.yaml (100%)
 rename tests/end_to_end/{roles => data/ansible_roles}/host_checker/tasks/check_python.yaml (100%)
 rename tests/end_to_end/{roles => data/ansible_roles}/host_checker/tasks/check_wazuh_components.yaml (100%)
 rename tests/end_to_end/{roles => data/ansible_roles}/host_checker/tasks/main.yaml (100%)
 rename tests/end_to_end/{roles => data/ansible_roles}/service_controller/tasks/get_installation_type.yaml (100%)

diff --git a/tests/end_to_end/README.md b/tests/end_to_end/README.md
index 38e1dfec9c..1e51b4bb2e 100644
--- a/tests/end_to_end/README.md
+++ b/tests/end_to_end/README.md
@@ -18,6 +18,7 @@ Our E2E tests will verify that, after generating an event, an alert will be trig
 To run these tests we need to use a **Linux** machine and install the following tools:
 
 - [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html)
+- [netcat](https://en.wikipedia.org/wiki/Netcat)
 
 Then, you will need to have an inventory with the needed hosts and variables. For example:
 
@@ -155,6 +156,39 @@
 python -m pytest --inventory_path=
 ```
 
+### Adding or modifying E2E tests
+
+When adding or modifying any test it is necesry to modify the file with the environment data, placed in `tests/end_to_end/data/env_requirements.json`
+
+This file is used to validate the environments where the selected tests will be executed and it follows this structure:
+```
+"<test_suite_name>": {
+    "manager": {
+        "instances": <number_of_manager_instances>,
+        "distros": <list_of_supported_distros>
+    },
+    "agent": {
+        "instances": <number_of_agent_instances>,
+        "distros": <list_of_supported_distros>
+    }
+}
+```
+
+### Add specific validation tasks (for a test module)
+
+To add specific validation tasks to a test, it's necessary to add a new playbook inside the test module, in the `playbooks` folder, with the default Play structure:
+
+```
+- name: <play_name>
+  hosts: "{{ target_hosts }}"
+  tasks:
+    <validation_tasks>
+```
+
+E.g: Add validation tasks for test_audit by creating a playbook called `validation.yaml` in `tests/end_to_end/test_basic_cases/test_audit/data/playbooks`
+
+> The file name must always be "validation.yaml"
+
 #### Audit tests examples
 
 ```shell script
diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py
index 8238d22a70..9dcfe69226 100644
--- a/tests/end_to_end/conftest.py
+++ b/tests/end_to_end/conftest.py
@@ -15,6 +15,25 @@
 suite_path = os.path.dirname(os.path.realpath(__file__))
 
 
+def get_target_hosts_and_distros(test_suite_name, target_distros=[], target_hosts=[]):
+    environment_file = os.path.join(suite_path, 'data', 'env_requirements.json')
+    environment_metadata = json.load(open(environment_file))
+    distros_by = {'manager': [], 'agent': []}
+
+    for key in environment_metadata[test_suite_name]:
+        if environment_metadata[test_suite_name][key]['instances'] > 0:
+            # Save manager/agent distros
+            distros_by[key] =
environment_metadata[test_suite_name][key]['distros'] + target_distros.extend(environment_metadata[test_suite_name][key]['distros']) + # Add the target host to the list (following the standard host name: "-*") + target_hosts.extend([distro.lower() + f"-{key}" for distro in distros_by[key]]) + # Remove duplicates + target_hosts = list(dict.fromkeys(target_hosts)) + target_distros = list(dict.fromkeys(target_distros)) + + return target_hosts, target_distros + + @pytest.fixture(scope='session', autouse=True) def validate_environments(request): """Fixture with session scope to validate the environments before run the E2E tests. @@ -23,7 +42,6 @@ def validate_environments(request): Step 1: Collect the data related to the selected tests that will be executed. Step 2: Generate a playbook containing cross-checks for selected tests. Step 3: Run the generated playbook. - Step 4: Execute a test-specific playbook (if any). This will run one validation for each selected test set. Args: request (fixture): Gives access to the requesting test context. @@ -31,8 +49,6 @@ def validate_environments(request): collected_items = request.session.items roles_path = request.config.getoption('--roles-path') inventory_path = request.config.getoption('--inventory_path') - environment_file = os.path.join(suite_path, 'data', 'env_requirements.json') - environment_metadata = json.load(open(environment_file)) playbook_generator = os.path.join(suite_path, 'data', 'validation_playbooks', 'generate_general_play.yaml') playbook_template = os.path.join(suite_path, 'data', 'validation_templates', 'general_validation.j2') general_playbook = os.path.join(suite_path, 'data', 'validation_playbooks', 'general_validation.yaml') @@ -44,7 +60,6 @@ def validate_environments(request): test_suites_paths = [] target_hosts = [] target_distros = [] - distros_by = {'manager': [], 'agent': []} # Get the path of the tests from collected items. collected_paths = [item.fspath for item in collected_items] @@ -59,16 +74,7 @@ def validate_environments(request): # Get the test suite name test_suite_name = path.split('/')[-1:][0] # Set target hosts and distros - for key in environment_metadata[test_suite_name]: - if environment_metadata[test_suite_name][key]['instances'] > 0: - # Save manager/agent distros - distros_by[key] = environment_metadata[test_suite_name][key]['distros'] - target_distros.extend(environment_metadata[test_suite_name][key]['distros']) - # Add the target host to the list (following the standard host name: "-*") - target_hosts.extend([distro.lower() + f"-{key}" for distro in distros_by[key]]) - # Remove duplicates - target_distros = list(dict.fromkeys(target_distros)) - target_hosts = list(dict.fromkeys(target_hosts)) + target_hosts, target_distros = get_target_hosts_and_distros(test_suite_name, target_distros, target_hosts) # -------------------------------------------------- End of Step 1 ------------------------------------------------- # ---------------------- Step 2: Run the playbook to generate the general validation playbook ---------------------- @@ -106,24 +112,36 @@ def validate_environments(request): f"requirements. 
Result:\n{errors}")
     # -------------------------------------------------- End of Step 3 -------------------------------------------------
 
-    # -------------------------------- Step 4: Execute test-specific validations (if any) ------------------------------
-    for path in test_suites_paths:
-        validation_playbook = os.path.join(path, 'data', 'playbooks', 'validation.yaml')
 
-        # Run test-specific validation playbook (if any)
-        if os.path.exists(validation_playbook):
-            parameters = {
-                'playbook': validation_playbook, 'inventory': inventory_path,
-                'envvars': {'ANSIBLE_ROLES_PATH': roles_path},
-                'extravars': {'target_hosts': ','.join(target_hosts), 'distros': target_distros}
-            }
-            validation_runner = ansible_runner.run(**parameters)
+
+@pytest.fixture(scope='module', autouse=True)
+def run_specific_validations(request):
+    """Fixture with module scope to validate the environment of a specific test with specific validation tasks.
+
+    Execute a test-specific playbook (if any). This will run one validation playbook for each test module.
+
+    Args:
+        request (fixture): Gives access to the requesting test context.
+    """
+    roles_path = request.config.getoption('--roles-path')
+    inventory_path = request.config.getoption('--inventory_path')
+    test_suite_path = os.path.dirname(request.fspath)
+    test_suite_name = test_suite_path.split('/')[-1:][0]
+    target_hosts, target_distros = get_target_hosts_and_distros(test_suite_name)
+    validation_playbook = os.path.join(test_suite_path, 'data', 'playbooks', 'validation.yaml')
+
+    # Run test-specific validation playbook (if any)
+    if os.path.exists(validation_playbook):
+        parameters = {
+            'playbook': validation_playbook, 'inventory': inventory_path,
+            'envvars': {'ANSIBLE_ROLES_PATH': roles_path},
+            'extravars': {'target_hosts': ','.join(target_hosts), 'distros': target_distros}
+        }
+        validation_runner = ansible_runner.run(**parameters)
 
-            # If the validation phase has failed, then abort the execution finishing with an error. Else, continue.
-            if validation_runner.status == 'failed':
-                raise Exception(f"The validation phase of {test_suite_name} has failed. Please check that the "
-                                'environments meet the expected requirements.')
+        # If the validation phase has failed, then abort the execution finishing with an error. Else, continue.
+        if validation_runner.status == 'failed':
+            raise Exception(f"The validation phase of {test_suite_name} has failed. 
Please check that the " + 'environments meet the expected requirements.') @pytest.fixture(scope='function') @@ -265,7 +283,7 @@ def pytest_addoption(parser): '--roles-path', action='store', metavar='ROLES_PATH', - default=os.path.join(suite_path, 'roles'), + default=os.path.join(suite_path, 'data', 'ansible_roles'), type=str, help='Ansible roles path.', ) diff --git a/tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_controller_indexer.yaml similarity index 100% rename from tests/end_to_end/roles/host_checker/tasks/check_controller_indexer.yaml rename to tests/end_to_end/data/ansible_roles/host_checker/tasks/check_controller_indexer.yaml diff --git a/tests/end_to_end/roles/host_checker/tasks/check_filebeat_indexer.yaml b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_filebeat_indexer.yaml similarity index 100% rename from tests/end_to_end/roles/host_checker/tasks/check_filebeat_indexer.yaml rename to tests/end_to_end/data/ansible_roles/host_checker/tasks/check_filebeat_indexer.yaml diff --git a/tests/end_to_end/roles/host_checker/tasks/check_os.yaml b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_os.yaml similarity index 100% rename from tests/end_to_end/roles/host_checker/tasks/check_os.yaml rename to tests/end_to_end/data/ansible_roles/host_checker/tasks/check_os.yaml diff --git a/tests/end_to_end/roles/host_checker/tasks/check_python.yaml b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_python.yaml similarity index 100% rename from tests/end_to_end/roles/host_checker/tasks/check_python.yaml rename to tests/end_to_end/data/ansible_roles/host_checker/tasks/check_python.yaml diff --git a/tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_wazuh_components.yaml similarity index 100% rename from tests/end_to_end/roles/host_checker/tasks/check_wazuh_components.yaml rename to tests/end_to_end/data/ansible_roles/host_checker/tasks/check_wazuh_components.yaml diff --git a/tests/end_to_end/roles/host_checker/tasks/main.yaml b/tests/end_to_end/data/ansible_roles/host_checker/tasks/main.yaml similarity index 100% rename from tests/end_to_end/roles/host_checker/tasks/main.yaml rename to tests/end_to_end/data/ansible_roles/host_checker/tasks/main.yaml diff --git a/tests/end_to_end/roles/service_controller/tasks/get_installation_type.yaml b/tests/end_to_end/data/ansible_roles/service_controller/tasks/get_installation_type.yaml similarity index 100% rename from tests/end_to_end/roles/service_controller/tasks/get_installation_type.yaml rename to tests/end_to_end/data/ansible_roles/service_controller/tasks/get_installation_type.yaml From 8d0920ee161007aa3306a75e1ee04c60eabaf188 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Wed, 24 Aug 2022 08:47:04 -0300 Subject: [PATCH 22/27] style(#3142): linter corrections applied. 
--- tests/end_to_end/conftest.py | 12 ++++++------ .../data/ansible_roles/host_checker/tasks/main.yaml | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index c3695f6c25..4763cc8545 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -21,12 +21,12 @@ def get_target_hosts_and_distros(test_suite_name, target_distros=[], target_host distros_by = {'manager': [], 'agent': []} for key in environment_metadata[test_suite_name]: - if environment_metadata[test_suite_name][key]['instances'] > 0: - # Save manager/agent distros - distros_by[key] = environment_metadata[test_suite_name][key]['distros'] - target_distros.extend(environment_metadata[test_suite_name][key]['distros']) - # Add the target host to the list (following the standard host name: "-*") - target_hosts.extend([distro.lower() + f"-{key}" for distro in distros_by[key]]) + if environment_metadata[test_suite_name][key]['instances'] > 0: + # Save manager/agent distros + distros_by[key] = environment_metadata[test_suite_name][key]['distros'] + target_distros.extend(environment_metadata[test_suite_name][key]['distros']) + # Add the target host to the list (following the standard host name: "-*") + target_hosts.extend([distro.lower() + f"-{key}" for distro in distros_by[key]]) # Remove duplicates target_hosts = list(dict.fromkeys(target_hosts)) target_distros = list(dict.fromkeys(target_distros)) diff --git a/tests/end_to_end/data/ansible_roles/host_checker/tasks/main.yaml b/tests/end_to_end/data/ansible_roles/host_checker/tasks/main.yaml index 20933d8e4e..761337e6ee 100644 --- a/tests/end_to_end/data/ansible_roles/host_checker/tasks/main.yaml +++ b/tests/end_to_end/data/ansible_roles/host_checker/tasks/main.yaml @@ -25,7 +25,7 @@ # -------- Task to identify whether the validation step fails or not. -------- - set_fact: phase_results: "{{ errors }}" - cacheable: yes + cacheable: true - name: Verify if any check have failed fail: From 1c4424cab00f2b684ab74cdcfba30648f996fc1e Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Wed, 24 Aug 2022 08:52:03 -0300 Subject: [PATCH 23/27] fix(#3142): typo fixed. --- .../data/ansible_roles/host_checker/tasks/check_python.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_python.yaml b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_python.yaml index 452ae7e773..6c20df37bc 100644 --- a/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_python.yaml +++ b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_python.yaml @@ -13,7 +13,7 @@ register: version when: os == 'Windows' -- name: Check default Python version (Linux) +- name: Check default Python version (Windows) set_fact: check_result: 'true' errors: "{{ errors }}Python version is less than 3. Current version: {{ version.stdout }}\n" From b157bd7bd724dc3402e99904c7e9b1297dbbd66c Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Thu, 25 Aug 2022 10:46:55 -0300 Subject: [PATCH 24/27] docs(#3142): README modified, minor changes were made. 
--- tests/end_to_end/README.md | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/tests/end_to_end/README.md b/tests/end_to_end/README.md index 1e51b4bb2e..c304301192 100644 --- a/tests/end_to_end/README.md +++ b/tests/end_to_end/README.md @@ -18,7 +18,7 @@ Our E2E tests will verify that, after generating an event, an alert will be trig To run these tests we need to use a **Linux** machine and install the following tools: - [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) -- [netcat](https://en.wikipedia.org/wiki/Netcat) +- [Netcat](https://www.tecmint.com/netcat-nc-command-examples/) Then, you will need to have an inventory with the needed hosts and variables. For example: @@ -156,6 +156,22 @@ To execute these tests, we need to run the following command: python -m pytest --inventory_path= ``` +#### Audit tests examples + +```shell script +python3 -m pytest tests/end_to_end/test_basic_cases/test_audit/ --inventory_path=/home/juliamagan/Desktop/QA/2893/inventory.yml +======================================================================== test session starts ======================================================================== +platform linux -- Python 3.9.7, pytest-6.2.2, py-1.10.0, pluggy-0.13.1 +rootdir: /home/juliamagan/Desktop/QA/wazuh-qa +plugins: metadata-2.0.1, html-3.1.1, testinfra-5.0.0 +collected 1 item + +tests/end_to_end/test_basic_cases/test_audit/test_audit.py . [100%] + +======================================================================== 1 passed in 16.05s ========================================================================= + +``` + ### Adding or modifying E2E tests When adding or modifying any test it is necesry to modify the file with the environment data, placed in `tests/end_to_end/data/env_requirements.json` @@ -188,19 +204,3 @@ To add specific validation tasks to a test, its necessary to add a new playbook E.g: Add validation tasks for test_audit by creating a playbook called `validation.yaml` in `tests/end_to_end/test_basic_cases/test_audit/data/playbooks` > The file name must always be "validation.yaml" - -#### Audit tests examples - -```shell script -python3 -m pytest tests/end_to_end/test_basic_cases/test_audit/ --inventory_path=/home/juliamagan/Desktop/QA/2893/inventory.yml -======================================================================== test session starts ======================================================================== -platform linux -- Python 3.9.7, pytest-6.2.2, py-1.10.0, pluggy-0.13.1 -rootdir: /home/juliamagan/Desktop/QA/wazuh-qa -plugins: metadata-2.0.1, html-3.1.1, testinfra-5.0.0 -collected 1 item - -tests/end_to_end/test_basic_cases/test_audit/test_audit.py . [100%] - -======================================================================== 1 passed in 16.05s ========================================================================= - -``` From 0e9681e65c80d9a8bcd3449ce976b4fb643a7274 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Thu, 25 Aug 2022 12:21:00 -0300 Subject: [PATCH 25/27] fix(#3142)!: roles path option fixed. --- tests/end_to_end/conftest.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index 5e502c3448..1ea1fa841e 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -190,7 +190,7 @@ def configure_environment(request): request (fixture): Provide information on the executing test function. 
""" inventory_playbook = request.config.getoption('--inventory_path') - roles_path = request.config.getoption('--roles_path') + roles_path = request.config.getoption('--roles-path') if not inventory_playbook: raise ValueError('Inventory not specified') @@ -240,7 +240,7 @@ def generate_events(request, metadata): metadata (dict): Dictionary with test case metadata. """ inventory_playbook = request.config.getoption('--inventory_path') - roles_path = request.config.getoption('--roles_path') + roles_path = request.config.getoption('--roles-path') if not inventory_playbook: raise ValueError('Inventory not specified') @@ -291,3 +291,12 @@ def pytest_addoption(parser): type=str, help='Inventory path', ) + + parser.addoption( + '--roles-path', + action='store', + metavar='ROLES_PATH', + default=os.path.join(suite_path, 'data', 'ansible_roles'), + type=str, + help='Ansible roles path.', + ) From 47c199b80ea907e21cc8d14f8c6fedcd084ca485 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Thu, 25 Aug 2022 13:51:24 -0300 Subject: [PATCH 26/27] docs(#3142): fix typo in README.md --- tests/end_to_end/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/end_to_end/README.md b/tests/end_to_end/README.md index c304301192..e72f579fd2 100644 --- a/tests/end_to_end/README.md +++ b/tests/end_to_end/README.md @@ -174,7 +174,7 @@ tests/end_to_end/test_basic_cases/test_audit/test_audit.py . ### Adding or modifying E2E tests -When adding or modifying any test it is necesry to modify the file with the environment data, placed in `tests/end_to_end/data/env_requirements.json` +When adding or modifying any test it is necessary to modify the file with the environment data, placed in `tests/end_to_end/data/env_requirements.json` This file is used to validate the environments where the selected tests will be executed and it follows this structure: ``` From f44f8b15a528839f30b11baa4a3ec84de7876255 Mon Sep 17 00:00:00 2001 From: Mauro Malara Date: Thu, 25 Aug 2022 14:02:44 -0300 Subject: [PATCH 27/27] fix(#3142): fix OS nomenclature --- .../host_checker/tasks/check_controller_indexer.yaml | 2 +- .../host_checker/tasks/check_filebeat_indexer.yaml | 4 ++-- .../data/ansible_roles/host_checker/tasks/check_os.yaml | 4 ++-- .../ansible_roles/host_checker/tasks/check_python.yaml | 6 +++--- .../host_checker/tasks/check_wazuh_components.yaml | 8 ++++---- .../service_controller/tasks/get_installation_type.yaml | 2 +- .../data/validation_templates/general_validation.j2 | 2 +- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_controller_indexer.yaml b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_controller_indexer.yaml index 686d837ae5..7508c51c3f 100644 --- a/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_controller_indexer.yaml +++ b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_controller_indexer.yaml @@ -13,7 +13,7 @@ ignore_errors: true register: test_result delegate_to: localhost - when: (os == 'Linux' and 'server' in wazuh_info.stdout) + when: (os == 'linux' and 'server' in wazuh_info.stdout) - name: Check the connection between Controller node and Wazuh Indexer set_fact: diff --git a/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_filebeat_indexer.yaml b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_filebeat_indexer.yaml index 5a4981a8e2..16021dc517 100644 --- a/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_filebeat_indexer.yaml +++ 
b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_filebeat_indexer.yaml @@ -12,10 +12,10 @@ shell: filebeat test output register: test_result ignore_errors: true - when: (os == 'Linux' and 'server' in wazuh_info.stdout) + when: (os == 'linux' and 'server' in wazuh_info.stdout) - name: Check the connection between Filebeat and Wazuh Indexer set_fact: check_result: 'true' errors: "{{ errors }}Filebeat cannot connect correctly with Wazuh Indexer.\n" - when: (os == 'Linux' and 'server' in wazuh_info.stdout and 'ERROR' in test_result.stdout) + when: (os == 'linux' and 'server' in wazuh_info.stdout and 'ERROR' in test_result.stdout) diff --git a/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_os.yaml b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_os.yaml index fdaaeb9d21..c4f2b30719 100644 --- a/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_os.yaml +++ b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_os.yaml @@ -7,10 +7,10 @@ set_fact: check_result: 'true' errors: "{{ errors }}The {{ ansible_distribution }} distro isn't supported for the selected tests currently.\n" - when: (ansible_distribution not in supported_distros and os == "Linux") + when: (ansible_distribution not in supported_distros and os == "linux") - name: Check OS (Windows) set_fact: check_result: 'true' errors: "{{ errors }}The {{ os }} OS isn't supported for the selected tests currently.\n" - when: (os == "Windows" and os not in supported_distros) + when: (os == "windows" and os not in supported_distros) diff --git a/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_python.yaml b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_python.yaml index 6c20df37bc..03a8f03393 100644 --- a/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_python.yaml +++ b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_python.yaml @@ -6,15 +6,15 @@ set_fact: check_result: 'true' errors: "{{ errors }}Python version is less than 3. Current version: {{ ansible_python_version }}\n" - when: (os == "Linux" and ansible_python['version']['major'] < 3) + when: (os == "linux" and ansible_python['version']['major'] < 3) - name: Get Python version (Windows) win_shell: python -V register: version - when: os == 'Windows' + when: os == 'windows' - name: Check default Python version (Windows) set_fact: check_result: 'true' errors: "{{ errors }}Python version is less than 3. 
Current version: {{ version.stdout }}\n" - when: (os == "Windows" and version.stdout.split(" ")[1].split(".")[0] | int < 3) + when: (os == "windows" and version.stdout.split(" ")[1].split(".")[0] | int < 3) diff --git a/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_wazuh_components.yaml b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_wazuh_components.yaml index 88ead12940..b62a8e81cb 100644 --- a/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_wazuh_components.yaml +++ b/tests/end_to_end/data/ansible_roles/host_checker/tasks/check_wazuh_components.yaml @@ -9,13 +9,13 @@ - name: Populate services facts service_facts: - when: os == 'Linux' + when: os == 'linux' - name: Check the status of Wazuh components (Manager) set_fact: check_result: 'true' errors: "{{ errors }}{{ ansible_facts.services[item].name }} is not running.\n" - when: (os == 'Linux' and 'server' in wazuh_info.stdout and ansible_facts.services[item].state != 'running') + when: (os == 'linux' and 'server' in wazuh_info.stdout and ansible_facts.services[item].state != 'running') with_items: - wazuh-manager.service - wazuh-indexer.service @@ -23,10 +23,10 @@ - set_fact: service: wazuh-agent.service - when: (os == 'Linux' and 'agent' in wazuh_info.stdout) + when: (os == 'linux' and 'agent' in wazuh_info.stdout) - name: Check the status of Wazuh Agent set_fact: check_result: 'true' errors: "{{ errors }}{{ ansible_facts.services[service].name }} is not running.\n" - when: (os == 'Linux' and 'agent' in wazuh_info.stdout and ansible_facts.services[service].state != 'running') + when: (os == 'linux' and 'agent' in wazuh_info.stdout and ansible_facts.services[service].state != 'running') diff --git a/tests/end_to_end/data/ansible_roles/service_controller/tasks/get_installation_type.yaml b/tests/end_to_end/data/ansible_roles/service_controller/tasks/get_installation_type.yaml index 7396a6aea7..76b7285be7 100644 --- a/tests/end_to_end/data/ansible_roles/service_controller/tasks/get_installation_type.yaml +++ b/tests/end_to_end/data/ansible_roles/service_controller/tasks/get_installation_type.yaml @@ -6,4 +6,4 @@ become: true shell: /var/ossec/bin/wazuh-control info register: wazuh_info - when: os == 'Linux' + when: os == 'linux' diff --git a/tests/end_to_end/data/validation_templates/general_validation.j2 b/tests/end_to_end/data/validation_templates/general_validation.j2 index 27c54dc219..767aa3ba60 100644 --- a/tests/end_to_end/data/validation_templates/general_validation.j2 +++ b/tests/end_to_end/data/validation_templates/general_validation.j2 @@ -4,5 +4,5 @@ roles: - role: host_checker vars: - os: "{% raw %}{{ 'Windows' if ansible_os_family == 'Windows' else 'Linux' }}{% endraw %}" + os: "{% raw %}{{ 'windows' if ansible_os_family == 'Windows' else 'linux' }}{% endraw %}" supported_distros: {{ distros }}
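
One practical consequence of PATCH 25 worth spelling out: `--roles-path` now defaults to `tests/end_to_end/data/ansible_roles` (resolved relative to the suite path), and the fixtures forward whatever value it holds to Ansible through the `ANSIBLE_ROLES_PATH` environment variable, so the option only needs to be passed explicitly when running against an out-of-tree copy of the roles.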
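
The `main.yaml` and `check_*` hunks above all rely on the same accumulation idiom: each check appends a message to an `errors` string and flips `check_result` instead of failing immediately, and the role fails once at the end so a single run reports every unmet requirement. Below is a minimal sketch of that idiom, assuming `errors` and `check_result` are initialized earlier in the role; the final `fail` task is truncated in the PATCH 22 hunk, so its `msg` and `when` here are assumptions, not the role's literal body:

```yaml
# Sketch of the host_checker accumulation idiom (illustrative, not the
# literal role tasks). `errors` and `check_result` are assumed to be
# initialized ('' and 'false') before these tasks run.
- name: Record an unmet requirement (illustrative check)
  set_fact:
    check_result: 'true'
    errors: "{{ errors }}Example requirement was not met.\n"
  when: os == 'linux'  # guards compare the lowercase fact after PATCH 27

# main.yaml caches the accumulated messages for later inspection...
- set_fact:
    phase_results: "{{ errors }}"
    cacheable: true

# ...and fails exactly once if any check recorded a problem (assumed body).
- name: Verify if any check has failed
  fail:
    msg: "{{ phase_results }}"
  when: check_result == 'true'
```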
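
Finally, since the last hunk rewrites `general_validation.j2`, a hypothetical rendering of the template may help: the sketch below assumes a `hosts: all` header and a `distros` value of `['CentOS', 'Ubuntu']`, neither of which appears in this series (only the `roles`/`vars` fragment does):

```yaml
# Hypothetical general_validation.yaml as produced by the playbook
# generator. The {% raw %} block in the .j2 source keeps the inner
# expression unexpanded, so Ansible evaluates it per host at run time.
- hosts: all
  roles:
    - role: host_checker
      vars:
        os: "{{ 'windows' if ansible_os_family == 'Windows' else 'linux' }}"
        supported_distros: ['CentOS', 'Ubuntu']
```

Keeping both sides of each comparison lowercase is the point of PATCH 27: a leftover mixed-case literal such as `os == 'Linux'` would make its guard evaluate to false on every Linux host and silently skip the check.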