From f66eaad18ae6e4c29609bb5b46d94dc1900ed0c9 Mon Sep 17 00:00:00 2001 From: Julia Date: Tue, 24 Oct 2023 18:10:11 +0200 Subject: [PATCH 1/6] fix: restore environment --- .../test_agent_groups_forced_change.py | 14 ++++++++++++-- .../test_agent_info_sync/test_agent_info_sync.py | 7 +++++-- .../test_agent_key_polling/data/config.yml | 9 +++++++++ tests/system/test_multigroups/test_multigroups.py | 12 +++++++++--- 4 files changed, 35 insertions(+), 7 deletions(-) diff --git a/tests/system/test_cluster/test_agent_groups/test_agent_groups_forced_change.py b/tests/system/test_cluster/test_agent_groups/test_agent_groups_forced_change.py index 468c2f7d73..0dd461912c 100644 --- a/tests/system/test_cluster/test_agent_groups/test_agent_groups_forced_change.py +++ b/tests/system/test_cluster/test_agent_groups/test_agent_groups_forced_change.py @@ -65,10 +65,20 @@ local_path = os.path.dirname(os.path.abspath(__file__)) timeout = 10 +# Fixtures + +@pytest.fixture(scope='module') +def delete_group(): + yield + master_token = host_manager.get_api_token(test_infra_managers[0]) + # Remove groups + host_manager.make_api_call(host=test_infra_managers[0], method='DELETE', token=master_token, + endpoint=f"/groups?groups_list={agent_groups}") + # Tests @pytest.mark.parametrize("agent_host", test_infra_managers[0:2]) -def test_sync_when_forced_to_change_a_group(agent_host, clean_environment): +def test_sync_when_forced_to_change_a_group(agent_host, clean_environment, delete_group): ''' description: Check that having an agent with a group assigned, when the change is forced with a wdb command, the new group @@ -119,7 +129,7 @@ def test_sync_when_forced_to_change_a_group(agent_host, clean_environment): # Tests -def test_force_group_change_during_sync(clean_environment): +def test_force_group_change_during_sync(clean_environment, delete_group): ''' description: Check that having an agent with a group assigned, when the change is forced with a wdb command, and the agent's group is changed again during the sync timeframe, the agent has the correct group diff --git a/tests/system/test_cluster/test_agent_info_sync/test_agent_info_sync.py b/tests/system/test_cluster/test_agent_info_sync/test_agent_info_sync.py index ef2867a6c8..6fefe82208 100644 --- a/tests/system/test_cluster/test_agent_info_sync/test_agent_info_sync.py +++ b/tests/system/test_cluster/test_agent_info_sync/test_agent_info_sync.py @@ -17,6 +17,7 @@ # Hosts testinfra_hosts = ['wazuh-master', 'wazuh-worker1', 'wazuh-worker2'] +test_infra_agents = ['wazuh-agent2', 'wazuh-agent3'] master_host = 'wazuh-master' inventory_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), @@ -54,8 +55,10 @@ def clean_cluster_logs(): # Its required to restart each node after clearing the log files host_manager.get_host(host).ansible('command', 'service wazuh-manager restart', check=False) - host_manager.clear_file(host='wazuh-agent3', file_path=os.path.join(WAZUH_LOGS_PATH, 'ossec.log')) - host_manager.get_host('wazuh-agent3').ansible('command', 'service wazuh-agent restart', check=False) + for agent in test_infra_agents: + host_manager.clear_file(host=agent, file_path=os.path.join(WAZUH_LOGS_PATH, 'ossec.log')) + host_manager.get_host(agent).ansible('command', 'service wazuh-agent restart', check=False) + @pytest.fixture(scope='function') diff --git a/tests/system/test_cluster/test_agent_key_polling/data/config.yml b/tests/system/test_cluster/test_agent_key_polling/data/config.yml index bd42a85e20..a625707b1a 100644 --- 
a/tests/system/test_cluster/test_agent_key_polling/data/config.yml +++ b/tests/system/test_cluster/test_agent_key_polling/data/config.yml @@ -67,3 +67,12 @@ wazuh-worker1: - queue_size: value: 131072 +wazuh-agent2: + description: Set manager address + sections: + - section: client + elements: + - server: + elements: + - address: + value: wazuh-worker1 diff --git a/tests/system/test_multigroups/test_multigroups.py b/tests/system/test_multigroups/test_multigroups.py index 6a0d5cec98..20b1d95601 100644 --- a/tests/system/test_multigroups/test_multigroups.py +++ b/tests/system/test_multigroups/test_multigroups.py @@ -10,6 +10,7 @@ import pytest +from system import restart_cluster from wazuh_testing.tools import WAZUH_PATH from wazuh_testing.tools.system import HostManager @@ -21,6 +22,7 @@ test_hosts = ['wazuh-master', 'wazuh-worker1', 'wazuh-worker2'] agent_groups = {'wazuh-agent1': ['default', 'test_mg_0'], 'wazuh-agent2': ['default', 'test_mg_1']} +test_infra_agents = ['wazuh-agent1', 'wazuh-agent2'] shared_folder_path = os.path.join(WAZUH_PATH, 'etc', 'shared') mg_folder_path = os.path.join(WAZUH_PATH, 'var', 'multigroups') @@ -104,6 +106,10 @@ def delete_groups(): # Fixtures +@pytest.fixture(scope='function') +def start_agents(): + """Start agents.""" + restart_cluster(test_infra_agents, host_manager) @pytest.fixture(scope='function') def agent_healthcheck(): @@ -161,7 +167,7 @@ def create_multigroups(): # Tests -def test_multigroups_not_reloaded(clean_environment, agent_healthcheck, create_multigroups): +def test_multigroups_not_reloaded(clean_environment, start_agents, agent_healthcheck, create_multigroups): """Check that the files are not regenerated when there are no changes. Check and store the modification time of all group and multigroup files. Wait 10 seconds @@ -188,7 +194,7 @@ def test_multigroups_not_reloaded(clean_environment, agent_healthcheck, create_m agent_groups['wazuh-agent1'][1], 'default' ]) -def test_multigroups_updated(clean_environment, agent_healthcheck, create_multigroups, target_group): +def test_multigroups_updated(clean_environment, start_agents, agent_healthcheck, create_multigroups, target_group): """Check that only the appropriate multi-groups are regenerated when a group file is created. Check and store the modification time of all group and multigroup files. Create a new file inside @@ -234,7 +240,7 @@ def test_multigroups_updated(clean_environment, agent_healthcheck, create_multig assert mtime == host_files[host][file], f"This file changed its modification time in {host}: {file}" -def test_multigroups_deleted(clean_environment, agent_healthcheck, create_multigroups): +def test_multigroups_deleted(clean_environment, start_agents, agent_healthcheck, create_multigroups): """Check that multigroups are removed when expected. Unassign an agent from their groups or delete the groups. 
Check that the associated multigroup disappears From f4733ac5747ed28f154d97306f83d8e60bb9d995 Mon Sep 17 00:00:00 2001 From: Julia Date: Wed, 25 Oct 2023 17:38:54 +0200 Subject: [PATCH 2/6] fix: clean environments --- .../test_agent_files_deletion.py | 61 ++++++------------- .../test_agent_groups_forced_change.py | 2 +- .../test_multigroups/test_multigroups.py | 7 ++- 3 files changed, 24 insertions(+), 46 deletions(-) diff --git a/tests/system/test_cluster/test_agent_files_deletion/test_agent_files_deletion.py b/tests/system/test_cluster/test_agent_files_deletion/test_agent_files_deletion.py index 870fdd5351..7a4a546f76 100644 --- a/tests/system/test_cluster/test_agent_files_deletion/test_agent_files_deletion.py +++ b/tests/system/test_cluster/test_agent_files_deletion/test_agent_files_deletion.py @@ -4,16 +4,20 @@ import os import re from os.path import join, dirname, abspath -from time import time, sleep +from time import sleep import pytest -from wazuh_testing.tools import WAZUH_PATH, WAZUH_LOGS_PATH +from wazuh_testing.tools import WAZUH_PATH from wazuh_testing.tools.monitoring import HostMonitor from wazuh_testing.tools.system import HostManager +from system.test_cluster.test_agent_groups.common import register_agent +from system import restart_cluster, check_agent_status, AGENT_STATUS_ACTIVE pytestmark = [pytest.mark.cluster, pytest.mark.basic_cluster_env] +test_infra_agents = ['wazuh-agent3'] +test_infra_managers = ['wazuh-master', 'wazuh-worker2'] master_host = 'wazuh-master' worker_host = 'wazuh-worker2' agent_host = 'wazuh-agent3' @@ -22,7 +26,6 @@ script_path = os.path.join(re.sub(r'^.*?wazuh-qa', '/wazuh-qa', local_path), '../utils/get_wdb_agent.py') tmp_path = os.path.join(local_path, 'tmp') -managers_hosts = [master_host, worker_host] inventory_path = join(dirname(dirname(dirname(abspath(__file__)))), 'provisioning', 'basic_cluster', 'inventory.yml') host_manager = HostManager(inventory_path) while_time = 5 @@ -30,7 +33,7 @@ time_to_agent_reconnect = 180 # Each file should exist in all hosts specified in 'hosts'. 
-files = [{'path': join(WAZUH_PATH, 'queue', 'rids', '{id}'), 'hosts': managers_hosts}, +files = [{'path': join(WAZUH_PATH, 'queue', 'rids', '{id}'), 'hosts': test_infra_managers}, {'path': join(WAZUH_PATH, 'queue', 'diff', '{name}'), 'hosts': [worker_host]}, {'path': join(WAZUH_PATH, 'queue', 'db', '{id}.db'), 'hosts': [worker_host]}] @@ -38,50 +41,27 @@ 'global sql select * from belongs where id_agent={id}'] -def agent_healthcheck(master_token): - """Check if the agent is active and reporting.""" - timeout = time() + time_to_agent_reconnect - healthy = False - - while not healthy: - response = host_manager.make_api_call(host=master_host, method='GET', token=master_token, - endpoint='/agents?status=active') +def test_agent_files_deletion(clean_environment): + """Check that when an agent is deleted, all its related files in managers are also removed.""" + agent_data = register_agent(agent_host, worker_host, host_manager) + agent_id = agent_data[1] + agent_name = agent_data[2] - assert response['status'] == 200, 'Failed when trying to get the active agents' - if int(response['json']['data']['total_affected_items']) == 4: - for item in response['json']['data']['affected_items']: - if item['name'] == agent_host and item['manager'] == worker_host: - healthy = True - elif time() > timeout: - raise TimeoutError("The agent 'wazuh-agent3' is not 'Active' yet.") - sleep(while_time) + restart_cluster(test_infra_agents+test_infra_managers, host_manager) sleep(time_to_sync) - -def test_agent_files_deletion(): - """Check that when an agent is deleted, all its related files in managers are also removed.""" - # Clean ossec.log and cluster.log - for hosts in managers_hosts: - host_manager.clear_file(host=hosts, file_path=os.path.join(WAZUH_LOGS_PATH, 'ossec.log')) - host_manager.clear_file(host=hosts, file_path=os.path.join(WAZUH_LOGS_PATH, 'cluster.log')) - host_manager.control_service(host=hosts, service='wazuh', state='restarted') + # Check if the agent is connected + check_agent_status(agent_id, agent_name, agent_data[0], AGENT_STATUS_ACTIVE, + host_manager, test_infra_managers) # Get the token master_token = host_manager.get_api_token(master_host) - - # Check if the agent is connected and reporting - agent_healthcheck(master_token) - # Get the current ID and name of the agent that is reporting to worker_host. response = host_manager.make_api_call(host=master_host, method='GET', token=master_token, endpoint=f"/agents?select=id,name&q=manager={worker_host}") assert response['status'] == 200, f"Failed when trying to obtain agent ID: {response}" - try: - agent_id = response['json']['data']['affected_items'][0]['id'] - agent_name = response['json']['data']['affected_items'][0]['name'] - except IndexError as e: - pytest.fail(f"Could not find any agent reporting to {worker_host}: {response['json']}") + assert response['json']['data']['affected_items'][0]['id'] == agent_id and response['json']['data']['affected_items'][0]['name'] == agent_name # Check that expected files exist in each node before removing the agent. 
for file in files: @@ -93,7 +73,7 @@ def test_agent_files_deletion(): f"{file['path'].format(id=agent_id, name=agent_name)}" # Check that agent information is in the wdb socket - for host in managers_hosts: + for host in test_infra_managers: for query in queries: result = host_manager.run_command(host, f"{WAZUH_PATH}/framework/python/bin/python3 " @@ -119,12 +99,9 @@ def test_agent_files_deletion(): f"{file['path'].format(id=agent_id, name=agent_name)}" # Check that agent information is not in the wdb socket - for host in managers_hosts: + for host in test_infra_managers: for query in queries: result = host_manager.run_command(host, f"{WAZUH_PATH}/framework/python/bin/python3 " f"{script_path} '{query.format(id=agent_id)}'") assert not result, f"This db query should have not returned anything in {host}, but it did: {result}" - - host_manager.control_service(host=agent_host, service='wazuh', state='restarted') - agent_healthcheck(master_token) diff --git a/tests/system/test_cluster/test_agent_groups/test_agent_groups_forced_change.py b/tests/system/test_cluster/test_agent_groups/test_agent_groups_forced_change.py index 0dd461912c..cc15a92d39 100644 --- a/tests/system/test_cluster/test_agent_groups/test_agent_groups_forced_change.py +++ b/tests/system/test_cluster/test_agent_groups/test_agent_groups_forced_change.py @@ -65,8 +65,8 @@ local_path = os.path.dirname(os.path.abspath(__file__)) timeout = 10 -# Fixtures +# Fixtures @pytest.fixture(scope='module') def delete_group(): yield diff --git a/tests/system/test_multigroups/test_multigroups.py b/tests/system/test_multigroups/test_multigroups.py index 20b1d95601..9bce1b4161 100644 --- a/tests/system/test_multigroups/test_multigroups.py +++ b/tests/system/test_multigroups/test_multigroups.py @@ -10,7 +10,7 @@ import pytest -from system import restart_cluster +from system import restart_cluster, remove_cluster_agents from wazuh_testing.tools import WAZUH_PATH from wazuh_testing.tools.system import HostManager @@ -111,6 +111,7 @@ def start_agents(): """Start agents.""" restart_cluster(test_infra_agents, host_manager) + @pytest.fixture(scope='function') def agent_healthcheck(): """Check if expected agents are active.""" @@ -128,10 +129,10 @@ def agent_healthcheck(): @pytest.fixture(scope='function') def clean_environment(): - """Remove test groups and multigroups before and after running a test.""" - delete_groups() + """Remove test groups and multigroups after running a test.""" yield delete_groups() + remove_cluster_agents(test_hosts[0], test_infra_agents, host_manager) @pytest.fixture(scope='function') From 023039c7507d1478628227cf52f277b04795430f Mon Sep 17 00:00:00 2001 From: Julia Date: Wed, 25 Oct 2023 18:00:17 +0200 Subject: [PATCH 3/6] fix: fix linter errors --- .../test_agent_files_deletion.py | 4 +- .../test_agent_info_sync.py | 1 - .../test_agent_key_polling/data/config.yml | 131 +++++++++--------- 3 files changed, 68 insertions(+), 68 deletions(-) diff --git a/tests/system/test_cluster/test_agent_files_deletion/test_agent_files_deletion.py b/tests/system/test_cluster/test_agent_files_deletion/test_agent_files_deletion.py index 7a4a546f76..5b95776162 100644 --- a/tests/system/test_cluster/test_agent_files_deletion/test_agent_files_deletion.py +++ b/tests/system/test_cluster/test_agent_files_deletion/test_agent_files_deletion.py @@ -61,7 +61,9 @@ def test_agent_files_deletion(clean_environment): endpoint=f"/agents?select=id,name&q=manager={worker_host}") assert response['status'] == 200, f"Failed when trying to obtain agent ID: 
{response}" - assert response['json']['data']['affected_items'][0]['id'] == agent_id and response['json']['data']['affected_items'][0]['name'] == agent_name + assert (response['json']['data']['affected_items'][0]['id'] == agent_id and + response['json']['data']['affected_items'][0]['name'] == agent_name), f"Agent {agent_id} {agent_name}" \ + 'is not active' # Check that expected files exist in each node before removing the agent. for file in files: diff --git a/tests/system/test_cluster/test_agent_info_sync/test_agent_info_sync.py b/tests/system/test_cluster/test_agent_info_sync/test_agent_info_sync.py index 6fefe82208..2ed2ec8c73 100644 --- a/tests/system/test_cluster/test_agent_info_sync/test_agent_info_sync.py +++ b/tests/system/test_cluster/test_agent_info_sync/test_agent_info_sync.py @@ -60,7 +60,6 @@ def clean_cluster_logs(): host_manager.get_host(agent).ansible('command', 'service wazuh-agent restart', check=False) - @pytest.fixture(scope='function') def remove_labels(): """Remove any label set to the modified wazuh-agent and restart it to apply the new config.""" diff --git a/tests/system/test_cluster/test_agent_key_polling/data/config.yml b/tests/system/test_cluster/test_agent_key_polling/data/config.yml index a625707b1a..e611e57111 100644 --- a/tests/system/test_cluster/test_agent_key_polling/data/config.yml +++ b/tests/system/test_cluster/test_agent_key_polling/data/config.yml @@ -1,78 +1,77 @@ ---- wazuh-master: description: Enable remoted key_request at master side sections: - - section: remote - elements: - - connection: - value: secure - - port: - value: 1514 - - protocol: - value: tcp - - queue_size: - value: 131072 - - section: auth - elements: - - disabled: - value: 'no' - - port: - value: 1515 - - use_source_ip: - value: 'yes' - - force_insert: - value: 'yes' - - force_time: - value: 0 - - purge: - value: 'yes' - - use_password: - value: 'no' - - limit_maxagents: - value: 'yes' - - ciphers: - value: 'HIGH:!ADH:!EXP:!MD5:!RC4:!3DES:!CAMELLIA:@STRENGTH' - - ssl_verify_host: - value: 'no' - - ssl_manager_cert: - value: '/var/ossec/etc/sslmanager.cert' - - ssl_manager_key: - value: '/var/ossec/etc/sslmanager.key' - - ssl_auto_negotiate: - value: 'no' - - key_request: - elements: - - enabled: - value: 'yes' - - exec_path: - value: '/var/ossec/framework/python/bin/python3 /tmp/fetch_keys.py' - - timeout: - value: 60 - - threads: - value: 1 - - queue_size: - value: 1024 + - section: remote + elements: + - connection: + value: secure + - port: + value: 1514 + - protocol: + value: tcp + - queue_size: + value: 131072 + - section: auth + elements: + - disabled: + value: 'no' + - port: + value: 1515 + - use_source_ip: + value: 'yes' + - force_insert: + value: 'yes' + - force_time: + value: 0 + - purge: + value: 'yes' + - use_password: + value: 'no' + - limit_maxagents: + value: 'yes' + - ciphers: + value: HIGH:!ADH:!EXP:!MD5:!RC4:!3DES:!CAMELLIA:@STRENGTH + - ssl_verify_host: + value: 'no' + - ssl_manager_cert: + value: /var/ossec/etc/sslmanager.cert + - ssl_manager_key: + value: /var/ossec/etc/sslmanager.key + - ssl_auto_negotiate: + value: 'no' + - key_request: + elements: + - enabled: + value: 'yes' + - exec_path: + value: /var/ossec/framework/python/bin/python3 /tmp/fetch_keys.py + - timeout: + value: 60 + - threads: + value: 1 + - queue_size: + value: 1024 wazuh-worker1: description: Enable remoted agent_key_polling at worker side sections: - section: remote - elements: - - connection: - value: secure - - port: - value: 1514 - - protocol: - value: tcp - - queue_size: - value: 
131072 + elements: + - connection: + value: secure + - port: + value: 1514 + - protocol: + value: tcp + - queue_size: + value: 131072 wazuh-agent2: description: Set manager address sections: - - section: client - elements: - - server: - elements: - - address: - value: wazuh-worker1 + - section: client + elements: + - server: + elements: + - address: + value: wazuh-worker1 From 03db44d1e36f58448a917442f510b32c9b8efc10 Mon Sep 17 00:00:00 2001 From: Julia Date: Wed, 25 Oct 2023 18:04:18 +0200 Subject: [PATCH 4/6] fix: rename .yml to yaml --- .../data/{config.yml => config.yaml} | 20 +++++++++---------- .../data/{messages.yml => messages.yaml} | 0 .../test_agent_key_polling.py | 6 +++--- 3 files changed, 13 insertions(+), 13 deletions(-) rename tests/system/test_cluster/test_agent_key_polling/data/{config.yml => config.yaml} (87%) rename tests/system/test_cluster/test_agent_key_polling/data/{messages.yml => messages.yaml} (100%) diff --git a/tests/system/test_cluster/test_agent_key_polling/data/config.yml b/tests/system/test_cluster/test_agent_key_polling/data/config.yaml similarity index 87% rename from tests/system/test_cluster/test_agent_key_polling/data/config.yml rename to tests/system/test_cluster/test_agent_key_polling/data/config.yaml index e611e57111..d48e4b78df 100644 --- a/tests/system/test_cluster/test_agent_key_polling/data/config.yml +++ b/tests/system/test_cluster/test_agent_key_polling/data/config.yaml @@ -55,16 +55,16 @@ wazuh-master: wazuh-worker1: description: Enable remoted agent_key_polling at worker side sections: - - section: remote - elements: - - connection: - value: secure - - port: - value: 1514 - - protocol: - value: tcp - - queue_size: - value: 131072 + - section: remote + elements: + - connection: + value: secure + - port: + value: 1514 + - protocol: + value: tcp + - queue_size: + value: 131072 wazuh-agent2: description: Set manager address diff --git a/tests/system/test_cluster/test_agent_key_polling/data/messages.yml b/tests/system/test_cluster/test_agent_key_polling/data/messages.yaml similarity index 100% rename from tests/system/test_cluster/test_agent_key_polling/data/messages.yml rename to tests/system/test_cluster/test_agent_key_polling/data/messages.yaml diff --git a/tests/system/test_cluster/test_agent_key_polling/test_agent_key_polling.py b/tests/system/test_cluster/test_agent_key_polling/test_agent_key_polling.py index 12a6561b60..c3029f80fd 100644 --- a/tests/system/test_cluster/test_agent_key_polling/test_agent_key_polling.py +++ b/tests/system/test_cluster/test_agent_key_polling/test_agent_key_polling.py @@ -32,7 +32,7 @@ def configure_environment(host_manager): host_manager.move_file(host='wazuh-master', src_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'files/fetch_keys.py'), dest_path='/tmp/fetch_keys.py') - host_manager.apply_config(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/config.yml'), + host_manager.apply_config(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/config.yaml'), clear_files=[os.path.join(WAZUH_LOGS_PATH, 'ossec.log')], restart_services=['wazuh']) host_manager.add_block_to_file(host='wazuh-master', path='/var/ossec/etc/client.keys', replace='NOTVALIDKEY', @@ -46,7 +46,7 @@ def configure_environment(host_manager): def test_agent_key_polling(): """Check that the agent key polling cycle works correctly. To do this, we use the messages and the hosts defined - in data/messages.yml and the hosts inventory. + in data/messages.yaml and the hosts inventory. 
Parameters ---------- @@ -57,6 +57,6 @@ def test_agent_key_polling(): configure_environment(host_manager) host_monitor = HostMonitor(inventory_path=inventory_path, - messages_path=os.path.join(actual_path, 'data/messages.yml'), + messages_path=os.path.join(actual_path, 'data/messages.yaml'), tmp_path=os.path.join(actual_path, 'tmp')) host_monitor.run() From b32cb89dd2c2e68ab690e30db7faa9346cd71134 Mon Sep 17 00:00:00 2001 From: Julia Date: Wed, 25 Oct 2023 18:06:01 +0200 Subject: [PATCH 5/6] fix: fix yaml indentation --- .../test_agent_key_polling/data/config.yaml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/system/test_cluster/test_agent_key_polling/data/config.yaml b/tests/system/test_cluster/test_agent_key_polling/data/config.yaml index d48e4b78df..5e0b62f9f7 100644 --- a/tests/system/test_cluster/test_agent_key_polling/data/config.yaml +++ b/tests/system/test_cluster/test_agent_key_polling/data/config.yaml @@ -56,15 +56,15 @@ wazuh-worker1: description: Enable remoted agent_key_polling at worker side sections: - section: remote - elements: - - connection: - value: secure - - port: - value: 1514 - - protocol: - value: tcp - - queue_size: - value: 131072 + elements: + - connection: + value: secure + - port: + value: 1514 + - protocol: + value: tcp + - queue_size: + value: 131072 wazuh-agent2: description: Set manager address From cea5844787b1309bc4bc8b8670ad86af1d454ef8 Mon Sep 17 00:00:00 2001 From: Julia Date: Thu, 26 Oct 2023 12:19:44 +0200 Subject: [PATCH 6/6] fix: revert changes in test_agent_files_deletion --- .../test_agent_files_deletion.py | 63 ++++++++++++------- 1 file changed, 42 insertions(+), 21 deletions(-) diff --git a/tests/system/test_cluster/test_agent_files_deletion/test_agent_files_deletion.py b/tests/system/test_cluster/test_agent_files_deletion/test_agent_files_deletion.py index 5b95776162..870fdd5351 100644 --- a/tests/system/test_cluster/test_agent_files_deletion/test_agent_files_deletion.py +++ b/tests/system/test_cluster/test_agent_files_deletion/test_agent_files_deletion.py @@ -4,20 +4,16 @@ import os import re from os.path import join, dirname, abspath -from time import sleep +from time import time, sleep import pytest -from wazuh_testing.tools import WAZUH_PATH +from wazuh_testing.tools import WAZUH_PATH, WAZUH_LOGS_PATH from wazuh_testing.tools.monitoring import HostMonitor from wazuh_testing.tools.system import HostManager -from system.test_cluster.test_agent_groups.common import register_agent -from system import restart_cluster, check_agent_status, AGENT_STATUS_ACTIVE pytestmark = [pytest.mark.cluster, pytest.mark.basic_cluster_env] -test_infra_agents = ['wazuh-agent3'] -test_infra_managers = ['wazuh-master', 'wazuh-worker2'] master_host = 'wazuh-master' worker_host = 'wazuh-worker2' agent_host = 'wazuh-agent3' @@ -26,6 +22,7 @@ script_path = os.path.join(re.sub(r'^.*?wazuh-qa', '/wazuh-qa', local_path), '../utils/get_wdb_agent.py') tmp_path = os.path.join(local_path, 'tmp') +managers_hosts = [master_host, worker_host] inventory_path = join(dirname(dirname(dirname(abspath(__file__)))), 'provisioning', 'basic_cluster', 'inventory.yml') host_manager = HostManager(inventory_path) while_time = 5 @@ -33,7 +30,7 @@ time_to_agent_reconnect = 180 # Each file should exist in all hosts specified in 'hosts'. 
-files = [{'path': join(WAZUH_PATH, 'queue', 'rids', '{id}'), 'hosts': test_infra_managers}, +files = [{'path': join(WAZUH_PATH, 'queue', 'rids', '{id}'), 'hosts': managers_hosts}, {'path': join(WAZUH_PATH, 'queue', 'diff', '{name}'), 'hosts': [worker_host]}, {'path': join(WAZUH_PATH, 'queue', 'db', '{id}.db'), 'hosts': [worker_host]}] @@ -41,29 +38,50 @@ 'global sql select * from belongs where id_agent={id}'] -def test_agent_files_deletion(clean_environment): - """Check that when an agent is deleted, all its related files in managers are also removed.""" - agent_data = register_agent(agent_host, worker_host, host_manager) - agent_id = agent_data[1] - agent_name = agent_data[2] +def agent_healthcheck(master_token): + """Check if the agent is active and reporting.""" + timeout = time() + time_to_agent_reconnect + healthy = False + + while not healthy: + response = host_manager.make_api_call(host=master_host, method='GET', token=master_token, + endpoint='/agents?status=active') - restart_cluster(test_infra_agents+test_infra_managers, host_manager) + assert response['status'] == 200, 'Failed when trying to get the active agents' + if int(response['json']['data']['total_affected_items']) == 4: + for item in response['json']['data']['affected_items']: + if item['name'] == agent_host and item['manager'] == worker_host: + healthy = True + elif time() > timeout: + raise TimeoutError("The agent 'wazuh-agent3' is not 'Active' yet.") + sleep(while_time) sleep(time_to_sync) - # Check if the agent is connected - check_agent_status(agent_id, agent_name, agent_data[0], AGENT_STATUS_ACTIVE, - host_manager, test_infra_managers) + +def test_agent_files_deletion(): + """Check that when an agent is deleted, all its related files in managers are also removed.""" + # Clean ossec.log and cluster.log + for hosts in managers_hosts: + host_manager.clear_file(host=hosts, file_path=os.path.join(WAZUH_LOGS_PATH, 'ossec.log')) + host_manager.clear_file(host=hosts, file_path=os.path.join(WAZUH_LOGS_PATH, 'cluster.log')) + host_manager.control_service(host=hosts, service='wazuh', state='restarted') # Get the token master_token = host_manager.get_api_token(master_host) + + # Check if the agent is connected and reporting + agent_healthcheck(master_token) + # Get the current ID and name of the agent that is reporting to worker_host. response = host_manager.make_api_call(host=master_host, method='GET', token=master_token, endpoint=f"/agents?select=id,name&q=manager={worker_host}") assert response['status'] == 200, f"Failed when trying to obtain agent ID: {response}" - assert (response['json']['data']['affected_items'][0]['id'] == agent_id and - response['json']['data']['affected_items'][0]['name'] == agent_name), f"Agent {agent_id} {agent_name}" \ - 'is not active' + try: + agent_id = response['json']['data']['affected_items'][0]['id'] + agent_name = response['json']['data']['affected_items'][0]['name'] + except IndexError as e: + pytest.fail(f"Could not find any agent reporting to {worker_host}: {response['json']}") # Check that expected files exist in each node before removing the agent. 
for file in files: @@ -75,7 +93,7 @@ def test_agent_files_deletion(clean_environment): f"{file['path'].format(id=agent_id, name=agent_name)}" # Check that agent information is in the wdb socket - for host in test_infra_managers: + for host in managers_hosts: for query in queries: result = host_manager.run_command(host, f"{WAZUH_PATH}/framework/python/bin/python3 " @@ -101,9 +119,12 @@ def test_agent_files_deletion(clean_environment): f"{file['path'].format(id=agent_id, name=agent_name)}" # Check that agent information is not in the wdb socket - for host in test_infra_managers: + for host in managers_hosts: for query in queries: result = host_manager.run_command(host, f"{WAZUH_PATH}/framework/python/bin/python3 " f"{script_path} '{query.format(id=agent_id)}'") assert not result, f"This db query should have not returned anything in {host}, but it did: {result}" + + host_manager.control_service(host=agent_host, service='wazuh', state='restarted') + agent_healthcheck(master_token)
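
Note on the environment-restoration pattern used across these patches: the `delete_group` fixture added in PATCH 1/6 (and the reordered `clean_environment` fixture in PATCH 2/6) relies on pytest's yield-fixture teardown so that the test groups are removed even when an assertion fails mid-test. The sketch below is a minimal, self-contained illustration of that pattern only; `delete_test_groups` is a hypothetical stand-in for the real `host_manager.make_api_call(..., method='DELETE', endpoint=f"/groups?groups_list={agent_groups}")` call and is not part of the patches themselves.

    import pytest

    # Hypothetical stand-in for the real cleanup call made in PATCH 1/6 via
    # host_manager.make_api_call(..., method='DELETE', ...).
    def delete_test_groups():
        print("DELETE /groups?groups_list=<test groups>")

    @pytest.fixture(scope='module')
    def delete_group():
        # Nothing before `yield`: the fixture has no setup work, only teardown.
        yield
        # Runs once after the last test in the module has finished, even if a
        # test failed, so the test groups are always removed.
        delete_test_groups()

    def test_example_using_group(delete_group):
        # Tests only need to request the fixture; cleanup happens implicitly.
        assert True

Because the fixture is module-scoped, the DELETE request is issued once after the whole module rather than after every test, which is why both tests in test_agent_groups_forced_change.py can share the same `delete_group` fixture without repeating the cleanup.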