Relay Publish: Extend coverage #2

Merged · 18 commits · Nov 17, 2023
8 changes: 4 additions & 4 deletions .github/workflows/test.yml
@@ -11,7 +11,7 @@ on:
node1:
required: false
type: string
default: "wakuorg/nwaku:deploy-wakuv2-test"
default: "wakuorg/nwaku:latest"
node2:
required: false
type: string
@@ -27,8 +27,8 @@ on:

env:
FORCE_COLOR: "1"
NODE_1: ${{ inputs.node1 || 'wakuorg/nwaku:deploy-wakuv2-test' }}
NODE_2: ${{ inputs.node2 || 'wakuorg/go-waku:latest' }}
NODE_1: ${{ inputs.node1 }}
NODE_2: ${{ inputs.node2 }}
PROTOCOL: ${{ inputs.protocol || 'REST' }}

jobs:
@@ -49,7 +49,7 @@ jobs:
- run: pip install -r requirements.txt

- name: Run tests
run: pytest -n 3 --reruns 1 --alluredir=allure-results
run: pytest -n 4 --reruns 2 --alluredir=allure-results

- name: Get allure history
if: always()
1 change: 1 addition & 0 deletions pytest.ini
@@ -5,3 +5,4 @@ log_cli = True
log_file = log/test.log
log_cli_format = %(asctime)s %(name)s %(levelname)s %(message)s
log_file_format = %(asctime)s %(name)s %(levelname)s %(message)s
timeout = 300
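
The new timeout = 300 setting pairs with the pytest-timeout plugin added to requirements.txt below: any test still running after five minutes is aborted instead of hanging the CI job. Individual tests can override the global budget with the plugin's marker; a minimal sketch (the test name is hypothetical):

import pytest

@pytest.mark.timeout(60)  # tighter 60 s budget for this test only, overriding the global 300 s
def test_relay_publish_smoke():
    ...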
2 changes: 2 additions & 0 deletions requirements.txt
@@ -6,8 +6,10 @@ pre-commit
pyright
pytest
pytest-instafail
pytest-timeout
pytest-xdist
pytest-rerunfailures
python-dotenv
requests
tenacity
typeguard
14 changes: 4 additions & 10 deletions src/data_classes.py
@@ -1,23 +1,17 @@
from dataclasses import dataclass, field
from marshmallow_dataclass import class_schema
from typing import Optional


@dataclass
class MessageRpcQuery:
payload: str
contentTopic: str
timestamp: Optional[int] = None
from typing import Optional, Union


@dataclass
class MessageRpcResponse:
payload: str
contentTopic: str
version: Optional[int]
timestamp: int
timestamp: Optional[int]
ephemeral: Optional[bool]
rateLimitProof: Optional[dict] = field(default_factory=dict)
meta: Optional[str]
rateLimitProof: Optional[Union[dict, str]] = field(default_factory=dict)
rate_limit_proof: Optional[dict] = field(default_factory=dict)
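
Since this is a marshmallow dataclass, a response body can be validated by loading it through class_schema. A minimal sketch with a hypothetical response body; the field values are illustrative:

from marshmallow_dataclass import class_schema

raw = {
    "payload": "SGVsbG8=",                # base64-encoded message payload
    "contentTopic": "/test/1/demo/proto",
    "version": 0,
    "timestamp": 1700000000,
    "ephemeral": False,
    "meta": None,
}
message = class_schema(MessageRpcResponse)().load(raw)
assert message.timestamp == 1700000000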


17 changes: 10 additions & 7 deletions src/env_vars.py
@@ -6,20 +6,23 @@

def get_env_var(var_name, default=None):
env_var = os.getenv(var_name, default)
if env_var is not None:
print(f"{var_name}: {env_var}")
else:
if env_var in [None, ""]:
print(f"{var_name} is not set; using default value: {default}")
env_var = default
print(f"{var_name}: {env_var}")
return env_var


# Configuration constants. Need to be uppercase to appear in reports
NODE_1 = get_env_var("NODE_1", "wakuorg/nwaku:latest")
NODE_2 = get_env_var("NODE_2", "wakuorg/go-waku:latest")
LOG_DIR = get_env_var("LOG_DIR", "./log")
NODE_1 = get_env_var("NODE_1", "wakuorg/go-waku:latest")
NODE_2 = get_env_var("NODE_2", "wakuorg/nwaku:latest")
DOCKER_LOG_DIR = get_env_var("DOCKER_LOG_DIR", "./log/docker")
NETWORK_NAME = get_env_var("NETWORK_NAME", "waku")
SUBNET = get_env_var("SUBNET", "172.18.0.0/16")
IP_RANGE = get_env_var("IP_RANGE", "172.18.0.0/24")
GATEWAY = get_env_var("GATEWAY", "172.18.0.1")
DEFAULT_PUBSUBTOPIC = get_env_var("DEFAULT_PUBSUBTOPIC", "/waku/2/default-waku/proto")
DEFAULT_PUBSUB_TOPIC = get_env_var("DEFAULT_PUBSUB_TOPIC", "/waku/2/default-waku/proto")
PROTOCOL = get_env_var("PROTOCOL", "REST")
RUNNING_IN_CI = get_env_var("CI")
NODEKEY = get_env_var("NODEKEY", "30348dd51465150e04a5d9d932c72864c8967f806cce60b5d26afeca1e77eb68")
API_REQUEST_TIMEOUT = get_env_var("API_REQUEST_TIMEOUT", 10)
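
A quick sketch of the revised fallback behaviour: empty strings are now treated like unset variables, so both cases return the default (the variable and values are illustrative):

import os
from src.env_vars import get_env_var

os.environ["PROTOCOL"] = ""                       # empty string now triggers the fallback
assert get_env_var("PROTOCOL", "REST") == "REST"  # prints the "is not set" notice first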
12 changes: 9 additions & 3 deletions src/libs/common.py
@@ -1,9 +1,10 @@
import logging
from time import sleep
from src.libs.custom_logger import get_custom_logger
import os
import base64
import allure

logger = logging.getLogger(__name__)
logger = get_custom_logger(__name__)


def bytes_to_hex(byte_array):
@@ -24,5 +25,10 @@ def to_base64(input_data):


def attach_allure_file(file):
logger.debug("Attaching file %s", file)
logger.debug(f"Attaching file {file}")
allure.attach.file(file, name=os.path.basename(file), attachment_type=allure.attachment_type.TEXT)


def delay(num_seconds):
logger.debug(f"Sleeping for {num_seconds} seconds")
sleep(num_seconds)
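
Usage is a one-liner; the debug message makes deliberate waits visible in the test log (the duration is illustrative):

from src.libs.common import delay

delay(2)  # logs "Sleeping for 2 seconds", then blocks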
24 changes: 24 additions & 0 deletions src/libs/custom_logger.py
@@ -0,0 +1,24 @@
import logging

max_log_line_length = 5000


def log_length_filter(max_length):
class logLengthFilter(logging.Filter):
def filter(self, record):
if len(record.getMessage()) > max_length:
logging.getLogger(record.name).log(
record.levelno, f"Log line was discarded because it's longer than max_log_line_length={max_log_line_length}"
)
return False
return True

return logLengthFilter()


def get_custom_logger(name):
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("docker").setLevel(logging.WARNING)
logger = logging.getLogger(name)
logger.addFilter(log_length_filter(max_log_line_length))
return logger
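
A short sketch of the filter in practice: oversized lines are dropped and replaced with a same-level notice, everything else passes through:

from src.libs.custom_logger import get_custom_logger

logger = get_custom_logger(__name__)
logger.info("short message")  # logged as usual
logger.info("x" * 10_000)     # discarded; a notice about the dropped line is logged instead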
17 changes: 9 additions & 8 deletions src/node/api_clients/base_client.py
@@ -1,30 +1,31 @@
import logging
import requests
from tenacity import retry, stop_after_delay, wait_fixed
from abc import ABC, abstractmethod
from src.env_vars import API_REQUEST_TIMEOUT
from src.libs.custom_logger import get_custom_logger

logger = logging.getLogger(__name__)
logger = get_custom_logger(__name__)


class BaseClient(ABC):
# The retry decorator is applied to handle transient errors gracefully. This is particularly
# useful when running tests in parallel, where occasional network-related errors such as
# connection drops, timeouts, or temporary unavailability of a service can occur. Retrying
# ensures that such intermittent issues don't cause the tests to fail outright.
@retry(stop=stop_after_delay(2), wait=wait_fixed(0.1), reraise=True)
@retry(stop=stop_after_delay(0.5), wait=wait_fixed(0.1), reraise=True)
def make_request(self, method, url, headers=None, data=None):
logger.debug("%s call: %s with payload: %s", method.upper(), url, data)
response = requests.request(method.upper(), url, headers=headers, data=data)
logger.debug(f"{method.upper()} call: {url} with payload: {data}")
response = requests.request(method.upper(), url, headers=headers, data=data, timeout=API_REQUEST_TIMEOUT)
try:
response.raise_for_status()
except requests.HTTPError as http_err:
logger.error("HTTP error occurred: %s", http_err)
logger.error(f"HTTP error occurred: {http_err}. Response content: {response.content}")
raise
except Exception as err:
logger.error("An error occurred: %s", err)
logger.error(f"An error occurred: {err}. Response content: {response.content}")
raise
else:
logger.info("Response status code: %s", response.status_code)
logger.info(f"Response status code: {response.status_code}. Response content: {response.content}")
return response

@abstractmethod
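With stop_after_delay(0.5) and wait_fixed(0.1), tenacity re-invokes a failing call roughly every 100 ms until the half-second budget is spent, then re-raises the last exception (reraise=True). The same policy as a standalone sketch, with a hypothetical helper:

import requests
from tenacity import retry, stop_after_delay, wait_fixed

@retry(stop=stop_after_delay(0.5), wait=wait_fixed(0.1), reraise=True)
def fetch_status(url):
    # transient connection errors are retried within the 0.5 s window
    return requests.get(url, timeout=10).status_code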
6 changes: 3 additions & 3 deletions src/node/api_clients/rest.py
@@ -1,10 +1,10 @@
import logging
from src.libs.custom_logger import get_custom_logger
import json
from dataclasses import asdict
from urllib.parse import quote
from src.node.api_clients.base_client import BaseClient

logger = logging.getLogger(__name__)
logger = get_custom_logger(__name__)


class REST(BaseClient):
@@ -24,7 +24,7 @@ def set_subscriptions(self, pubsub_topics):
return self.rest_call("post", "relay/v1/subscriptions", json.dumps(pubsub_topics))

def send_message(self, message, pubsub_topic):
return self.rest_call("post", f"relay/v1/messages/{quote(pubsub_topic, safe='')}", json.dumps(asdict(message)))
return self.rest_call("post", f"relay/v1/messages/{quote(pubsub_topic, safe='')}", json.dumps(message))

def get_messages(self, pubsub_topic):
get_messages_response = self.rest_call("get", f"relay/v1/messages/{quote(pubsub_topic, safe='')}")
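Dropping asdict means send_message now expects a plain dict rather than a dataclass instance. A usage sketch; rest_client is a hypothetical REST instance and the field values are illustrative:

message = {
    "payload": "SGVsbG8=",                # base64-encoded payload
    "contentTopic": "/test/1/demo/proto",
    "timestamp": 1700000000,
}
rest_client.send_message(message, "/waku/2/default-waku/proto")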
6 changes: 3 additions & 3 deletions src/node/api_clients/rpc.py
@@ -1,9 +1,9 @@
import logging
from src.libs.custom_logger import get_custom_logger
import json
from dataclasses import asdict
from src.node.api_clients.base_client import BaseClient

logger = logging.getLogger(__name__)
logger = get_custom_logger(__name__)


class RPC(BaseClient):
@@ -28,7 +28,7 @@ def set_subscriptions(self, pubsub_topics):
return self.rpc_call("post_waku_v2_relay_v1_subscription", [pubsub_topics])

def send_message(self, message, pubsub_topic):
return self.rpc_call("post_waku_v2_relay_v1_message", [pubsub_topic, asdict(message)])
return self.rpc_call("post_waku_v2_relay_v1_message", [pubsub_topic, message])

def get_messages(self, pubsub_topic):
get_messages_response = self.rpc_call("get_waku_v2_relay_v1_messages", [pubsub_topic])
24 changes: 12 additions & 12 deletions src/node/docker_mananger.py
@@ -1,34 +1,34 @@
import os
import logging
from src.libs.custom_logger import get_custom_logger
import random
import threading
import docker
from src.env_vars import NETWORK_NAME, SUBNET, IP_RANGE, GATEWAY
from docker.types import IPAMConfig, IPAMPool
from docker.errors import NotFound

logger = logging.getLogger(__name__)
logger = get_custom_logger(__name__)


class DockerManager:
def __init__(self, image):
self._image = image
self._client = docker.from_env()
logger.debug("Docker client initialized with image %s", self._image)
logger.debug(f"Docker client initialized with image {self._image}")

def create_network(self, network_name=NETWORK_NAME):
logger.debug("Attempting to create or retrieve network %s", network_name)
logger.debug(f"Attempting to create or retrieve network {network_name}")
networks = self._client.networks.list(names=[network_name])
if networks:
logger.debug("Network %s already exists", network_name)
logger.debug(f"Network {network_name} already exists")
return networks[0]

network = self._client.networks.create(
network_name,
driver="bridge",
ipam=IPAMConfig(driver="default", pool_configs=[IPAMPool(subnet=SUBNET, iprange=IP_RANGE, gateway=GATEWAY)]),
)
logger.debug("Network %s created", network_name)
logger.debug(f"Network {network_name} created")
return network

def start_container(self, image_name, ports, args, log_path, container_ip):
@@ -39,14 +39,14 @@ def start_container(self, image_name, ports, args, log_path, container_ip):
else:
cli_args.append(f"--{key}={value}") # Add a single command
port_bindings = {f"{port}/tcp": ("", port) for port in ports}
logger.debug("Starting container with image %s", image_name)
logger.debug("Using args %s", cli_args)
logger.debug(f"Starting container with image {image_name}")
logger.debug(f"Using args {cli_args}")
container = self._client.containers.run(image_name, command=cli_args, ports=port_bindings, detach=True, remove=True, auto_remove=True)

network = self._client.networks.get(NETWORK_NAME)
network.connect(container, ipv4_address=container_ip)

logger.debug("Container started with ID %s. Setting up logs at %s", container.short_id, log_path)
logger.debug(f"Container started with ID {container.short_id}. Setting up logs at {log_path}")
log_thread = threading.Thread(target=self._log_container_output, args=(container, log_path))
log_thread.daemon = True
log_thread.start()
@@ -63,22 +63,22 @@ def generate_ports(self, base_port=None, count=5):
if base_port is None:
base_port = random.randint(1024, 65535 - count)
ports = [base_port + i for i in range(count)]
logger.debug("Generated ports %s", ports)
logger.debug(f"Generated ports {ports}")
return ports

@staticmethod
def generate_random_ext_ip():
base_ip_fragments = ["172", "18"]
ext_ip = ".".join(base_ip_fragments + [str(random.randint(0, 255)) for _ in range(2)])
logger.debug("Generated random external IP %s", ext_ip)
logger.debug(f"Generated random external IP {ext_ip}")
return ext_ip

def is_container_running(self, container):
try:
refreshed_container = self._client.containers.get(container.id)
return refreshed_container.status == "running"
except NotFound:
logger.error("Container with ID %s not found", container.id)
logger.error(f"Container with ID {container.id} not found")
return False

@property
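A usage sketch of the manager, assuming a local Docker daemon and the module path as-is (note the existing docker_mananger.py filename):

from src.node.docker_mananger import DockerManager

manager = DockerManager("wakuorg/nwaku:latest")
network = manager.create_network()          # reuses the existing "waku" bridge network when present
ext_ip = manager.generate_random_ext_ip()   # random 172.18.x.y address
ports = manager.generate_ports(count=5)     # five consecutive ports from a random base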