diff --git a/.github/workflows/e2e_tests.yaml b/.github/workflows/e2e_tests.yaml
index 5f041bcf..d2a259d7 100644
--- a/.github/workflows/e2e_tests.yaml
+++ b/.github/workflows/e2e_tests.yaml
@@ -12,7 +12,7 @@ on:
       required: true
       default: 'main'
     testbed:
-      description: 'Testbed to run the tests on. Default: oak4-pro'
+      description: 'Testbed to run the tests on. Available: oak4-pro, oak4-s'
       required: true
       default: 'oak4-pro'
     depthai-version:
diff --git a/.github/workflows/integration_tests.yaml b/.github/workflows/integration_tests.yaml
index e51bf549..0da35ec2 100644
--- a/.github/workflows/integration_tests.yaml
+++ b/.github/workflows/integration_tests.yaml
@@ -12,7 +12,7 @@ on:
       required: true
       default: 'main'
     testbed:
-      description: 'Testbed to run the tests on. Default: oak4-s'
+      description: 'Testbed to run the tests on. Available: oak4-pro, oak4-s'
       required: true
       default: 'oak4-s'
     depthai-version:
diff --git a/tests/conftest.py b/tests/conftest.py
index f4e5a52d..aa401ec6 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -2,9 +2,7 @@ def pytest_addoption(parser):
     parser.addoption(
         "--nn_archive_path", action="store", default="", help="NN archive path"
     )
-    parser.addoption(
-        "--slug", action="store", default="", help="Model slug from the ZOO."
-    )
+    parser.addoption("--model", action="store", default="", help="Model from the ZOO.")
     parser.addoption(
         "--platform",
         action="store",
diff --git a/tests/end_to_end/README.md b/tests/end_to_end/README.md
index 4da53868..f2c7cb7e 100644
--- a/tests/end_to_end/README.md
+++ b/tests/end_to_end/README.md
@@ -5,23 +5,32 @@ The tests check if the device is able to run a model with neural network node and parser.
 
 ## Running the tests on GitHub Actions
 
-You can manually trigger the Github action to run the HIL tests. There is a workflow parameter `additional-parameter` that can be used to specify the desired test. Default is `-all`. The available options are: `-all`, `-p <parser>`, `-s <slug> ...`, `-nn <archive-path> ...`. You can also specify with branch you want to run the tests on. The default is `main`. NOTE: There is a custom parameter `branch` and not the built-in one.
+You can manually trigger the GitHub Action to run the HIL (hardware-in-the-loop) tests. There is a workflow parameter `additional-parameter` that can be used to specify the desired test; the default is `-all`. The available options are: `-all`, `-p <parser>`, `-m <model> ...`, `-nn <archive-path> ...`. You can also specify which branch you want to run the tests on; the default is `main`. NOTE: `branch` is a custom workflow parameter, not the built-in one.
+
+### Required parameters
+
+There are four required parameters that need to be set when triggering the GitHub Action:
+
+- `additional-parameter`: Specifies the desired test. Default is `-all`, which runs the tests on all public models. The available options are: `-all`, `-p <parser>`, `-m <model> ...`.
+- `branch`: The branch on which the tests will be run. Default is `main`.
+- `testbed`: The testbed on which the tests will be run. Default is `oak4-pro`. Available: `oak4-pro`, `oak4-s`.
+- `depthai-version`: The version of DepthAI used for the tests. Default is `3.0.0a6`.
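+
+For example, the workflow can also be started from the command line with the GitHub CLI (a sketch, assuming `gh` is installed and authenticated and that the input names match the workflow file):
+
+```bash
+# assumes the GitHub CLI is installed and authenticated for this repository
+gh workflow run e2e_tests.yaml \
+  -f additional-parameter="-all" \
+  -f branch=main \
+  -f testbed=oak4-pro \
+  -f depthai-version=3.0.0a6
+```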
 
 ## Running the tests locally
 
-Currently, you must specify the device IP address in the ENV variables: `RVC2_IP` and `RVC4_IP`. If the ENV variable is empty the script will take the connected device via USB. For sending requests to HubAI you also need to specify `HUBAI_TEAM_ID` and `HUBAI_API_KEY` ENV variables
+Currently, you must specify the device IP address in the ENV variables `RVC2_IP` and `RVC4_IP`. If the ENV variable is empty, the script will use the device connected via USB. For sending requests to HubAI you also need to set the `HUBAI_TEAM_SLUG` and `HUBAI_API_KEY` ENV variables.
 
-For running the tests locally you can use `main.py` script. You can specify the model slugs to test the models from ZOO or specify the path to the local NN archive paths. If you want to test all available models you can use `--all` flag and for testing specific parser on all models you can use `--parser` or `-p` flag.
+For running the tests locally you can use the `main.py` script. You can specify models from the ZOO or paths to local NN archives. If you want to test all available models, use the `--all` flag; to test a specific parser on all models, use the `--parser` (`-p`) flag.
 
-Test all public models on ZOO:
+Test all public models from the ZOO:
 
 ```bash
 python main.py --all
 ```
 
-Test specific models on ZOO given the slugs:
+Test specific models from the ZOO:
 
 ```bash
-python main.py -s <slug1> <slug2> ...
+python main.py -m <model1> <model2> ...
 ```
 
 Test local NN archives:
 
@@ -36,7 +45,7 @@
 python main.py -p <parser>
 ```
 
-You can also run `manual.py` with `-s` or `-nn` if want to debug parser quickly (without pytest) and by specifying `-ip` you can connect to the specifi device with IP or mxid.
+You can also run `manual.py` with `-m` or `-nn` if you want to debug a parser quickly (without pytest), and by specifying `-ip` you can connect to a specific device by IP or mxid.
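+
+For example, a quick local debug run might look like this (a sketch with hypothetical values; substitute your own device IP, team slug, and API key):
+
+```bash
+# hypothetical values — replace with your own device IP and HubAI credentials
+export RVC4_IP=192.168.1.42
+export HUBAI_TEAM_SLUG=<your-team-slug>
+export HUBAI_API_KEY=<your-api-key>
+python manual.py -m <model> -ip $RVC4_IP
+```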
 
 ## Limitations
 
diff --git a/tests/end_to_end/main.py b/tests/end_to_end/main.py
index bb22aa79..054c823f 100644
--- a/tests/end_to_end/main.py
+++ b/tests/end_to_end/main.py
@@ -17,12 +17,12 @@ def main():
         help="Path(s) to the NNArchive.",
     )
     arg_parser.add_argument(
-        "-s",
-        "--slug",
+        "-m",
+        "--model",
         type=str,
         nargs="+",
         default="",
-        help="Slug(s) of the model from HubAI.",
+        help="Model(s) from HubAI.",
     )
     arg_parser.add_argument("-all", action="store_true", help="Run all tests")
     arg_parser.add_argument(
@@ -37,7 +37,7 @@ def main():
     args = arg_parser.parse_args()
 
     nn_archive_path = args.nn_archive_path  # it is a list of paths
-    slug = args.slug
+    model = args.model
     run_all = args.all
     parser = args.parser
     rvc_platform = "both" if args.platform == "" else args.platform
@@ -46,23 +46,23 @@ def main():
     print(f"RVC4 IP: {os.getenv('RVC4_IP', '')}")
     print(f"RVC platform: {'RVC2 & RVC4' if rvc_platform == '' else rvc_platform}")
 
-    if run_all and (nn_archive_path or slug):
-        raise ValueError("You can't pass both -all and -nn_archive_path or -slug")
+    if run_all and (nn_archive_path or model):
+        raise ValueError("You can't pass both -all and -nn_archive_path or -model")
 
     if run_all:
-        slug = get_model_slugs_from_zoo()
+        model = get_model_slugs_from_zoo()
 
     if parser:
-        slug = find_slugs_from_zoo(parser)
-        if len(slug) == 0:
+        model = find_slugs_from_zoo(parser)
+        if len(model) == 0:
             raise ValueError(f"No models found for parser {parser}")
         else:
-            print(f"Found model slugs for parser {parser}: {slug}")
+            print(f"Found models for parser {parser}: {model}")
 
-    if not nn_archive_path and not slug:
-        raise ValueError("You have to pass either path to NNArchive or model slug")
+    if not nn_archive_path and not model:
+        raise ValueError("You have to pass either a path to NNArchive or a model")
 
-    slug = [f"{s}" for s in slug]
+    model = [f"{m}" for m in model]
 
     command = [
         "test_e2e.py",
@@ -75,10 +75,10 @@ def main():
         "--color=yes",
     ]
 
-    if slug:
+    if model:
         command = [
             "test_e2e.py",
-            f"--slug={slug}",
+            f"--model={model}",
             f"--platform={rvc_platform}",
             "-v",
             "--tb=no",
diff --git a/tests/end_to_end/manual.py b/tests/end_to_end/manual.py
index e898aabf..b58f643f 100644
--- a/tests/end_to_end/manual.py
+++ b/tests/end_to_end/manual.py
@@ -1,7 +1,7 @@
 import argparse
 
 import depthai as dai
-from utils import get_input_shape, get_num_inputs, parse_model_slug
+from utils import get_input_shape, get_num_inputs
 
 from depthai_nodes.parsing_neural_network import ParsingNeuralNetwork
@@ -9,19 +9,17 @@
 parser.add_argument(
     "-nn", "--nn_archive", type=str, default=None, help="Path to the NNArchive."
 )
-parser.add_argument(
-    "-s", "--model_slug", type=str, default=None, help="Slug of the model from HubAI."
-)
+parser.add_argument("-m", "--model", type=str, default=None, help="Model from HubAI.")
 parser.add_argument("-ip", type=str, default="", help="IP of the device")
 args = parser.parse_args()
 
-if args.model_slug:
-    if "xfeat" in args.model_slug:
+if args.model:
+    if "xfeat" in args.model:
         print("XFeat model is not supported in this test.")
         exit(8)
 
-if not (args.nn_archive or args.model_slug):
-    raise ValueError("You have to pass either path to NNArchive or model slug")
+if not (args.nn_archive or args.model):
+    raise ValueError("You have to pass either a path to NNArchive or a model.")
 
 try:
     device = dai.Device(dai.DeviceInfo(args.ip))
@@ -33,11 +31,9 @@
 with dai.Pipeline(device) as pipeline:
     camera_node = pipeline.create(dai.node.Camera).build()
 
-    if args.model_slug:
-        model_slug, model_version_slug = parse_model_slug(args.model_slug)
+    if args.model:
         model_desc = dai.NNModelDescription(
-            modelSlug=model_slug,
-            modelVersionSlug=model_version_slug,
+            model=args.model,
             platform=device.getPlatform().name,
         )
         try:
@@ -45,7 +41,7 @@
         except Exception as e:
             print(e)
             print(
-                f"Couldn't find model {args.model_slug} for {device.getPlatform().name} in the ZOO"
+                f"Couldn't find model {args.model} for {device.getPlatform().name} in the ZOO"
             )
             device.close()
             exit(7)
@@ -53,7 +49,7 @@
             nn_archive = dai.NNArchive(nn_archive_path)
         except Exception as e:
             print(e)
-            print(f"Couldn't load the model {args.model_slug} from NN archive.")
+            print(f"Couldn't load the model {args.model} from NN archive.")
             device.close()
             exit(9)
diff --git a/tests/end_to_end/test_e2e.py b/tests/end_to_end/test_e2e.py
index 6cd7cd7b..cd3722dc 100644
--- a/tests/end_to_end/test_e2e.py
+++ b/tests/end_to_end/test_e2e.py
@@ -13,8 +13,8 @@ def nn_archive_paths(request):
 
 
 @pytest.fixture
-def slugs(request):
-    return request.config.getoption("--slug")
+def models(request):
+    return request.config.getoption("--model")
 
 
 @pytest.fixture
 def platform(request):
@@ -23,7 +23,7 @@
 def get_parametrized_values(
-    slugs: List[str], nn_archive_paths: List[str], platform: str
+    models: List[str], nn_archive_paths: List[str], platform: str
 ):
     test_cases = []
     rvc2_ip = os.getenv("RVC2_IP", "")
 
     elif platform == "rvc4":
         platforms = [(rvc4_ip, "RVC4")]
 
-    if slugs:
-        slugs = ast.literal_eval(slugs)
-        test_cases.extend([(*IP, None, slug) for slug in slugs for IP in platforms])
+    if models:
+        models = ast.literal_eval(models)
+        test_cases.extend([(*IP, None, model) for model in models for IP in platforms])
     if nn_archive_paths:
         nn_archive_paths = ast.literal_eval(nn_archive_paths)
         test_cases.extend(
         )
 
 
 def pytest_generate_tests(metafunc):
     nn_archive_paths = metafunc.config.getoption("nn_archive_path")
-    slugs = metafunc.config.getoption("slug")
+    models = metafunc.config.getoption("model")
     platform = metafunc.config.getoption("platform")
 
-    params = get_parametrized_values(slugs, nn_archive_paths, platform)
-    metafunc.parametrize("IP, ip_platform, nn_archive_path, slug", params)
+    params = get_parametrized_values(models, nn_archive_paths, platform)
+    metafunc.parametrize("IP, ip_platform, nn_archive_path, model", params)
 
 
-def test_pipelines(IP: str, ip_platform: str, nn_archive_path, slug):
+def test_pipelines(IP: str, ip_platform: str, nn_archive_path, model):
     time.sleep(3)
-    if not (nn_archive_path or slug):
-        raise ValueError("You have to pass either path to NNArchive or model slug")
+    if not (nn_archive_path or model):
+        raise ValueError("You have to pass either a path to NNArchive or a model")
 
     try:
-        if slug:
+        if model:
             subprocess.run(
-                f"python manual.py -s {slug} -ip {IP}",
+                f"python manual.py -m {model} {'-ip' if IP else ''} {IP}",
                 shell=True,
                 check=True,
                 timeout=90,
             )
         else:
             subprocess.run(
-                f"python manual.py -nn {nn_archive_path} -ip {IP}",
+                f"python manual.py -nn {nn_archive_path} {'-ip' if IP else ''} {IP}",
                 shell=True,
                 check=True,
                 timeout=90,
             )
     except subprocess.CalledProcessError as e:
         if e.returncode == 5:
-            pytest.skip(f"Model {slug} not supported on {ip_platform}.")
+            pytest.skip(f"Model {model} not supported on {ip_platform}.")
         elif e.returncode == 6:
             pytest.skip(f"Can't connect to the device with IP/mxid: {IP}")
         elif e.returncode == 7:
-            pytest.skip(f"Couldn't find model {slug} in the ZOO")
+            pytest.skip(f"Couldn't find model {model} in the ZOO")
         elif e.returncode == 8:
             pytest.skip(
-                f"The model {slug} is not supported in this test. (small input size, grayscale image, etc.)"
+                f"The model {model} is not supported in this test (small input size, grayscale image, etc.)."
             )
         elif e.returncode == 9:
-            pytest.skip(f"Couldn't load the model {slug} from NN archive.")
+            pytest.skip(f"Couldn't load the model {model} from NN archive.")
         else:
             raise RuntimeError("Pipeline crashed.") from e
     except subprocess.TimeoutExpired:
diff --git a/tests/end_to_end/utils.py b/tests/end_to_end/utils.py
index c40011cd..59bccaef 100644
--- a/tests/end_to_end/utils.py
+++ b/tests/end_to_end/utils.py
@@ -6,14 +6,14 @@ import requests
 
 API_KEY = os.getenv("HUBAI_API_KEY", None)
-HUBAI_TEAM_ID = os.getenv("HUBAI_TEAM_ID", None)
+HUBAI_TEAM_SLUG = os.getenv("HUBAI_TEAM_SLUG", None)
 
 if not API_KEY:
     raise ValueError(
         "You must specify your HubAI API key in order to get the model config."
     )
 
-if not HUBAI_TEAM_ID:
+if not HUBAI_TEAM_SLUG:
     raise ValueError(
-        "You must specify your HubAI team ID in order to get the model config."
+        "You must specify your HubAI team slug in order to get the model config."
     )
@@ -68,19 +68,6 @@ def get_num_inputs(nn_archive: dai.NNArchive) -> int:
     return len(inputs)
 
 
-def parse_model_slug(full_slug) -> Tuple[str, str]:
-    """Parse the model slug into model_slug and model_version_slug."""
-    if ":" not in full_slug:
-        raise NameError(
-            "Please provide the model slug in the format of 'model_slug:model_version_slug'"
-        )
-    model_slug_parts = full_slug.split(":")
-    model_slug = model_slug_parts[0]
-    model_version_slug = model_slug_parts[1]
-
-    return model_slug, model_version_slug
-
-
 def get_models() -> List[Dict]:
     """Get all the models from the ZOO that correspond to the HubAI team."""
     url = "https://easyml.cloud.luxonis.com/models/api/v1/models?is_public=true&limit=1000"
@@ -94,7 +81,7 @@
     valid_models = []
     for model in response:
-        if model["is_public"] and model["team_id"] == HUBAI_TEAM_ID:
+        if model["is_public"] and model["team_slug"] == HUBAI_TEAM_SLUG:
             model_dict = {
                 "name": model["name"],
                 "slug": model["slug"],
diff --git a/tests/integration_tests/README.md b/tests/integration_tests/README.md
index 6aa39365..9e9bbdca 100644
--- a/tests/integration_tests/README.md
+++ b/tests/integration_tests/README.md
@@ -18,9 +18,9 @@ We are storing tests in the B2 bucket.
 In the beginning, we download the tests from the bucket:
 
 ```
 nn_datas
 ├── <parser_name>
-│   ├── <model_slug>_output.pkl # Contains the NNData
-│   └── <model_slug>_msg.pkl # Contains the expected message
-│   └── <model_slug>_img.png # Contains the input image
+│   ├── <model>_output.pkl # Contains the NNData
+│   └── <model>_msg.pkl # Contains the expected message
+│   └── <model>_img.png # Contains the input image
 ```
 
 for example:
 
 ```
 nn_datas
@@ -39,7 +39,7 @@
 
 ## Test generation
 
-To generate a new test for the parser, you can use `extract_nn_data.py` script. The script will extract the `NNData` from the neural network output and store it in the pickle file. The script requires the following arguments: `-m` for the model slug, `-img` for the input image, and optional `-ip` for the device IP or mxid.
+To generate a new test for the parser, you can use the `extract_nn_data.py` script. The script extracts the `NNData` from the neural network output and stores it in a pickle file. The script requires the following arguments: `-m` for the model, `-img` for the input image, and an optional `-ip` for the device IP or mxid.
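+
+For example (a sketch; the model name, image path, and device IP are placeholders):
+
+```bash
+# placeholders — substitute a real model, image path, and device IP or mxid
+python extract_nn_data.py -m <model> -img path/to/input.png -ip <device-ip-or-mxid>
+```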
 
 The script does not generate the expected message because each parser has its own message format and DAI messages can not be dumped in the pickle file.
 
@@ -59,7 +59,16 @@ with open('nn_datas/ClassificationParser/efficientnet-lite_output.pkl', 'wb') as f:
 In the end, you should have all the files in the parser-specific directory inside `nn_datas` directory. You need to upload the parser directory to the B2 bucket.
 
-## Running the tests
+## Running the tests locally
 
 To run the tests, you can use the `main.py` script. You can use `--all` flag to test all parsers or test a specific parser with `-p` flag. You would need the B2 credentials to download the tests from the bucket and set it in the ENV variables `B2_APPLICATION_KEY_ID` and `B2_APPLICATION_KEY`.
+
+## Running the tests in CI
+
+The integration tests are triggered on every PR, but you can also trigger them manually. The required parameters are:
+
+- `additional-parameter`: Specifies the desired test. Default is `-all`, which runs the tests on all parsers. The available options are: `-all`, `-p <parser>`.
+- `branch`: The branch on which the tests will be run. Default is `main`.
+- `testbed`: The testbed on which the tests will be run. Default is `oak4-s`. Available: `oak4-pro`, `oak4-s`.
+- `depthai-version`: The version of DepthAI used for the tests. Default is `3.0.0a6`.
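+
+For example, with the GitHub CLI (a sketch, assuming `gh` is installed and authenticated and that the input names match the workflow file):
+
+```bash
+# assumes the GitHub CLI is installed and authenticated for this repository
+gh workflow run integration_tests.yaml \
+  -f additional-parameter="-all" \
+  -f branch=main \
+  -f testbed=oak4-s \
+  -f depthai-version=3.0.0a6
+```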
diff --git a/tests/integration_tests/extract_nn_data.py b/tests/integration_tests/extract_nn_data.py
index c87ebc41..1189d371 100644
--- a/tests/integration_tests/extract_nn_data.py
+++ b/tests/integration_tests/extract_nn_data.py
@@ -12,13 +12,13 @@
     "-img", "--img_path", help="Path to the input image", required=True, type=str
 )
 argparser.add_argument(
-    "-m", "--model_slug", help="Slug of the model from HubAI", required=True, type=str
+    "-m", "--model", help="Model from HubAI", required=True, type=str
 )
 args = argparser.parse_args()
 
 IP_mxid = args.ip
 img_path = args.img_path
-model: str = args.model_slug
+model: str = args.model
 
 device = dai.Device(dai.DeviceInfo(IP_mxid))
 device_platform = device.getPlatform().name
diff --git a/tests/integration_tests/main.py b/tests/integration_tests/main.py
index cdb462bd..9377ebc2 100644
--- a/tests/integration_tests/main.py
+++ b/tests/integration_tests/main.py
@@ -15,7 +15,7 @@ def main():
         type=str,
         nargs="+",
         default="",
-        help="Model slug from HubAI.",
+        help="Model(s) from HubAI.",
     )
     arg_parser.add_argument("-all", action="store_true", help="Run all tests")
     arg_parser.add_argument(
@@ -55,7 +55,7 @@ def main():
         if not models:
             raise ValueError(f"No models found for parser {parser}")
         else:
-            print(f"Found model slugs for parser {parser}: {models}")
+            print(f"Found models for parser {parser}: {models}")
 
     if not models:
         raise ValueError("No models provided")
diff --git a/tests/integration_tests/manual.py b/tests/integration_tests/manual.py
index 4c0baae2..53cb5a9c 100644
--- a/tests/integration_tests/manual.py
+++ b/tests/integration_tests/manual.py
@@ -11,7 +11,7 @@
 argparser.add_argument(
     "-m",
     "--model",
-    help="The model slug from which the parser is built",
+    help="The model from which the parser is built",
     required=True,
     type=str,
 )