diff --git a/app/tests/integration/nimbus/conftest.py b/app/tests/integration/nimbus/conftest.py
index ec897a5409..6b80ec785a 100644
--- a/app/tests/integration/nimbus/conftest.py
+++ b/app/tests/integration/nimbus/conftest.py
@@ -24,7 +24,7 @@ from requests.packages.urllib3.util.retry import Retry
 
 
 APPLICATION_FEATURE_IDS = {
-    BaseExperimentApplications.DESKTOP: "1",
+    BaseExperimentApplications.FIREFOX_DESKTOP: "1",
     BaseExperimentApplications.FENIX: "2",
     BaseExperimentApplications.IOS: "3",
     BaseExperimentApplications.FOCUS_ANDROID: "4",
@@ -32,7 +32,7 @@
 }
 
 APPLICATION_KINTO_REVIEW_PATH = {
-    BaseExperimentApplications.DESKTOP: (
+    BaseExperimentApplications.FIREFOX_DESKTOP: (
         "#/buckets/main-workspace/collections/nimbus-desktop-experiments/simple-review"
     ),
     BaseExperimentApplications.FENIX: (
@@ -243,7 +243,7 @@ def _create_basic_experiment(name, app, targeting, languages=[]):
 
 @pytest.fixture
 def create_desktop_experiment(create_basic_experiment):
-    def _create_desktop_experiment(slug, app, targeting, **data):
+    def _create_desktop_experiment(slug, app, targeting, data):
         # create a basic experiment via graphql so we can get an ID
         create_basic_experiment(
             slug,
@@ -269,27 +269,11 @@ def _create_desktop_experiment(slug, app, targeting, **data):
         )
         experiment_id = response.json()["data"]["experimentBySlug"]["id"]
 
+        data.update({"id": experiment_id})
+
         query = {
             "operationName": "updateExperiment",
-            "variables": {
-                "input": {
-                    "id": experiment_id,
-                    "name": f"test_check_telemetry_enrollment-{experiment_id}",
-                    "hypothesis": "Test hypothesis",
-                    "application": app.upper(),
-                    "changelogMessage": "test updated",
-                    "targetingConfigSlug": targeting,
-                    "publicDescription": data.get("public_description", "Fancy Words"),
-                    "riskRevenue": data.get("risk_revenue"),
-                    "riskPartnerRelated": data.get("risk_partner_related"),
-                    "riskBrand": data.get("risk_brand"),
-                    "featureConfigId": data.get("feature_config"),
-                    "referenceBranch": data.get("reference_branch"),
-                    "treatmentBranches": data.get("treatement_branch"),
-                    "populationPercent": data.get("population_percent"),
-                    "totalEnrolledClients": data.get("total_enrolled_clients"),
-                }
-            },
+            "variables": {"input": data},
             "query": "mutation updateExperiment($input: ExperimentInput!) \
                 {\n updateExperiment(input: $input) \
                 {\n message\n __typename\n }\n}\n",
@@ -320,6 +304,48 @@ def _language_database_id_loader(languages=None):
     return _language_database_id_loader
 
 
+@pytest.fixture(name="countries_database_id_loader")
+def fixture_countries_database_id_loader():
+    """Return database ids for countries"""
+
+    def _countries_database_id_loader(countries=None):
+        country_list = []
+        path = Path().resolve()
+        path = str(path)
+        path = path.strip("/tests/integration/nimbus")
+        path = os.path.join("/", path, "experimenter/base/fixtures/countries.json")
+        with open(path) as file:
+            data = json.loads(file.read())
+        for country in countries:
+            for item in data:
+                if country in item["fields"]["code"][:2]:
+                    country_list.append(item["pk"])
+        return country_list
+
+    return _countries_database_id_loader
+
+
+@pytest.fixture(name="locales_database_id_loader")
+def fixture_locales_database_id_loader():
+    """Return database ids for locales"""
+
+    def _locales_database_id_loader(locales=None):
+        locale_list = []
+        path = Path().resolve()
+        path = str(path)
+        path = path.strip("/tests/integration/nimbus")
+        path = os.path.join("/", path, "experimenter/base/fixtures/locales.json")
+        with open(path) as file:
+            data = json.loads(file.read())
+        for locale in locales:
+            for item in data:
+                if locale in item["fields"]["code"]:
+                    locale_list.append(item["pk"])
+        return locale_list
+
+    return _locales_database_id_loader
+
+
 @pytest.fixture
 def trigger_experiment_loader(selenium):
     def _trigger_experiment_loader():
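With this conftest change, _create_desktop_experiment stops translating snake_case keyword arguments field by field and instead forwards the caller's camelCase dict verbatim as the GraphQL ExperimentInput. A minimal sketch of the resulting call shape, assuming a plain requests.post against a placeholder endpoint (the real URL, session, and error handling live elsewhere in conftest and are not shown in these hunks):

import requests

GRAPHQL_ENDPOINT = "https://nginx/api/v5/graphql"  # placeholder, not taken from this diff


def update_experiment(data):
    # Mirrors _create_desktop_experiment after this change: the caller-supplied
    # dict (with the experiment "id" injected) becomes the whole mutation input.
    query = {
        "operationName": "updateExperiment",
        "variables": {"input": data},
        "query": "mutation updateExperiment($input: ExperimentInput!) "
        "{ updateExperiment(input: $input) { message __typename } }",
    }
    return requests.post(GRAPHQL_ENDPOINT, json=query)
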
diff --git a/app/tests/integration/nimbus/models/base_dataclass.py b/app/tests/integration/nimbus/models/base_dataclass.py
index 009fa8202a..90ba58824e 100644
--- a/app/tests/integration/nimbus/models/base_dataclass.py
+++ b/app/tests/integration/nimbus/models/base_dataclass.py
@@ -4,7 +4,7 @@
 
 
 class BaseExperimentApplications(Enum):
-    DESKTOP = "DESKTOP"
+    FIREFOX_DESKTOP = "DESKTOP"
     FENIX = "FENIX"
     IOS = "IOS"
     FOCUS_ANDROID = "FOCUS_ANDROID"
diff --git a/app/tests/integration/nimbus/parallel_pytest_args.txt b/app/tests/integration/nimbus/parallel_pytest_args.txt
index 4b1a2c2dc1..d4a1125e1f 100644
--- a/app/tests/integration/nimbus/parallel_pytest_args.txt
+++ b/app/tests/integration/nimbus/parallel_pytest_args.txt
@@ -1,7 +1,7 @@
--k DESKTOP -m run_per_app
+-k FIREFOX_DESKTOP -m run_per_app
 -k FENIX -m run_per_app
 -k IOS -m run_per_app
 -k FOCUS_ANDROID -m run_per_app
 -k FOCUS_IOS -m run_per_app
--k DESKTOP -m run_once --dist=loadgroup -n=2
--k DESKTOP -m run_targeting -n 2
+-k FIREFOX_DESKTOP -m run_once --dist=loadgroup -n=2
+-k FIREFOX_DESKTOP -m run_targeting -n 2
diff --git a/app/tests/integration/nimbus/test_firefox_targeting.py b/app/tests/integration/nimbus/test_firefox_targeting.py
index 488ba6dbb0..7b796a56d7 100644
--- a/app/tests/integration/nimbus/test_firefox_targeting.py
+++ b/app/tests/integration/nimbus/test_firefox_targeting.py
@@ -3,8 +3,8 @@
 
 import pytest
 import requests
-from nimbus.models.base_dataclass import BaseExperimentApplications
 from nimbus.pages.browser import Browser
+from nimbus.utils import helpers
 
 LOAD_DATA_RETRIES = 10
 LOAD_DATA_RETRY_DELAY = 1.0
@@ -76,22 +76,59 @@ def targeting_config_slug(request):
 @pytest.mark.run_targeting
 def test_check_targeting(
     selenium,
+    slugify,
     default_data,
-    create_experiment,
+    experiment_name,
     targeting_config_slug,
+    create_desktop_experiment,
+    countries_database_id_loader,
+    locales_database_id_loader,
 ):
-    # TODO #6791
-    # If the targeting config slug includes the word desktop it will cause this test
-    # to run against applications other than desktop, which will then fail.
-    # This check will prevent the test from executing fully but we should dig
-    # into preventing this case altogether when we have time.
-    if default_data.application != BaseExperimentApplications.DESKTOP:
-        return
-
-    default_data.audience.targeting = targeting_config_slug
-    experiment = create_experiment(selenium)
-
-    experiment_data = load_experiment_data(experiment.experiment_slug)
+    targeting = helpers.load_targeting_configs()[1]
+    experiment_slug = str(slugify(experiment_name))
+    data = {
+        "hypothesis": "Test Hypothesis",
+        "application": "DESKTOP",
+        "changelogMessage": "test updates",
+        "targetingConfigSlug": targeting,
+        "publicDescription": "Some sort of Fancy Words",
+        "riskRevenue": False,
+        "riskPartnerRelated": False,
+        "riskBrand": False,
+        "featureConfigId": 1,
+        "referenceBranch": {
+            "description": "reference branch",
+            "name": "Branch 1",
+            "ratio": 50,
+            "featureEnabled": True,
+            "featureValue": "{}",
+        },
+        "treatmentBranches": [
+            {
+                "description": "treatment branch",
+                "name": "Branch 2",
+                "ratio": 50,
+                "featureEnabled": False,
+                "featureValue": "",
+            }
+        ],
+        "populationPercent": "100",
+        "totalEnrolledClients": 55,
+        "channel": "NIGHTLY",
+        "firefoxMinVersion": "FIREFOX_100",
+        "firefoxMaxVersion": "FIREFOX_120",
+        "locales": locales_database_id_loader(["en-CA"]),
+        "countries": countries_database_id_loader(["CA"]),
+        "proposedEnrollment": "14",
+        "proposedDuration": "30",
+    }
+    create_desktop_experiment(
+        experiment_slug,
+        "desktop",
+        targeting_config_slug,
+        data,
+    )
+    experiment_data = load_experiment_data(experiment_slug)
 
     targeting = experiment_data["data"]["experimentBySlug"]["jexlTargetingExpression"]
     recipe = experiment_data["data"]["experimentBySlug"]["recipeJson"]
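The reworked targeting test is the first consumer of the new loader fixtures: locales_database_id_loader(["en-CA"]) and countries_database_id_loader(["CA"]) translate locale and country codes into the database primary keys the GraphQL API expects. A self-contained sketch of that lookup, using an invented two-entry sample in Django-fixture shape rather than the real experimenter/base/fixtures files:

# Invented sample data; the real entries live in experimenter/base/fixtures/countries.json.
SAMPLE_FIXTURE = [
    {"model": "base.country", "pk": 1, "fields": {"code": "CA", "name": "Canada"}},
    {"model": "base.country", "pk": 2, "fields": {"code": "US", "name": "United States"}},
]


def country_database_ids(countries, fixture_data):
    # Same matching rule as the new countries_database_id_loader fixture:
    # collect the pk of every entry whose two-letter code contains the requested code.
    return [
        item["pk"]
        for country in countries
        for item in fixture_data
        if country in item["fields"]["code"][:2]
    ]


assert country_database_ids(["CA"], SAMPLE_FIXTURE) == [1]
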
diff --git a/app/tests/integration/nimbus/test_telemetry.py b/app/tests/integration/nimbus/test_telemetry.py
index 939050e25e..fbd616eb60 100644
--- a/app/tests/integration/nimbus/test_telemetry.py
+++ b/app/tests/integration/nimbus/test_telemetry.py
@@ -72,23 +72,24 @@ def test_check_telemetry_enrollment_unenrollment(
     requests.delete("http://ping-server:5000/pings")
     targeting = helpers.load_targeting_configs()[0]
     experiment_slug = str(slugify(experiment_name))
-    create_desktop_experiment(
-        experiment_slug,
-        "desktop",
-        targeting,
-        public_description="Some sort of words",
-        risk_revenue=False,
-        risk_partner_related=False,
-        risk_brand=False,
-        feature_config=1,
-        reference_branch={
+    data = {
+        "hypothesis": "Test Hypothesis",
+        "application": "DESKTOP",
+        "changelogMessage": "test updates",
+        "targetingConfigSlug": targeting,
+        "publicDescription": "Some sort of Fancy Words",
+        "riskRevenue": False,
+        "riskPartnerRelated": False,
+        "riskBrand": False,
+        "featureConfigId": 1,
+        "referenceBranch": {
             "description": "reference branch",
             "name": "Branch 1",
             "ratio": 50,
             "featureEnabled": True,
             "featureValue": "{}",
         },
-        treatement_branch=[
+        "treatmentBranches": [
             {
                 "description": "treatment branch",
                 "name": "Branch 2",
@@ -97,8 +98,14 @@
                 "featureValue": "",
             }
         ],
-        population_percent="100",
-        total_enrolled_clients=55,
-    )
+        "populationPercent": "100",
+        "totalEnrolledClients": 55,
+    }
+    create_desktop_experiment(
+        experiment_slug,
+        "desktop",
+        targeting,
+        data,
+    )
     summary = SummaryPage(selenium, urljoin(base_url, experiment_slug)).open()
     summary.launch_and_approve()
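Both tests now assemble nearly the same camelCase ExperimentInput dict by hand; the targeting test only layers the audience fields (channel, Firefox versions, locales, countries, enrollment and duration) on top of it. If that duplication ever becomes a maintenance concern, the shared portion could be collected in one place. A rough sketch, where the helper name and its location are hypothetical and the field names and values are copied from the two tests above:

def base_experiment_input(targeting_config_slug):
    """Hypothetical helper: the ExperimentInput fields that test_telemetry.py
    and test_firefox_targeting.py both build in this diff."""
    return {
        "hypothesis": "Test Hypothesis",
        "application": "DESKTOP",
        "changelogMessage": "test updates",
        "targetingConfigSlug": targeting_config_slug,
        "publicDescription": "Some sort of Fancy Words",
        "riskRevenue": False,
        "riskPartnerRelated": False,
        "riskBrand": False,
        "featureConfigId": 1,
        "referenceBranch": {
            "description": "reference branch",
            "name": "Branch 1",
            "ratio": 50,
            "featureEnabled": True,
            "featureValue": "{}",
        },
        "treatmentBranches": [
            {
                "description": "treatment branch",
                "name": "Branch 2",
                "ratio": 50,
                "featureEnabled": False,
                "featureValue": "",
            }
        ],
        "populationPercent": "100",
        "totalEnrolledClients": 55,
    }


# Usage sketch: the targeting test would then only add its audience-specific keys, e.g.
# data = {**base_experiment_input(targeting), "channel": "NIGHTLY", "firefoxMinVersion": "FIREFOX_100"}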