diff --git a/.github/workflows/ab_tests.yml b/.github/workflows/ab_tests.yml
index 51e3f6a0db..63b6051170 100644
--- a/.github/workflows/ab_tests.yml
+++ b/.github/workflows/ab_tests.yml
@@ -90,12 +90,19 @@ jobs:
       - name: Convert dask config into environment variables
         run: python ci/scripts/dask_config_to_env.py AB_environments/${{ matrix.runtime-version }}.dask.yaml >> $GITHUB_ENV
 
+      - name: Google auth
+        uses: "google-github-actions/auth@v2"
+        with:
+          credentials_json: "${{ secrets.GCP_CREDENTIALS }}"
+
       - name: Run Coiled Runtime Tests
         env:
           DASK_COILED__TOKEN: ${{ secrets.COILED_BENCHMARK_BOT_TOKEN }}
           AWS_ACCESS_KEY_ID: ${{ secrets.RUNTIME_CI_BOT_AWS_ACCESS_KEY_ID }}
           AWS_DEFAULT_REGION: us-east-2 # this is needed for boto for some reason
           AWS_SECRET_ACCESS_KEY: ${{ secrets.RUNTIME_CI_BOT_AWS_SECRET_ACCESS_KEY }}
+          AZURE_STORAGE_ACCOUNT_NAME: ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
+          AZURE_STORAGE_SAS_TOKEN: ${{ secrets.AZURE_STORAGE_SAS_TOKEN }}
           PYTHON_STUB_PAT: ${{ secrets.PYTHON_STUB_PAT }}
           SNOWFLAKE_USER: ${{ secrets.SNOWFLAKE_USER }}
           SNOWFLAKE_PASSWORD: ${{ secrets.SNOWFLAKE_PASSWORD }}
diff --git a/AB_environments/config.yaml b/AB_environments/config.yaml
index 294eb4f8fe..569b64f2cf 100644
--- a/AB_environments/config.yaml
+++ b/AB_environments/config.yaml
@@ -27,6 +27,9 @@ markers: geo_execution
 # markers: shuffle_p2p or shuffle_tasks
 # markers: not shuffle_tasks
 
+geo:
+  scale: small
+
 # Enable specific H2O datasets
 h2o_datasets:
 # - 0.5 GB (csv)
diff --git a/ci/scripts/discover_ab_environments.py b/ci/scripts/discover_ab_environments.py
index f546e02ed2..9dc4293579 100644
--- a/ci/scripts/discover_ab_environments.py
+++ b/ci/scripts/discover_ab_environments.py
@@ -66,7 +66,9 @@ def build_json() -> JSONOutput:
         pytest_args.append(f"-m '{cfg['markers']}'")
     for target in cfg["targets"]:
         pytest_args.append(f"'{target}'")
-
+    # "geo" is a mapping (see AB_environments/config.yaml), not a list:
+    # look up its "scale" key directly instead of iterating over keys.
+    if scale := cfg.get("geo", {}).get("scale"):
+        pytest_args.append(f"--scale {scale}")
     return {
         "run_AB": True,
         "repeat": list(range(1, cfg["repeat"] + 1)),