diff --git a/.github/workflows/test_e2e_deploy_scheduled.yml b/.github/workflows/test_e2e_deploy_scheduled.yml
index 02729a8c4a7ce..eda3efb352903 100644
--- a/.github/workflows/test_e2e_deploy_scheduled.yml
+++ b/.github/workflows/test_e2e_deploy_scheduled.yml
@@ -33,7 +33,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        group: [1, 2]
+        group: [1/5, 2/5, 3/5, 4/5, 5/5]
 
     steps:
       - name: Setup Node.js
@@ -60,17 +60,20 @@ jobs:
       - name: Run tests
         run: |
           docker run --rm -v $(pwd):/work mcr.microsoft.com/playwright:v1.41.2-jammy /bin/bash -c "cd /work && \
-          NODE_VERSION=${{ env.NODE_LTS_VERSION }} ./scripts/setup-node.sh && \
+          NODE_VERSION=${NODE_LTS_VERSION} ./scripts/setup-node.sh && \
           corepack enable > /dev/null && \
-          NEXT_JUNIT_TEST_REPORT=${{ env.NEXT_JUNIT_TEST_REPORT }} \
-          DATADOG_API_KEY=${{ env.DATADOG_API_KEY }} \
-          DD_ENV=${{ env.DD_ENV }} \
-          VERCEL_TEST_TOKEN=${{ env.VERCEL_TEST_TOKEN }} \
-          VERCEL_TEST_TEAM=${{ env.VERCEL_TEST_TEAM }} \
-          NEXT_TEST_JOB=${{ env.NEXT_TEST_JOB }} \
-          NEXT_TEST_MODE=${{ env.NEXT_TEST_MODE }} \
-          TEST_TIMINGS_TOKEN=${{ env.TEST_TIMINGS_TOKEN }} \
-          xvfb-run node run-tests.js --type e2e --timings -g ${{ matrix.group }}/2 -c 1 >> /proc/1/fd/1"
+          NEXT_JUNIT_TEST_REPORT=${NEXT_JUNIT_TEST_REPORT} \
+          NEXT_TELEMETRY_DISABLED=${NEXT_TELEMETRY_DISABLED} \
+          DATADOG_API_KEY=${DATADOG_API_KEY} \
+          DD_ENV=${DD_ENV} \
+          VERCEL_TEST_TOKEN=${VERCEL_TEST_TOKEN} \
+          VERCEL_TEST_TEAM=${VERCEL_TEST_TEAM} \
+          NEXT_TEST_JOB=${NEXT_TEST_JOB} \
+          NEXT_TEST_MODE=${NEXT_TEST_MODE} \
+          TEST_TIMINGS_TOKEN=${TEST_TIMINGS_TOKEN} \
+          NEXT_TEST_CONTINUE_ON_ERROR=${NEXT_TEST_CONTINUE_ON_ERROR} \
+          NEXT_EXTERNAL_TESTS_FILTERS=./test/deploy-tests-manifest.json \
+          xvfb-run node run-tests.js --type e2e --timings -g ${{ matrix.group }} -c 1 >> /proc/1/fd/1"
 
       - name: Save test report as artifacts
         if: always()
diff --git a/run-tests.js b/run-tests.js
index 98dd35e5de120..412f429ee16bf 100644
--- a/run-tests.js
+++ b/run-tests.js
@@ -227,7 +227,12 @@ async function main() {
     }
   }
 
-  console.log('Running tests with concurrency:', options.concurrency)
+  console.log(
+    'Running tests with concurrency:',
+    options.concurrency,
+    'in test mode',
+    process.env.NEXT_TEST_MODE
+  )
 
   /** @type TestFile[] */
   let tests = argv._.filter((arg) =>
@@ -470,7 +475,8 @@ ${ENDGROUP}`)
         RECORD_REPLAY: shouldRecordTestWithReplay,
         // run tests in headless mode by default
         HEADLESS: 'true',
-        TRACE_PLAYWRIGHT: 'true',
+        TRACE_PLAYWRIGHT:
+          process.env.NEXT_TEST_MODE === 'deploy' ? undefined : 'true',
         NEXT_TELEMETRY_DISABLED: '1',
         // unset CI env so CI behavior is only explicitly
         // tested when enabled
@@ -691,32 +697,6 @@ ${ENDGROUP}`)
         }
       }
 
-      // Emit test output if test failed or if we're continuing tests on error
-      if ((!passed || shouldContinueTestsOnError) && isTestJob) {
-        try {
-          const testsOutput = await fsp.readFile(
-            `${test.file}${RESULTS_EXT}`,
-            'utf8'
-          )
-          const obj = JSON.parse(testsOutput)
-          obj.processEnv = {
-            NEXT_TEST_MODE: process.env.NEXT_TEST_MODE,
-            HEADLESS: process.env.HEADLESS,
-          }
-          await outputSema.acquire()
-          if (GROUP) console.log(`${GROUP}Result as JSON for tooling`)
-          console.log(
-            `--test output start--`,
-            JSON.stringify(obj),
-            `--test output end--`
-          )
-          if (ENDGROUP) console.log(ENDGROUP)
-          outputSema.release()
-        } catch (err) {
-          console.log(`Failed to load test output`, err)
-        }
-      }
-
       sema.release()
       if (dirSema) dirSema.release()
     })
diff --git a/test/deploy-tests-manifest.json b/test/deploy-tests-manifest.json
new file mode 100644
index 0000000000000..4eb82c58a026a
--- /dev/null
+++ b/test/deploy-tests-manifest.json
@@ -0,0 +1,11 @@
+{
+  "version": 2,
+  "suites": {},
+  "rules": {
+    "include": [
+      "test/e2e/**/*.test.{t,j}s{,x}",
+      "test/production/**/*.test.{t,j}s{,x}"
+    ],
+    "exclude": []
+  }
+}
diff --git a/test/get-test-filter.js b/test/get-test-filter.js
index 76840e2e6dcc8..bf6cc218524b1 100644
--- a/test/get-test-filter.js
+++ b/test/get-test-filter.js
@@ -7,6 +7,11 @@ function getTestFilter() {
     : null
   if (!manifest) return null
 
+  console.log(
+    'Filtering tests using manifest:',
+    process.env.NEXT_EXTERNAL_TESTS_FILTERS
+  )
+
   // For the legacy manifest without a version, we assume it's a complete list
   // of all the tests.
   if (!manifest.version || typeof manifest.version !== 'number') {
diff --git a/test/lib/next-modes/next-deploy.ts b/test/lib/next-modes/next-deploy.ts
index 78ecf836739b5..8eaa4bf58d7d6 100644
--- a/test/lib/next-modes/next-deploy.ts
+++ b/test/lib/next-modes/next-deploy.ts
@@ -76,6 +76,7 @@ export class NextDeployInstance extends NextInstance {
       {
         cwd: this.testDir,
         env: vercelEnv,
+        reject: false,
       }
     )
 
@@ -117,6 +118,7 @@ export class NextDeployInstance extends NextInstance {
       {
         cwd: this.testDir,
         env: vercelEnv,
+        reject: false,
       }
     )
 
@@ -151,6 +153,7 @@ export class NextDeployInstance extends NextInstance {
       ['logs', this._url, '--output', 'raw', ...vercelFlags],
       {
         env: vercelEnv,
+        reject: false,
       }
     )
     if (logs.exitCode !== 0) {
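Note on the new manifest format: `test/deploy-tests-manifest.json` is a version-2 external test filter passed to `run-tests.js` through `NEXT_EXTERNAL_TESTS_FILTERS` and read by `getTestFilter()` in `test/get-test-filter.js`, restricting deploy runs to the `test/e2e` and `test/production` suites. The snippet below is a minimal, hypothetical sketch of how such include/exclude glob rules could be applied to a file list; it is not the repository's actual implementation, and the `minimatch` dependency and the `filterByManifest` helper are assumptions made purely for illustration.

```js
// Hypothetical sketch: applying version-2 manifest rules to a list of test
// files. Assumes a recent minimatch (v9) is installed; not the real getTestFilter().
const { minimatch } = require('minimatch')

function filterByManifest(testFiles, manifest) {
  const { include = [], exclude = [] } = manifest.rules || {}
  // Keep a file only if it matches at least one include glob
  // and no exclude glob.
  return testFiles.filter(
    (file) =>
      include.some((pattern) => minimatch(file, pattern)) &&
      !exclude.some((pattern) => minimatch(file, pattern))
  )
}

// With the deploy manifest above, only e2e/production tests survive:
const kept = filterByManifest(
  [
    'test/e2e/app-dir/index.test.ts',
    'test/integration/css/test/index.test.js',
  ],
  {
    version: 2,
    suites: {},
    rules: {
      include: [
        'test/e2e/**/*.test.{t,j}s{,x}',
        'test/production/**/*.test.{t,j}s{,x}',
      ],
      exclude: [],
    },
  }
)
console.log(kept) // => ['test/e2e/app-dir/index.test.ts']
```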