ci: Refactor and optimize E2E test runs in CI (#28968)

Co-authored-by: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
Declan Carroll 2026-04-24 08:09:32 +01:00 committed by GitHub
parent 8b33424d0f
commit 20d8e90c95
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
16 changed files with 959 additions and 829 deletions

View File

@ -242,8 +242,7 @@ CALLER REUSABLE WORKFLOW
ci-pull-requests.yml
├──────────────────────────▶ test-unit-reusable.yml
├──────────────────────────▶ test-linting-reusable.yml
├──────────────────────────▶ test-e2e-ci-reusable.yml
│ └──────────▶ test-e2e-reusable.yml
├──────────────────────────▶ test-e2e-reusable.yml
└──────────────────────────▶ sec-ci-reusable.yml
└──────────▶ sec-poutine-reusable.yml
@ -260,8 +259,7 @@ test-workflows-nightly.yml
└──────────────────────────▶ test-workflows-callable.yml
test-e2e-vm-expressions-nightly.yml
└──────────────────────────▶ test-e2e-ci-reusable.yml
└──────────▶ test-e2e-reusable.yml
└──────────────────────────▶ test-e2e-reusable.yml
PR Comment Dispatchers (triggered by /command in PR comments):
test-workflows-pr-comment.yml
@ -421,8 +419,6 @@ Workflows with `workflow_call` trigger:
| `test-unit-reusable.yml` | `ref`, `nodeVersion`, `collectCoverage` | Unit tests |
| `test-linting-reusable.yml` | `ref`, `nodeVersion` | ESLint |
| `test-e2e-reusable.yml` | `branch`, `test-mode`, `shards`, `runner` | Core E2E executor |
| `test-e2e-ci-reusable.yml` | `branch` | E2E orchestrator |
| `test-e2e-docker-pull-reusable.yml`| `branch`, `n8n_version` | E2E with pulled image |
| `test-workflows-callable.yml` | `git_ref`, `compare_schemas` | Workflow tests |
| `docker-build-push.yml` | `n8n_version`, `release_type`, `push_enabled` | Docker build |
| `sec-ci-reusable.yml` | `ref` | Security orchestrator |

View File

@ -24,6 +24,12 @@ outputs:
results:
description: 'JSON object: { "filter-name": true/false }'
value: ${{ steps.run.outputs.results }}
changed-files:
description: 'Newline-separated list of changed files (filter mode only)'
value: ${{ steps.run.outputs.changed-files }}
base-ref:
description: 'Resolved base ref used for the diff (filter mode only)'
value: ${{ steps.run.outputs.base-ref }}
runs:
using: 'composite'

View File

@ -170,6 +170,8 @@ export function runFilter() {
}
setOutput('results', JSON.stringify(results));
setOutput('changed-files', changedFiles.join('\n'));
setOutput('base-ref', baseRef);
}
// --- Mode: validate ---

File diff suppressed because it is too large Load Diff

40
.github/test-metrics/quarantine.json vendored Normal file
View File

@ -0,0 +1,40 @@
{
"updatedAt": "2026-04-23T14:38:52.015Z",
"source": "currents",
"projectId": "LRxcNt",
"quarantined": [
"Canvas Actions > Node hover actions > should execute node",
"Data Mapping > maps expressions to updated fields correctly @fixme",
"Data pinning > Advanced pinning scenarios > should be able to reference paired items in node before pinned data",
"Executions Filter > should reset filter and remove badge",
"HITL for Tools @capability:proxy > should add a HITL tool node and run it",
"Langchain Integration @capability:proxy > Advanced Workflow Features > should render runItems for sub-nodes and allow switching between them",
"Loads template setup modal correctly",
"Resource Locator > should retrieve list options when other params throw errors",
"Tools usage @capability:proxy > use web search tool in conversation",
"Workflow Executions > when new workflow is not saved > should open executions tab",
"Workflow agent @capability:proxy > sharing workflow agent with project chat user",
"can configure, connect, and sync secrets from LocalStack",
"can create a connection pointing to LocalStack",
"manage workflow agents @auth:admin",
"maps expressions to updated fields correctly @fixme",
"sharing workflow agent with project chat user",
"should add switch node and test connections",
"should allow re-running workflow after initial execution",
"should be able to reference paired items in node before pinned data",
"should clear required-parameter issue indicator when the field is filled",
"should execute node",
"should filter executions by status and show filter badge",
"should maintain zoom functionality after switching between Editor and Workflow history and Workflow list",
"should not send workflow context if nothing changed",
"should open executions tab",
"should populate logs as manual execution progresses",
"should preserve resource mapper values when navigating between connected nodes via floating nodes",
"should render runItems for sub-nodes and allow switching between them",
"should reset filter and remove badge",
"should retrieve list options when other params throw errors",
"should save template id with the workflow",
"should send proper payload for node rerun",
"use web search tool in conversation"
]
}

View File

@ -29,6 +29,8 @@ jobs:
e2e_performance: ${{ fromJSON(steps.ci-filter.outputs.results)['e2e-performance'] == true }}
instance_ai_workflow_eval: ${{ fromJSON(steps.ci-filter.outputs.results)['instance-ai-workflow-eval'] == true }}
commit_sha: ${{ steps.commit-sha.outputs.sha }}
matrix: ${{ steps.generate-matrix.outputs.matrix }}
skip_tests: ${{ steps.generate-matrix.outputs.skip-tests }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
@ -93,13 +95,26 @@ jobs:
.github/workflows/test-db-reusable.yml
- name: Setup and Build
if: fromJSON(steps.ci-filter.outputs.results).ci
if: fromJSON(steps.ci-filter.outputs.results).ci || fromJSON(steps.ci-filter.outputs.results).e2e
uses: ./.github/actions/setup-nodejs
with:
build-command: ${{ fromJSON(steps.ci-filter.outputs.results).ci && 'pnpm build' || 'pnpm turbo run build --filter=@n8n/playwright-janitor' }}
- name: Run format check
if: fromJSON(steps.ci-filter.outputs.results).ci
run: pnpm format:check
- name: Generate shard matrix
id: generate-matrix
if: fromJSON(steps.ci-filter.outputs.results).ci || fromJSON(steps.ci-filter.outputs.results).e2e
env:
CHANGED_FILES: ${{ steps.ci-filter.outputs.changed-files }}
run: |
FILES_CSV=$(echo "$CHANGED_FILES" | tr '\n' ',' | sed 's/,$//')
MATRIX=$(node packages/testing/playwright/scripts/distribute-tests.mjs --matrix 16 --orchestrate --impact "--files=$FILES_CSV" --base=FETCH_HEAD)
echo "matrix=$MATRIX" >> "$GITHUB_OUTPUT"
echo "skip-tests=$(node -e "process.stdout.write(JSON.parse(process.argv[1])[0]?.skip === true ? 'true' : 'false')" "$MATRIX")" >> "$GITHUB_OUTPUT"
unit-test:
name: Unit tests
if: needs.install-and-build.outputs.unit == 'true'
@ -168,18 +183,42 @@ jobs:
branch: ${{ needs.install-and-build.outputs.commit_sha }}
secrets: inherit
e2e-tests:
name: E2E Tests
# Internal-only 1-spec fail-fast sanity check on sqlite.
sqlite-sanity:
name: 'SQLite: Sanity Check'
needs: [install-and-build, prepare-docker]
if: >-
needs.prepare-docker.result == 'success' &&
(needs.install-and-build.outputs.ci == 'true' || needs.install-and-build.outputs.e2e == 'true') &&
github.repository == 'n8n-io/n8n' &&
github.event_name != 'merge_group'
uses: ./.github/workflows/test-e2e-ci-reusable.yml
github.event_name != 'merge_group' &&
github.event.pull_request.head.repo.fork != true
uses: ./.github/workflows/test-e2e-reusable.yml
with:
branch: ${{ needs.install-and-build.outputs.commit_sha }}
playwright-only: ${{ needs.install-and-build.outputs.e2e == 'true' && needs.install-and-build.outputs.unit == 'false' }}
test-mode: docker-artifact
test-command: pnpm --filter=n8n-playwright test:container:sqlite:e2e tests/e2e/building-blocks/workflow-entry-points.spec.ts
workers: '1'
secrets: inherit
# Full e2e run. Internal PRs run multi-main (postgres + redis + caddy + 2 mains + 1 worker).
# Fork PRs run sqlite-only and skip @licensed tests (no enterprise license secrets on forks).
e2e:
name: E2E
needs: [install-and-build, prepare-docker]
if: >-
needs.prepare-docker.result == 'success' &&
(needs.install-and-build.outputs.ci == 'true' || needs.install-and-build.outputs.e2e == 'true') &&
needs.install-and-build.outputs.skip_tests != 'true' &&
github.event_name != 'merge_group'
uses: ./.github/workflows/test-e2e-reusable.yml
with:
branch: ${{ needs.install-and-build.outputs.commit_sha }}
test-mode: docker-artifact
test-command: ${{ github.event.pull_request.head.repo.fork == true && 'pnpm --filter=n8n-playwright test:container:sqlite:e2e --grep-invert="@licensed"' || 'pnpm --filter=n8n-playwright test:container:multi-main:e2e' }}
workers: '1'
pre-generated-matrix: ${{ needs.install-and-build.outputs.matrix }}
upload-failure-artifacts: ${{ github.event.pull_request.head.repo.fork == true }}
secrets: inherit
db-tests:
@ -250,7 +289,8 @@ jobs:
typecheck,
lint,
check-packaging,
e2e-tests,
sqlite-sanity,
e2e,
db-tests,
performance,
security-checks,

View File

@ -1,137 +0,0 @@
name: 'Test: E2E CI'
on:
workflow_call:
inputs:
branch:
description: 'GitHub branch/ref to test'
required: false
type: string
default: ''
playwright-only:
description: 'Only Playwright files changed — run impacted tests only'
required: false
type: boolean
default: false
n8n-env:
description: 'JSON string of n8n env vars to inject into test containers, e.g. {"N8N_EXPRESSION_ENGINE":"vm"}'
required: false
type: string
default: ''
workflow_dispatch:
inputs:
branch:
description: 'GitHub branch/ref to test'
required: false
type: string
default: ''
playwright-only:
description: 'Only Playwright files changed — run impacted tests only'
required: false
type: boolean
default: false
n8n-env:
description: 'JSON string of n8n env vars to inject into test containers (e.g. {"N8N_EXPRESSION_ENGINE":"vm"})'
required: false
type: string
default: ''
# Fork PRs route to the community path (sqlite-only, no @licensed tests) because
# they have no license-secret access. Non-PR events (schedule, manual dispatch)
# always use the internal path.
jobs:
# Ensures the n8n + runners image tarball is cached under the SHA-derived
# key. Cache-aware: no-op if an ancestor workflow already populated it.
prepare-docker:
uses: ./.github/workflows/prepare-docker-reusable.yml
with:
branch: ${{ inputs.branch }}
secrets: inherit
prepare-matrix:
name: 'Prepare Matrix'
runs-on: blacksmith-2vcpu-ubuntu-2204
outputs:
matrix: ${{ steps.generate-matrix.outputs.matrix }}
skip-tests: ${{ steps.generate-matrix.outputs.skip-tests }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ inputs.branch || github.ref }}
fetch-depth: 1
- name: Setup Environment
uses: ./.github/actions/setup-nodejs
with:
# Matrix generation calls the janitor CLI (via distribute-tests.mjs),
# which needs packages/testing/janitor/dist/cli.js on disk.
build-command: 'pnpm turbo run build --filter=@n8n/playwright-janitor'
- name: Get changed files for impact analysis
if: ${{ inputs.playwright-only }}
id: changed-files
run: |
git fetch --depth=1 origin ${{ github.event.pull_request.base.ref || 'master' }}
echo "list=$(git diff --name-only FETCH_HEAD HEAD | tr '\n' ',' | sed 's/,$//')" >> "$GITHUB_OUTPUT"
- name: Generate shard matrix
id: generate-matrix
env:
CHANGED_FILES: ${{ steps.changed-files.outputs.list }}
run: |
ARGS=(--matrix 16 --orchestrate)
if [[ "${{ inputs.playwright-only }}" == "true" ]]; then
ARGS+=(--impact "--files=$CHANGED_FILES" "--base=FETCH_HEAD")
fi
MATRIX=$(node packages/testing/playwright/scripts/distribute-tests.mjs "${ARGS[@]}")
echo "matrix=$MATRIX" >> "$GITHUB_OUTPUT"
echo "skip-tests=$(node -e "process.stdout.write(JSON.parse(process.argv[1])[0]?.skip === true ? 'true' : 'false')" "$MATRIX")" >> "$GITHUB_OUTPUT"
# Internal-only 1-spec fail-fast sanity check on sqlite.
sqlite-sanity:
name: 'SQLite: Sanity Check'
needs: [prepare-docker]
if: ${{ github.event.pull_request.head.repo.fork != true }}
uses: ./.github/workflows/test-e2e-reusable.yml
with:
branch: ${{ inputs.branch }}
test-mode: docker-artifact
test-command: pnpm --filter=n8n-playwright test:container:sqlite:e2e tests/e2e/building-blocks/workflow-entry-points.spec.ts
workers: '1'
n8n-env: ${{ inputs.n8n-env }}
secrets: inherit
# Internal-only full run: postgres + redis + caddy + 2 mains + 1 worker.
# Requires enterprise license secrets so cannot run in community mode.
multi-main-e2e:
needs: [prepare-docker, prepare-matrix]
name: 'Multi-Main: E2E'
if: ${{ github.event.pull_request.head.repo.fork != true && needs.prepare-matrix.outputs.skip-tests != 'true' }}
uses: ./.github/workflows/test-e2e-reusable.yml
with:
branch: ${{ inputs.branch }}
test-mode: docker-artifact
test-command: pnpm --filter=n8n-playwright test:container:multi-main:e2e
workers: '1'
pre-generated-matrix: ${{ needs.prepare-matrix.outputs.matrix }}
n8n-env: ${{ inputs.n8n-env }}
secrets: inherit
# Community-mode full run on sqlite (no enterprise license needed).
# Excludes @licensed tests since community contributors have no license secrets.
community-sqlite-e2e:
needs: [prepare-docker, prepare-matrix]
name: 'SQLite: E2E (community)'
if: ${{ github.event.pull_request.head.repo.fork == true && needs.prepare-matrix.outputs.skip-tests != 'true' }}
uses: ./.github/workflows/test-e2e-reusable.yml
with:
branch: ${{ inputs.branch }}
test-mode: docker-artifact
test-command: pnpm --filter=n8n-playwright test:container:sqlite:e2e --grep-invert="@licensed"
workers: '1'
pre-generated-matrix: ${{ needs.prepare-matrix.outputs.matrix }}
n8n-env: ${{ inputs.n8n-env }}
upload-failure-artifacts: true
secrets: inherit

View File

@ -39,7 +39,7 @@ jobs:
--workers=${{ env.PLAYWRIGHT_WORKERS }}
env:
BUILD_WITH_COVERAGE: 'true'
CURRENTS_RECORD_KEY: ${{ vars.CURRENTS_RECORD_VAR }}
CURRENTS_RECORD_KEY: ${{ secrets.CURRENTS_RECORD_KEY }}
CURRENTS_PROJECT_ID: 'LRxcNt'
QA_METRICS_WEBHOOK_URL: ${{ secrets.QA_METRICS_WEBHOOK_URL }}
QA_METRICS_WEBHOOK_USER: ${{ secrets.QA_METRICS_WEBHOOK_USER }}

View File

@ -1,49 +0,0 @@
name: 'Test: E2E Docker Pull'
# This workflow is used to run Playwright tests in a Docker container pulled from the registry
on:
workflow_call:
inputs:
shards:
description: 'Shards for parallel execution'
required: false
default: 1
type: number
image:
description: 'Image to use'
required: false
default: 'n8nio/n8n:nightly'
type: string
workflow_dispatch:
inputs:
shards:
description: 'Shards for parallel execution'
required: false
default: 1
type: number
image:
description: 'Image to use'
required: false
default: 'n8nio/n8n:nightly'
type: string
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.gen.outputs.matrix }}
steps:
- id: gen
run: |
MATRIX=$(seq 1 ${{ inputs.shards }} | awk 'BEGIN{printf "["} {printf "%s{\"shard\":%s,\"images\":\"\"}", (NR>1?",":""), $1} END{printf "]"}')
echo "matrix=$MATRIX" >> "$GITHUB_OUTPUT"
build-and-test:
needs: generate-matrix
uses: ./.github/workflows/test-e2e-reusable.yml
with:
test-mode: docker-pull
docker-image: ${{ inputs.image }}
test-command: pnpm --filter=n8n-playwright test:container:standard
pre-generated-matrix: ${{ needs.generate-matrix.outputs.matrix }}
secrets: inherit

View File

@ -8,7 +8,7 @@ on:
required: false
type: string
test-mode:
description: 'Test mode: local, docker-artifact, or docker-pull'
description: 'Test mode: local or docker-artifact'
required: false
default: 'local'
type: string
@ -17,11 +17,6 @@ on:
required: false
default: 'pnpm --filter=n8n-playwright test:local'
type: string
docker-image:
description: 'Docker image to use (docker-pull mode only). Ignored in docker-artifact mode (image is loaded as n8nio/n8n:local).'
required: false
default: 'n8nio/n8n:nightly'
type: string
workers:
description: 'Number of parallel workers'
required: false
@ -48,7 +43,7 @@ on:
default: 'LRxcNt'
type: string
pre-generated-matrix:
description: 'Pre-generated shard matrix JSON. Defaults to 1 shard; multi-shard callers pass their own (see test-e2e-ci-reusable.yml prepare-matrix).'
description: 'Pre-generated shard matrix JSON. Defaults to 1 shard; multi-shard callers pass their own.'
required: false
default: '[{"shard":1,"images":""}]'
type: string
@ -63,8 +58,8 @@ env:
PLAYWRIGHT_WORKERS: ${{ inputs.workers != '' && inputs.workers || '2' }}
# Browser cache location - must match install-browsers script
PLAYWRIGHT_BROWSERS_PATH: packages/testing/playwright/.playwright-browsers
# docker-artifact loads the image locally as n8nio/n8n:local.
TEST_IMAGE_N8N: ${{ inputs.test-mode == 'docker-artifact' && 'n8nio/n8n:local' || inputs.docker-image }}
# docker-artifact loads the image locally as n8nio/n8n:local; unused in local mode.
TEST_IMAGE_N8N: 'n8nio/n8n:local'
N8N_SKIP_LICENSES: 'true'
CURRENTS_CI_BUILD_ID: ${{ github.repository }}-${{ github.run_id }}-${{ github.run_attempt }}
CURRENTS_PROJECT_ID: ${{ inputs.currents-project-id }}
@ -110,7 +105,7 @@ jobs:
# Uses pre-distributed specs if orchestration enabled, otherwise falls back to Playwright sharding
run: ${{ inputs.test-command }} --workers=${{ env.PLAYWRIGHT_WORKERS }} ${{ matrix.specs || format('--shard={0}/{1}', matrix.shard, strategy.job-total) }}
env:
CURRENTS_RECORD_KEY: ${{ vars.CURRENTS_RECORD_VAR }}
CURRENTS_RECORD_KEY: ${{ secrets.CURRENTS_RECORD_KEY }}
QA_METRICS_WEBHOOK_URL: ${{ secrets.QA_METRICS_WEBHOOK_URL }}
QA_METRICS_WEBHOOK_USER: ${{ secrets.QA_METRICS_WEBHOOK_USER }}
QA_METRICS_WEBHOOK_PASSWORD: ${{ secrets.QA_METRICS_WEBHOOK_PASSWORD }}

View File

@ -12,17 +12,29 @@ on:
default: 'master'
jobs:
e2e-vm-expressions:
uses: ./.github/workflows/test-e2e-ci-reusable.yml
prepare-docker:
uses: ./.github/workflows/prepare-docker-reusable.yml
with:
branch: ${{ github.event_name == 'schedule' && 'master' || inputs.branch }}
secrets: inherit
e2e:
name: E2E
needs: [prepare-docker]
uses: ./.github/workflows/test-e2e-reusable.yml
with:
branch: ${{ github.event_name == 'schedule' && 'master' || inputs.branch }}
test-mode: docker-artifact
test-command: pnpm --filter=n8n-playwright test:container:multi-main:e2e
workers: '1'
pre-generated-matrix: '[{"shard":1},{"shard":2},{"shard":3},{"shard":4},{"shard":5},{"shard":6},{"shard":7},{"shard":8},{"shard":9},{"shard":10},{"shard":11},{"shard":12},{"shard":13},{"shard":14},{"shard":15},{"shard":16}]'
n8n-env: '{"N8N_EXPRESSION_ENGINE":"vm"}'
secrets: inherit
notify-on-failure:
name: Notify Slack on failure
runs-on: ubuntu-slim
needs: [e2e-vm-expressions]
needs: [e2e]
if: failure() && github.event_name == 'schedule'
steps:
- uses: act10ns/slack@44541246747a30eb3102d87f7a4cc5471b0ffb7d # v2.1.0

View File

@ -16,6 +16,9 @@ on:
ENCRYPTION_KEY:
description: 'Encryption key for n8n operations.'
required: true
CURRENTS_RECORD_KEY:
description: 'Currents record key for test reporting. Empty on fork PRs.'
required: false
env:
NODE_OPTIONS: --max-old-space-size=3072
@ -41,12 +44,12 @@ jobs:
- name: Run Workflow Tests
run: pnpm --filter=n8n-playwright test:workflows --workers 4
env:
CURRENTS_RECORD_KEY: ${{ vars.CURRENTS_RECORD_VAR }}
CURRENTS_RECORD_KEY: ${{ secrets.CURRENTS_RECORD_KEY }}
CURRENTS_PROJECT_ID: 'mpLFH9'
- name: Run Workflow Schema Tests
if: ${{ inputs.compare_schemas == 'true' }}
run: pnpm --filter=n8n-playwright test:workflows:schema
env:
CURRENTS_RECORD_KEY: ${{ vars.CURRENTS_RECORD_VAR }}
CURRENTS_RECORD_KEY: ${{ secrets.CURRENTS_RECORD_KEY }}
CURRENTS_PROJECT_ID: 'mpLFH9'

View File

@ -10,6 +10,11 @@ import { consoleErrorFixtures } from './console-error-monitor';
import { N8N_AUTH_COOKIE } from '../config/constants';
import { setupDefaultInterceptors } from '../config/intercepts';
import { observabilityFixtures, type ObservabilityTestFixtures } from '../fixtures/observability';
import {
quarantineFixtures,
type QuarantineTestFixtures,
type QuarantineWorkerFixtures,
} from '../fixtures/quarantine';
import { n8nPage } from '../pages/n8nPage';
import { ApiHelpers } from '../services/api-helper';
import { TestError, type TestRequirements } from '../Types';
@ -50,14 +55,15 @@ type CapabilityOption = Capability | N8NConfig;
type ProjectUse = { containerConfig?: N8NConfig };
export const test = base.extend<
TestFixtures & CurrentsFixtures & ObservabilityTestFixtures,
WorkerFixtures & CurrentsWorkerFixtures
TestFixtures & CurrentsFixtures & ObservabilityTestFixtures & QuarantineTestFixtures,
WorkerFixtures & CurrentsWorkerFixtures & QuarantineWorkerFixtures
>({
...currentsFixtures.baseFixtures,
...currentsFixtures.coverageFixtures,
...currentsFixtures.actionFixtures,
...observabilityFixtures,
...consoleErrorFixtures,
...quarantineFixtures,
// Option for test.use({ capability: 'proxy' }) - transformed into N8NStack by n8nContainer
capability: [undefined, { scope: 'worker', option: true }],

View File

@ -0,0 +1,82 @@
import type { Fixtures, TestInfo } from '@playwright/test';
import fs from 'fs';
import path from 'path';
import { findPackagesRoot } from '../utils/path-helper';
// Committed quarantine list, refreshed via scripts/fetch-currents-quarantine.mjs.
// `findPackagesRoot('packages')` presumably resolves to the repo root — TODO confirm
// it matches the script's OUTPUT_PATH (.github/test-metrics/quarantine.json).
const QUARANTINE_FILE = path.join(findPackagesRoot('packages'), '.github', 'test-metrics', 'quarantine.json');

// Shape of quarantine.json; only the `quarantined` titles array is read here.
type QuarantineFile = { quarantined?: string[] };
/**
 * Reads the committed quarantine file and returns the set of quarantined
 * test titles. Soft-fails: a missing or malformed file yields an empty set
 * (with a warning) so the session runs as if nothing were quarantined.
 */
function loadQuarantineList(): Set<string> {
	let titles: string[];
	try {
		const parsed = JSON.parse(fs.readFileSync(QUARANTINE_FILE, 'utf-8')) as QuarantineFile;
		titles = parsed.quarantined ?? [];
	} catch (loadError) {
		console.warn(
			`⚠️ Quarantine list unavailable (${(loadError as Error).message}) — quarantined tests will run this session`,
		);
		return new Set();
	}
	console.log(
		`🔒 Loaded ${titles.length} quarantined test titles from ${path.basename(QUARANTINE_FILE)}`,
	);
	return new Set(titles);
}
// Currents stores titles as the concatenated describe + test path joined by " > ".
// Playwright's titlePath includes the project name and file path at the head, so
// try each suffix of the titlePath against the quarantine set.
function isQuarantined(testInfo: TestInfo, list: Set<string>): boolean {
	if (list.size === 0) return false;
	// Named `titlePath` (not `path`) to avoid shadowing the imported `path` module.
	const titlePath = testInfo.titlePath;
	for (let i = 0; i < titlePath.length; i++) {
		const candidate = titlePath.slice(i).join(' > ');
		if (list.has(candidate)) return true;
	}
	return false;
}
// Test-scoped fixture surface; the auto check itself exposes no value.
export type QuarantineTestFixtures = {
	quarantineCheck: undefined;
};

// Worker-scoped fixture surface: the parsed quarantine set, loaded once per worker.
export type QuarantineWorkerFixtures = {
	quarantineList: Set<string>;
};

/**
 * Auto-skips any test whose title (describe chain joined with " > ") is listed
 * in `.github/test-metrics/quarantine.json`. Applies identically on internal
 * and fork PRs — the list is committed, so no API key is needed at test time.
 *
 * Refresh the list with:
 *   CURRENTS_API_KEY=<key> node packages/testing/playwright/scripts/fetch-currents-quarantine.mjs --project=<id>
 *
 * Soft-fails: if the file is missing or malformed, tests run as if nothing is
 * quarantined (with a warning).
 */
export const quarantineFixtures: Fixtures<QuarantineTestFixtures, QuarantineWorkerFixtures> = {
	// Loaded once per worker so every test in that worker shares a single file read.
	quarantineList: [
		async ({}, use) => {
			const list = loadQuarantineList();
			await use(list);
		},
		{ scope: 'worker', auto: true },
	],
	// Auto fixture: evaluated for every test; skips it when its title matches the list.
	quarantineCheck: [
		async ({ quarantineList }, use, testInfo) => {
			const shouldSkip = isQuarantined(testInfo, quarantineList);
			if (shouldSkip) {
				testInfo.skip(true, 'Currents quarantine');
			}
			await use(undefined);
		},
		{ auto: true },
	],
};

View File

@ -15,10 +15,8 @@
"test:container:sqlite:e2e": "playwright test --project='sqlite:e2e'",
"test:container:postgres": "playwright test --project='postgres:*'",
"test:container:queue": "playwright test --project='queue:*'",
"test:container:queue:e2e": "playwright test --project='queue:e2e'",
"test:container:multi-main": "playwright test --project='multi-main:*'",
"test:container:multi-main:e2e": "playwright test --project='multi-main:e2e'",
"test:container:trial": "playwright test --project='trial:*'",
"test:workflows:setup": "tsx ./tests/cli-workflows/setup-workflow-tests.ts",
"test:workflows": "playwright test --project=cli-workflows",
"test:workflows:schema": "SCHEMA=true playwright test --project=cli-workflows",

View File

@ -0,0 +1,76 @@
#!/usr/bin/env node
/**
* Fetches the active quarantine list from Currents and writes it to a committed
* JSON file so test runs (internal + fork) read it locally without needing a key.
*
* Usage:
* CURRENTS_API_KEY=<key> node packages/testing/playwright/scripts/fetch-currents-quarantine.mjs --project=<id>
*
* Output: .github/test-metrics/quarantine.json
*/
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
// Resolve paths relative to this script: output lands at <repo-root>/.github/test-metrics/quarantine.json.
const SCRIPT_DIR = path.dirname(fileURLToPath(import.meta.url));
const ROOT_DIR = path.resolve(SCRIPT_DIR, '../../../..');
const OUTPUT_PATH = path.join(ROOT_DIR, '.github', 'test-metrics', 'quarantine.json');

const CURRENTS_API = 'https://api.currents.dev/v1';

// --project=<id> is mandatory; fail fast with usage when it is missing or empty.
const projectArg = process.argv.find((a) => a.startsWith('--project='));
const PROJECT_ID = projectArg?.split('=')[1];
if (!PROJECT_ID) {
	console.error('Usage: CURRENTS_API_KEY=<key> node fetch-currents-quarantine.mjs --project=<id>');
	process.exit(1);
}

// The API key comes from the environment (never a CLI arg) so it stays out of shell history.
const API_KEY = process.env.CURRENTS_API_KEY;
if (!API_KEY) {
	console.error('CURRENTS_API_KEY required');
	process.exit(1);
}
/**
 * Queries the Currents "actions" endpoint for this project's active actions.
 *
 * @returns {Promise<Array<object>>} raw action records (empty array when the
 *   response carries no `data` field)
 * @throws {Error} on any non-2xx HTTP response
 */
async function fetchActiveActions() {
	const endpoint = new URL(`${CURRENTS_API}/actions`);
	endpoint.searchParams.set('projectId', PROJECT_ID);
	endpoint.searchParams.set('status', 'active');

	const response = await fetch(endpoint, {
		headers: { Authorization: `Bearer ${API_KEY}` },
	});
	if (!response.ok) throw new Error(`API error: ${response.status}`);

	const payload = await response.json();
	return payload.data ?? [];
}
/**
 * Fetches the active quarantine actions, extracts the matched test titles,
 * and writes them — sorted and de-duplicated — to the committed quarantine
 * file so test runs can read the list locally without an API key.
 *
 * Fix over the original: titles are de-duplicated before writing. Multiple
 * active actions can match the same title, and the original wrote repeats
 * verbatim into the committed JSON.
 */
async function main() {
	console.log(`Fetching active quarantine actions for project ${PROJECT_ID}...`);
	const actions = await fetchActiveActions();

	// A record quarantines a title when its first op is 'quarantine'; the
	// matched title is read from the first matcher condition's value.
	const titles = [
		...new Set(
			actions
				.filter((a) => a.action?.[0]?.op === 'quarantine')
				.map((a) => a.matcher?.cond?.[0]?.value)
				.filter((v) => typeof v === 'string' && v.length > 0),
		),
	].sort();

	const output = {
		updatedAt: new Date().toISOString(),
		source: 'currents',
		projectId: PROJECT_ID,
		quarantined: titles,
	};

	fs.mkdirSync(path.dirname(OUTPUT_PATH), { recursive: true });
	// Trailing newline keeps the committed file POSIX-friendly and diff-stable.
	fs.writeFileSync(OUTPUT_PATH, JSON.stringify(output, null, 2) + '\n');
	console.log(
		`Wrote ${titles.length} quarantined titles to ${path.relative(ROOT_DIR, OUTPUT_PATH)}`,
	);
}

main().catch((error) => {
	console.error(error);
	process.exit(1);
});