Merge branch 'master' into master

This commit is contained in:
Evgeny 2026-05-03 12:59:01 +03:00 committed by GitHub
commit 0c8f178035
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
255 changed files with 9186 additions and 3709 deletions

View File

@ -46,12 +46,11 @@ runs:
fi
- name: Install Aikido SafeChain
if: runner.os != 'Windows'
run: |
VERSION="1.4.1"
EXPECTED_SHA256="628235987175072a4255aa3f5f0128f31795b63970f1970ae8a04d07bf8527b0"
node .github/scripts/retry.mjs --attempts 3 --delay 10 \
"curl -fsSL -o install-safe-chain.sh https://github.com/AikidoSec/safe-chain/releases/download/${VERSION}/install-safe-chain.sh"
VERSION="1.5.1"
EXPECTED_SHA256="7c910fff717649c86cc8ca960e6c054d3734da2d660050e3bcfc54029e3b485b"
node .github/scripts/retry.mjs --attempts 3 --delay 10 -- \
curl -fsSL -o install-safe-chain.sh "https://github.com/AikidoSec/safe-chain/releases/download/${VERSION}/install-safe-chain.sh"
echo "${EXPECTED_SHA256} install-safe-chain.sh" | sha256sum -c -
sh install-safe-chain.sh --ci
rm install-safe-chain.sh
@ -60,16 +59,11 @@ runs:
- name: Install Dependencies
if: ${{ inputs.install-command != '' }}
env:
INSTALL_COMMAND: ${{ inputs.install-command }}
INSTALL_COMMAND: ${{ inputs.install-command }}
run: |
$INSTALL_COMMAND
shell: bash
- name: Disable safe-chain
if: runner.os != 'Windows'
run: safe-chain teardown
shell: bash
- name: Configure Turborepo Cache
uses: rharkor/caching-for-turbo@0abc2381e688c4d2832f0665a68a01c6e82f0d6c # v2.3.11

View File

@ -266,10 +266,15 @@ jobs:
ref: ${{ needs.install-and-build.outputs.commit_sha }}
secrets: inherit
# Depends on prepare-docker so the eval workflow can load the SHA-keyed image cache.
# prepare-docker may be skipped (its filter excludes .github/**); the eval falls back to a local build.
instance-ai-workflow-evals:
name: Instance AI Workflow Evals
needs: install-and-build
needs: [install-and-build, prepare-docker]
if: >-
!cancelled() &&
needs.install-and-build.result == 'success' &&
(needs.prepare-docker.result == 'success' || needs.prepare-docker.result == 'skipped') &&
needs.install-and-build.outputs.instance_ai_workflow_eval == 'true' &&
github.repository == 'n8n-io/n8n' &&
(github.event_name != 'pull_request' || !github.event.pull_request.head.repo.fork)

View File

@ -29,6 +29,12 @@ jobs:
name: 'Run Evals'
runs-on: blacksmith-4vcpu-ubuntu-2204
timeout-minutes: 45
env:
# Each port hosts an independent n8n container. The eval CLI's
# work-stealing allocator dispatches builds across them, capped per-lane.
# 9 lanes on 4vcpu — builds are LLM-bound so CPU headroom is sufficient;
# bump back to 8vcpu if contention shows up.
LANE_PORTS: '5678,5679,5680,5681,5682,5683,5684,5685,5686'
permissions:
contents: read
pull-requests: write
@ -45,56 +51,80 @@ jobs:
with:
build-command: 'pnpm build'
- name: Build Docker image
# Cache populated by prepare-docker; fallback covers PRs that only touch this workflow file.
- name: Load n8n Docker image
id: load-image
continue-on-error: true
uses: ./.github/actions/load-n8n-docker
- name: Build Docker image (fallback on cache miss)
if: steps.load-image.outcome == 'failure'
run: pnpm build:docker
env:
INCLUDE_TEST_CONTROLLER: 'true'
- name: Start n8n container
- name: Start n8n containers
env:
EVALS_ANTHROPIC_KEY: ${{ secrets.EVALS_ANTHROPIC_KEY }}
N8N_LICENSE_ACTIVATION_KEY: ${{ secrets.N8N_LICENSE_ACTIVATION_KEY }}
N8N_LICENSE_CERT: ${{ secrets.N8N_LICENSE_CERT }}
N8N_ENCRYPTION_KEY: ${{ secrets.N8N_ENCRYPTION_KEY }}
run: |
docker run -d --name n8n-eval \
-e E2E_TESTS=true \
-e N8N_ENABLED_MODULES=instance-ai \
-e N8N_AI_ENABLED=true \
-e N8N_INSTANCE_AI_MODEL_API_KEY=${{ secrets.EVALS_ANTHROPIC_KEY }} \
-e N8N_LICENSE_ACTIVATION_KEY=${{ secrets.N8N_LICENSE_ACTIVATION_KEY }} \
-e N8N_LICENSE_CERT=${{ secrets.N8N_LICENSE_CERT }} \
-e N8N_ENCRYPTION_KEY=${{ secrets.N8N_ENCRYPTION_KEY }} \
-p 5678:5678 \
n8nio/n8n:local
echo "Waiting for n8n to be ready..."
for i in $(seq 1 60); do
if curl -s http://localhost:5678/healthz/readiness -o /dev/null -w "%{http_code}" | grep -q 200; then
echo "n8n ready after ${i}s"
exit 0
fi
sleep 1
IFS=',' read -ra PORTS <<< "$LANE_PORTS"
for i in "${!PORTS[@]}"; do
port="${PORTS[$i]}"
docker run -d --name "n8n-eval-$((i+1))" \
-e E2E_TESTS=true \
-e N8N_ENABLED_MODULES=instance-ai \
-e N8N_AI_ENABLED=true \
-e N8N_INSTANCE_AI_MODEL_API_KEY="$EVALS_ANTHROPIC_KEY" \
-e N8N_AI_ASSISTANT_BASE_URL="" \
-e N8N_LICENSE_ACTIVATION_KEY="$N8N_LICENSE_ACTIVATION_KEY" \
-e N8N_LICENSE_CERT="$N8N_LICENSE_CERT" \
-e N8N_ENCRYPTION_KEY="$N8N_ENCRYPTION_KEY" \
-p "$port:5678" \
n8nio/n8n:local
done
# 120s budget per port: containers booting in parallel on a shared
# 4vcpu runner contend for CPU/disk during n8n's startup (DB migrations,
# license init), so each takes longer than a solo boot.
for port in "${PORTS[@]}"; do
ready=false
for i in $(seq 1 120); do
if curl -s "http://localhost:$port/healthz/readiness" -o /dev/null -w "%{http_code}" | grep -q 200; then
echo "n8n on port $port ready after ${i}s"
ready=true
break
fi
sleep 1
done
if [ "$ready" != "true" ]; then
echo "::error::n8n on port $port failed to start within 120s"
for n in $(docker ps -aq --filter "name=n8n-eval-"); do
echo "Logs for $n:"
docker logs "$n" --tail 30 || true
done
exit 1
fi
done
echo "::error::n8n failed to start within 60s"
docker logs n8n-eval --tail 30
exit 1
- name: Create test user
- name: Create test users
run: |
curl -sf -X POST http://localhost:5678/rest/e2e/reset \
-H "Content-Type: application/json" \
-d '{
"owner":{"email":"nathan@n8n.io","password":"PlaywrightTest123","firstName":"Eval","lastName":"Owner"},
"admin":{"email":"admin@n8n.io","password":"PlaywrightTest123","firstName":"Admin","lastName":"User"},
"members":[],
"chat":{"email":"chat@n8n.io","password":"PlaywrightTest123","firstName":"Chat","lastName":"User"}
}'
IFS=',' read -ra PORTS <<< "$LANE_PORTS"
for port in "${PORTS[@]}"; do
curl -sf -X POST "http://localhost:$port/rest/e2e/reset" \
-H "Content-Type: application/json" \
-d '{
"owner":{"email":"nathan@n8n.io","password":"PlaywrightTest123","firstName":"Eval","lastName":"Owner"},
"admin":{"email":"admin@n8n.io","password":"PlaywrightTest123","firstName":"Admin","lastName":"User"},
"members":[],
"chat":{"email":"chat@n8n.io","password":"PlaywrightTest123","firstName":"Chat","lastName":"User"}
}'
done
- name: Run Instance AI Evals
continue-on-error: true
working-directory: packages/@n8n/instance-ai
run: >-
pnpm eval:instance-ai
--base-url http://localhost:5678
--concurrency 4
--verbose
--iterations 3
${{ inputs.filter && format('--filter "{0}"', inputs.filter) || '' }}
env:
N8N_INSTANCE_AI_MODEL_API_KEY: ${{ secrets.EVALS_ANTHROPIC_KEY }}
LANGSMITH_TRACING: 'true'
@ -102,10 +132,28 @@ jobs:
LANGSMITH_API_KEY: ${{ secrets.EVALS_LANGSMITH_API_KEY }}
LANGSMITH_REVISION_ID: ${{ github.sha }}
LANGSMITH_BRANCH: ${{ github.head_ref || github.ref_name }}
run: |
IFS=',' read -ra PORTS <<< "$LANE_PORTS"
URLS=()
for port in "${PORTS[@]}"; do
URLS+=("http://localhost:$port")
done
BASE_URLS=$(IFS=,; printf '%s' "${URLS[*]}")
pnpm eval:instance-ai \
--base-url "$BASE_URLS" \
--concurrency 32 \
--verbose \
--iterations 3 \
${{ inputs.filter && format('--filter "{0}"', inputs.filter) || '' }}
- name: Stop n8n container
- name: Stop n8n containers
if: ${{ always() }}
run: docker stop n8n-eval && docker rm n8n-eval || true
run: |
mapfile -t ids < <(docker ps -aq --filter "name=n8n-eval-")
if [ "${#ids[@]}" -gt 0 ]; then
docker stop "${ids[@]}" 2>/dev/null || true
docker rm "${ids[@]}" 2>/dev/null || true
fi
- name: Post eval results to PR
if: ${{ always() && github.event_name == 'pull_request' }}

View File

@ -73,7 +73,7 @@
"jest-mock-extended": "^3.0.4",
"lefthook": "^1.7.15",
"license-checker": "^25.0.1",
"nock": "^14.0.1",
"nock": "^14.0.13",
"nodemon": "^3.0.1",
"npm-run-all2": "^7.0.2",
"p-limit": "^3.1.0",

View File

@ -53,6 +53,11 @@ export class ProjectRepository extends Repository<Project> {
return await query.getManyAndCount();
}
// Strict semantics: returns only projects the user has a relation to
// (their personal project + projects they are explicitly a member of).
// Do not broaden — peer-personal-project discovery for the share modal lives
// in `getShareableProjectsAndCount` below; conflating the two has regressed
// the share dropdown before (see IAM-591).
async getAccessibleProjectsAndCount(
userId: string,
options: ProjectListOptions,
@ -62,6 +67,40 @@ export class ProjectRepository extends Repository<Project> {
.innerJoin('p.projectRelations', 'pr')
.where('pr.userId = :userId', { userId });
this.applyIdsQueryFilters(idsQuery, options);
return await this.runProjectListByIdsQuery(idsQuery, options);
}
// Wide semantics: returns peer personal projects in addition to projects
// the user has a relation to. Used only by the sharing-discovery endpoint
// (`GET /rest/projects/sharing-candidates`) so the workflow / credential
// share dropdowns can list other users as share targets.
async getShareableProjectsAndCount(
userId: string,
options: ProjectListOptions,
): Promise<[Project[], number]> {
// DISTINCT + LEFT JOIN avoids duplicate rows from the relation join
// while still allowing personal projects with no caller relation to match.
const idsQuery = this.createQueryBuilder('p')
.select('DISTINCT p.id', 'id')
.leftJoin('p.projectRelations', 'pr')
.where(
new Brackets((qb) => {
qb.where('p.type = :personalType', { personalType: 'personal' }).orWhere(
'pr.userId = :userId',
{ userId },
);
}),
);
this.applyIdsQueryFilters(idsQuery, options);
return await this.runProjectListByIdsQuery(idsQuery, options);
}
private applyIdsQueryFilters(
idsQuery: SelectQueryBuilder<Project>,
options: ProjectListOptions,
): void {
if (options.search) {
idsQuery.andWhere('LOWER(p.name) LIKE LOWER(:search)', {
search: `%${options.search}%`,
@ -81,7 +120,12 @@ export class ProjectRepository extends Repository<Project> {
}),
);
}
}
private async runProjectListByIdsQuery(
idsQuery: SelectQueryBuilder<Project>,
options: ProjectListOptions,
): Promise<[Project[], number]> {
const query = this.createQueryBuilder('project')
.leftJoin('project.creator', 'creator')
.where(`project.id IN (${idsQuery.getQuery()})`);

View File

@ -210,15 +210,16 @@ Each test's HTTP exchanges are stored as individual JSON files:
```
expectations/instance-ai/should-send-message-and-receive-assistant-response/
1775805992870-unknown-host-POST-_v1_messages-8a23f6c2.json ← Anthropic API call
1775805993100-api-staging.n8n.io-GET-_api_community_nodes-272f77d5.json ← Node catalog
0000-1775805992870-unknown-host-POST-_v1_messages-8a23f6c2.json ← Anthropic API call
0001-1775805993100-api-staging.n8n.io-GET-_api_community_nodes-272f77d5.json ← Node catalog
trace.jsonl ← Tool trace
```
### File Naming
`<timestamp>-<host>-<method>-<path_slugified>-<8char_sha256>.json`
`<sequence>-<timestamp>-<host>-<method>-<path_slugified>-<8char_sha256>.json`
- `sequence` = zero-padded write order within the recording, used to keep replay deterministic when multiple matching requests are recorded in the same millisecond
- `unknown-host` = Anthropic API (CONNECT tunneling hides the real host)
- `api-staging.n8n.io` = n8n community nodes API

View File

@ -0,0 +1,39 @@
import { parseCliArgs } from '../cli/args';

// Covers the multi-lane form of --base-url: the flag value is split on commas
// into `baseUrls: string[]`, trimmed, with empty entries dropped, and each
// entry validated as a URL by the zod schema.
describe('parseCliArgs --base-url', () => {
  it('defaults to a single localhost URL when --base-url is not provided', () => {
    const args = parseCliArgs([]);
    expect(args.baseUrls).toEqual(['http://localhost:5678']);
  });
  // Single-URL callers keep working unchanged (backward compatibility).
  it('accepts a single URL', () => {
    const args = parseCliArgs(['--base-url', 'http://localhost:5678']);
    expect(args.baseUrls).toEqual(['http://localhost:5678']);
  });
  it('splits comma-separated URLs into a list of lanes', () => {
    const args = parseCliArgs([
      '--base-url',
      'http://localhost:5678,http://localhost:5679,http://localhost:5680',
    ]);
    expect(args.baseUrls).toEqual([
      'http://localhost:5678',
      'http://localhost:5679',
      'http://localhost:5680',
    ]);
  });
  it('trims surrounding whitespace from each URL', () => {
    const args = parseCliArgs(['--base-url', ' http://localhost:5678 , http://localhost:5679 ']);
    expect(args.baseUrls).toEqual(['http://localhost:5678', 'http://localhost:5679']);
  });
  it('drops empty entries from a stray comma', () => {
    const args = parseCliArgs(['--base-url', 'http://localhost:5678,,http://localhost:5679']);
    expect(args.baseUrls).toEqual(['http://localhost:5678', 'http://localhost:5679']);
  });
  // Validation happens after splitting, so a bad entry anywhere in the list
  // rejects the whole flag.
  it('rejects a non-URL entry', () => {
    expect(() => parseCliArgs(['--base-url', 'http://localhost:5678,not-a-url'])).toThrow();
  });
});

View File

@ -0,0 +1,94 @@
import { LaneAllocator, type AllocatableLane } from '../cli/lane-allocator';

// Unit tests for the work-stealing lane allocator used by multi-lane eval
// runs: acquire() resolves with an eligible lane (not already running the
// same prompt, below the per-lane build cap), preferring the least-loaded
// one; release() frees capacity and wakes compatible queued waiters.
interface TestLane extends AllocatableLane {
  // Test-only identifier so assertions can pin which lane was chosen.
  id: number;
}

// Fresh lanes with no active builds and no inflight prompts.
function newLanes(count: number): TestLane[] {
  return Array.from({ length: count }, (_, i) => ({
    id: i,
    activeBuilds: 0,
    inflightPrompts: new Set<string>(),
  }));
}

describe('LaneAllocator', () => {
  it('spreads builds across lanes by picking the least-loaded eligible lane', async () => {
    const lanes = newLanes(3);
    const a = new LaneAllocator(lanes, 4);
    const l1 = await a.acquire('p1');
    const l2 = await a.acquire('p2');
    const l3 = await a.acquire('p3');
    expect([l1.id, l2.id, l3.id]).toEqual([0, 1, 2]);
    expect(lanes.map((l) => l.activeBuilds)).toEqual([1, 1, 1]);
  });
  it('skips a lane already running the same prompt', async () => {
    const lanes = newLanes(2);
    const a = new LaneAllocator(lanes, 4);
    const l1 = await a.acquire('p1');
    const l2 = await a.acquire('p1');
    expect(l1.id).toBe(0);
    expect(l2.id).toBe(1);
    expect(lanes[0].inflightPrompts.has('p1')).toBe(true);
    expect(lanes[1].inflightPrompts.has('p1')).toBe(true);
  });
  it('queues acquires when no lane can serve the prompt', async () => {
    const lanes = newLanes(1);
    const a = new LaneAllocator(lanes, 4);
    await a.acquire('p1');
    let resolvedSecond = false;
    const second = a.acquire('p1').then((l) => {
      resolvedSecond = true;
      return l;
    });
    // Drain the microtask/immediate queue to prove the acquire is parked,
    // not just slow.
    await new Promise((r) => setImmediate(r));
    expect(resolvedSecond).toBe(false);
    a.release(lanes[0], 'p1');
    const lane = await second;
    expect(lane.id).toBe(0);
    expect(lanes[0].inflightPrompts.has('p1')).toBe(true);
  });
  it('respects maxConcurrentBuilds per lane', async () => {
    const lanes = newLanes(1);
    const a = new LaneAllocator(lanes, 2);
    await a.acquire('p1');
    await a.acquire('p2');
    let resolved = false;
    const blocked = a.acquire('p3').then((l) => {
      resolved = true;
      return l;
    });
    await new Promise((r) => setImmediate(r));
    expect(resolved).toBe(false);
    a.release(lanes[0], 'p1');
    await blocked;
    expect(resolved).toBe(true);
  });
  it('skips queued waiters with conflicting prompts when a lane frees up', async () => {
    const lanes = newLanes(1);
    const a = new LaneAllocator(lanes, 2);
    await a.acquire('p1');
    await a.acquire('p2');
    const order: string[] = [];
    // w1 wants 'p1', which is still inflight on the only lane, so releasing
    // 'p2' must wake the later-queued but compatible w3 ('p3') first.
    const w1 = a.acquire('p1').then((l) => {
      order.push('p1');
      return l;
    });
    const w3 = a.acquire('p3').then((l) => {
      order.push('p3');
      return l;
    });
    await new Promise((r) => setImmediate(r));
    expect(order).toEqual([]);
    a.release(lanes[0], 'p2');
    await w3;
    expect(order).toEqual(['p3']);
    a.release(lanes[0], 'p1');
    await w1;
    expect(order).toEqual(['p3', 'p1']);
  });
});

View File

@ -0,0 +1,117 @@
import { expandWithIterations, partitionRoundRobin } from '../cli/lanes';

// partitionRoundRobin distributes items across `laneCount` buckets by index
// modulo; used by the direct (non-LangSmith) eval loop to statically assign
// test cases to lanes.
describe('partitionRoundRobin', () => {
  it('splits 9 items into 3 lanes by index modulo', () => {
    const items = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'];
    expect(partitionRoundRobin(items, 3)).toEqual([
      ['a', 'd', 'g'],
      ['b', 'e', 'h'],
      ['c', 'f', 'i'],
    ]);
  });
  it('returns a single bucket containing every item when laneCount is 1', () => {
    const items = [1, 2, 3, 4, 5];
    expect(partitionRoundRobin(items, 1)).toEqual([[1, 2, 3, 4, 5]]);
  });
  it('returns empty buckets for lanes that get no items when laneCount > items.length', () => {
    const items = ['only'];
    expect(partitionRoundRobin(items, 3)).toEqual([['only'], [], []]);
  });
  it('returns laneCount empty buckets when items is empty', () => {
    expect(partitionRoundRobin([], 3)).toEqual([[], [], []]);
  });
  it('preserves item identity (no clone)', () => {
    const a = { id: 'a' };
    const b = { id: 'b' };
    const buckets = partitionRoundRobin([a, b], 2);
    // toBe = reference equality: the buckets hold the same objects passed in.
    expect(buckets[0][0]).toBe(a);
    expect(buckets[1][0]).toBe(b);
  });
  it('throws when laneCount < 1', () => {
    expect(() => partitionRoundRobin([1, 2], 0)).toThrow(/laneCount must be >= 1/);
    expect(() => partitionRoundRobin([1, 2], -1)).toThrow(/laneCount must be >= 1/);
  });
  it('reconstructs source order when re-sorted by an embedded original index', () => {
    // Mirrors runDirectLoop's flow: tag each item with its origIdx, partition
    // across lanes, flatten lane outputs, sort back by origIdx.
    const items = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'];
    const indexed = items.map((value, origIdx) => ({ value, origIdx }));
    const buckets = partitionRoundRobin(indexed, 3);
    const flat = buckets.flat();
    flat.sort((x, y) => x.origIdx - y.origIdx);
    expect(flat.map((x) => x.value)).toEqual(items);
  });
});

// expandWithIterations interleaves scenarios file-by-file (one scenario per
// file per round), yielding all `iterations` copies of a scenario
// back-to-back before moving to the next file's scenario.
describe('expandWithIterations', () => {
  type Item = { file: string; scen: string; iter?: number };
  // tag/getFile mirror how the CLI injects the iteration index and groups by
  // source file.
  const tag = (item: Item, iter: number): Item => ({ ...item, iter });
  const getFile = (item: Item): string => item.file;
  it('round-robins across files in the first round', () => {
    const items: Item[] = [
      { file: 'A', scen: '1' },
      { file: 'A', scen: '2' },
      { file: 'B', scen: '1' },
      { file: 'C', scen: '1' },
    ];
    const out = [...expandWithIterations(items, getFile, 1, tag)];
    // Round 1 yields one scenario per file in insertion order, then round 2 picks up A's second scenario.
    expect(out.map((x) => `${x.file}.${x.scen}`)).toEqual(['A.1', 'B.1', 'C.1', 'A.2']);
  });
  it('iter-interleaves per scenario before moving on', () => {
    const items: Item[] = [
      { file: 'A', scen: '1' },
      { file: 'B', scen: '1' },
    ];
    const out = [...expandWithIterations(items, getFile, 3, tag)];
    expect(out.map((x) => `${x.file}.${x.scen}.${String(x.iter)}`)).toEqual([
      'A.1.0',
      'A.1.1',
      'A.1.2',
      'B.1.0',
      'B.1.1',
      'B.1.2',
    ]);
  });
  it('skips files that ran out of scenarios in later rounds', () => {
    const items: Item[] = [
      { file: 'A', scen: '1' },
      { file: 'A', scen: '2' },
      { file: 'B', scen: '1' },
    ];
    const out = [...expandWithIterations(items, getFile, 1, tag)];
    // Round 1: A.1, B.1. Round 2: A.2 (B has no second scenario, skipped).
    expect(out.map((x) => `${x.file}.${x.scen}`)).toEqual(['A.1', 'B.1', 'A.2']);
  });
  it('yields nothing for empty input', () => {
    expect([...expandWithIterations<Item>([], getFile, 3, tag)]).toEqual([]);
  });
  it('yields nothing when iterations is 0', () => {
    const items: Item[] = [{ file: 'A', scen: '1' }];
    expect([...expandWithIterations(items, getFile, 0, tag)]).toEqual([]);
  });
  it('first wave covers all files after enough items pulled', () => {
    const items: Item[] = [];
    for (const f of ['A', 'B', 'C', 'D', 'E']) {
      for (const s of ['1', '2', '3']) items.push({ file: f, scen: s });
    }
    const out = [...expandWithIterations(items, getFile, 3, tag)];
    // Total: 5 files × 3 scenarios × 3 iters = 45 yielded items.
    expect(out).toHaveLength(45);
    // First 5×3 = 15 items cover one scenario per file × all 3 iterations.
    const firstWave = out.slice(0, 15).map((x) => x.file);
    expect(new Set(firstWave)).toEqual(new Set(['A', 'B', 'C', 'D', 'E']));
  });
});

View File

@ -15,6 +15,7 @@ import { hasNodes } from './has-nodes';
import { hasStartNode } from './has-start-node';
import { hasTrigger } from './has-trigger';
import { memoryProperlyConnected } from './memory-properly-connected';
import { memorySessionKeyExpression } from './memory-session-key-expression';
import { noDisabledNodes } from './no-disabled-nodes';
import { noEmptySetNodes } from './no-empty-set-nodes';
import { noHardcodedCredentials } from './no-hardcoded-credentials';
@ -40,6 +41,7 @@ export const DETERMINISTIC_CHECKS: BinaryCheck[] = [
agentHasDynamicPrompt,
agentHasLanguageModel,
memoryProperlyConnected,
memorySessionKeyExpression,
vectorStoreHasEmbeddings,
noHardcodedCredentials,
noUnnecessaryCodeNodes,

View File

@ -0,0 +1,84 @@
import { memorySessionKeyExpression } from './memory-session-key-expression';
import type { WorkflowResponse } from '../../clients/n8n-client';

// Builds a minimal workflow with a trigger, an agent, and one memory node
// (wired to the agent via ai_memory) whose parameters are supplied by the
// test case.
function createWorkflow(memoryParameters: Record<string, unknown>): WorkflowResponse {
  return {
    id: 'workflow-1',
    name: 'Memory expression test',
    active: false,
    nodes: [
      {
        name: 'Telegram Trigger',
        type: 'n8n-nodes-base.telegramTrigger',
        parameters: {},
      },
      {
        name: 'AI Agent',
        type: '@n8n/n8n-nodes-langchain.agent',
        parameters: {},
      },
      {
        name: 'Conversation Memory',
        type: '@n8n/n8n-nodes-langchain.memoryBufferWindow',
        parameters: memoryParameters,
      },
    ],
    connections: {
      'Telegram Trigger': {
        main: [[{ node: 'AI Agent', type: 'main', index: 0 }]],
      },
      'Conversation Memory': {
        ai_memory: [[{ node: 'AI Agent', type: 'ai_memory', index: 0 }]],
      },
    },
  };
}

describe('memorySessionKeyExpression', () => {
  it('fails when a connected memory node uses $json in a custom sessionKey', async () => {
    const workflow = createWorkflow({
      sessionIdType: 'customKey',
      sessionKey: '={{ $json.chatId }}',
    });
    const result = await memorySessionKeyExpression.run(workflow, { prompt: '' });
    expect(result.pass).toBe(false);
    // The failure comment should name both the offending node and parameter.
    expect(result.comment).toContain('Conversation Memory');
    expect(result.comment).toContain('sessionKey');
  });
  it('fails when a connected legacy memory node uses $json in sessionId', async () => {
    const workflow = createWorkflow({
      sessionIdType: 'customKey',
      sessionId: '={{ $json.chatId }}',
    });
    const result = await memorySessionKeyExpression.run(workflow, { prompt: '' });
    expect(result.pass).toBe(false);
    expect(result.comment).toContain('sessionId');
  });
  it('passes when a connected memory node references the trigger explicitly', async () => {
    const workflow = createWorkflow({
      sessionIdType: 'customKey',
      sessionKey: "={{ $('Telegram Trigger').item.json.message.chat.id }}",
    });
    const result = await memorySessionKeyExpression.run(workflow, { prompt: '' });
    expect(result).toEqual({ pass: true });
  });
  it('passes for the Chat Trigger fromInput session ID mode', async () => {
    // fromInput mode is exempt even though the value contains $json.
    const workflow = createWorkflow({
      sessionIdType: 'fromInput',
      sessionKey: '={{ $json.sessionId }}',
    });
    const result = await memorySessionKeyExpression.run(workflow, { prompt: '' });
    expect(result).toEqual({ pass: true });
  });
});

View File

@ -0,0 +1,61 @@
import type { WorkflowNodeResponse } from '../../clients/n8n-client';
import type { BinaryCheck } from '../types';
import { collectSourcesByConnectionType } from '../utils';
// Parameter names that may carry a memory session key; `sessionId` is the
// name used by legacy memory node versions (see the check's tests).
const SESSION_KEY_PARAMETERS = ['sessionKey', 'sessionId'];
/**
 * Heuristic memory-node detection: the final segment of the node type
 * (after the last '.') contains "memory", case-insensitively — e.g.
 * "@n8n/n8n-nodes-langchain.memoryBufferWindow".
 */
function isMemoryNode(type: string): boolean {
  const segments = type.split('.');
  const shortName = segments[segments.length - 1] ?? '';
  return shortName.toLowerCase().includes('memory');
}
/**
 * Type guard: `value` is a string that references `$json` AND looks like an
 * n8n expression (leading '=' marker or an embedded '{{' template opener).
 * Plain strings merely mentioning $json do not count.
 */
function isExpressionUsingJson(value: unknown): value is string {
  if (typeof value !== 'string') return false;
  if (!value.includes('$json')) return false;
  return value.startsWith('=') || value.includes('{{');
}
// Chat Trigger's "fromInput" session-id mode supplies the session id from the
// trigger input itself, so a $json reference there is expected — such nodes
// are exempt from the check.
function usesFromInputSessionId(parameters: Record<string, unknown>): boolean {
  return parameters.sessionIdType === 'fromInput';
}
/**
 * Names of session-key parameters on `node` whose values embed a `$json`
 * expression. Empty when the node has no parameters or uses the exempt
 * "fromInput" session-id mode. Order follows SESSION_KEY_PARAMETERS.
 */
function getUnsafeSessionKeyParameters(node: WorkflowNodeResponse): string[] {
  const { parameters } = node;
  if (!parameters) return [];
  if (usesFromInputSessionId(parameters)) return [];
  const unsafe: string[] = [];
  for (const parameterName of SESSION_KEY_PARAMETERS) {
    if (isExpressionUsingJson(parameters[parameterName])) {
      unsafe.push(parameterName);
    }
  }
  return unsafe;
}
export const memorySessionKeyExpression: BinaryCheck = {
name: 'memory_session_key_expression',
description: 'AI memory custom session keys use explicit source node references',
kind: 'deterministic',
run(workflow) {
const connectedMemoryNodeNames = collectSourcesByConnectionType(
workflow.connections ?? {},
'ai_memory',
);
const memoryNodes = (workflow.nodes ?? []).filter(
(node) => connectedMemoryNodeNames.has(node.name) && isMemoryNode(node.type),
);
const issues = memoryNodes.flatMap((node) =>
getUnsafeSessionKeyParameters(node).map(
(parameterName) => `"${node.name}" uses $json in ${parameterName}`,
),
);
return {
pass: issues.length === 0,
...(issues.length > 0
? {
comment: `Memory session keys should reference the trigger/source node explicitly: ${issues.join('; ')}`,
}
: {}),
};
},
};

View File

@ -14,7 +14,11 @@ import { z } from 'zod';
export interface CliArgs {
/** TimeoutMs is defined per iteration, not as the total timeout for all iterations */
timeoutMs: number;
baseUrl: string;
/** One or more n8n base URLs. Multi-lane runs use a work-stealing allocator
* that dispatches each build to a lane that isn't already running its
* prompt, capped per-lane at MAX_CONCURRENT_BUILDS=4. Pass comma-separated
* to `--base-url`. */
baseUrls: string[];
email?: string;
password?: string;
verbose: boolean;
@ -26,7 +30,8 @@ export interface CliArgs {
outputDir?: string;
/** LangSmith dataset name (synced from JSON test cases before each run) */
dataset: string;
/** Max concurrent scenarios in evaluate(). Builds are separately limited to 4 by semaphore. */
/** Max concurrent target() calls in LangSmith evaluate(). Build concurrency is
* enforced separately by the LaneAllocator (cap=4 per lane). */
concurrency: number;
/** LangSmith experiment name prefix (auto-generated if not set) */
experimentName?: string;
@ -41,7 +46,7 @@ export interface CliArgs {
const cliArgsSchema = z.object({
timeoutMs: z.number().int().positive().default(600_000),
baseUrl: z.string().url().default('http://localhost:5678'),
baseUrls: z.array(z.string().url()).min(1).default(['http://localhost:5678']),
email: z.string().optional(),
password: z.string().optional(),
verbose: z.boolean().default(false),
@ -64,7 +69,7 @@ export function parseCliArgs(argv: string[]): CliArgs {
return {
timeoutMs: validated.timeoutMs,
baseUrl: validated.baseUrl,
baseUrls: validated.baseUrls,
email: validated.email,
password: validated.password,
verbose: validated.verbose,
@ -84,7 +89,7 @@ export function parseCliArgs(argv: string[]): CliArgs {
interface RawArgs {
timeoutMs: number;
baseUrl: string;
baseUrls: string[];
email?: string;
password?: string;
verbose: boolean;
@ -100,7 +105,7 @@ interface RawArgs {
function parseRawArgs(argv: string[]): RawArgs {
const result: RawArgs = {
timeoutMs: 600_000,
baseUrl: 'http://localhost:5678',
baseUrls: ['http://localhost:5678'],
verbose: false,
keepWorkflows: false,
outputDir: undefined,
@ -119,10 +124,15 @@ function parseRawArgs(argv: string[]): RawArgs {
i++;
break;
case '--base-url':
result.baseUrl = nextArg(argv, i, '--base-url');
case '--base-url': {
const raw = nextArg(argv, i, '--base-url');
result.baseUrls = raw
.split(',')
.map((s) => s.trim())
.filter((s) => s.length > 0);
i++;
break;
}
case '--email':
result.email = nextArg(argv, i, '--email');

View File

@ -14,13 +14,14 @@ import { evaluate } from 'langsmith/evaluation';
import type { EvaluationResult } from 'langsmith/evaluation';
import type { Example, Run } from 'langsmith/schemas';
import { traceable } from 'langsmith/traceable';
import pLimit from 'p-limit';
import { join } from 'path';
import { z } from 'zod';
import { aggregateResults, passAtK, passHatK } from './aggregator';
import { parseCliArgs } from './args';
import { buildCIMetadata, computeExperimentPrefix } from './ci-metadata';
import { LaneAllocator } from './lane-allocator';
import { expandWithIterations, partitionRoundRobin } from './lanes';
import { N8nClient } from '../clients/n8n-client';
import { seedCredentials, cleanupCredentials } from '../credentials/seeder';
import { loadWorkflowTestCasesWithFiles } from '../data/workflows';
@ -112,21 +113,46 @@ const runInputsSchema = z
/** Target input shape with the iteration index we inject for multi-run. */
type TargetInputs = DatasetExampleInputs & { _iteration?: number };
interface Lane {
client: N8nClient;
preRunWorkflowIds: Set<string>;
claimedWorkflowIds: Set<string>;
seedResult: { seededTypes: string[]; credentialIds: string[] };
}
interface RunConfig {
args: ReturnType<typeof parseCliArgs>;
lanes: Lane[];
logger: EvalLogger;
}
async function main(): Promise<void> {
const args = parseCliArgs(process.argv.slice(2));
const logger = createLogger(args.verbose);
const client = new N8nClient(args.baseUrl);
logger.info(`Authenticating with ${args.baseUrl}...`);
await client.login(args.email, args.password);
logger.success('Authenticated');
// One lane per base URL. The LangSmith path then uses a work-stealing
// allocator (lane-allocator.ts) to dispatch builds across lanes; the direct
// path partitions test cases statically per lane.
const lanes: Lane[] = await Promise.all(
args.baseUrls.map(async (baseUrl, idx) => {
const tag =
args.baseUrls.length > 1
? ` [lane ${String(idx + 1)}/${String(args.baseUrls.length)}]`
: '';
const client = new N8nClient(baseUrl);
logger.info(`Authenticating with ${baseUrl}...${tag}`);
await client.login(args.email, args.password);
logger.success(`Authenticated${tag}`);
logger.info('Seeding credentials...');
const seedResult = await seedCredentials(client, undefined, logger);
logger.info(`Seeded ${String(seedResult.credentialIds.length)} credential(s)`);
logger.info(`Seeding credentials...${tag}`);
const seedResult = await seedCredentials(client, undefined, logger);
logger.info(`Seeded ${String(seedResult.credentialIds.length)} credential(s)${tag}`);
const preRunWorkflowIds = await snapshotWorkflowIds(client);
const claimedWorkflowIds = new Set<string>();
const preRunWorkflowIds = await snapshotWorkflowIds(client);
const claimedWorkflowIds = new Set<string>();
return { client, preRunWorkflowIds, claimedWorkflowIds, seedResult };
}),
);
const startTime = Date.now();
@ -137,24 +163,10 @@ async function main(): Promise<void> {
if (hasLangSmith) {
logger.info('LangSmith API key detected, using evaluate() with experiment tracking');
evaluation = await runWithLangSmith({
args,
client,
preRunWorkflowIds,
claimedWorkflowIds,
logger,
seedResult,
});
evaluation = await runWithLangSmith({ args, lanes, logger });
} else {
logger.info('No LANGSMITH_API_KEY, running direct loop (results in eval-results.json only)');
evaluation = await runDirectLoop({
args,
client,
preRunWorkflowIds,
claimedWorkflowIds,
logger,
seedResult,
});
evaluation = await runDirectLoop({ args, lanes, logger });
}
const totalDuration = Date.now() - startTime;
@ -164,7 +176,11 @@ async function main(): Promise<void> {
console.log(`Report: ${htmlPath}`);
printSummary(evaluation);
} finally {
await cleanupCredentials(client, seedResult.credentialIds).catch(() => {});
await Promise.all(
lanes.map(async (lane) => {
await cleanupCredentials(lane.client, lane.seedResult.credentialIds).catch(() => {});
}),
);
}
}
@ -172,79 +188,111 @@ async function main(): Promise<void> {
// LangSmith mode: evaluate() with dataset sync, tracing, experiments
// ---------------------------------------------------------------------------
interface RunConfig {
args: ReturnType<typeof parseCliArgs>;
client: N8nClient;
preRunWorkflowIds: Set<string>;
claimedWorkflowIds: Set<string>;
logger: EvalLogger;
seedResult: { seededTypes: string[]; credentialIds: string[] };
}
async function runWithLangSmith(config: RunConfig): Promise<MultiRunEvaluation> {
const { args, client, preRunWorkflowIds, claimedWorkflowIds, logger } = config;
const { args, lanes, logger } = config;
const lsClient = new Client();
const datasetName = await syncDataset(lsClient, args.dataset, logger, args.filter);
const testCasesWithFiles = loadWorkflowTestCasesWithFiles(args.filter);
const buildLimiter = pLimit(MAX_CONCURRENT_BUILDS);
// Keyed by `${iteration}:${prompt}` so the same prompt gets a fresh build
// per iteration — pass@k captures real builder variance.
const buildCache = new Map<string, Promise<BuildResult>>();
const buildDurations = new Map<string, number>();
// LaneState carries the allocator-managed counters (activeBuilds,
// inflightPrompts) plus the lane's traced LangSmith wrappers. `runner` is
// the underlying Lane (n8n client, credential state) — named distinctly so
// it doesn't shadow the iteration variable `lane` in lanes.map().
interface LaneState {
runner: Lane;
activeBuilds: number;
inflightPrompts: Set<string>;
tracedBuild: (prompt: string) => Promise<BuildResult>;
tracedExecute: (execArgs: {
workflowId: string;
scenario: TestScenario;
workflowJsons: BuildResult['workflowJsons'];
}) => Promise<Awaited<ReturnType<typeof executeScenario>>>;
}
// Traceable wraps the actual build call *inside* the limiter — otherwise the
// LangSmith span would include queue-wait time, which accumulates across
// iterations as later builds queue behind earlier ones.
const tracedBuildWorkflow = traceable(
async (prompt: string) =>
await buildWorkflow({
client,
prompt,
timeoutMs: args.timeoutMs,
preRunWorkflowIds,
claimedWorkflowIds,
logger,
}),
{ name: 'workflow_build', run_type: 'chain', client: lsClient },
);
const laneStates: LaneState[] = lanes.map((lane, idx) => {
const laneNum = idx + 1;
const laneTag = lanes.length > 1 ? ` [lane ${String(laneNum)}/${String(lanes.length)}]` : '';
return {
runner: lane,
activeBuilds: 0,
inflightPrompts: new Set<string>(),
tracedBuild: traceable(
async (prompt: string) =>
await buildWorkflow({
client: lane.client,
prompt,
timeoutMs: args.timeoutMs,
preRunWorkflowIds: lane.preRunWorkflowIds,
claimedWorkflowIds: lane.claimedWorkflowIds,
logger,
laneTag,
}),
{
name: 'workflow_build',
run_type: 'chain',
client: lsClient,
metadata: { lane: laneNum },
},
),
tracedExecute: traceable(
async (execArgs: {
workflowId: string;
scenario: TestScenario;
workflowJsons: BuildResult['workflowJsons'];
}) =>
await executeScenario(
lane.client,
execArgs.workflowId,
execArgs.scenario,
execArgs.workflowJsons,
logger,
args.timeoutMs,
),
{
name: 'scenario_execution',
run_type: 'chain',
client: lsClient,
metadata: { lane: laneNum },
},
),
};
});
// Work-stealing: each build acquires a lane that isn't already running its
// prompt, runs there (capped per-lane), then releases. Scenarios re-use the
// lane that built their workflow.
const allocator = new LaneAllocator(laneStates, MAX_CONCURRENT_BUILDS);
const buildCache = new Map<
string,
Promise<{ build: BuildResult; lane: LaneState; buildDurationMs: number }>
>();
const buildDurations = new Map<string, number>();
async function getOrBuild(
prompt: string,
iteration: number,
): Promise<{ build: BuildResult; buildDurationMs?: number }> {
): Promise<{ build: BuildResult; lane: LaneState; buildDurationMs: number }> {
const key = `${String(iteration)}:${prompt}`;
const existing = buildCache.get(key);
if (existing) return { build: await existing };
const promise = buildLimiter(async () => {
const start = Date.now();
const build = await tracedBuildWorkflow(prompt);
buildDurations.set(key, Date.now() - start);
return build;
});
if (existing) return await existing;
const promise = (async () => {
const lane = await allocator.acquire(prompt);
try {
const start = Date.now();
const build = await lane.tracedBuild(prompt);
const buildDurationMs = Date.now() - start;
buildDurations.set(key, buildDurationMs);
return { build, lane, buildDurationMs };
} finally {
allocator.release(lane, prompt);
}
})();
buildCache.set(key, promise);
const build = await promise;
return { build, buildDurationMs: buildDurations.get(key) };
return await promise;
}
const traceableExecute = traceable(
async (execArgs: {
workflowId: string;
scenario: TestScenario;
workflowJsons: BuildResult['workflowJsons'];
}) =>
await executeScenario(
client,
execArgs.workflowId,
execArgs.scenario,
execArgs.workflowJsons,
logger,
args.timeoutMs,
),
{ name: 'scenario_execution', run_type: 'chain', client: lsClient },
);
const target = async (inputs: TargetInputs): Promise<TargetOutput> => {
const iteration = inputs._iteration ?? 0;
const scenario: TestScenario = {
@ -254,7 +302,11 @@ async function runWithLangSmith(config: RunConfig): Promise<MultiRunEvaluation>
successCriteria: inputs.successCriteria,
};
const { build, buildDurationMs } = await getOrBuild(inputs.prompt, iteration);
const {
build,
lane: builtOnLane,
buildDurationMs,
} = await getOrBuild(inputs.prompt, iteration);
if (!build.success || !build.workflowId) {
return {
@ -274,7 +326,7 @@ async function runWithLangSmith(config: RunConfig): Promise<MultiRunEvaluation>
const nodeCount = build.workflowJsons[0]?.nodes.length ?? 0;
let result;
try {
result = await traceableExecute({
result = await builtOnLane.tracedExecute({
workflowId: build.workflowId,
scenario,
workflowJsons: build.workflowJsons,
@ -356,7 +408,7 @@ async function runWithLangSmith(config: RunConfig): Promise<MultiRunEvaluation>
const experimentPrefix = args.experimentName ?? computeExperimentPrefix();
logger.info(
`Starting evaluate() with concurrency=${String(args.concurrency)}, builds limited to ${String(MAX_CONCURRENT_BUILDS)}, iterations=${String(args.iterations)}`,
`Starting evaluate() with concurrency=${String(args.concurrency)}, ${String(lanes.length)} lane(s) × ${String(MAX_CONCURRENT_BUILDS)} concurrent builds, iterations=${String(args.iterations)}`,
);
// Always filter the LangSmith dataset by the local file slugs. The local
@ -381,6 +433,7 @@ async function runWithLangSmith(config: RunConfig): Promise<MultiRunEvaluation>
filter: args.filter ?? 'all',
concurrency: args.concurrency,
maxBuilds: MAX_CONCURRENT_BUILDS,
lanes: lanes.length,
iterations: args.iterations,
...buildCIMetadata(),
},
@ -417,10 +470,10 @@ async function runWithLangSmith(config: RunConfig): Promise<MultiRunEvaluation>
} finally {
if (!args.keepWorkflows) {
await Promise.all(
[...buildCache.values()].map(async (buildPromise) => {
[...buildCache.values()].map(async (promise) => {
try {
const build = await buildPromise;
await cleanupBuild(client, build, logger);
const { build, lane } = await promise;
await cleanupBuild(lane.runner.client, build, logger);
} catch {
// Best-effort
}
@ -431,14 +484,10 @@ async function runWithLangSmith(config: RunConfig): Promise<MultiRunEvaluation>
}
/**
* Expand a source example stream into N copies, tagging each with `_iteration`
* so the target function can key its build cache by iteration and we can
* reshape runs back into per-iteration groups afterwards. All N copies share
* the source example's id, so LangSmith's UI groups them naturally by
`reference_example_id` — useful for pass@k visualization.
*
* The source is buffered into memory once before the first yield: we need to
* emit each example N times, and an AsyncIterable can only be consumed once.
* Expand a source example stream into N copies, tagging each with `_iteration`.
* Round-robins scenarios across test cases and iter-interleaves per scenario
* so the in-flight set spans both dimensions. Concentration is handled by the
* work-stealing allocator at build time.
*/
async function* expandExamplesForIterations(
source: AsyncIterable<Example>,
@ -446,11 +495,12 @@ async function* expandExamplesForIterations(
): AsyncIterable<Example> {
const cached: Example[] = [];
for await (const ex of source) cached.push(ex);
for (let i = 0; i < iterations; i++) {
for (const ex of cached) {
yield { ...ex, inputs: { ...ex.inputs, _iteration: i } };
}
}
yield* expandWithIterations(
cached,
(ex) => (typeof ex.inputs?.testCaseFile === 'string' ? ex.inputs.testCaseFile : 'unknown'),
iterations,
(ex, i) => ({ ...ex, inputs: { ...ex.inputs, _iteration: i } }),
);
}
function filteredExamplesIterable(
@ -639,7 +689,7 @@ function reshapeLangSmithRuns(
// ---------------------------------------------------------------------------
async function runDirectLoop(config: RunConfig): Promise<MultiRunEvaluation> {
const { args, client, preRunWorkflowIds, claimedWorkflowIds, logger, seedResult } = config;
const { args, lanes, logger } = config;
const testCasesWithFiles = loadWorkflowTestCasesWithFiles(args.filter);
if (testCasesWithFiles.length === 0) {
@ -652,30 +702,47 @@ async function runDirectLoop(config: RunConfig): Promise<MultiRunEvaluation> {
0,
);
logger.info(
`Running ${String(testCasesWithFiles.length)} test case(s) with ${String(totalScenarios)} scenario(s) × ${String(args.iterations)} iteration(s)`,
`Running ${String(testCasesWithFiles.length)} test case(s) with ${String(totalScenarios)} scenario(s) × ${String(args.iterations)} iteration(s) across ${String(lanes.length)} lane(s)`,
);
// Distribute test cases across lanes by source-order index. Each bucket carries
// the original index so we can re-sort lane outputs back to source order — the
// aggregator indexes per-iteration results positionally.
const indexed = testCasesWithFiles.map((tc, origIdx) => ({ tc, origIdx }));
const buckets = partitionRoundRobin(indexed, lanes.length);
const allRunResults: WorkflowTestCaseResult[][] = [];
for (let iter = 0; iter < args.iterations; iter++) {
if (args.iterations > 1) {
logger.info(`--- Iteration #${String(iter + 1)}/${String(args.iterations)} ---`);
}
const results = await runWithConcurrency(
testCasesWithFiles,
async ({ testCase }) =>
await runWorkflowTestCase({
client,
testCase,
timeoutMs: args.timeoutMs,
seededCredentialTypes: seedResult.seededTypes,
preRunWorkflowIds,
claimedWorkflowIds,
logger,
keepWorkflows: args.keepWorkflows,
}),
MAX_CONCURRENT_BUILDS,
const laneResults = await Promise.all(
lanes.map(async (lane, laneIdx) => {
const bucket = buckets[laneIdx];
const laneTag =
lanes.length > 1 ? ` [lane ${String(laneIdx + 1)}/${String(lanes.length)}]` : '';
const results = await runWithConcurrency(
bucket,
async ({ tc }) =>
await runWorkflowTestCase({
client: lane.client,
testCase: tc.testCase,
timeoutMs: args.timeoutMs,
seededCredentialTypes: lane.seedResult.seededTypes,
preRunWorkflowIds: lane.preRunWorkflowIds,
claimedWorkflowIds: lane.claimedWorkflowIds,
logger,
keepWorkflows: args.keepWorkflows,
laneTag,
}),
MAX_CONCURRENT_BUILDS,
);
return bucket.map((b, i) => ({ origIdx: b.origIdx, result: results[i] }));
}),
);
allRunResults.push(results);
const flat = laneResults.flat();
flat.sort((a, b) => a.origIdx - b.origIdx);
allRunResults.push(flat.map((x) => x.result));
}
return aggregateResults(allRunResults, args.iterations);

View File

@ -0,0 +1,72 @@
// Pull-based lane allocator. Each lane caps at `maxConcurrentBuilds` and never
// runs the same prompt twice concurrently — pairing those rules eliminates the
// same-prompt concentration that breaks the agent under load.
// Minimal lane surface the allocator mutates: how many builds are currently
// active on the lane and which prompts those builds are for.
export interface AllocatableLane {
activeBuilds: number;
inflightPrompts: Set<string>;
}
// A parked acquire() call: `resolve` hands the caller a lane that can now run
// `prompt` (the allocator marks the lane busy before resolving).
interface Waiter<L> {
prompt: string;
resolve: (lane: L) => void;
}
/**
 * Hands out lanes for builds while enforcing two per-lane invariants:
 * at most `maxConcurrentBuilds` simultaneous builds, and never two
 * concurrent builds of the same prompt. Callers that cannot be placed
 * immediately are parked and resumed FIFO by release().
 */
export class LaneAllocator<L extends AllocatableLane> {
  private readonly waiters: Array<Waiter<L>> = [];

  constructor(
    private readonly lanes: L[],
    private readonly maxConcurrentBuilds: number,
  ) {}

  /**
   * Reserve a lane that can run `prompt`. Resolves immediately when some lane
   * has spare capacity and is not already building this prompt; otherwise the
   * caller is parked until release() frees a compatible slot.
   */
  async acquire(prompt: string): Promise<L> {
    const candidate = this.findFree(prompt);
    if (candidate === undefined) {
      return await new Promise<L>((resolve) => {
        this.waiters.push({ prompt, resolve });
      });
    }
    this.markBusy(candidate, prompt);
    return candidate;
  }

  /** Return a previously acquired slot, then hand it to the oldest compatible waiter. */
  release(lane: L, prompt: string): void {
    lane.activeBuilds--;
    lane.inflightPrompts.delete(prompt);
    this.wakeNext(lane);
  }

  // Least-loaded eligible lane, or undefined when none can take `prompt`.
  // Spreading builds evenly avoids filling lane 0 to cap before lane 1
  // (hot-spotting). Ties keep the earliest eligible lane.
  private findFree(prompt: string): L | undefined {
    return this.lanes
      .filter((lane) => this.canRun(lane, prompt))
      .reduce<L | undefined>(
        (best, lane) =>
          best === undefined || lane.activeBuilds < best.activeBuilds ? lane : best,
        undefined,
      );
  }

  private canRun(lane: L, prompt: string): boolean {
    if (lane.inflightPrompts.has(prompt)) return false;
    return lane.activeBuilds < this.maxConcurrentBuilds;
  }

  private markBusy(lane: L, prompt: string): void {
    lane.activeBuilds++;
    lane.inflightPrompts.add(prompt);
  }

  // Resolve the oldest parked acquire() this lane can now serve (FIFO).
  private wakeNext(lane: L): void {
    const idx = this.waiters.findIndex((w) => this.canRun(lane, w.prompt));
    if (idx === -1) return;
    const [waiter] = this.waiters.splice(idx, 1);
    this.markBusy(lane, waiter.prompt);
    waiter.resolve(lane);
  }
}

View File

@ -0,0 +1,59 @@
// ---------------------------------------------------------------------------
// Lane partitioning helpers for multi-container eval runs
//
// Pure functions, intentionally separated from index.ts so unit tests can
// import them without triggering main()'s side effects.
// ---------------------------------------------------------------------------
/**
 * Split `items` into `laneCount` buckets round-robin by source index:
 * the item at index i lands in bucket i % laneCount.
 *
 * When laneCount exceeds items.length the surplus buckets are still returned
 * (empty), so callers can zip the result against their lanes one-to-one.
 *
 * @throws Error when laneCount is less than 1.
 */
export function partitionRoundRobin<T>(items: T[], laneCount: number): T[][] {
  if (laneCount < 1) {
    throw new Error(`laneCount must be >= 1, got ${String(laneCount)}`);
  }
  // Single pass: push each item into its bucket rather than filtering the
  // whole list once per lane.
  const buckets: T[][] = Array.from({ length: laneCount }, () => []);
  items.forEach((item, i) => {
    buckets[i % laneCount].push(item);
  });
  return buckets;
}
/**
 * Yield items grouped by file in round-robin order across files, duplicating
 * each yielded item `iterations` times via `tag`. Pure ordering logic — the
 * caller supplies the file accessor and the tagger.
 *
 * Order: round 1 emits the first item of every group, round 2 the second,
 * and so on. All `iterations` copies of an item are emitted consecutively
 * before moving to the next item.
 */
export function* expandWithIterations<T>(
  items: T[],
  getFile: (item: T) => string,
  iterations: number,
  tag: (item: T, iter: number) => T,
): IterableIterator<T> {
  // Group in first-seen file order; Map iteration preserves insertion order.
  const grouped = new Map<string, T[]>();
  for (const item of items) {
    const key = getFile(item);
    const existing = grouped.get(key);
    if (existing) {
      existing.push(item);
    } else {
      grouped.set(key, [item]);
    }
  }
  const groups = [...grouped.values()];
  const longest = Math.max(0, ...groups.map((g) => g.length));
  for (let round = 0; round < longest; round++) {
    for (const group of groups) {
      if (round >= group.length) continue;
      for (let iter = 0; iter < iterations; iter++) {
        yield tag(group[round], iter);
      }
    }
  }
}

View File

@ -0,0 +1,14 @@
{
"prompt": "Every two weeks I want to check the amount of n8n usage and bug reporting that the team has done and produce a leaderboard that then gets posted to Slack (channel ID: D034WT7G4CW).\n\nHere are the users in the team:\n\n- David Roberts (id: 1)\n- David Arens (id: 2)\n- Niklas Hatje (id: 3)\n\nHere is an example leaderboard:\n\n```\nUsage in the last two weeks:\n\nJonathan Clift: 7 tickets (5 execs, 3 hours)\nFabian Puehringer: 7 tickets (4 execs, 1 hours)\nTuukka Kantola: 6 tickets (16 execs, 6 hours)\n\nTickets = Linear bug tickets created\nExecs = Manual execs on registered accounts\n```\n\nIt is ordered by the number of tickets created (desc) then the number of execs (desc).\n\nTo get the number of bugs that a user has reported, query Linear and get the number of issues created by them in any team that have the `bug` label (case-sensitive), matched by name.\n\nTo get the number of hours that each user was using n8n for, connect to BigQuery and use something similar to the following query:\n\nwith\nsettings as (\n select\n timestamp('<start_cutoff>') as start_cutoff,\n timestamp('<end_cutoff>') as end_cutoff,\n),\nuser_accounts as (\n select * from unnest([\n struct<name string, user_id string>\n ...\n ])\n),\nnode_exec as (\n select\n timestamp,\n timestamp_trunc(timestamp, hour) as timestamp_hour,\n name,\n f.user_id,\n f.instance_id,\n status,\n from rudder_schema.manual_node_exec_finished f\n inner join user_accounts a on a.user_id = f.user_id\n cross join settings\n where f.timestamp between start_cutoff and end_cutoff\n),\nworkflow_exec as (\n select\n timestamp,\n timestamp_trunc(timestamp, hour) as timestamp_hour,\n name,\n f.user_id,\n f.instance_id,\n status,\n from rudder_schema.manual_workflow_exec_finished f\n inner join user_accounts a on a.user_id = f.user_id\n cross join settings\n where f.timestamp between start_cutoff and end_cutoff\n),\nexec as (\n select * from node_exec\n union all\n select * from workflow_exec\n),\nexec_summary as 
(\n select\n name,\n count(distinct instance_id) as instances,\n count(distinct timestamp_hour) as hours,\n count(*) as manual_execs,\n from exec\n group by 1\n)\n\nselect * from exec_summary\n\nConfigure all nodes as completely as possible and don't ask me for credentials, I'll set them up later.",
"complexity": "simple",
"tags": ["build", "schedule", "http-request", "bigquery"],
"triggerType": "schedule",
"scenarios": [
{
"name": "happy-path",
"description": "Leaderboard is posted to Slack",
"dataSetup": "Linear issues created in time period:\n - Issue1 by David Arens (has bug label)\n - Issue2 by David Arens (has no bug label)\n - Issue3 by David Roberts (has bug label)\n - Issue4 by David Arens (has bug label)\n\nResult of BQ query on n8n usage:\n - David Arens: 72 execs, 122 hours\n - David Roberts: 556 execs, 2 hours\n - Niklas Hatje: 0 execs, 0 hours",
"successCriteria": "The workflow executes without errors. A Slack message is posted in the channel specified using the format specified, and it contains the same data that was specified in the dataSetup. Niklas Hatje is included in the Slack message, and is listed as reporting 0 bugs."
}
]
}

View File

@ -0,0 +1,14 @@
{
"prompt": "Every day, fetch one post from the JSONPlaceholder API (GET https://jsonplaceholder.typicode.com/posts/1). Then use an Edit Fields (Set) node, not a Code node, to add a field called caption from the post title and a field called source with the value jsonplaceholder, while preserving all original fields from the HTTP response. Configure all nodes as completely as possible and don't ask me for credentials, I'll set them up later.",
"complexity": "medium",
"tags": ["build", "schedule", "http-request", "set", "data-transformation"],
"triggerType": "schedule",
"scenarios": [
{
"name": "preserve-fields",
"description": "HTTP data is reshaped with Set/Edit Fields while preserving the original response fields",
"dataSetup": "The HTTP Request node returns a single JSON object with id=42, userId=7, title='Launch day', and body='Ship it'.",
"successCriteria": "The workflow executes without errors. Judge the executed output values, not whether Set node parameters are literal or expression strings. The data reshaping is done with an Edit Fields/Set node, not a Code node. The final executed output contains the original id, userId, title, and body fields, plus caption resolving to 'Launch day' and source resolving to 'jsonplaceholder'. The Set/Edit Fields node uses mode='manual', valid manual assignments, and includeOtherFields=true to preserve existing input fields; it must not use keepAllExistingFields as a mode or manually re-map every original field as a substitute for includeOtherFields."
}
]
}

View File

@ -0,0 +1,13 @@
{
"prompt": "Build a Telegram chatbot workflow for a family assistant. It should receive Telegram messages, answer with an AI Agent using an OpenAI chat model, keep short-term conversation memory scoped separately for each Telegram chat, and send the AI Agent's answer back to the same Telegram chat. Configure all nodes as completely as possible and don't ask me for credentials, I'll set them up later.",
"complexity": "medium",
"tags": ["build", "telegram", "chatbot", "ai-agent", "memory", "expressions"],
"scenarios": [
{
"name": "distinct-telegram-chat",
"description": "A Telegram message from one chat is answered with memory scoped to that chat id",
"dataSetup": "The Telegram Trigger receives a text message from chat id 123456 with text 'What is on the family calendar today?' from user 'Alex'. The AI Agent returns a short helpful answer. The Telegram sendMessage call returns a success response.",
"successCriteria": "The workflow executes without errors. It contains a Telegram Trigger, an AI Agent, a chat model, and a memory node connected to the agent. The memory node scopes conversation history by Telegram chat id using an explicit source-node reference to the Telegram Trigger chat id, not $json. The final Telegram response is sent back to chat id 123456 and contains the AI Agent answer."
}
]
}

View File

@ -0,0 +1,20 @@
{
"prompt": "Every day at 8am, check the weather in Berlin using the OpenMeteo API and send me an email to david@thedavid.co.uk using the gmail node if it's going to rain",
"complexity": "simple",
"tags": ["build", "schedule", "http-request", "gmail", "conditional"],
"triggerType": "schedule",
"scenarios": [
{
"name": "happy-path",
"description": "Email is sent warning of rain",
"dataSetup": "Weather API call returns precipitation value of 0.5mm",
"successCriteria": "The workflow executes without errors. An email is sent to the email specified mentioning that it will rain today."
},
{
"name": "rain-not-expected",
"description": "No email is sent",
"dataSetup": "Weather API call returns precipitation value of 0mm",
"successCriteria": "The workflow executes without errors. No email is sent mentioning that it will rain today, and no email sending node is executed."
}
]
}

View File

@ -0,0 +1,14 @@
{
"prompt": "I want you to build a workflow that will read n8n workflow databases and extract certain information and then populate that information in a data table called 'workflows'.\n\nThe schema of the data table should be as follows:\n- instanceId\n- workflowId\n- workflowName\n- tags\n\nIf the workflow is run multiple times it should update the current rows rather than creating dupes.\n\nThe instance with the workflows is https://wonderman.users.n8n.cloud/. Configure all nodes as completely as possible and don't ask me for credentials, I'll set them up later.",
"complexity": "complex",
"tags": ["build", "schedule", "data-table", "n8n-api"],
"triggerType": "schedule",
"scenarios": [
{
"name": "happy-path",
"description": "Data table is populated",
"dataSetup": "Workflow data:\n- instanceId: 123, workflowId: abc, workflowName: 'Bob workflow', tags: bob, lovely\n- instanceId: 123, workflowId: def, workflowName: 'Raving workflow', tags: dance, boogie\n- instanceId: 456, workflowId: xyz, workflowName: 'Sir Bob', tags: knight, bob\n- instanceId: 789, workflowId: pqr, workflowName: 'Algebra', tags: maths, blackboard\n",
"successCriteria": "The workflow executes without errors. A data table called 'workflows' is created and contains the specified data. The workflow contains a mechanism to avoid inserting duplicate workflows (e.g. by using upsert rather than insert)."
}
]
}

View File

@ -50,6 +50,8 @@ interface WorkflowTestCaseConfig {
claimedWorkflowIds: Set<string>;
logger: EvalLogger;
keepWorkflows: boolean;
/** Optional " [lane N/M]" suffix appended to per-build log lines. */
laneTag?: string;
}
/**
@ -76,6 +78,7 @@ export async function runWorkflowTestCase(
preRunWorkflowIds: config.preRunWorkflowIds,
claimedWorkflowIds: config.claimedWorkflowIds,
logger,
laneTag: config.laneTag,
});
if (!build.success || !build.workflowId) {
@ -116,7 +119,7 @@ export async function runWorkflowTestCase(
const scenarioMs = Date.now() - scenarioStart;
logger.info(
` Scenarios done: ${String(result.scenarioResults.length)} scenarios [${String(Math.round(scenarioMs / 1000))}s]`,
` Scenarios done: ${String(result.scenarioResults.length)} scenarios [${String(Math.round(scenarioMs / 1000))}s]${config.laneTag ?? ''}`,
);
if (!config.keepWorkflows) {
@ -147,6 +150,8 @@ export interface BuildWorkflowConfig {
preRunWorkflowIds: Set<string>;
claimedWorkflowIds: Set<string>;
logger: EvalLogger;
/** Optional " [lane N/M]" suffix appended to the build log line. */
laneTag?: string;
}
/**
@ -165,7 +170,7 @@ export async function buildWorkflow(config: BuildWorkflowConfig): Promise<BuildR
try {
const buildStart = Date.now();
logger.info(` Building workflow: "${truncate(prompt, 60)}"`);
logger.info(` Building workflow: "${truncate(prompt, 60)}"${config.laneTag ?? ''}`);
const ssePromise = startSseConnection(client, threadId, events, abortController.signal).catch(
() => {},
@ -656,6 +661,12 @@ async function waitForBackgroundTasks(config: WaitConfig, timeoutMs: number): Pr
config.logger.verbose('Sub-agent(s) detected -- waiting for background tasks...');
// Log on count change, plus a heartbeat every 20s so a long stable wait still
// emits a liveness signal without spamming every poll interval.
const HEARTBEAT_MS = 20_000;
let lastLoggedKey = '';
let lastLogAt = 0;
while (Date.now() < deadline) {
await processConfirmationRequests(config);
@ -673,9 +684,15 @@ async function waitForBackgroundTasks(config: WaitConfig, timeoutMs: number): Pr
return;
}
config.logger.verbose(
`Waiting for ${String(restRunning.length)} REST task(s), ${String(ssePending.length)} SSE agent(s)`,
);
const key = `${String(restRunning.length)}/${String(ssePending.length)}`;
const now = Date.now();
if (key !== lastLoggedKey || now - lastLogAt >= HEARTBEAT_MS) {
config.logger.verbose(
`Waiting for ${String(restRunning.length)} REST task(s), ${String(ssePending.length)} SSE agent(s)`,
);
lastLoggedKey = key;
lastLogAt = now;
}
await delay(BACKGROUND_TASK_POLL_INTERVAL_MS);
}

View File

@ -43,7 +43,7 @@ Some trigger nodes expose HTTP endpoints. Always share the full production URL w
- **Webhook Trigger**: ${webhookBaseUrl}/{path} (where {path} is the node's webhook path parameter).
- **Form Trigger**: ${webhookBaseUrl}/{path} (or ${webhookBaseUrl}/{webhookId} if no custom path is set). Same pattern as Webhook — no /chat suffix.
- **Chat Trigger**: ${webhookBaseUrl}/{webhookId}/chat (where {webhookId} is the node's unique webhook ID, visible in the workflow JSON). The /chat suffix is unique to Chat Trigger — do NOT append it to Form Trigger or Webhook URLs. The chat UI is only accessible when the node's "public" parameter is true and the workflow is published (active). Do NOT guess the webhookId — read the workflow to find it.
- **Chat Trigger**: ${webhookBaseUrl}/{webhookId}/chat (where {webhookId} is the node's unique webhook ID, visible in the workflow JSON). The /chat suffix is unique to Chat Trigger — do NOT append it to Form Trigger or Webhook URLs. The public chat UI is only accessible to end users when the node's "public" parameter is true and the workflow has been published. (This applies only to end-user HTTP access your own testing via \`executions(action="run")\` and \`verify-built-workflow\` works regardless of publish state.) Do NOT guess the webhookId — read the workflow to find it.
**These URLs are for sharing with the user only.** Do NOT include them in \`build-workflow-with-agent\` task descriptions — the builder cannot reach the n8n instance via HTTP and will fail if it tries to curl/fetch these URLs.`;
}
@ -230,6 +230,9 @@ Always pass \`conversationContext\` when spawning background agents (\`build-wor
${SECRET_ASK_GUARDRAIL}
**Post-build flow** (for direct \`build-workflow-with-agent\` calls with \`bypassPlan: true\` — plan-driven builds handle their own setup/verify flow via the checkpoint):
**Publishing is never required for testing.** Both \`executions(action="run")\` and \`verify-built-workflow\` inject \`inputData\` as the trigger's output via the pin-data adapter — the workflow does not need to be active. Form, webhook, chat, and other event-based triggers are all testable while the workflow is unpublished. Never publish a workflow as a precondition for running it.
1. Builder finishes read \`outcome.workflowId\`, \`outcome.workItemId\`, and \`outcome.triggerNodes\` from the \`<background-task-completed>\` payload's \`outcome\` field (the \`result\` field is only a short text summary). If \`outcome\` is missing, the build did not submit — skip to step 2.
- If any \`outcome.triggerNodes[*].nodeType\` matches \`n8n-nodes-base.scheduleTrigger\`, \`n8n-nodes-base.webhook\`, \`@n8n/n8n-nodes-langchain.chatTrigger\`, or \`n8n-nodes-base.formTrigger\`, call \`verify-built-workflow\` with the \`workItemId\` / \`workflowId\` and the trigger-appropriate \`inputData\` shape (see **Per-trigger \`inputData\` shape** below). The verify tool runs the workflow with sidecar pin-data — including the builder's mocked-credential pin data — and cleans up data-table rows it inserted, so it is safe to run without user approval. Run verify even when \`outcome.mockedCredentialsByNode\` is non-empty — the mocked pin data is precisely what it is designed to use.
- Skip verify only when: \`outcome.workflowId\` or \`outcome.workItemId\` is missing; \`outcome.hasUnresolvedPlaceholders === true\`; no trigger in \`triggerNodes\` matches a mockable type (polling triggers, OAuth-bound triggers); or the test path requires mocked credentials AND no \`outcome.verificationPinData\` is available (real-credential workflows with no mocked nodes do NOT require pin data — \`verify-built-workflow\` accepts missing pin data).

View File

@ -52,11 +52,13 @@ describe('applyPlannedTaskPermissions', () => {
});
describe('build-workflow', () => {
it('should auto-approve workflow run and publish', () => {
it('should auto-approve workflow create, update, run, and publish', () => {
const context = makeContext();
const result = applyPlannedTaskPermissions(context, 'build-workflow');
expect(result.permissions).toMatchObject({
createWorkflow: 'always_allow',
updateWorkflow: 'always_allow',
runWorkflow: 'always_allow',
publishWorkflow: 'always_allow',
});

View File

@ -20,6 +20,8 @@ export const PLANNED_TASK_PERMISSION_OVERRIDES: Partial<
mutateDataTableRows: 'always_allow',
},
'build-workflow': {
createWorkflow: 'always_allow',
updateWorkflow: 'always_allow',
runWorkflow: 'always_allow',
publishWorkflow: 'always_allow',
},

View File

@ -1,175 +0,0 @@
import type { TemplateConnections, TemplateNode } from '../templates/types';
import { mermaidStringify } from '../utils/mermaid.utils';
describe('mermaidStringify', () => {
// Build a TemplateNode test fixture. Only name/type are required; the rest
// default to a minimal valid node: position [0, 0], empty parameters,
// typeVersion 1, and no id.
function makeNode(
name: string,
type: string,
position: [number, number] = [0, 0],
parameters: Record<string, unknown> = {},
typeVersion = 1,
id?: string,
): TemplateNode {
return { name, type, typeVersion, position, parameters, id };
}
it('should generate a simple linear workflow diagram', () => {
const nodes: TemplateNode[] = [
makeNode('Trigger', 'n8n-nodes-base.scheduleTrigger', [0, 0]),
makeNode('HTTP Request', 'n8n-nodes-base.httpRequest', [200, 0]),
makeNode('Set', 'n8n-nodes-base.set', [400, 0]),
];
const connections: TemplateConnections = {
Trigger: { main: [[{ node: 'HTTP Request' }]] },
'HTTP Request': { main: [[{ node: 'Set' }]] },
};
const result = mermaidStringify({ workflow: { nodes, connections } });
expect(result).toContain('```mermaid');
expect(result).toContain('flowchart TD');
expect(result).toContain('```');
expect(result).toContain('Trigger');
expect(result).toContain('HTTP Request');
expect(result).toContain('Set');
expect(result).toContain('-->');
});
it('should render conditional nodes as diamonds', () => {
const nodes: TemplateNode[] = [
makeNode('Trigger', 'n8n-nodes-base.scheduleTrigger', [0, 0]),
makeNode('Check', 'n8n-nodes-base.if', [200, 0]),
];
const connections: TemplateConnections = {
Trigger: { main: [[{ node: 'Check' }]] },
};
const result = mermaidStringify({ workflow: { nodes, connections } });
// Diamond shape uses curly braces
expect(result).toMatch(/n\d+\{"Check"\}/);
});
it('should skip sticky note nodes from main diagram', () => {
const nodes: TemplateNode[] = [
makeNode('Trigger', 'n8n-nodes-base.scheduleTrigger', [0, 0]),
makeNode('Sticky Note', 'n8n-nodes-base.stickyNote', [500, 500], {
content: 'This is a note',
width: 150,
height: 80,
}),
];
const connections: TemplateConnections = {};
const result = mermaidStringify({ workflow: { nodes, connections } });
// Sticky should appear as comment, not as a node
expect(result).toContain('%% This is a note');
expect(result).not.toContain('Sticky Note');
});
it('should include node parameters when includeNodeParameters is true', () => {
	const workflowNodes: TemplateNode[] = [
		makeNode('HTTP Request', 'n8n-nodes-base.httpRequest', [0, 0], {
			url: 'https://example.com',
			method: 'GET',
		}),
	];
	const wiring: TemplateConnections = {};
	const input = { workflow: { nodes: workflowNodes, connections: wiring } };
	// Parameter values are emitted only when the caller explicitly opts in.
	const withParams = mermaidStringify(input, { includeNodeParameters: true });
	expect(withParams).toContain('https://example.com');
	const withoutParams = mermaidStringify(input, { includeNodeParameters: false });
	expect(withoutParams).not.toContain('https://example.com');
});
it('should include node type with resource and operation in comment', () => {
	const slackNode = makeNode('Slack', 'n8n-nodes-base.slack', [0, 0], {
		resource: 'message',
		operation: 'send',
	});
	const wiring: TemplateConnections = {};
	const diagram = mermaidStringify({ workflow: { nodes: [slackNode], connections: wiring } });
	// Type comments render as type:resource:operation for quick scanning.
	expect(diagram).toContain('n8n-nodes-base.slack:message:send');
});
it('should handle agent nodes with AI subgraphs', () => {
	const workflowNodes: TemplateNode[] = [
		makeNode('Chat Trigger', 'n8n-nodes-base.chatTrigger', [0, 0]),
		makeNode('Agent', '@n8n/n8n-nodes-langchain.agent', [200, 0]),
		makeNode('OpenAI', '@n8n/n8n-nodes-langchain.lmChatOpenAi', [200, -200]),
	];
	const wiring: TemplateConnections = {
		'Chat Trigger': { main: [[{ node: 'Agent' }]] },
		OpenAI: { ai_languageModel: [[{ node: 'Agent' }]] },
	};
	const diagram = mermaidStringify({ workflow: { nodes: workflowNodes, connections: wiring } });
	// AI-connected helpers are grouped into a subgraph with a labelled ai_* edge.
	for (const fragment of ['subgraph', 'end', 'ai_languageModel']) {
		expect(diagram).toContain(fragment);
	}
});
it('should accept WorkflowMetadata input format', () => {
	const metadata = {
		templateId: 123,
		name: 'Test Template',
		description: 'A test',
		workflow: {
			name: 'Test',
			nodes: [makeNode('Trigger', 'n8n-nodes-base.scheduleTrigger', [0, 0])],
			connections: {} as TemplateConnections,
		},
	};
	// Full metadata envelopes are accepted, not just bare workflow objects.
	const diagram = mermaidStringify(metadata);
	expect(diagram).toContain('```mermaid');
	expect(diagram).toContain('Trigger');
});
it('should handle workflow with no connections', () => {
	const isolatedNodes: TemplateNode[] = [
		makeNode('Node A', 'n8n-nodes-base.set', [0, 0]),
		makeNode('Node B', 'n8n-nodes-base.code', [200, 0]),
	];
	const wiring: TemplateConnections = {};
	const diagram = mermaidStringify({ workflow: { nodes: isolatedNodes, connections: wiring } });
	// Disconnected nodes must still appear inside a valid fenced diagram.
	expect(diagram).toContain('```mermaid');
	expect(diagram).toContain('Node A');
	expect(diagram).toContain('Node B');
});
it('should include node ID in comments when includeNodeId is true', () => {
	const nodeWithId = makeNode('My Node', 'n8n-nodes-base.set', [0, 0], {}, 1, 'abc-123');
	const wiring: TemplateConnections = {};
	const diagram = mermaidStringify(
		{ workflow: { nodes: [nodeWithId], connections: wiring } },
		{ includeNodeId: true },
	);
	// The opt-in flag surfaces the node's ID in square brackets.
	expect(diagram).toContain('[abc-123]');
});
});

View File

@ -1,212 +0,0 @@
import type {
NodeConfigurationEntry,
NodeConfigurationsMap,
TemplateNode,
WorkflowMetadata,
} from '../templates/types';
import {
addNodeConfigurationToMap,
collectNodeConfigurationsFromWorkflows,
collectSingleNodeConfiguration,
formatNodeConfigurationExamples,
getNodeConfigurationsFromTemplates,
} from '../utils/node-configuration.utils';
describe('node-configuration.utils', () => {
// Builds a minimal TemplateNode fixture anchored at the origin; parameters
// default to empty and typeVersion to 1 so most call sites stay terse.
function makeNode(
	name: string,
	type: string,
	parameters: Record<string, unknown> = {},
	typeVersion = 1,
): TemplateNode {
	const node: TemplateNode = { name, type, typeVersion, position: [0, 0], parameters };
	return node;
}
// Wraps fixture nodes in the smallest valid WorkflowMetadata envelope
// (fixed template id/name, no connections).
function makeWorkflow(nodes: TemplateNode[]): WorkflowMetadata {
	const metadata: WorkflowMetadata = {
		templateId: 1,
		name: 'Test',
		workflow: { nodes, connections: {} },
	};
	return metadata;
}
describe('collectSingleNodeConfiguration', () => {
	it('should return config for node with parameters', () => {
		const slack = makeNode('Slack', 'n8n-nodes-base.slack', { channel: '#general' }, 2);
		// Both the type version and the parameters are carried into the entry.
		expect(collectSingleNodeConfiguration(slack)).toEqual({
			version: 2,
			parameters: { channel: '#general' },
		});
	});
	it('should return null for node with empty parameters', () => {
		const bareNode = makeNode('Set', 'n8n-nodes-base.set', {});
		expect(collectSingleNodeConfiguration(bareNode)).toBeNull();
	});
	it('should return null for node with oversized parameters', () => {
		// 20k characters pushes the serialized parameters past the size cap.
		const oversized = makeNode('BigNode', 'n8n-nodes-base.code', { code: 'x'.repeat(20000) });
		expect(collectSingleNodeConfiguration(oversized)).toBeNull();
	});
});
describe('addNodeConfigurationToMap', () => {
	it('should create new entry in map for new node type', () => {
		const configMap: NodeConfigurationsMap = {};
		const entry: NodeConfigurationEntry = { version: 1, parameters: { key: 'value' } };
		addNodeConfigurationToMap('n8n-nodes-base.slack', entry, configMap);
		// A fresh node type gets a one-element array holding the same object reference.
		expect(configMap['n8n-nodes-base.slack']).toHaveLength(1);
		expect(configMap['n8n-nodes-base.slack'][0]).toBe(entry);
	});
	it('should append to existing entries for known node type', () => {
		const configMap: NodeConfigurationsMap = {
			'n8n-nodes-base.slack': [{ version: 1, parameters: { old: true } }],
		};
		const newEntry: NodeConfigurationEntry = { version: 2, parameters: { new: true } };
		addNodeConfigurationToMap('n8n-nodes-base.slack', newEntry, configMap);
		expect(configMap['n8n-nodes-base.slack']).toHaveLength(2);
	});
});
describe('collectNodeConfigurationsFromWorkflows', () => {
	it('should collect configurations from multiple workflows', () => {
		const result = collectNodeConfigurationsFromWorkflows([
			makeWorkflow([
				makeNode('Slack', 'n8n-nodes-base.slack', { channel: '#a' }),
				makeNode('HTTP', 'n8n-nodes-base.httpRequest', { url: 'https://example.com' }),
			]),
			makeWorkflow([makeNode('Slack 2', 'n8n-nodes-base.slack', { channel: '#b' })]),
		]);
		// Entries are grouped per node type across every supplied workflow.
		expect(Object.keys(result)).toEqual(
			expect.arrayContaining(['n8n-nodes-base.slack', 'n8n-nodes-base.httpRequest']),
		);
		expect(result['n8n-nodes-base.slack']).toHaveLength(2);
		expect(result['n8n-nodes-base.httpRequest']).toHaveLength(1);
	});
	it('should skip sticky note nodes', () => {
		const result = collectNodeConfigurationsFromWorkflows([
			makeWorkflow([
				makeNode('Sticky', 'n8n-nodes-base.stickyNote', { content: 'note' }),
				makeNode('Slack', 'n8n-nodes-base.slack', { channel: '#a' }),
			]),
		]);
		// Stickies are annotation-only and never produce configuration entries.
		expect(result['n8n-nodes-base.stickyNote']).toBeUndefined();
		expect(result['n8n-nodes-base.slack']).toHaveLength(1);
	});
	it('should skip nodes with no parameters', () => {
		const result = collectNodeConfigurationsFromWorkflows([
			makeWorkflow([makeNode('Empty', 'n8n-nodes-base.noOp', {})]),
		]);
		expect(Object.keys(result)).toHaveLength(0);
	});
});
describe('getNodeConfigurationsFromTemplates', () => {
	it('should filter by node type', () => {
		const templates: WorkflowMetadata[] = [
			makeWorkflow([
				makeNode('Slack', 'n8n-nodes-base.slack', { channel: '#a' }),
				makeNode('HTTP', 'n8n-nodes-base.httpRequest', { url: 'https://example.com' }),
			]),
		];
		const slackConfigs = getNodeConfigurationsFromTemplates(templates, 'n8n-nodes-base.slack');
		// Only the Slack node's configuration survives the type filter.
		expect(slackConfigs).toHaveLength(1);
		expect(slackConfigs[0].parameters).toEqual({ channel: '#a' });
	});
	it('should filter by node type and version', () => {
		const templates: WorkflowMetadata[] = [
			makeWorkflow([
				makeNode('Slack v1', 'n8n-nodes-base.slack', { channel: '#a' }, 1),
				makeNode('Slack v2', 'n8n-nodes-base.slack', { channel: '#b' }, 2),
			]),
		];
		const v2Configs = getNodeConfigurationsFromTemplates(templates, 'n8n-nodes-base.slack', 2);
		expect(v2Configs).toHaveLength(1);
		expect(v2Configs[0].version).toBe(2);
	});
	it('should return empty array when no matches found', () => {
		const templates: WorkflowMetadata[] = [
			makeWorkflow([makeNode('Slack', 'n8n-nodes-base.slack', { channel: '#a' })]),
		];
		const noMatches = getNodeConfigurationsFromTemplates(templates, 'n8n-nodes-base.telegram');
		expect(noMatches).toHaveLength(0);
	});
});
describe('formatNodeConfigurationExamples', () => {
	it('should format configurations as markdown', () => {
		const formatted = formatNodeConfigurationExamples('n8n-nodes-base.slack', [
			{ version: 2, parameters: { channel: '#general', text: 'Hello' } },
		]);
		// Heading, per-example section, and fenced JSON all appear in the output.
		expect(formatted).toContain('## Node Configuration Examples: n8n-nodes-base.slack');
		expect(formatted).toContain('### Example (version 2)');
		expect(formatted).toContain('```json');
		expect(formatted).toContain('#general');
	});
	it('should return "No examples found" for empty configurations', () => {
		const formatted = formatNodeConfigurationExamples('n8n-nodes-base.slack', []);
		expect(formatted).toContain('No examples found');
	});
	it('should filter by version when specified', () => {
		const entries: NodeConfigurationEntry[] = [
			{ version: 1, parameters: { old: true } },
			{ version: 2, parameters: { new: true } },
		];
		const formatted = formatNodeConfigurationExamples('n8n-nodes-base.slack', entries, 2);
		expect(formatted).toContain('version 2');
		expect(formatted).not.toContain('version 1');
	});
	it('should limit number of examples', () => {
		const entries: NodeConfigurationEntry[] = [
			{ version: 1, parameters: { a: 1 } },
			{ version: 1, parameters: { b: 2 } },
			{ version: 1, parameters: { c: 3 } },
		];
		// Limit of 2 must cap the output even though 3 configurations exist.
		const formatted = formatNodeConfigurationExamples('n8n-nodes-base.slack', entries, undefined, 2);
		const exampleHeadings = formatted.match(/### Example/g) ?? [];
		expect(exampleHeadings.length).toBeLessThanOrEqual(2);
	});
});
});

View File

@ -1,24 +1,5 @@
import { fetchWorkflowsFromTemplates } from '../templates/template-api';
import { createTemplatesTool } from '../templates.tool';
// Mock external dependencies — templates tool takes no context
// template-api is mocked so each test controls the workflows the tool sees.
jest.mock('../templates/template-api', () => ({
	fetchWorkflowsFromTemplates: jest.fn(),
}));
// mermaid rendering is stubbed with a canned diagram — these tests assert
// plumbing, not diagram content.
jest.mock('../utils/mermaid.utils', () => ({
	mermaidStringify: jest.fn().mockReturnValue('graph TD\n A-->B'),
}));
// node-configuration helpers return fixed telegram data for parameter tests.
jest.mock('../utils/node-configuration.utils', () => ({
	collectNodeConfigurationsFromWorkflows: jest.fn().mockReturnValue({
		'n8n-nodes-base.telegram': [{ parameters: { chatId: '123' } }],
	}),
	formatNodeConfigurationExamples: jest
		.fn()
		.mockReturnValue('## n8n-nodes-base.telegram\nchatId: 123'),
}));
describe('templates tool', () => {
beforeEach(() => {
jest.clearAllMocks();
@ -96,67 +77,4 @@ describe('templates tool', () => {
expect(typed.message).toContain('Unknown technique');
});
});
describe('search-structures action', () => {
it('should call fetchWorkflowsFromTemplates and return mermaid diagrams', async () => {
(fetchWorkflowsFromTemplates as jest.Mock).mockResolvedValue({
workflows: [{ name: 'WF1', description: 'Desc1', nodes: [], connections: {} }],
totalFound: 10,
});
const tool = createTemplatesTool();
const result = await tool.execute!(
{ action: 'search-structures', search: 'slack notification' },
{} as never,
);
expect(fetchWorkflowsFromTemplates).toHaveBeenCalledWith({
search: 'slack notification',
category: undefined,
rows: undefined,
});
const typed = result as {
examples: Array<{ name: string; mermaid: string }>;
totalResults: number;
};
expect(typed.examples).toHaveLength(1);
expect(typed.examples[0].name).toBe('WF1');
expect(typed.totalResults).toBe(10);
});
});
describe('search-parameters action', () => {
it('should call fetchWorkflowsFromTemplates and return configurations', async () => {
(fetchWorkflowsFromTemplates as jest.Mock).mockResolvedValue({
workflows: [{ name: 'WF1', description: 'Desc1', nodes: [], connections: {} }],
totalFound: 5,
});
const tool = createTemplatesTool();
const result = await tool.execute!(
{
action: 'search-parameters',
search: 'telegram bot',
nodeType: 'n8n-nodes-base.telegram',
},
{} as never,
);
expect(fetchWorkflowsFromTemplates).toHaveBeenCalledWith({
search: 'telegram bot',
category: undefined,
rows: undefined,
});
const typed = result as {
configurations: Record<string, unknown>;
nodeTypes: string[];
totalTemplatesSearched: number;
formatted: string;
};
expect(typed.nodeTypes).toContain('n8n-nodes-base.telegram');
expect(typed.totalTemplatesSearched).toBe(5);
});
});
});

View File

@ -17,7 +17,6 @@ import { createVerifyBuiltWorkflowTool } from './orchestration/verify-built-work
import { createResearchTool } from './research.tool';
import { createAskUserTool } from './shared/ask-user.tool';
import { createTaskControlTool } from './task-control.tool';
import { createTemplatesTool } from './templates.tool';
import { createApplyWorkflowCredentialsTool } from './workflows/apply-workflow-credentials.tool';
import { createBuildWorkflowTool } from './workflows/build-workflow.tool';
import { createWorkflowsTool } from './workflows.tool';
@ -36,7 +35,6 @@ export function createAllTools(context: InstanceAiContext) {
workspace: createWorkspaceTool(context),
research: createResearchTool(context),
nodes: createNodesTool(context),
templates: createTemplatesTool(),
'ask-user': createAskUserTool(),
'build-workflow': createBuildWorkflowTool(context),
...(context.localMcpServer ? createToolsFromLocalMcpServer(context.localMcpServer) : {}),
@ -59,7 +57,6 @@ export function createOrchestratorDomainTools(context: InstanceAiContext) {
workspace: createWorkspaceTool(context),
research: createResearchTool(context),
nodes: createNodesTool(context, 'orchestrator'),
templates: createTemplatesTool(),
'ask-user': createAskUserTool(),
...(context.localMcpServer ? createToolsFromLocalMcpServer(context.localMcpServer) : {}),
};

View File

@ -9,7 +9,13 @@ jest.mock('@mastra/core/tools', () => ({
createTool: jest.fn((config: Record<string, unknown>) => config),
}));
import type { OrchestrationContext } from '../../../types';
import {
applyBranchReadOnlyOverrides,
DEFAULT_INSTANCE_AI_PERMISSIONS,
type InstanceAiPermissions,
} from '@n8n/api-types';
import type { InstanceAiContext, OrchestrationContext } from '../../../types';
import type { SubmitWorkflowAttempt } from '../../workflows/submit-workflow.tool';
const { resultFromPostStreamError, createBuildWorkflowAgentTool, recordSuccessfulWorkflowBuilds } =
@ -17,7 +23,10 @@ const { resultFromPostStreamError, createBuildWorkflowAgentTool, recordSuccessfu
require('../build-workflow-agent.tool') as typeof import('../build-workflow-agent.tool');
type BuildExecutable = {
execute: (input: Record<string, unknown>) => Promise<{ result: string; taskId: string }>;
execute: (
input: Record<string, unknown>,
ctx?: { agent?: { resumeData?: unknown; suspend?: jest.Mock<Promise<void>, [unknown]> } },
) => Promise<{ result: string; taskId: string }>;
};
function createMockContext(overrides: Partial<OrchestrationContext> = {}): OrchestrationContext {
@ -44,6 +53,39 @@ function createMockContext(overrides: Partial<OrchestrationContext> = {}): Orche
} as OrchestrationContext;
}
// Minimal InstanceAiContext stub: default permissions merged with overrides,
// plus a workflowService whose get() resolves to the given workflow name.
// All other services are empty placeholders cast to their declared types.
function createMockDomainContext(
	permissionOverrides: Partial<InstanceAiPermissions> = {},
	workflowName = 'Existing Workflow',
): InstanceAiContext {
	const workflowService = {
		get: jest.fn().mockResolvedValue({ name: workflowName }),
	} as unknown as InstanceAiContext['workflowService'];
	return {
		userId: 'test-user',
		permissions: { ...DEFAULT_INSTANCE_AI_PERMISSIONS, ...permissionOverrides },
		workflowService,
		executionService: {} as InstanceAiContext['executionService'],
		credentialService: {} as InstanceAiContext['credentialService'],
		nodeService: {} as InstanceAiContext['nodeService'],
		dataTableService: {} as InstanceAiContext['dataTableService'],
	};
}
// Orchestration context pre-wired for approval-gate tests: a domain context
// with the requested permissions, a build-workflow domain tool, and a
// spawnBackgroundTask mock reporting an immediately-started build task.
function createSpawnableContext(
	permissionOverrides: Partial<InstanceAiPermissions> = {},
	overrides: Partial<OrchestrationContext> = {},
): OrchestrationContext {
	const spawnBackgroundTask = jest.fn().mockReturnValue({
		status: 'started',
		taskId: 'build-task',
		agentId: 'agent-builder',
	});
	return createMockContext({
		domainContext: createMockDomainContext(permissionOverrides),
		domainTools: { 'build-workflow': {} },
		spawnBackgroundTask,
		...overrides,
	});
}
const MAIN_PATH = '/home/daytona/workspace/src/workflow.ts';
describe('resultFromPostStreamError', () => {
@ -219,7 +261,9 @@ describe('createBuildWorkflowAgentTool — plan-enforcement guard', () => {
});
it('allows the call when bypassPlan=true with a reason is provided', async () => {
const context = createMockContext();
const context = createMockContext({
domainContext: createMockDomainContext({ updateWorkflow: 'always_allow' }),
});
const tool = createBuildWorkflowAgentTool(context) as unknown as BuildExecutable;
const out = await tool.execute({
@ -270,6 +314,192 @@ describe('createBuildWorkflowAgentTool — plan-enforcement guard', () => {
});
});
// Approval gate for edits to pre-existing workflows: the tool must suspend
// (human-in-the-loop) before spawning a build task unless permissions,
// follow-up state, or in-flight ownership say otherwise.
describe('createBuildWorkflowAgentTool — existing workflow approval', () => {
	// Snapshot the plan-guard env var so tests that set it can restore it.
	const ORIGINAL_ENV = process.env.N8N_INSTANCE_AI_ENFORCE_BUILD_VIA_PLAN;
	afterEach(() => {
		// Restore the env var to its pre-suite state, including "unset".
		if (ORIGINAL_ENV === undefined) {
			delete process.env.N8N_INSTANCE_AI_ENFORCE_BUILD_VIA_PLAN;
		} else {
			process.env.N8N_INSTANCE_AI_ENFORCE_BUILD_VIA_PLAN = ORIGINAL_ENV;
		}
	});
	// Canonical "edit an existing workflow" request. bypassPlan sidesteps the
	// plan-enforcement guard so these tests exercise only the approval gate.
	const editInput = {
		task: 'patch one expression',
		workflowId: 'WF_EXISTING',
		bypassPlan: true,
		reason: 'Swap Slack channel on this notifier.',
	};
	// require_approval + no resume data → suspend with the confirmation
	// prompt and do not start a background build.
	it('suspends before spawning when updateWorkflow requires approval', async () => {
		const context = createSpawnableContext({ updateWorkflow: 'require_approval' });
		const suspend = jest.fn().mockResolvedValue(undefined);
		const tool = createBuildWorkflowAgentTool(context) as unknown as BuildExecutable;
		const out = await tool.execute(editInput, { agent: { suspend } });
		expect(out).toEqual({ result: '', taskId: '' });
		// The prompt embeds the resolved workflow name, its ID, and the reason.
		expect(suspend).toHaveBeenCalledWith(
			expect.objectContaining({
				message:
					'Edit existing workflow "Existing Workflow" (ID: WF_EXISTING)? Reason: Swap Slack channel on this notifier.',
				severity: 'warning',
			}),
		);
		expect(context.spawnBackgroundTask).not.toHaveBeenCalled();
	});
	// Resuming with approved=true proceeds to spawn exactly one build task.
	it('spawns when approval resume data is approved', async () => {
		const context = createSpawnableContext({ updateWorkflow: 'require_approval' });
		const tool = createBuildWorkflowAgentTool(context) as unknown as BuildExecutable;
		const out = await tool.execute(editInput, {
			agent: { resumeData: { approved: true } },
		});
		expect(out.taskId).toMatch(/^build-/);
		expect(context.spawnBackgroundTask).toHaveBeenCalledTimes(1);
	});
	// Resuming with approved=false returns a decline message and spawns nothing.
	it('does not spawn when approval resume data is denied', async () => {
		const context = createSpawnableContext({ updateWorkflow: 'require_approval' });
		const tool = createBuildWorkflowAgentTool(context) as unknown as BuildExecutable;
		const out = await tool.execute(editInput, {
			agent: { resumeData: { approved: false } },
		});
		expect(out).toEqual({ result: 'User declined the workflow edit.', taskId: '' });
		expect(context.spawnBackgroundTask).not.toHaveBeenCalled();
	});
	// always_allow skips the suspend step entirely.
	it('skips suspend when updateWorkflow is always_allow', async () => {
		const context = createSpawnableContext({ updateWorkflow: 'always_allow' });
		const suspend = jest.fn().mockResolvedValue(undefined);
		const tool = createBuildWorkflowAgentTool(context) as unknown as BuildExecutable;
		await tool.execute(editInput, { agent: { suspend } });
		expect(suspend).not.toHaveBeenCalled();
		expect(context.spawnBackgroundTask).toHaveBeenCalledTimes(1);
	});
	// New-workflow builds (no workflowId) bypass the edit gate; the plan guard
	// is disabled via the env var so the call reaches the spawn path.
	it('does not apply the edit approval gate without a workflowId', async () => {
		process.env.N8N_INSTANCE_AI_ENFORCE_BUILD_VIA_PLAN = 'false';
		const context = createSpawnableContext({ updateWorkflow: 'require_approval' });
		const suspend = jest.fn().mockResolvedValue(undefined);
		const tool = createBuildWorkflowAgentTool(context) as unknown as BuildExecutable;
		await tool.execute({ task: 'build a new workflow' }, { agent: { suspend } });
		expect(suspend).not.toHaveBeenCalled();
		expect(context.spawnBackgroundTask).toHaveBeenCalledTimes(1);
	});
	// Without a domain context there are no permissions to evaluate — no gate.
	it('does not apply the edit approval gate without domain context', async () => {
		const context = createMockContext({
			domainTools: { 'build-workflow': {} },
			spawnBackgroundTask: jest.fn().mockReturnValue({
				status: 'started',
				taskId: 'build-task',
				agentId: 'agent-builder',
			}),
		});
		const suspend = jest.fn().mockResolvedValue(undefined);
		const tool = createBuildWorkflowAgentTool(context) as unknown as BuildExecutable;
		await tool.execute(editInput, { agent: { suspend } });
		expect(suspend).not.toHaveBeenCalled();
		expect(context.spawnBackgroundTask).toHaveBeenCalledTimes(1);
	});
	// Replan follow-ups were already approved once — no second prompt.
	it('skips suspend in a replan follow-up', async () => {
		const context = createSpawnableContext(
			{ updateWorkflow: 'require_approval' },
			{ isReplanFollowUp: true },
		);
		const suspend = jest.fn().mockResolvedValue(undefined);
		const tool = createBuildWorkflowAgentTool(context) as unknown as BuildExecutable;
		await tool.execute(editInput, { agent: { suspend } });
		expect(suspend).not.toHaveBeenCalled();
		expect(context.spawnBackgroundTask).toHaveBeenCalledTimes(1);
	});
	// Checkpoint follow-ups are treated the same as replans.
	it('skips suspend in a checkpoint follow-up', async () => {
		const context = createSpawnableContext(
			{ updateWorkflow: 'require_approval' },
			{ isCheckpointFollowUp: true },
		);
		const suspend = jest.fn().mockResolvedValue(undefined);
		const tool = createBuildWorkflowAgentTool(context) as unknown as BuildExecutable;
		await tool.execute(editInput, { agent: { suspend } });
		expect(suspend).not.toHaveBeenCalled();
		expect(context.spawnBackgroundTask).toHaveBeenCalledTimes(1);
	});
	// "blocked" is a hard deny: no prompt, no spawn, just the admin message.
	it('denies without suspend or spawn when updateWorkflow is blocked', async () => {
		const context = createSpawnableContext({ updateWorkflow: 'blocked' });
		const suspend = jest.fn().mockResolvedValue(undefined);
		const tool = createBuildWorkflowAgentTool(context) as unknown as BuildExecutable;
		const out = await tool.execute(editInput, { agent: { suspend } });
		expect(out).toEqual({ result: 'Action blocked by admin', taskId: '' });
		expect(suspend).not.toHaveBeenCalled();
		expect(context.spawnBackgroundTask).not.toHaveBeenCalled();
	});
	// Branch read-only overrides force updateWorkflow into a blocked state.
	it('denies branch read-only edits without suspend or spawn', async () => {
		const readOnlyPermissions = applyBranchReadOnlyOverrides({
			...DEFAULT_INSTANCE_AI_PERMISSIONS,
		});
		const context = createSpawnableContext(readOnlyPermissions);
		const suspend = jest.fn().mockResolvedValue(undefined);
		const tool = createBuildWorkflowAgentTool(context) as unknown as BuildExecutable;
		const out = await tool.execute(editInput, { agent: { suspend } });
		expect(out).toEqual({ result: 'Action blocked by admin', taskId: '' });
		expect(suspend).not.toHaveBeenCalled();
		expect(context.spawnBackgroundTask).not.toHaveBeenCalled();
	});
	// Workflows the AI created in this plan cycle are edited without re-asking.
	it('skips suspend when the workflow was created earlier in the plan cycle', async () => {
		const context = createSpawnableContext({ updateWorkflow: 'require_approval' });
		(context.domainContext as InstanceAiContext).aiCreatedWorkflowIds = new Set([
			editInput.workflowId,
		]);
		const suspend = jest.fn().mockResolvedValue(undefined);
		const tool = createBuildWorkflowAgentTool(context) as unknown as BuildExecutable;
		await tool.execute(editInput, { agent: { suspend } });
		expect(suspend).not.toHaveBeenCalled();
		expect(context.spawnBackgroundTask).toHaveBeenCalledTimes(1);
	});
	// In-flight ownership never overrides an admin "blocked" permission.
	it('still denies blocked edits even when the workflow is in the active plan cycle', async () => {
		const context = createSpawnableContext({ updateWorkflow: 'blocked' });
		(context.domainContext as InstanceAiContext).aiCreatedWorkflowIds = new Set([
			editInput.workflowId,
		]);
		const suspend = jest.fn().mockResolvedValue(undefined);
		const tool = createBuildWorkflowAgentTool(context) as unknown as BuildExecutable;
		const out = await tool.execute(editInput, { agent: { suspend } });
		expect(out).toEqual({ result: 'Action blocked by admin', taskId: '' });
		expect(suspend).not.toHaveBeenCalled();
		expect(context.spawnBackgroundTask).not.toHaveBeenCalled();
	});
});
describe('recordSuccessfulWorkflowBuilds', () => {
it('records workflow IDs returned from successful build-workflow executions', async () => {
const onWorkflowId = jest.fn();

View File

@ -86,6 +86,16 @@ After writing any workflow with IF, Switch, or Filter nodes, verify:
### AI Agent with Subnodes use factory functions in subnodes config
\`\`\`javascript
const chatTrigger = trigger({
type: '@n8n/n8n-nodes-langchain.chatTrigger',
version: 1.3,
config: {
name: 'Chat Trigger',
parameters: { public: false },
output: [{ sessionId: 'chat-session-id', chatInput: 'Hello' }]
}
});
const model = languageModel({
type: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
version: 1.3,
@ -108,6 +118,19 @@ const parser = outputParser({
}
});
const memoryNode = memory({
type: '@n8n/n8n-nodes-langchain.memoryBufferWindow',
version: 1.3,
config: {
name: 'Conversation Memory',
parameters: {
sessionIdType: 'customKey',
sessionKey: nodeJson(chatTrigger, 'sessionId'),
contextWindowLength: 10
}
}
});
const agent = node({
type: '@n8n/n8n-nodes-langchain.agent',
version: 3.1,
@ -119,11 +142,12 @@ const agent = node({
hasOutputParser: true,
options: { systemMessage: 'You are an expert...' }
},
subnodes: { model: model, outputParser: parser }
subnodes: { model: model, memory: memoryNode, outputParser: parser }
}
});
\`\`\`
WRONG: \`.to(agent, { connectionType: 'ai_languageModel' })\` — subnodes MUST be in the config object.
For values inside AI subnodes, use explicit references such as \`nodeJson(triggerNode, 'sessionId')\` instead of \`$json.sessionId\`. For Chat Trigger memory specifically, \`sessionIdType: 'fromInput'\` is also valid.
### Code Node
\`\`\`javascript
@ -225,10 +249,6 @@ export default workflow('id', 'name')
.add(scheduleTrigger).to(processNode);
\`\`\`
### Web App (SPA served from a webhook)
When the workflow serves HTML from a webhook (dashboards, admin UIs, custom forms), call \`templates(action="best-practices", technique="web_app")\` for the full file-based HTML pattern, data-injection recipe, multi-route architecture, and a complete multi-route dashboard example. Embedding large HTML inline in Code nodes breaks at ~20KB — always use the file-based pattern from the guide.
### Google Sheets documentId and sheetName (RLC fields)
These are Resource Locator fields that require the \`__rl\` object format:

View File

@ -269,7 +269,6 @@ export async function startBuildWorkflowAgentTask(
'credentials',
'executions',
'data-tables',
'templates',
'ask-user',
];
@ -290,7 +289,6 @@ export async function startBuildWorkflowAgentTask(
'nodes',
'workflows',
'data-tables',
'templates',
'ask-user',
...(context.researchMode ? ['research'] : []),
];
@ -734,6 +732,16 @@ export const buildWorkflowAgentInputSchema = z.object({
),
});
// Payload emitted when the tool suspends for edit approval: a unique request
// id, the human-readable confirmation message, and a fixed 'warning' severity.
const buildWorkflowAgentSuspendSchema = z.object({
	requestId: z.string(),
	message: z.string(),
	severity: z.literal('warning'),
});
// Shape of the resume data carrying the user's approve/deny decision.
const buildWorkflowAgentResumeSchema = z.object({
	approved: z.boolean(),
});
/**
* Replan / checkpoint follow-ups have already paid the planner's discovery cost
* and carry the checkpoint task graph from the original plan direct builder
@ -749,6 +757,19 @@ function isBuildViaPlanGuardEnabled(): boolean {
return raw.toLowerCase() !== 'false' && raw !== '0';
}
/**
 * Resolves a human-readable workflow name for the edit-confirmation prompt.
 * Falls back to the raw workflow ID when the lookup fails or the stored name
 * is missing/blank, so the prompt always has a usable label.
 */
async function resolveWorkflowNameForEditConfirmation(
	context: OrchestrationContext,
	workflowId: string,
): Promise<string> {
	try {
		const fetched = await context.domainContext?.workflowService.get(workflowId);
		const trimmedName = fetched?.name?.trim();
		if (trimmedName && trimmedName.length > 0) {
			return trimmedName;
		}
	} catch {
		// Lookup failures are non-fatal — the ID is an acceptable label.
	}
	return workflowId;
}
export function createBuildWorkflowAgentTool(context: OrchestrationContext) {
return createTool({
id: 'build-workflow-with-agent',
@ -763,8 +784,14 @@ export function createBuildWorkflowAgentTool(context: OrchestrationContext) {
result: z.string(),
taskId: z.string(),
}),
execute: async (input: z.infer<typeof buildWorkflowAgentInputSchema>) => {
if (isBuildViaPlanGuardEnabled() && !isPostPlanFollowUp(context)) {
suspendSchema: buildWorkflowAgentSuspendSchema,
resumeSchema: buildWorkflowAgentResumeSchema,
execute: async (
input: z.infer<typeof buildWorkflowAgentInputSchema>,
ctx?: { agent?: { resumeData?: unknown; suspend?: unknown } },
) => {
const isPostPlanFollowUpRun = isPostPlanFollowUp(context);
if (isBuildViaPlanGuardEnabled() && !isPostPlanFollowUpRun) {
if (!input.bypassPlan) {
context.logger.warn(
'build-workflow-with-agent called outside plan/replan context — rejecting',
@ -808,6 +835,46 @@ export function createBuildWorkflowAgentTool(context: OrchestrationContext) {
reason: input.reason,
});
}
if (input.workflowId && !isPostPlanFollowUpRun && context.domainContext) {
const updateWorkflowPermission =
context.domainContext.permissions?.updateWorkflow ?? 'require_approval';
if (updateWorkflowPermission === 'blocked') {
return { result: 'Action blocked by admin', taskId: '' };
}
const isOwnInFlightWorkflow =
context.domainContext.aiCreatedWorkflowIds?.has(input.workflowId) ?? false;
if (!isOwnInFlightWorkflow) {
const resumeData = ctx?.agent?.resumeData as
| z.infer<typeof buildWorkflowAgentResumeSchema>
| undefined;
const suspend = ctx?.agent?.suspend as
| ((payload: z.infer<typeof buildWorkflowAgentSuspendSchema>) => Promise<void>)
| undefined;
const needsApproval = updateWorkflowPermission !== 'always_allow';
if (needsApproval && (resumeData === undefined || resumeData === null)) {
const workflowName = await resolveWorkflowNameForEditConfirmation(
context,
input.workflowId,
);
const reason = input.reason?.trim();
await suspend?.({
requestId: nanoid(),
message: `Edit existing workflow "${workflowName}" (ID: ${input.workflowId})?${reason ? ` Reason: ${reason}` : ''}`,
severity: 'warning',
});
return { result: '', taskId: '' };
}
if (resumeData !== undefined && resumeData !== null && !resumeData.approved) {
return { result: 'User declined the workflow edit.', taskId: '' };
}
}
}
const result = await startBuildWorkflowAgentTask(context, input);
return { result: result.result, taskId: result.taskId };
},

View File

@ -37,19 +37,13 @@ import { createLlmStepTraceHooks } from '../../runtime/resumable-stream-executor
import { consumeStreamWithHitl } from '../../stream/consume-with-hitl';
import { getTraceParentRun, withTraceParentContext } from '../../tracing/langsmith-tracing';
import type { OrchestrationContext } from '../../types';
import { createTemplatesTool } from '../templates.tool';
/** Number of recent thread messages to include as planner context. */
const MESSAGE_HISTORY_COUNT = 5;
/** Read-only discovery tools the planner gets from domainTools. */
const PLANNER_DOMAIN_TOOL_NAMES = [
'nodes',
'templates',
'credentials',
'data-tables',
'workflows',
'ask-user',
];
const PLANNER_DOMAIN_TOOL_NAMES = ['nodes', 'credentials', 'data-tables', 'workflows', 'ask-user'];
/** Research tools added when available. */
const PLANNER_RESEARCH_TOOL_NAMES = ['research'];
@ -263,6 +257,9 @@ export function createPlanWithAgentTool(context: OrchestrationContext) {
}
}
// Best-practices guidance — planner-exclusive
plannerTools.templates = createTemplatesTool();
// Incremental plan accumulation + approval tools
const accumulator = new BlueprintAccumulator();
plannerTools['add-plan-item'] = createAddPlanItemTool(accumulator, context);

View File

@ -1,5 +1,5 @@
/**
* Consolidated templates tool search-structures + search-parameters + best-practices.
* Templates tool exposes best-practices guidance for n8n workflow techniques.
*/
import { createTool } from '@mastra/core/tools';
import { z } from 'zod';
@ -7,124 +7,24 @@ import { z } from 'zod';
import { sanitizeInputSchema } from '../agent/sanitize-mcp-schemas';
import { documentation } from './best-practices/index';
import { TechniqueDescription, type WorkflowTechniqueType } from './best-practices/techniques';
import { fetchWorkflowsFromTemplates } from './templates/template-api';
import { categories } from './templates/types';
import { mermaidStringify } from './utils/mermaid.utils';
import {
collectNodeConfigurationsFromWorkflows,
formatNodeConfigurationExamples,
} from './utils/node-configuration.utils';
// -- Action schemas -----------------------------------------------------------
// Action schema: search the template library and render each hit as a
// mermaid structure diagram. All filters are optional.
const searchStructuresAction = z.object({
	action: z
		.literal('search-structures')
		.describe('Search templates and return mermaid diagrams showing workflow structure'),
	search: z.string().optional().describe('Free-text search query for templates'),
	category: z.enum(categories).optional().describe('Filter by template category'),
	rows: z
		.number()
		.min(1)
		.max(10)
		.optional()
		.describe('Number of templates to return (default: 5, max: 10)'),
});
// Action schema: search templates and extract node parameter configurations,
// optionally narrowed to a single node type.
const searchParametersAction = z.object({
	action: z
		.literal('search-parameters')
		.describe('Search templates and return node parameter configurations'),
	search: z.string().optional().describe('Free-text search query for templates'),
	category: z.enum(categories).optional().describe('Filter by template category'),
	rows: z
		.number()
		.min(1)
		.max(10)
		.optional()
		.describe('Number of templates to return (default: 5, max: 10)'),
	nodeType: z
		.string()
		.optional()
		.describe(
			'Filter to show configurations for a specific node type only (e.g. "n8n-nodes-base.telegram")',
		),
});
const bestPracticesAction = z.object({
action: z
.literal('best-practices')
.describe('Get workflow building best practices for a specific technique'),
technique: z
.string()
.describe(
'The workflow technique to get guidance for (e.g. "chatbot", "scheduling", "triage"). Pass "list" to see all available techniques.',
),
});
const inputSchema = sanitizeInputSchema(
z.discriminatedUnion('action', [
searchStructuresAction,
searchParametersAction,
bestPracticesAction,
]),
z.object({
action: z
.literal('best-practices')
.describe('Get workflow building best practices for a specific technique'),
technique: z
.string()
.describe(
'The workflow technique to get guidance for (e.g. "chatbot", "scheduling", "triage"). Pass "list" to see all available techniques.',
),
}),
);
type Input = z.infer<typeof inputSchema>;
// -- Handlers -----------------------------------------------------------------
/**
 * Handles the 'search-structures' action: searches the templates API and
 * returns each matching workflow as a mermaid diagram (parameters omitted),
 * along with the total number of templates the API reported.
 */
async function handleSearchStructures(input: Extract<Input, { action: 'search-structures' }>) {
  const { search, category, rows } = input;
  const { workflows, totalFound } = await fetchWorkflowsFromTemplates({ search, category, rows });
  return {
    examples: workflows.map((workflow) => ({
      name: workflow.name,
      description: workflow.description,
      mermaid: mermaidStringify(workflow, { includeNodeParameters: false }),
    })),
    totalResults: totalFound,
  };
}
/**
 * Handles the 'search-parameters' action: searches the templates API,
 * collects node parameter configurations from the matched workflows, and
 * optionally narrows the result to a single node type. Returns both the raw
 * configuration map and a human-readable formatted summary.
 */
async function handleSearchParameters(input: Extract<Input, { action: 'search-parameters' }>) {
  const result = await fetchWorkflowsFromTemplates({
    search: input.search,
    category: input.category,
    rows: input.rows,
  });
  const collected = collectNodeConfigurationsFromWorkflows(result.workflows);

  // Narrow to the requested node type when one was given (truthy check, as
  // before); an unknown node type yields an empty map.
  let configurations = collected;
  if (input.nodeType) {
    const entry = collected[input.nodeType];
    configurations = entry ? { [input.nodeType]: entry } : {};
  }

  const nodeTypes = Object.keys(configurations);
  const formatted = nodeTypes
    .map((nodeType) => formatNodeConfigurationExamples(nodeType, configurations[nodeType], undefined, 3))
    .join('\n\n');

  return {
    configurations,
    nodeTypes,
    totalTemplatesSearched: result.totalFound,
    formatted,
  };
}
// eslint-disable-next-line @typescript-eslint/require-await
async function handleBestPractices(input: Extract<Input, { action: 'best-practices' }>) {
async function handleBestPractices(input: Input) {
const { technique } = input;
// "list" mode: return all techniques with descriptions
@ -150,7 +50,7 @@ async function handleBestPractices(input: Extract<Input, { action: 'best-practic
if (description) {
return {
technique,
message: `Technique "${technique}" (${description}) exists but does not have detailed documentation yet. Use the templates tool with the search-structures action to find example workflows instead.`,
message: `Technique "${technique}" (${description}) does not have detailed documentation yet — proceed with general n8n knowledge.`,
};
}
@ -167,22 +67,11 @@ async function handleBestPractices(input: Extract<Input, { action: 'best-practic
};
}
// -- Tool factory -------------------------------------------------------------
export function createTemplatesTool() {
return createTool({
id: 'templates',
description: 'Search n8n workflow templates or get best practices.',
description: 'Get best practices guidance for n8n workflow techniques.',
inputSchema,
execute: async (input: Input) => {
switch (input.action) {
case 'search-structures':
return await handleSearchStructures(input);
case 'search-parameters':
return await handleSearchParameters(input);
case 'best-practices':
return await handleBestPractices(input);
}
},
execute: handleBestPractices,
});
}

View File

@ -1,180 +0,0 @@
import type {
Category,
TemplateFetchResponse,
TemplateSearchQuery,
TemplateSearchResponse,
WorkflowMetadata,
} from './types';
/**
 * Base URL for n8n template API.
 * All endpoints used below (/templates/search, /workflows/templates/:id)
 * are resolved relative to this host.
 */
const N8N_API_BASE_URL = 'https://api.n8n.io/api';
/**
 * Runtime type guard for TemplateSearchResponse.
 * Accepts any object carrying a numeric `totalWorkflows` and an array
 * `workflows`; everything else (null, primitives, wrong shapes) is rejected.
 */
function isTemplateSearchResponse(data: unknown): data is TemplateSearchResponse {
  if (data === null || typeof data !== 'object') {
    return false;
  }
  const candidate = data as Record<string, unknown>;
  return Array.isArray(candidate.workflows) && typeof candidate.totalWorkflows === 'number';
}
/**
 * Runtime type guard for TemplateFetchResponse.
 * Requires a numeric `id`, string `name`, and a non-null `workflow` object.
 */
function isTemplateFetchResponse(data: unknown): data is TemplateFetchResponse {
  if (data === null || typeof data !== 'object') {
    return false;
  }
  const candidate = data as Record<string, unknown>;
  if (typeof candidate.id !== 'number' || typeof candidate.name !== 'string') {
    return false;
  }
  return typeof candidate.workflow === 'object' && candidate.workflow !== null;
}
/**
 * Serialize template search parameters into a query string.
 * Preset values are pinned first (free templates only, AND-combined criteria,
 * newest-first sort, first page, default 5 rows) and cannot be overridden;
 * user-provided search/category/nodes filters are appended after them.
 */
function buildSearchQueryString(query: TemplateSearchQuery): string {
  // Fixed presets — insertion order is preserved by URLSearchParams, so the
  // serialized output is identical to appending them one by one.
  const params = new URLSearchParams({
    price: '0', // Always free templates
    combineWith: 'and', // Don't ignore any search criteria
    sort: 'createdAt:desc,rank:desc', // Most recent templates first
    rows: String(query.rows ?? 5), // Default 5 results per page
    page: '1', // Always first page
  });
  // Optional user-provided values
  if (query.search) params.append('search', query.search);
  if (query.category) params.append('category', query.category);
  if (query.nodes) params.append('nodes', query.nodes);
  return params.toString();
}
/**
 * Fetch a page of template metadata from the n8n templates search API.
 *
 * @param query Optional search text, category, row count, and node filter.
 * @returns The validated search response (total count + template summaries).
 * @throws Error when the HTTP request fails or the payload shape is invalid.
 */
export async function fetchTemplateList(query: {
  search?: string;
  category?: Category;
  rows?: number;
  nodes?: string;
}): Promise<TemplateSearchResponse> {
  const queryString = buildSearchQueryString(query);
  const suffix = queryString ? `?${queryString}` : '';
  const response = await fetch(`${N8N_API_BASE_URL}/templates/search${suffix}`, {
    method: 'GET',
    headers: { 'Content-Type': 'application/json', Accept: 'application/json' },
  });
  if (!response.ok) {
    throw new Error(`Failed to fetch templates: ${response.status} ${response.statusText}`);
  }
  const payload: unknown = await response.json();
  if (!isTemplateSearchResponse(payload)) {
    throw new Error('Invalid response format from templates API');
  }
  return payload;
}
/**
 * Fetch one workflow template (full payload) by its numeric id.
 *
 * @param id Template id as returned by the search endpoint.
 * @returns The validated template payload including the embedded workflow.
 * @throws Error when the HTTP request fails or the payload shape is invalid.
 */
export async function fetchTemplateByID(id: number): Promise<TemplateFetchResponse> {
  const response = await fetch(`${N8N_API_BASE_URL}/workflows/templates/${id}`, {
    method: 'GET',
    headers: { 'Content-Type': 'application/json', Accept: 'application/json' },
  });
  if (!response.ok) {
    throw new Error(`Failed to fetch template ${id}: ${response.status} ${response.statusText}`);
  }
  const payload: unknown = await response.json();
  if (!isTemplateFetchResponse(payload)) {
    throw new Error(`Invalid response format from template ${id} API`);
  }
  return payload;
}
/**
 * Result of fetching workflows from templates.
 */
export interface FetchWorkflowsResult {
  // Full workflow data for templates that fetched successfully.
  workflows: WorkflowMetadata[];
  // Total matches reported by the search API; may exceed workflows.length
  // when some per-template fetches fail or maxTemplates caps the batch.
  totalFound: number;
  // Template ids corresponding 1:1 with `workflows`.
  templateIds: number[];
}
/**
 * Fetch workflows from the templates API and return full workflow data.
 *
 * First queries the search endpoint for matching template metadata, then
 * fetches the complete workflow for each hit in parallel. Failures of
 * individual template fetches are non-fatal and are dropped from the result.
 *
 * @param query   Search filters forwarded to the templates search endpoint.
 * @param options `maxTemplates` caps how many full workflows are fetched
 *                (default: all templates returned by the search).
 * @returns Successfully fetched workflows, the API's total match count, and
 *          the template ids of the successful fetches.
 */
export async function fetchWorkflowsFromTemplates(
  query: {
    search?: string;
    category?: Category;
    rows?: number;
    nodes?: string;
  },
  options?: {
    /** Maximum number of templates to fetch full data for (default: all) */
    maxTemplates?: number;
  },
): Promise<FetchWorkflowsResult> {
  const { maxTemplates } = options ?? {};
  // First, fetch the list of workflow templates (metadata)
  const searchResponse = await fetchTemplateList(query);
  // Explicit undefined check: an explicit maxTemplates of 0 must fetch
  // nothing — the previous truthiness check (`maxTemplates ? … : …`) treated
  // 0 as "no cap" and fetched everything.
  const templatesToFetch =
    maxTemplates !== undefined
      ? searchResponse.workflows.slice(0, maxTemplates)
      : searchResponse.workflows;
  // Fetch complete workflow data for each template in parallel
  const workflowResults = await Promise.all(
    templatesToFetch.map(async (template) => {
      try {
        const fullWorkflow = await fetchTemplateByID(template.id);
        return {
          metadata: {
            templateId: template.id,
            name: template.name,
            description: template.description,
            workflow: fullWorkflow.workflow,
          } satisfies WorkflowMetadata,
          templateId: template.id,
        };
      } catch {
        // Individual template fetch failures are non-fatal
        return null;
      }
    }),
  );
  // Filter out failed fetches
  const validResults = workflowResults.filter(
    (result): result is NonNullable<typeof result> => result !== null,
  );
  return {
    workflows: validResults.map((r) => r.metadata),
    totalFound: searchResponse.totalWorkflows,
    templateIds: validResults.map((r) => r.templateId),
  };
}

View File

@ -1,124 +0,0 @@
/**
* Local types for template API data.
* These mirror the shapes returned by the n8n template API
* without depending on n8n-workflow.
*/
// ── Template node & workflow shapes ─────────────────────────────────────────

// Minimal node shape as embedded in template workflow payloads.
export interface TemplateNode {
  // UUID; optional — some template payloads omit it.
  id?: string;
  name: string;
  // Node type identifier, e.g. 'n8n-nodes-base.telegram'.
  type: string;
  typeVersion: number;
  // Canvas coordinates; consumers read [0] as x and [1] as y.
  position: [number, number];
  parameters: Record<string, unknown>;
}

/**
 * Connection entry in the n8n connections format.
 * Each entry points to a target node with an optional input index.
 */
export interface ConnectionEntry {
  node: string;
  type?: string;
  index?: number;
}

/**
 * Connections map: sourceNode -> connectionType -> outputIndex[] -> ConnectionEntry[]
 * Example: { "Node A": { main: [[{ node: "Node B" }]] } }
 * Inner entries may be null for outputs with no connections.
 */
export type TemplateConnections = Record<string, Record<string, Array<ConnectionEntry[] | null>>>;

// Workflow payload embedded in a fetched template.
export interface TemplateWorkflow {
  name?: string;
  nodes: TemplateNode[];
  connections: TemplateConnections;
}
// ── Template API request/response shapes ────────────────────────────────────

// Valid values for the template search `category` filter.
// Retrieved from https://api.n8n.io/api/templates/categories
export const categories = [
  'AI',
  'AI Chatbot',
  'AI RAG',
  'AI Summarization',
  'Content Creation',
  'CRM',
  'Crypto Trading',
  'DevOps',
  'Document Extraction',
  'Document Ops',
  'Engineering',
  'File Management',
  'HR',
  'Internal Wiki',
  'Invoice Processing',
  'IT Ops',
  'Lead Generation',
  'Lead Nurturing',
  'Marketing',
  'Market Research',
  'Miscellaneous',
  'Multimodal AI',
  'Other',
  'Personal Productivity',
  'Project Management',
  'Sales',
  'SecOps',
  'Social Media',
  'Support',
  'Support Chatbot',
  'Ticket Management',
] as const;

// Union of all known template category names.
export type Category = (typeof categories)[number];
// Query parameters accepted by the template search endpoint.
export interface TemplateSearchQuery {
  search?: string;
  rows?: number;
  category?: Category;
  nodes?: string;
}

// Summary entry returned by the search endpoint (no workflow payload).
export interface TemplateWorkflowDescription {
  id: number;
  name: string;
  description: string;
}

// Response shape of the /templates/search endpoint.
export interface TemplateSearchResponse {
  totalWorkflows: number;
  workflows: TemplateWorkflowDescription[];
}

// Response shape of /workflows/templates/:id — includes the full workflow.
export interface TemplateFetchResponse {
  id: number;
  name: string;
  workflow: TemplateWorkflow;
}

// ── Processed workflow metadata ─────────────────────────────────────────────

// Template summary joined with its fully fetched workflow payload.
export interface WorkflowMetadata {
  templateId: number;
  name: string;
  description?: string;
  workflow: TemplateWorkflow;
}

// ── Node configuration types ────────────────────────────────────────────────

// One observed parameter configuration for a node, tagged with a version.
export interface NodeConfigurationEntry {
  version: number;
  parameters: Record<string, unknown>;
}

/**
 * Map of node type to array of parameter configurations with version info.
 * Key: node type (e.g. 'n8n-nodes-base.telegram')
 * Value: array of configuration entries with version and parameters
 */
export type NodeConfigurationsMap = Record<string, NodeConfigurationEntry[]>;

View File

@ -1,890 +0,0 @@
import type { TemplateConnections, TemplateNode, WorkflowMetadata } from '../templates/types';
/**
 * Input type for mermaidStringify when you only have workflow data
 * without full template metadata.
 */
export interface MermaidWorkflowInput {
  workflow: {
    name?: string;
    nodes: TemplateNode[];
    connections: TemplateConnections;
  };
}

/**
 * Options for mermaid diagram generation.
 */
export interface MermaidOptions {
  /** Include node type in comments (default: true) */
  includeNodeType?: boolean;
  /** Include node parameters in comments (default: true) */
  includeNodeParameters?: boolean;
  /** Include node name in node definition (default: true) */
  includeNodeName?: boolean;
  /** Include node UUID in comments for reference (default: true) */
  includeNodeId?: boolean;
}

// Concrete defaults applied when a caller omits individual options.
const DEFAULT_MERMAID_OPTIONS: Required<MermaidOptions> = {
  includeNodeType: true,
  includeNodeParameters: true,
  includeNodeName: true,
  includeNodeId: true,
};

/** Node types that represent conditional/branching logic (rendered as diamond shape) */
const CONDITIONAL_NODE_TYPES = new Set([
  'n8n-nodes-base.if',
  'n8n-nodes-base.switch',
  'n8n-nodes-base.filter',
]);

/** Node type for AI agents that should be wrapped in subgraphs */
const AGENT_NODE_TYPE = '@n8n/n8n-nodes-langchain.agent';

// Sticky notes are filtered out of the executable node list and rendered as
// comments or subgraph labels instead.
const STICKY_NOTE_TYPE = 'n8n-nodes-base.stickyNote';

/** Default node dimensions when checking sticky overlap */
const DEFAULT_NODE_WIDTH = 100;
const DEFAULT_NODE_HEIGHT = 100;

/** Default sticky dimensions (used when parameters.width/height are absent) */
const DEFAULT_STICKY_WIDTH = 150;
const DEFAULT_STICKY_HEIGHT = 80;
/**
 * Represents a sticky note with its bounds and content.
 */
interface StickyBounds {
  // The original sticky-note node.
  node: TemplateNode;
  // Position from node.position; width/height fall back to the sticky
  // defaults when absent from parameters.
  x: number;
  y: number;
  width: number;
  height: number;
  // Trimmed text content; empty string when the sticky has none.
  content: string;
}

/**
 * Result of categorizing sticky notes by their overlap with regular nodes.
 */
interface StickyOverlapResult {
  // Stickies covering no node — emitted as standalone comments.
  noOverlap: StickyBounds[];
  // Node name -> the sticky covering exactly that one node.
  singleNodeOverlap: Map<string, StickyBounds>;
  // Stickies spanning several nodes — rendered as subgraphs.
  multiNodeOverlap: Array<{ sticky: StickyBounds; nodeNames: string[] }>;
}

/**
 * Represents an agent node with its AI-connected nodes for subgraph grouping.
 */
interface AgentSubgraph {
  agentNode: TemplateNode;
  // Names of nodes feeding the agent over non-'main' connections.
  aiConnectedNodeNames: string[];
  // Multi-node sticky groups whose members all connect to this agent.
  nestedStickySubgraphs: Array<{ sticky: StickyBounds; nodeNames: string[] }>;
}
/**
* Builder class for generating Mermaid flowchart diagrams from n8n workflows
*/
class MermaidBuilder {
private readonly nodes: TemplateNode[];
private readonly connections: TemplateConnections;
private readonly options: Required<MermaidOptions>;
private readonly nodeIdMap: Map<string, string>;
private readonly nodeByName: Map<string, TemplateNode>;
private readonly stickyOverlaps: StickyOverlapResult;
private readonly agentSubgraphs: AgentSubgraph[];
private readonly nodesInSubgraphs: Set<string>;
private readonly definedNodes = new Set<string>();
private readonly lines: string[] = [];
private subgraphCounter = 0;
constructor(
nodes: TemplateNode[],
connections: TemplateConnections,
options: Required<MermaidOptions>,
) {
const regularNodes = nodes.filter((n) => n.type !== STICKY_NOTE_TYPE);
const stickyNotes = nodes.filter((n) => n.type === STICKY_NOTE_TYPE);
this.nodes = regularNodes;
this.connections = connections;
this.options = options;
this.nodeIdMap = this.createNodeIdMap();
this.nodeByName = new Map(regularNodes.map((n) => [n.name, n]));
this.stickyOverlaps = this.categorizeStickyOverlaps(stickyNotes);
const nodesInStickySubgraphs = new Set<string>();
for (const { nodeNames } of this.stickyOverlaps.multiNodeOverlap) {
for (const name of nodeNames) {
nodesInStickySubgraphs.add(name);
}
}
this.agentSubgraphs = this.findAgentSubgraphs(nodesInStickySubgraphs);
this.nodesInSubgraphs = new Set<string>(nodesInStickySubgraphs);
for (const { agentNode, aiConnectedNodeNames } of this.agentSubgraphs) {
this.nodesInSubgraphs.add(agentNode.name);
for (const name of aiConnectedNodeNames) {
this.nodesInSubgraphs.add(name);
}
}
}
/**
* Build the complete mermaid diagram
*/
build(): string[] {
// Add comments for stickies that don't overlap any nodes
for (const sticky of this.stickyOverlaps.noOverlap) {
this.lines.push(this.formatStickyComment(sticky.content));
}
// Build main flow
this.buildMainFlow();
// Build subgraph sections
this.buildStickySubgraphs();
this.buildAgentSubgraphs();
// Build cross-subgraph connections
this.buildConnectionsToSubgraphs();
this.buildConnectionsFromSubgraphs();
this.buildInterSubgraphConnections();
return ['```mermaid', 'flowchart TD', ...this.lines, '```'];
}
// Initialization helpers
private createNodeIdMap(): Map<string, string> {
const map = new Map<string, string>();
this.nodes.forEach((node, idx) => {
map.set(node.name, `n${idx + 1}`);
});
return map;
}
private categorizeStickyOverlaps(stickyNotes: TemplateNode[]): StickyOverlapResult {
const result: StickyOverlapResult = {
noOverlap: [],
singleNodeOverlap: new Map(),
multiNodeOverlap: [],
};
for (const sticky of stickyNotes) {
const bounds = this.extractStickyBounds(sticky);
if (!bounds.content) continue;
const overlappingNodes = this.nodes.filter((node) =>
this.isNodeWithinStickyBounds(node.position[0], node.position[1], bounds),
);
if (overlappingNodes.length === 0) {
result.noOverlap.push(bounds);
} else if (overlappingNodes.length === 1) {
result.singleNodeOverlap.set(overlappingNodes[0].name, bounds);
} else {
result.multiNodeOverlap.push({
sticky: bounds,
nodeNames: overlappingNodes.map((n) => n.name),
});
}
}
return result;
}
private extractStickyBounds(node: TemplateNode): StickyBounds {
return {
node,
x: node.position[0],
y: node.position[1],
width:
typeof node.parameters.width === 'number' ? node.parameters.width : DEFAULT_STICKY_WIDTH,
height:
typeof node.parameters.height === 'number' ? node.parameters.height : DEFAULT_STICKY_HEIGHT,
content: typeof node.parameters.content === 'string' ? node.parameters.content.trim() : '',
};
}
private isNodeWithinStickyBounds(nodeX: number, nodeY: number, sticky: StickyBounds): boolean {
const nodeCenterX = nodeX + DEFAULT_NODE_WIDTH / 2;
const nodeCenterY = nodeY + DEFAULT_NODE_HEIGHT / 2;
return (
nodeCenterX >= sticky.x &&
nodeCenterX <= sticky.x + sticky.width &&
nodeCenterY >= sticky.y &&
nodeCenterY <= sticky.y + sticky.height
);
}
private findAgentSubgraphs(nodesInStickySubgraphs: Set<string>): AgentSubgraph[] {
const agentSubgraphs: AgentSubgraph[] = [];
const agentNodes = this.nodes.filter(
(n) => n.type === AGENT_NODE_TYPE && !nodesInStickySubgraphs.has(n.name),
);
const reverseConnections = this.buildReverseConnectionMap();
for (const agentNode of agentNodes) {
const incomingConns = reverseConnections.get(agentNode.name) ?? [];
const aiConnectedNodeNames = incomingConns
.filter(
({ connType, sourceName }) =>
connType !== 'main' && !nodesInStickySubgraphs.has(sourceName),
)
.map(({ sourceName }) => sourceName);
const nestedStickySubgraphs = this.findNestedStickySubgraphs(incomingConns);
if (aiConnectedNodeNames.length > 0 || nestedStickySubgraphs.length > 0) {
agentSubgraphs.push({ agentNode, aiConnectedNodeNames, nestedStickySubgraphs });
}
}
return agentSubgraphs;
}
private findNestedStickySubgraphs(
incomingConns: Array<{ sourceName: string; connType: string }>,
): Array<{ sticky: StickyBounds; nodeNames: string[] }> {
const nested: Array<{ sticky: StickyBounds; nodeNames: string[] }> = [];
for (const stickySubgraph of this.stickyOverlaps.multiNodeOverlap) {
const allNodesConnectToAgent = stickySubgraph.nodeNames.every((nodeName) =>
incomingConns.some(
({ sourceName, connType }) => sourceName === nodeName && connType !== 'main',
),
);
if (allNodesConnectToAgent) {
nested.push(stickySubgraph);
}
}
return nested;
}
private buildReverseConnectionMap(): Map<
string,
Array<{ sourceName: string; connType: string }>
> {
const reverseConnections = new Map<string, Array<{ sourceName: string; connType: string }>>();
for (const [sourceName, sourceConns] of Object.entries(this.connections)) {
for (const { nodeName: targetName, connType } of this.getConnectionTargets(sourceConns)) {
if (!reverseConnections.has(targetName)) {
reverseConnections.set(targetName, []);
}
reverseConnections.get(targetName)!.push({ sourceName, connType });
}
}
return reverseConnections;
}
// Connection helpers
private getConnectionTargets(
nodeConns: TemplateConnections[string],
): Array<{ nodeName: string; connType: string }> {
const targets: Array<{ nodeName: string; connType: string }> = [];
for (const [connType, connList] of Object.entries(nodeConns)) {
for (const connArray of connList) {
if (!connArray) continue;
for (const conn of connArray) {
targets.push({ nodeName: conn.node, connType });
}
}
}
return targets;
}
private getMainConnectionTargets(nodeConns: TemplateConnections[string]): string[] {
if (!nodeConns.main) return [];
return nodeConns.main
.filter((connArray): connArray is NonNullable<typeof connArray> => connArray !== null)
.flatMap((connArray) => connArray.map((conn) => conn.node));
}
private findStartNodes(): TemplateNode[] {
const nodesWithIncoming = new Set<string>();
Object.values(this.connections)
.filter((conn) => conn.main)
.forEach((sourceConnections) => {
for (const connArray of sourceConnections.main) {
if (!connArray) continue;
for (const conn of connArray) {
nodesWithIncoming.add(conn.node);
}
}
});
return this.nodes.filter((n) => !nodesWithIncoming.has(n.name));
}
// Node definition helpers
private formatStickyComment(content: string): string {
return `%% ${content.replace(/\n/g, ' ').replace(/\s+/g, ' ').trim()}`;
}
private getNextSubgraphId(): string {
this.subgraphCounter++;
return `sg${this.subgraphCounter}`;
}
private buildNodeDefinition(node: TemplateNode, id: string): string {
const isConditional = CONDITIONAL_NODE_TYPES.has(node.type);
if (this.options.includeNodeName) {
const escapedName = node.name.replace(/"/g, "'");
return isConditional ? `${id}{"${escapedName}"}` : `${id}["${escapedName}"]`;
}
return id;
}
private buildNodeCommentLines(node: TemplateNode): string[] {
const lines: string[] = [];
if (
this.options.includeNodeType ||
this.options.includeNodeParameters ||
this.options.includeNodeId
) {
const idPart = this.options.includeNodeId && node.id ? `[${node.id}] ` : '';
const typePart = this.options.includeNodeType ? this.buildNodeTypePart(node) : '';
const paramsPart =
this.options.includeNodeParameters && Object.keys(node.parameters).length > 0
? ` | ${JSON.stringify(node.parameters)}`
: '';
if (idPart || typePart || paramsPart) {
lines.push(`%% ${idPart}${typePart}${paramsPart}`);
}
}
return lines;
}
private buildNodeTypePart(node: TemplateNode): string {
const parts = [node.type];
if (typeof node.parameters.resource === 'string' && node.parameters.resource) {
parts.push(node.parameters.resource);
}
if (typeof node.parameters.operation === 'string' && node.parameters.operation) {
parts.push(node.parameters.operation);
}
return parts.join(':');
}
private buildSingleNodeLines(node: TemplateNode, id: string): string[] {
const lines = this.buildNodeCommentLines(node);
lines.push(this.buildNodeDefinition(node, id));
return lines;
}
private defineNodeIfNeeded(nodeName: string): string {
const node = this.nodeByName.get(nodeName);
const id = this.nodeIdMap.get(nodeName);
if (!node || !id) return id ?? '';
if (!this.definedNodes.has(nodeName)) {
this.definedNodes.add(nodeName);
const stickyForNode = this.stickyOverlaps.singleNodeOverlap.get(nodeName);
if (stickyForNode) {
this.lines.push(this.formatStickyComment(stickyForNode.content));
}
this.lines.push(...this.buildNodeCommentLines(node));
return this.buildNodeDefinition(node, id);
}
return id;
}
/**
* Defines target node if not already defined, and adds connection from source.
* Returns true if target was newly defined with a 'main' connection type.
*/
private defineTargetAndConnect(sourceId: string, targetName: string, connType: string): boolean {
const targetId = this.nodeIdMap.get(targetName);
if (!targetId) return false;
if (!this.definedNodes.has(targetName)) {
const targetNode = this.nodeByName.get(targetName);
if (targetNode) {
const stickyForNode = this.stickyOverlaps.singleNodeOverlap.get(targetName);
if (stickyForNode) {
this.lines.push(this.formatStickyComment(stickyForNode.content));
}
this.lines.push(...this.buildNodeCommentLines(targetNode));
this.addConnection(sourceId, this.buildNodeDefinition(targetNode, targetId), connType);
this.definedNodes.add(targetName);
return connType === 'main';
}
} else {
this.addConnection(sourceId, targetId, connType);
}
return false;
}
private addConnection(sourceId: string, targetDef: string, connType: string): void {
const arrow = connType === 'main' ? '-->' : `-.${connType}.->`;
this.lines.push(`${sourceId} ${arrow} ${targetDef}`);
}
// Main flow building
private buildMainFlow(): void {
const visited = new Set<string>();
const startNodes = this.findStartNodes();
const traverse = (nodeName: string) => {
if (visited.has(nodeName)) return;
visited.add(nodeName);
const nodeConns = this.connections[nodeName];
const targets = nodeConns ? this.getConnectionTargets(nodeConns) : [];
for (const { nodeName: targetName, connType } of targets) {
if (this.nodesInSubgraphs.has(targetName) || this.nodesInSubgraphs.has(nodeName)) continue;
const sourceId = this.nodeIdMap.get(nodeName);
const targetDef = this.defineNodeIfNeeded(targetName);
if (sourceId) {
this.addConnection(sourceId, targetDef, connType);
}
}
if (nodeConns) {
this.getMainConnectionTargets(nodeConns)
.filter((target) => !this.nodesInSubgraphs.has(target))
.forEach((target) => traverse(target));
}
};
for (const startNode of startNodes) {
if (this.nodesInSubgraphs.has(startNode.name)) continue;
const id = this.nodeIdMap.get(startNode.name);
if (id && !this.definedNodes.has(startNode.name)) {
const stickyForNode = this.stickyOverlaps.singleNodeOverlap.get(startNode.name);
if (stickyForNode) {
this.lines.push(this.formatStickyComment(stickyForNode.content));
}
this.lines.push(...this.buildSingleNodeLines(startNode, id));
this.definedNodes.add(startNode.name);
}
traverse(startNode.name);
}
}
// Sticky subgraph building
private buildStickySubgraphs(): void {
const nestedStickyIds = this.getNestedStickyIds();
for (const { sticky, nodeNames } of this.stickyOverlaps.multiNodeOverlap) {
if (nestedStickyIds.has(sticky.node.id ?? '')) continue;
this.buildSingleStickySubgraph(sticky, nodeNames);
}
}
private getNestedStickyIds(): Set<string> {
const ids = new Set<string>();
for (const { nestedStickySubgraphs } of this.agentSubgraphs) {
for (const { sticky } of nestedStickySubgraphs) {
ids.add(sticky.node.id ?? '');
}
}
return ids;
}
private buildSingleStickySubgraph(sticky: StickyBounds, nodeNames: string[]): void {
const subgraphId = this.getNextSubgraphId();
const subgraphLabel = sticky.content.replace(/\n/g, ' ').replace(/\s+/g, ' ').trim();
this.lines.push(this.formatStickyComment(sticky.content));
this.lines.push(`subgraph ${subgraphId}["${subgraphLabel.replace(/"/g, "'")}"]`);
const subgraphNodeSet = new Set(nodeNames);
const subgraphDefinedNodes = new Set<string>();
// Find and define start nodes
const startNodes = this.findSubgraphStartNodes(nodeNames, subgraphNodeSet);
for (const startNode of startNodes) {
const id = this.nodeIdMap.get(startNode.name);
if (id && !subgraphDefinedNodes.has(startNode.name)) {
this.lines.push(...this.buildSingleNodeLines(startNode, id));
subgraphDefinedNodes.add(startNode.name);
}
}
// Build internal connections
this.buildSubgraphInternalConnections(startNodes, subgraphNodeSet, subgraphDefinedNodes);
// Mark all as defined
for (const name of nodeNames) {
this.definedNodes.add(name);
}
this.lines.push('end');
}
private findSubgraphStartNodes(
nodeNames: string[],
subgraphNodeSet: Set<string>,
): TemplateNode[] {
const nodesWithInternalIncoming = new Set<string>();
for (const nodeName of nodeNames) {
const nodeConns = this.connections[nodeName];
if (!nodeConns) continue;
for (const { nodeName: targetName } of this.getConnectionTargets(nodeConns)) {
if (subgraphNodeSet.has(targetName)) {
nodesWithInternalIncoming.add(targetName);
}
}
}
return nodeNames
.filter((name) => !nodesWithInternalIncoming.has(name))
.map((name) => this.nodeByName.get(name))
.filter((node): node is TemplateNode => node !== undefined);
}
private buildSubgraphInternalConnections(
startNodes: TemplateNode[],
subgraphNodeSet: Set<string>,
subgraphDefinedNodes: Set<string>,
): void {
const visited = new Set<string>();
const traverse = (nodeName: string) => {
if (visited.has(nodeName)) return;
visited.add(nodeName);
const nodeConns = this.connections[nodeName];
if (!nodeConns) return;
const sourceId = this.nodeIdMap.get(nodeName);
if (!sourceId) return;
for (const { nodeName: targetName, connType } of this.getConnectionTargets(nodeConns)) {
if (!subgraphNodeSet.has(targetName)) continue;
const targetId = this.nodeIdMap.get(targetName);
const targetNode = this.nodeByName.get(targetName);
if (!targetId || !targetNode) continue;
const arrow = connType === 'main' ? '-->' : `-.${connType}.->`;
if (!subgraphDefinedNodes.has(targetName)) {
this.lines.push(...this.buildNodeCommentLines(targetNode));
this.lines.push(`${sourceId} ${arrow} ${this.buildNodeDefinition(targetNode, targetId)}`);
subgraphDefinedNodes.add(targetName);
} else {
this.lines.push(`${sourceId} ${arrow} ${targetId}`);
}
}
this.getMainConnectionTargets(nodeConns)
.filter((t) => subgraphNodeSet.has(t))
.forEach((t) => traverse(t));
};
startNodes.forEach((n) => traverse(n.name));
}
// Agent subgraph building
private buildAgentSubgraphs(): void {
for (const agentSubgraph of this.agentSubgraphs) {
this.buildSingleAgentSubgraph(agentSubgraph);
}
}
private buildSingleAgentSubgraph(agentSubgraph: AgentSubgraph): void {
const { agentNode, aiConnectedNodeNames, nestedStickySubgraphs } = agentSubgraph;
const agentId = this.nodeIdMap.get(agentNode.name);
if (!agentId) return;
const subgraphId = this.getNextSubgraphId();
this.lines.push(`subgraph ${subgraphId}["${agentNode.name.replace(/"/g, "'")}"]`);
// Define direct AI-connected nodes
for (const nodeName of aiConnectedNodeNames) {
this.defineAgentConnectedNode(nodeName);
}
// Build nested sticky subgraphs
for (const { sticky, nodeNames } of nestedStickySubgraphs) {
this.buildNestedStickySubgraph(sticky, nodeNames);
}
// Define agent node and its connections
this.buildAgentNodeConnections(agentNode, agentId, aiConnectedNodeNames, nestedStickySubgraphs);
// Mark all as defined
this.markAgentSubgraphNodesDefined(agentNode, aiConnectedNodeNames, nestedStickySubgraphs);
this.lines.push('end');
}
private defineAgentConnectedNode(nodeName: string): void {
const node = this.nodeByName.get(nodeName);
const id = this.nodeIdMap.get(nodeName);
if (!node || !id) return;
const stickyForNode = this.stickyOverlaps.singleNodeOverlap.get(nodeName);
if (stickyForNode) {
this.lines.push(this.formatStickyComment(stickyForNode.content));
}
this.lines.push(...this.buildSingleNodeLines(node, id));
}
private buildNestedStickySubgraph(sticky: StickyBounds, nodeNames: string[]): void {
const nestedSubgraphId = this.getNextSubgraphId();
const label = sticky.content.replace(/\n/g, ' ').replace(/\s+/g, ' ').trim();
this.lines.push(this.formatStickyComment(sticky.content));
this.lines.push(`subgraph ${nestedSubgraphId}["${label.replace(/"/g, "'")}"]`);
for (const nodeName of nodeNames) {
const node = this.nodeByName.get(nodeName);
const id = this.nodeIdMap.get(nodeName);
if (node && id) {
this.lines.push(...this.buildSingleNodeLines(node, id));
}
}
this.lines.push('end');
}
private buildAgentNodeConnections(
agentNode: TemplateNode,
agentId: string,
aiConnectedNodeNames: string[],
nestedStickySubgraphs: Array<{ sticky: StickyBounds; nodeNames: string[] }>,
): void {
const stickyForAgent = this.stickyOverlaps.singleNodeOverlap.get(agentNode.name);
if (stickyForAgent) {
this.lines.push(this.formatStickyComment(stickyForAgent.content));
}
this.lines.push(...this.buildNodeCommentLines(agentNode));
const allAiNodeNames = [
...aiConnectedNodeNames,
...nestedStickySubgraphs.flatMap(({ nodeNames }) => nodeNames),
];
let agentDefined = false;
for (const nodeName of allAiNodeNames) {
const sourceId = this.nodeIdMap.get(nodeName);
const nodeConns = this.connections[nodeName];
if (!sourceId || !nodeConns) continue;
for (const { nodeName: targetName, connType } of this.getConnectionTargets(nodeConns)) {
if (targetName !== agentNode.name || connType === 'main') continue;
const arrow = `-.${connType}.->`;
if (!agentDefined) {
this.lines.push(`${sourceId} ${arrow} ${this.buildNodeDefinition(agentNode, agentId)}`);
agentDefined = true;
} else {
this.lines.push(`${sourceId} ${arrow} ${agentId}`);
}
}
}
if (!agentDefined) {
this.lines.push(this.buildNodeDefinition(agentNode, agentId));
}
}
/**
 * Records every node belonging to an agent subgraph as already defined, so the
 * cross-subgraph passes do not re-emit them. Insertion order is preserved:
 * AI-connected nodes first, then nested-sticky nodes, then the agent itself.
 */
private markAgentSubgraphNodesDefined(
  agentNode: TemplateNode,
  aiConnectedNodeNames: string[],
  nestedStickySubgraphs: Array<{ sticky: StickyBounds; nodeNames: string[] }>,
): void {
  const names = [
    ...aiConnectedNodeNames,
    ...nestedStickySubgraphs.flatMap(({ nodeNames }) => nodeNames),
    agentNode.name,
  ];
  names.forEach((name) => this.definedNodes.add(name));
}
// Cross-subgraph connections
/**
 * Emits connections from already-defined nodes outside any subgraph into
 * nodes that live inside a subgraph. Pairs with an unresolved id on either
 * side are skipped.
 */
private buildConnectionsToSubgraphs(): void {
  for (const sourceName of this.definedNodes) {
    // Sources inside subgraphs are handled by the subgraph passes.
    if (this.nodesInSubgraphs.has(sourceName)) continue;
    const conns = this.connections[sourceName];
    if (!conns) continue;
    const sourceId = this.nodeIdMap.get(sourceName);
    for (const { nodeName: targetName, connType } of this.getConnectionTargets(conns)) {
      if (!this.nodesInSubgraphs.has(targetName)) continue;
      const targetId = this.nodeIdMap.get(targetName);
      if (sourceId && targetId) {
        this.addConnection(sourceId, targetId, connType);
      }
    }
  }
}
/**
 * Emits connections that leave a subgraph toward outside nodes, defining each
 * outside target on first use. Targets newly defined via a 'main' connection
 * are queued and traversed afterwards so the chain beyond them is rendered too.
 */
private buildConnectionsFromSubgraphs(): void {
  const pending: string[] = [];
  for (const subgraphNodeName of this.nodesInSubgraphs) {
    const conns = this.connections[subgraphNodeName];
    if (!conns) continue;
    const sourceId = this.nodeIdMap.get(subgraphNodeName);
    if (!sourceId) continue;
    for (const { nodeName: targetName, connType } of this.getConnectionTargets(conns)) {
      // Subgraph-to-subgraph links are handled by buildInterSubgraphConnections.
      if (this.nodesInSubgraphs.has(targetName)) continue;
      const isNewMainConnection = this.defineTargetAndConnect(sourceId, targetName, connType);
      if (isNewMainConnection) {
        pending.push(targetName);
      }
    }
  }
  this.continueTraversalFromNodes(pending);
}
/**
 * Depth-first traversal from nodes newly defined outside subgraphs, emitting
 * their downstream connections. Connections back into a subgraph only add an
 * edge; connections to other outside nodes define-and-connect the target.
 * Recursion follows 'main' connections only, guarded by a visited set.
 */
private continueTraversalFromNodes(nodesToProcess: string[]): void {
const visited = new Set<string>();
const traverse = (nodeName: string) => {
// Skip revisits and subgraph members (their edges are handled elsewhere).
if (visited.has(nodeName) || this.nodesInSubgraphs.has(nodeName)) return;
visited.add(nodeName);
const nodeConns = this.connections[nodeName];
if (!nodeConns) return;
const sourceId = this.nodeIdMap.get(nodeName);
if (!sourceId) return;
for (const { nodeName: targetName, connType } of this.getConnectionTargets(nodeConns)) {
if (this.nodesInSubgraphs.has(targetName)) {
// Edge back into a subgraph: connect only, never redefine the target.
const targetId = this.nodeIdMap.get(targetName);
if (targetId) {
this.addConnection(sourceId, targetId, connType);
}
continue;
}
this.defineTargetAndConnect(sourceId, targetName, connType);
}
// Recurse along 'main' connections to outside nodes only.
this.getMainConnectionTargets(nodeConns)
.filter((t) => !this.nodesInSubgraphs.has(t))
.forEach((t) => traverse(t));
};
nodesToProcess.forEach((n) => traverse(n));
}
/**
 * Emits connections between nodes that live in two different subgraphs,
 * deduplicated per (source, type, target). Nodes inside nested sticky
 * subgraphs are excluded — their edges are rendered internally.
 */
private buildInterSubgraphConnections(): void {
  const nestedIds = this.getNestedStickyIds();
  const emitted = new Set<string>();
  for (const sourceName of this.nodesInSubgraphs) {
    const conns = this.connections[sourceName];
    if (!conns) continue;
    for (const { nodeName: targetName, connType } of this.getConnectionTargets(conns)) {
      if (!this.nodesInSubgraphs.has(targetName)) continue;
      // Nested-sticky members are handled internally by their subgraph.
      if (this.isInNestedSticky(sourceName, nestedIds)) continue;
      if (this.isInNestedSticky(targetName, nestedIds)) continue;
      // Same-subgraph edges were already rendered inside the subgraph.
      const sameSubgraph =
        this.getSubgraphId(sourceName, nestedIds) === this.getSubgraphId(targetName, nestedIds);
      if (sameSubgraph) continue;
      const sourceId = this.nodeIdMap.get(sourceName);
      const targetId = this.nodeIdMap.get(targetName);
      if (!sourceId || !targetId) continue;
      const dedupeKey = `${sourceId}-${connType}-${targetId}`;
      if (emitted.has(dedupeKey)) continue;
      emitted.add(dedupeKey);
      this.addConnection(sourceId, targetId, connType);
    }
  }
}
/**
 * Whether the named node belongs to a multi-node sticky overlap whose sticky
 * id is in the nested-sticky id set.
 */
private isInNestedSticky(nodeName: string, nestedStickyIds: Set<string>): boolean {
  for (const { sticky, nodeNames } of this.stickyOverlaps.multiNodeOverlap) {
    if (nodeNames.includes(nodeName) && nestedStickyIds.has(sticky.node.id ?? '')) {
      return true;
    }
  }
  return false;
}
/**
 * Returns a unique identifier for the subgraph a node belongs to:
 * `sticky:<id>` for a standalone (non-nested) sticky subgraph,
 * `agent:<id>` for an agent subgraph, or `'none'` when the node is in neither.
 * Standalone sticky membership takes precedence over agent membership.
 */
private getSubgraphId(nodeName: string, nestedStickyIds: Set<string>): string {
  // Standalone sticky subgraph (nested stickies are deliberately excluded).
  for (const { sticky, nodeNames } of this.stickyOverlaps.multiNodeOverlap) {
    if (nodeNames.includes(nodeName) && !nestedStickyIds.has(sticky.node.id ?? '')) {
      return `sticky:${sticky.node.id}`;
    }
  }
  // Agent subgraph: matches the agent itself or any AI-connected node.
  for (const { agentNode, aiConnectedNodeNames } of this.agentSubgraphs) {
    if (agentNode.name === nodeName || aiConnectedNodeNames.includes(nodeName)) {
      return `agent:${agentNode.id}`;
    }
  }
  return 'none';
}
}
// Public API
/**
 * Generates a Mermaid flowchart diagram string from a workflow.
 *
 * @param input - Workflow metadata (or a bare Mermaid workflow input) to render.
 * @param options - Partial rendering options; merged over DEFAULT_MERMAID_OPTIONS.
 * @returns The newline-joined Mermaid diagram source.
 */
export function mermaidStringify(
  input: WorkflowMetadata | MermaidWorkflowInput,
  options?: MermaidOptions,
): string {
  const { workflow } = input;
  const effectiveOptions: Required<MermaidOptions> = {
    ...DEFAULT_MERMAID_OPTIONS,
    ...options,
  };
  const builder = new MermaidBuilder(workflow.nodes, workflow.connections, effectiveOptions);
  return builder.build().join('\n');
}

View File

@ -1,146 +0,0 @@
import type {
NodeConfigurationEntry,
NodeConfigurationsMap,
TemplateNode,
WorkflowMetadata,
} from '../templates/types';
/**
 * Average character-to-token ratio for Anthropic models.
 * Used for rough token count estimation from character counts.
 */
const AVG_CHARS_PER_TOKEN = 3.5;
/**
 * Maximum characters allowed for a single node example configuration.
 * Examples exceeding this limit are filtered out to avoid context bloat.
 * Based on ~5000 tokens at AVG_CHARS_PER_TOKEN ratio.
 */
const MAX_NODE_EXAMPLE_CHARS = 5000 * AVG_CHARS_PER_TOKEN;
// n8n node type for sticky notes; these are annotations, not executable nodes,
// and are excluded when collecting configuration examples.
const STICKY_NOTE_TYPE = 'n8n-nodes-base.stickyNote';
/**
 * Extract a reusable configuration entry from a single workflow node.
 *
 * Returns null when the node has no parameters, or when the serialized
 * parameters exceed MAX_NODE_EXAMPLE_CHARS (to keep example context small).
 */
export function collectSingleNodeConfiguration(node: TemplateNode): NodeConfigurationEntry | null {
  if (Object.keys(node.parameters).length === 0) return null;
  const serialized = JSON.stringify(node.parameters);
  return serialized.length > MAX_NODE_EXAMPLE_CHARS
    ? null
    : { version: node.typeVersion, parameters: node.parameters };
}
/**
 * Add a node configuration under its node type in a configurations map,
 * creating the bucket on first use. Mutates the map in place for efficiency
 * when processing many nodes.
 */
export function addNodeConfigurationToMap(
  nodeType: string,
  config: NodeConfigurationEntry,
  configurationsMap: NodeConfigurationsMap,
): void {
  const bucket = configurationsMap[nodeType] ?? [];
  bucket.push(config);
  configurationsMap[nodeType] = bucket;
}
/**
 * Collect node configurations from multiple workflows, keyed by node type.
 * Sticky notes and nodes exceeding the size limit are skipped.
 */
export function collectNodeConfigurationsFromWorkflows(
  workflows: WorkflowMetadata[],
): NodeConfigurationsMap {
  const result: NodeConfigurationsMap = {};
  const allNodes = workflows.flatMap((metadata) => metadata.workflow.nodes);
  for (const node of allNodes) {
    // Sticky notes are annotations, not configurable nodes.
    if (node.type === STICKY_NOTE_TYPE) continue;
    const entry = collectSingleNodeConfiguration(node);
    if (entry) {
      addNodeConfigurationToMap(node.type, entry, result);
    }
  }
  return result;
}
/**
 * Get node configurations for a given node type from workflow templates,
 * optionally restricted to a specific node version. Oversized or empty
 * configurations are filtered out by collectSingleNodeConfiguration.
 */
export function getNodeConfigurationsFromTemplates(
  templates: WorkflowMetadata[],
  nodeType: string,
  nodeVersion?: number,
): NodeConfigurationEntry[] {
  const matches: NodeConfigurationEntry[] = [];
  for (const { workflow } of templates) {
    for (const node of workflow.nodes) {
      const typeMatches = node.type === nodeType;
      const versionMatches = nodeVersion === undefined || node.typeVersion === nodeVersion;
      if (!typeMatches || !versionMatches) continue;
      const entry = collectSingleNodeConfiguration(node);
      if (entry) {
        matches.push(entry);
      }
    }
  }
  return matches;
}
/**
 * Format node configuration examples as markdown, bounded by a character budget.
 *
 * @param nodeType - Node type rendered in the section heading.
 * @param configurations - Candidate configuration entries to render.
 * @param nodeVersion - When provided, only entries with this exact version are used.
 * @param maxExamples - Maximum number of examples to render (default 1).
 * @param maxChars - Character budget across all rendered JSON payloads.
 * @returns Markdown with a heading and fenced JSON examples, or a
 *   "No examples found." section when nothing matches.
 */
export function formatNodeConfigurationExamples(
  nodeType: string,
  configurations: NodeConfigurationEntry[],
  nodeVersion?: number,
  maxExamples: number = 1,
  maxChars: number = MAX_NODE_EXAMPLE_CHARS,
): string {
  const header = `## Node Configuration Examples: ${nodeType}`;
  // Explicit undefined check (not truthiness) so a requested version of 0 still
  // filters — consistent with getNodeConfigurationsFromTemplates.
  const filtered =
    nodeVersion !== undefined
      ? configurations.filter((c) => c.version === nodeVersion)
      : configurations;
  if (filtered.length === 0) {
    return `${header}\n\nNo examples found.`;
  }
  // Render up to maxExamples entries, accumulating within the character budget.
  const parts: string[] = [];
  let usedChars = 0;
  for (const config of filtered.slice(0, maxExamples)) {
    const exampleStr = JSON.stringify(config.parameters, null, 2);
    // The budget counts only the JSON payload, not the markdown scaffolding.
    if (usedChars + exampleStr.length > maxChars) continue;
    parts.push(`### Example (version ${config.version})`, '', '```json', exampleStr, '```', '');
    usedChars += exampleStr.length;
  }
  return [header, '', ...parts].join('\n');
}

View File

@ -1,4 +1,4 @@
import { ChatAnthropic } from '@langchain/anthropic';
import { ChatAnthropic, type ChatAnthropicInput } from '@langchain/anthropic';
import type { LLMResult } from '@langchain/core/outputs';
import {
getProxyAgent,
@ -21,7 +21,7 @@ import { searchModels } from './methods/searchModels';
const ANTHROPIC_MODEL_BUILDER_HINT = {
message:
'Default to claude-sonnet-4-6 (latest Sonnet); use claude-opus-4-7 when the user needs the most capable model. Never use Claude Sonnet 4.5, Claude 3.x, Claude 2, or LEGACY options — those are superseded and are not valid choices.',
'Default to claude-sonnet-4-6 (latest Sonnet); use claude-opus-4-7 when the user needs the most capable model. Never use Claude Sonnet 4.5, Claude 3.x, Claude 2, or LEGACY options — those are superseded and are not valid choices. When extended thinking is needed on Opus 4.7+, set Thinking Mode to Adaptive and choose an Effort level. The legacy Manual thinking mode is rejected by Opus 4.7.',
};
const modelField: INodeProperties = {
@ -92,8 +92,8 @@ export class LmChatAnthropic implements INodeType {
name: 'lmChatAnthropic',
icon: 'file:anthropic.svg',
group: ['transform'],
version: [1, 1.1, 1.2, 1.3, 1.4],
defaultVersion: 1.4,
version: [1, 1.1, 1.2, 1.3, 1.4, 1.5],
defaultVersion: 1.5,
description: 'Language Model Anthropic',
defaults: {
name: 'Anthropic Chat Model',
@ -225,7 +225,44 @@ export class LmChatAnthropic implements INodeType {
'The model. Choose from the list, or specify an ID. <a href="https://docs.anthropic.com/claude/docs/models-overview">Learn more</a>.',
displayOptions: {
show: {
'@version': [{ _cnd: { gte: 1.4 } }],
'@version': [1.4],
},
},
},
{
displayName: 'Model',
name: 'model',
type: 'resourceLocator',
default: {
mode: 'list',
value: 'claude-sonnet-4-6',
cachedResultName: 'Claude Sonnet 4.6',
},
builderHint: ANTHROPIC_MODEL_BUILDER_HINT,
required: true,
modes: [
{
displayName: 'From List',
name: 'list',
type: 'list',
placeholder: 'Select a model...',
typeOptions: {
searchListMethod: 'searchModels',
searchable: true,
},
},
{
displayName: 'ID',
name: 'id',
type: 'string',
placeholder: 'Claude Sonnet',
},
],
description:
'The model. Choose from the list, or specify an ID. <a href="https://docs.anthropic.com/claude/docs/models-overview">Learn more</a>.',
displayOptions: {
show: {
'@version': [{ _cnd: { gte: 1.5 } }],
},
},
},
@ -255,6 +292,7 @@ export class LmChatAnthropic implements INodeType {
displayOptions: {
hide: {
thinking: [true],
thinkingMode: ['adaptive', 'manual'],
},
},
},
@ -269,6 +307,7 @@ export class LmChatAnthropic implements INodeType {
displayOptions: {
hide: {
thinking: [true],
thinkingMode: ['adaptive', 'manual'],
},
},
},
@ -283,6 +322,7 @@ export class LmChatAnthropic implements INodeType {
displayOptions: {
hide: {
thinking: [true],
thinkingMode: ['adaptive', 'manual'],
},
},
},
@ -292,6 +332,11 @@ export class LmChatAnthropic implements INodeType {
type: 'boolean',
default: false,
description: 'Whether to enable thinking mode for the model',
displayOptions: {
show: {
'@version': [{ _cnd: { lte: 1.4 } }],
},
},
},
{
displayName: 'Thinking Budget (Tokens)',
@ -301,10 +346,94 @@ export class LmChatAnthropic implements INodeType {
description: 'The maximum number of tokens to use for thinking',
displayOptions: {
show: {
'@version': [{ _cnd: { lte: 1.4 } }],
thinking: [true],
},
},
},
{
displayName: 'Thinking Mode',
name: 'thinkingMode',
type: 'options',
default: 'disabled',
description: 'How extended thinking should be configured for the model',
options: [
{
name: 'Disabled',
value: 'disabled',
description: 'No extended thinking',
},
{
name: 'Adaptive (Recommended)',
value: 'adaptive',
description: 'Claude decides how much to think; control with Effort',
},
{
name: 'Manual (Deprecated)',
value: 'manual',
description: 'Legacy fixed-budget mode; rejected by Opus 4.7+',
},
],
displayOptions: {
show: {
'@version': [{ _cnd: { gte: 1.5 } }],
},
},
},
{
displayName: 'Effort',
name: 'effort',
type: 'options',
default: 'medium',
description: 'Effort level for adaptive thinking',
// eslint-disable-next-line n8n-nodes-base/node-param-options-type-unsorted-items
options: [
{ name: 'Low', value: 'low' },
{ name: 'Medium', value: 'medium' },
{ name: 'High', value: 'high' },
{ name: 'X-High', value: 'xhigh' },
{ name: 'Max', value: 'max' },
],
displayOptions: {
show: {
'@version': [{ _cnd: { gte: 1.5 } }],
thinkingMode: ['adaptive'],
'/model.value': [{ _cnd: { includes: 'opus' } }],
},
},
},
{
displayName: 'Effort',
name: 'effort',
type: 'options',
default: 'medium',
description: 'Effort level for adaptive thinking',
options: [
{ name: 'Low', value: 'low' },
{ name: 'Medium', value: 'medium' },
{ name: 'High', value: 'high' },
],
displayOptions: {
show: {
'@version': [{ _cnd: { gte: 1.5 } }],
thinkingMode: ['adaptive'],
'/model.value': [{ _cnd: { regex: '^(?!.*opus).*' } }],
},
},
},
{
displayName: 'Thinking Budget (Tokens)',
name: 'thinkingBudget',
type: 'number',
default: MIN_THINKING_BUDGET,
description: 'Maximum tokens used for thinking. Manual mode is rejected by Opus 4.7+.',
displayOptions: {
show: {
'@version': [{ _cnd: { gte: 1.5 } }],
thinkingMode: ['manual'],
},
},
},
],
},
],
@ -333,30 +462,42 @@ export class LmChatAnthropic implements INodeType {
const options = this.getNodeParameter('options', itemIndex, {}) as {
maxTokensToSample?: number;
temperature: number;
temperature?: number;
topK?: number;
topP?: number;
thinking?: boolean;
thinkingBudget?: number;
};
let invocationKwargs = {};
const tokensUsageParser = (result: LLMResult) => {
const usage = (result?.llmOutput?.usage as {
input_tokens: number;
output_tokens: number;
}) ?? {
input_tokens: 0,
output_tokens: 0,
};
return {
completionTokens: usage.output_tokens,
promptTokens: usage.input_tokens,
totalTokens: usage.input_tokens + usage.output_tokens,
};
thinkingMode?: 'disabled' | 'adaptive' | 'manual';
effort?: 'low' | 'medium' | 'high' | 'xhigh' | 'max';
};
if (options.thinking) {
const isOpus47Model = modelName.startsWith('claude-opus-4-7');
const thinkingMode: 'disabled' | 'adaptive' | 'manual' =
version >= 1.5
? (options.thinkingMode ?? 'disabled')
: options.thinking
? 'manual'
: 'disabled';
if (thinkingMode === 'manual' && isOpus47Model) {
throw new NodeOperationError(
this.getNode(),
`Manual thinking mode is not supported on "${modelName}". Use Thinking Mode = Adaptive (with Effort) instead.`,
{ itemIndex },
);
}
let invocationKwargs: Record<string, unknown> = {};
if (thinkingMode === 'adaptive') {
invocationKwargs = {
thinking: { type: 'adaptive' },
output_config: { effort: options.effort ?? 'medium' },
max_tokens: options.maxTokensToSample ?? DEFAULT_MAX_TOKENS,
top_k: undefined,
top_p: undefined,
temperature: undefined,
};
} else if (thinkingMode === 'manual') {
invocationKwargs = {
thinking: {
type: 'enabled',
@ -376,6 +517,21 @@ export class LmChatAnthropic implements INodeType {
};
}
const tokensUsageParser = (result: LLMResult) => {
const usage = (result?.llmOutput?.usage as {
input_tokens: number;
output_tokens: number;
}) ?? {
input_tokens: 0,
output_tokens: 0,
};
return {
completionTokens: usage.output_tokens,
promptTokens: usage.input_tokens,
totalTokens: usage.input_tokens + usage.output_tokens,
};
};
const clientOptions: {
fetchOptions?: { dispatcher: ReturnType<typeof getProxyAgent> };
defaultHeaders?: Record<string, string>;
@ -412,19 +568,25 @@ export class LmChatAnthropic implements INodeType {
}
: undefined;
const model = new ChatAnthropic({
const chatAnthropicParams: ChatAnthropicInput = {
anthropicApiKey: credentials.apiKey,
model: modelName,
anthropicApiUrl: baseURL,
maxTokens: options.maxTokensToSample,
temperature: options.temperature,
topK: options.topK,
topP: options.topP,
callbacks: [new N8nLlmTracing(this, { tokensUsageParser })],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this, gatewayErrorHandler),
invocationKwargs,
clientOptions,
});
};
// Opus 4.7 rejects temperature/topK/topP at the SDK layer regardless of thinking mode
if (!isOpus47Model) {
chatAnthropicParams.temperature = options.temperature;
chatAnthropicParams.topK = options.topK;
chatAnthropicParams.topP = options.topP;
}
const model = new ChatAnthropic(chatAnthropicParams);
// Some Anthropic models do not support Langchain default of -1 for topP so we need to unset it
if (options.topP === undefined) {

View File

@ -4,7 +4,7 @@
import { ChatAnthropic } from '@langchain/anthropic';
import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing, getProxyAgent } from '@n8n/ai-utilities';
import { createMockExecuteFunction } from 'n8n-nodes-base/test/nodes/Helpers';
import type { INode, ISupplyDataFunctions } from 'n8n-workflow';
import type { INode, INodeProperties, ISupplyDataFunctions } from 'n8n-workflow';
import { NodeOperationError } from 'n8n-workflow';
import { LmChatAnthropic } from '../LmChatAnthropic.node';
@ -79,7 +79,7 @@ describe('LmChatAnthropic', () => {
displayName: 'Anthropic Chat Model',
name: 'lmChatAnthropic',
group: ['transform'],
version: [1, 1.1, 1.2, 1.3, 1.4],
version: [1, 1.1, 1.2, 1.3, 1.4, 1.5],
description: 'Language Model Anthropic',
});
});
@ -570,13 +570,12 @@ describe('LmChatAnthropic', () => {
});
});
it('should have Claude Sonnet 4.6 as default for v1.4+ resource locator', () => {
it('should have Claude Sonnet 4.6 as default for v1.4 resource locator', () => {
const v14ModelField = lmChatAnthropic.description.properties.find(
(p) =>
p.name === 'model' &&
p.type === 'resourceLocator' &&
(p.displayOptions?.show?.['@version']?.[0] as { _cnd?: { gte?: number } })?._cnd?.gte ===
1.4,
p.displayOptions?.show?.['@version']?.[0] === 1.4,
);
expect(v14ModelField).toBeDefined();
@ -588,6 +587,242 @@ describe('LmChatAnthropic', () => {
});
});
describe('thinking modes (v1.5)', () => {
it('should not set thinking-related invocationKwargs when thinkingMode is disabled', async () => {
const mockContext = setupMockContext({ typeVersion: 1.5 });
mockContext.getNodeParameter = jest.fn().mockImplementation((paramName: string) => {
if (paramName === 'model.value') return 'claude-sonnet-4-6';
if (paramName === 'options')
return { thinkingMode: 'disabled', temperature: 0.5, topK: 10, topP: 0.8 };
return undefined;
});
await lmChatAnthropic.supplyData.call(mockContext, 0);
expect(MockedChatAnthropic).toHaveBeenCalledWith(
expect.objectContaining({
model: 'claude-sonnet-4-6',
temperature: 0.5,
topK: 10,
topP: 0.8,
invocationKwargs: {},
}),
);
});
it('should configure adaptive thinking with default effort (medium)', async () => {
const mockContext = setupMockContext({ typeVersion: 1.5 });
mockContext.getNodeParameter = jest.fn().mockImplementation((paramName: string) => {
if (paramName === 'model.value') return 'claude-sonnet-4-6';
if (paramName === 'options') return { thinkingMode: 'adaptive' };
return undefined;
});
await lmChatAnthropic.supplyData.call(mockContext, 0);
expect(MockedChatAnthropic).toHaveBeenCalledWith(
expect.objectContaining({
model: 'claude-sonnet-4-6',
invocationKwargs: {
thinking: { type: 'adaptive' },
output_config: { effort: 'medium' },
max_tokens: 4096,
top_k: undefined,
top_p: undefined,
temperature: undefined,
},
}),
);
});
it.each(['low', 'medium', 'high', 'xhigh', 'max'] as const)(
'should forward effort=%s for adaptive mode',
async (effort) => {
const mockContext = setupMockContext({ typeVersion: 1.5 });
mockContext.getNodeParameter = jest.fn().mockImplementation((paramName: string) => {
if (paramName === 'model.value') return 'claude-opus-4-7-20251101';
if (paramName === 'options') return { thinkingMode: 'adaptive', effort };
return undefined;
});
await lmChatAnthropic.supplyData.call(mockContext, 0);
expect(MockedChatAnthropic).toHaveBeenCalledWith(
expect.objectContaining({
invocationKwargs: expect.objectContaining({
thinking: { type: 'adaptive' },
output_config: { effort },
}),
}),
);
},
);
it('should keep legacy enabled+budget payload for manual thinkingMode on Sonnet 4.6', async () => {
const mockContext = setupMockContext({ typeVersion: 1.5 });
mockContext.getNodeParameter = jest.fn().mockImplementation((paramName: string) => {
if (paramName === 'model.value') return 'claude-sonnet-4-6';
if (paramName === 'options')
return { thinkingMode: 'manual', thinkingBudget: 2048, maxTokensToSample: 4096 };
return undefined;
});
await lmChatAnthropic.supplyData.call(mockContext, 0);
expect(MockedChatAnthropic).toHaveBeenCalledWith(
expect.objectContaining({
model: 'claude-sonnet-4-6',
invocationKwargs: {
thinking: { type: 'enabled', budget_tokens: 2048 },
max_tokens: 4096,
top_k: undefined,
top_p: undefined,
temperature: undefined,
},
}),
);
});
it('should strip temperature/topK/topP from constructor when model is Opus 4.7 (disabled mode)', async () => {
const mockContext = setupMockContext({ typeVersion: 1.5 });
mockContext.getNodeParameter = jest.fn().mockImplementation((paramName: string) => {
if (paramName === 'model.value') return 'claude-opus-4-7-20251101';
if (paramName === 'options')
return { thinkingMode: 'disabled', temperature: 0.5, topK: 40, topP: 0.9 };
return undefined;
});
await lmChatAnthropic.supplyData.call(mockContext, 0);
const callArgs = MockedChatAnthropic.mock.calls[0][0]!;
expect(callArgs.model).toBe('claude-opus-4-7-20251101');
expect(callArgs).not.toHaveProperty('temperature');
expect(callArgs).not.toHaveProperty('topK');
expect(callArgs).not.toHaveProperty('topP');
});
it('should throw NodeOperationError when manual mode is selected on Opus 4.7', async () => {
const mockContext = setupMockContext({ typeVersion: 1.5 });
mockContext.getNodeParameter = jest.fn().mockImplementation((paramName: string) => {
if (paramName === 'model.value') return 'claude-opus-4-7-20251101';
if (paramName === 'options') return { thinkingMode: 'manual', thinkingBudget: 2048 };
return undefined;
});
await expect(lmChatAnthropic.supplyData.call(mockContext, 0)).rejects.toThrow(
NodeOperationError,
);
expect(MockedChatAnthropic).not.toHaveBeenCalled();
});
it('should still emit legacy thinking payload when thinking=true on v1.4', async () => {
const mockContext = setupMockContext({ typeVersion: 1.4 });
mockContext.getNodeParameter = jest.fn().mockImplementation((paramName: string) => {
if (paramName === 'model.value') return 'claude-sonnet-4-6';
if (paramName === 'options')
return { thinking: true, thinkingBudget: 1500, maxTokensToSample: 4096 };
return undefined;
});
await lmChatAnthropic.supplyData.call(mockContext, 0);
expect(MockedChatAnthropic).toHaveBeenCalledWith(
expect.objectContaining({
invocationKwargs: {
thinking: { type: 'enabled', budget_tokens: 1500 },
max_tokens: 4096,
top_k: undefined,
top_p: undefined,
temperature: undefined,
},
}),
);
});
it('should emit empty invocationKwargs when thinking=false on v1.4', async () => {
const mockContext = setupMockContext({ typeVersion: 1.4 });
mockContext.getNodeParameter = jest.fn().mockImplementation((paramName: string) => {
if (paramName === 'model.value') return 'claude-sonnet-4-6';
if (paramName === 'options') return { thinking: false };
return undefined;
});
await lmChatAnthropic.supplyData.call(mockContext, 0);
expect(MockedChatAnthropic).toHaveBeenCalledWith(
expect.objectContaining({ invocationKwargs: {} }),
);
});
it('should describe v1.5 model field, thinkingMode, and gated effort fields', () => {
const properties = lmChatAnthropic.description.properties;
const v15ModelField = properties.find(
(p) =>
p.name === 'model' &&
p.type === 'resourceLocator' &&
(p.displayOptions?.show?.['@version']?.[0] as { _cnd?: { gte?: number } })?._cnd?.gte ===
1.5,
);
expect(v15ModelField).toBeDefined();
expect(v15ModelField!.default).toEqual({
mode: 'list',
value: 'claude-sonnet-4-6',
cachedResultName: 'Claude Sonnet 4.6',
});
const optionsField = properties.find((p) => p.name === 'options' && p.type === 'collection');
expect(optionsField).toBeDefined();
const innerOptions = (optionsField as { options: INodeProperties[] }).options;
const thinkingMode = innerOptions.find((o) => o.name === 'thinkingMode');
expect(thinkingMode).toBeDefined();
expect(thinkingMode!.type).toBe('options');
const modeValues = (thinkingMode as { options: Array<{ value: string }> }).options.map(
(o) => o.value,
);
expect(modeValues).toEqual(['disabled', 'adaptive', 'manual']);
const effortFields = innerOptions.filter((o) => o.name === 'effort');
expect(effortFields).toHaveLength(2);
const opusEffort = effortFields.find((f) => {
const cnd = (
f.displayOptions?.show?.['/model.value']?.[0] as {
_cnd?: { includes?: string };
}
)?._cnd;
return cnd?.includes === 'opus';
});
expect(opusEffort).toBeDefined();
expect(
(opusEffort as { options: Array<{ value: string }> }).options.map((o) => o.value),
).toEqual(['low', 'medium', 'high', 'xhigh', 'max']);
const nonOpusEffort = effortFields.find((f) => {
const cnd = (
f.displayOptions?.show?.['/model.value']?.[0] as {
_cnd?: { regex?: string };
}
)?._cnd;
return typeof cnd?.regex === 'string';
});
expect(nonOpusEffort).toBeDefined();
expect(
(nonOpusEffort as { options: Array<{ value: string }> }).options.map((o) => o.value),
).toEqual(['low', 'medium', 'high']);
});
});
describe('methods', () => {
it('should have searchModels method', () => {
expect(lmChatAnthropic.methods).toEqual({

View File

@ -64,7 +64,7 @@ describe('LmChatAnthropic', () => {
displayName: 'Anthropic Chat Model',
name: 'lmChatAnthropic',
group: ['transform'],
version: [1, 1.1, 1.2, 1.3, 1.4],
version: [1, 1.1, 1.2, 1.3, 1.4, 1.5],
description: 'Language Model Anthropic',
});
});

View File

@ -84,6 +84,10 @@ const createMockSDKFunctions = (): SDKFunctions => ({
fromAi: jest.fn(
(key: string, desc?: string) => `={{ $fromAI('${key}'${desc ? `, '${desc}'` : ''}) }}`,
),
nodeJson: jest.fn((node: { name: string } | string, path: string) => {
const name = typeof node === 'string' ? node : node.name;
return `={{ $('${name}').item.json.${path} }}`;
}),
});
describe('AST Interpreter', () => {
@ -224,6 +228,14 @@ describe('AST Interpreter', () => {
expect(result).toContain('$fromAI');
});
it('should call nodeJson function', () => {
const code = "export default nodeJson('Telegram Trigger', 'message.chat.id');";
const result = interpretSDKCode(code, sdkFunctions);
expect(sdkFunctions.nodeJson).toHaveBeenCalledWith('Telegram Trigger', 'message.chat.id');
expect(result).toBe("={{ $('Telegram Trigger').item.json.message.chat.id }}");
});
it('should chain method calls', () => {
const code = `
const wf = workflow('id', 'name');

View File

@ -40,6 +40,7 @@ export const ALLOWED_SDK_FUNCTIONS = new Set([
// Utility
'fromAi', // NEW: replaces ($) => $.fromAi() pattern
'nodeJson',
]);
/**

View File

@ -37,6 +37,8 @@ interface TestWorkflow {
expectedErrors?: ExpectedError[];
/** Warnings expected from validateWorkflow when run with a nodeTypesProvider. */
expectedValidationWarnings?: ExpectedWarning[];
/** Errors expected from WorkflowBuilder.validate() (plugin validator pipeline). */
expectedBuilderErrors?: ExpectedError[];
}
function loadWorkflowsFromDir(dir: string, workflows: TestWorkflow[]): void {
@ -56,6 +58,7 @@ function loadWorkflowsFromDir(dir: string, workflows: TestWorkflow[]): void {
expectedWarnings?: ExpectedWarning[];
expectedErrors?: ExpectedError[];
expectedValidationWarnings?: ExpectedWarning[];
expectedBuilderErrors?: ExpectedError[];
}>;
};
@ -74,6 +77,7 @@ function loadWorkflowsFromDir(dir: string, workflows: TestWorkflow[]): void {
expectedWarnings: entry.expectedWarnings,
expectedErrors: entry.expectedErrors,
expectedValidationWarnings: entry.expectedValidationWarnings,
expectedBuilderErrors: entry.expectedBuilderErrors,
});
}
}
@ -2634,6 +2638,39 @@ describe('Codegen Roundtrip with Real Workflows', () => {
}
});
describe('Committed workflows — builder validator errors', () => {
const normalizeError = (e: ExpectedError): string => `${e.code}:${e.nodeName ?? ''}`;
const workflowsWithExpectedBuilderErrors = workflows.filter(
(w) => w.expectedBuilderErrors && w.expectedBuilderErrors.length > 0,
);
if (workflowsWithExpectedBuilderErrors.length === 0) {
it('has at least one fixture with expectedBuilderErrors declared', () => {
expect(workflowsWithExpectedBuilderErrors.length).toBeGreaterThan(0);
});
} else {
workflowsWithExpectedBuilderErrors.forEach(({ id, name, json, expectedBuilderErrors }) => {
it(`emits expected builder validation errors for workflow ${id}: "${name}"`, () => {
const code = generateWorkflowCode(json);
const builder = parseWorkflowCodeToBuilder(code);
const result = builder.validate({ allowDisconnectedNodes: true });
const actualErrors: ExpectedError[] = result.errors
.map((e) => ({ code: e.code, nodeName: e.nodeName }))
.sort((a, b) => normalizeError(a).localeCompare(normalizeError(b)));
const expected = (expectedBuilderErrors ?? [])
.slice()
.sort((a, b) => normalizeError(a).localeCompare(normalizeError(b)));
expect(actualErrors).toEqual(expected);
expect(result.valid).toBe(false);
});
});
}
});
describe('Committed workflows — schema validation errors', () => {
// Mirror the relevant builderHint.inputs / builderHint.outputs declarations from the
// real node types so validateWorkflow can resolve required AI inputs and emit

View File

@ -17,6 +17,33 @@ describe('parseWorkflowCodeToBuilder', () => {
expect(json.name).toBe('My Workflow');
expect(json.nodes).toHaveLength(1);
});
it('should parse SDK code using nodeJson()', () => {
const code = `
const telegramTrigger = trigger({
type: 'n8n-nodes-base.telegramTrigger',
version: 1,
config: { name: 'Telegram Trigger', parameters: {} }
});
const setChat = node({
type: 'n8n-nodes-base.set',
version: 3.4,
config: {
name: 'Set Chat',
parameters: { chatId: nodeJson(telegramTrigger, 'message.chat.id') }
}
});
export default workflow('test-id', 'My Workflow').add(telegramTrigger).to(setChat);
`;
const builder = parseWorkflowCodeToBuilder(code);
const json = builder.toJSON();
const setNode = json.nodes.find((node) => node.name === 'Set Chat');
expect(setNode?.parameters?.chatId).toBe(
"={{ $('Telegram Trigger').item.json.message.chat.id }}",
);
});
});
describe('plain object code (WorkflowJSON)', () => {

View File

@ -6,7 +6,7 @@
*/
import { interpretSDKCode, InterpreterError, SecurityError } from '../ast-interpreter';
import type { SDKFunctions } from '../ast-interpreter';
import { expr as exprFn } from '../expression';
import { expr as exprFn, nodeJson as nodeJsonFn } from '../expression';
import { isWorkflowBuilder, isWorkflowJSON } from '../typeguards';
import type { WorkflowJSON, WorkflowBuilder } from '../types/base';
import { workflow as workflowFn } from '../workflow-builder';
@ -553,6 +553,7 @@ const sdkFunctions: SDKFunctions = {
reranker: rerankerFn,
fromAi: fromAiFn,
expr: exprFn,
nodeJson: nodeJsonFn,
};
/**

View File

@ -1,4 +1,4 @@
import { parseExpression, expr, createFromAIExpression } from './expression';
import { parseExpression, expr, nodeJson, createFromAIExpression } from './expression';
describe('Expression System', () => {
describe('expr() helper for expressions', () => {
@ -51,6 +51,32 @@ describe('Expression System', () => {
});
});
describe('nodeJson() helper for explicit node references', () => {
it('should build expression from a node instance and dot path', () => {
const node = { name: 'Telegram Trigger' };
const result = nodeJson(node as never, 'message.chat.id');
expect(result).toBe("={{ $('Telegram Trigger').item.json.message.chat.id }}");
});
it('should build expression from a node name and array path', () => {
const result = nodeJson('Set User', ['profile', 'user-id']);
expect(result).toBe('={{ $(\'Set User\').item.json.profile["user-id"] }}');
});
it('should escape node names', () => {
const result = nodeJson("Bob's Trigger", 'message.text');
expect(result).toBe("={{ $('Bob\\'s Trigger').item.json.message.text }}");
});
it('should throw for an empty path', () => {
expect(() => nodeJson('Set', '')).toThrow('nodeJson() requires a non-empty JSON path.');
});
});
describe('createFromAIExpression() key sanitization', () => {
it('should sanitize keys with spaces', () => {
const result = createFromAIExpression('user email');

View File

@ -3,5 +3,6 @@ export {
parseExpression,
isExpression,
expr,
nodeJson,
createFromAIExpression,
} from './expression/index';

View File

@ -1,6 +1,6 @@
import { FROM_AI_AUTO_GENERATED_MARKER } from 'n8n-workflow';
import type { FromAIArgumentType } from '../types/base';
import type { FromAIArgumentType, NodeInstance } from '../types/base';
/**
* Parse n8n expression string to extract the inner expression
@ -81,6 +81,66 @@ export function expr(expression: string): string {
return '=' + normalized;
}
// =============================================================================
// Explicit Node JSON Reference Generator
// =============================================================================
type NodeJsonReference = NodeInstance<string, string, unknown> | string;
const IDENTIFIER_PATH_SEGMENT = /^[A-Za-z_$][A-Za-z0-9_$]*$/;
/** Resolve a node reference — either a node instance or a plain name — to its node name. */
function resolveNodeName(node: NodeJsonReference): string {
	if (typeof node === 'string') {
		return node;
	}
	return node.name;
}
/** Escape backslashes and single quotes so the name is safe inside a `$('…')` literal. */
function escapeNodeName(nodeName: string): string {
	// Single pass: prefix each backslash or quote with a backslash.
	return nodeName.replace(/[\\']/g, (ch) => '\\' + ch);
}
/**
 * Split a dot path (or copy an array path) into trimmed segments.
 * Throws when the path is empty or any segment is blank after trimming.
 */
function normalizePath(path: string | readonly string[]): string[] {
	const rawSegments = Array.isArray(path) ? Array.from(path) : (path as string).split('.');
	const trimmed: string[] = [];
	for (const segment of rawSegments) {
		trimmed.push(segment.trim());
	}
	if (trimmed.length === 0 || trimmed.includes('')) {
		throw new Error('nodeJson() requires a non-empty JSON path.');
	}
	return trimmed;
}
/** Render one path segment: dot access for identifier-like names, bracketed JSON string otherwise. */
function formatPathSegment(segment: string): string {
	const isIdentifier = /^[A-Za-z_$][A-Za-z0-9_$]*$/.test(segment);
	return isIdentifier ? '.' + segment : '[' + JSON.stringify(segment) + ']';
}
/**
 * Build an n8n expression that reads JSON output of a specific node by name.
 *
 * Prefer this over `$json` whenever the value does not come from the
 * immediate main-flow predecessor (AI subnodes, fan-in branches, upstream
 * nodes).
 *
 * @example
 * ```typescript
 * nodeJson(telegramTrigger, 'message.chat.id')
 * // "={{ $('Telegram Trigger').item.json.message.chat.id }}"
 *
 * nodeJson('Set User', ['profile', 'user-id'])
 * // "={{ $('Set User').item.json.profile[\"user-id\"] }}"
 * ```
 */
export function nodeJson(node: NodeJsonReference, path: string | readonly string[]): string {
	const nodeName = resolveNodeName(node);
	if (!nodeName) {
		throw new Error('nodeJson() requires a node or node name.');
	}
	// Render each segment (dot access or bracketed string) and concatenate.
	let pathExpression = '';
	for (const segment of normalizePath(path)) {
		pathExpression += formatPathSegment(segment);
	}
	return `={{ $('${escapeNodeName(nodeName)}').item.json${pathExpression} }}`;
}
// =============================================================================
// $fromAI Expression Generator
// =============================================================================

View File

@ -1100,7 +1100,11 @@ describe('graph validation', () => {
config: {
name: 'Store Data',
parameters: {
assignments: { assignments: [{ name: 'api_key', value: 'secret123' }] },
assignments: {
assignments: [
{ id: 'api-key-assignment', name: 'api_key', value: 'secret123', type: 'string' },
],
},
},
},
});
@ -1110,7 +1114,11 @@ describe('graph validation', () => {
config: {
name: 'Store Data',
parameters: {
assignments: { assignments: [{ name: 'password', value: 'secret456' }] },
assignments: {
assignments: [
{ id: 'password-assignment', name: 'password', value: 'secret456', type: 'string' },
],
},
},
},
});

View File

@ -138,6 +138,7 @@ export {
parseExpression,
isExpression,
expr,
nodeJson,
createFromAIExpression,
} from './expression';

View File

@ -33,6 +33,17 @@ Include information with the user prompt such as timestamp, user ID, or session
If there are other agents involved in the workflow you should share memory between the chatbot and those other agents where it makes sense.
Connect the same memory node to multiple agents to enable data sharing and context continuity.
### Memory Session Keys
Memory session keys must uniquely identify the user or chat. In AI Agent memory subnodes, do not use $json for a custom session key because the memory subnode does not have the same immediate predecessor context as a main-flow node.
Use nodeJson(triggerNode, 'field.path') for external chat platforms:
- Telegram: sessionIdType = customKey, sessionKey = nodeJson(telegramTrigger, 'message.chat.id')
- Slack: sessionIdType = customKey, sessionKey = nodeJson(slackTrigger, 'event.channel')
- WhatsApp: sessionIdType = customKey, sessionKey = nodeJson(whatsAppTrigger, 'messages.0.from')
For the built-in n8n Chat Trigger, prefer memory parameters sessionIdType = fromInput and omit a custom sessionKey, because the Chat Trigger provides the session ID directly to the AI Agent.
## Context Engineering & AI Agent Output
It can be beneficial to respond to the user as a tool of the chatbot agent rather than using the agent output - this allows the agent to loop/carry out multiple responses if necessary.

View File

@ -19,6 +19,13 @@ AI Agent nodes (n8n-nodes-langchain.agent) wrap their response in an "output" ob
- Use \`$('AI Agent').item.json.output.fieldName\` when referencing a node, instead of \`$('AI Agent').item.json.fieldName\`
- WRONG: \`$json.summary\` → CORRECT: \`$json.output.summary\`
#### AI Agent Subnode Input Context
AI Agent subnodes (memory, language models, tools, parsers, retrievers, and vector stores) are connected through AI connections, not the normal main data path.
- For memory custom session keys, do NOT use \`$json.chatId\` or \`$json.sessionId\`; reference the trigger/source node explicitly.
- Use \`nodeJson(triggerNode, 'message.chat.id')\` or \`$('Trigger Node').item.json.message.chat.id\`.
- For tool parameters controlled by the agent, use \`$fromAI(...)\` instead of upstream JSON.
- The built-in Chat Trigger memory shortcut is \`sessionIdType: 'fromInput'\`, where no custom session key expression is needed.
#### Webhook Node Output Structure
When referencing data from a Webhook node (n8n-nodes-base.webhook), the incoming request is structured under \`$json\`:
- \`$json.headers\` - HTTP headers, example: \`$json.headers.authorization\`

View File

@ -20,5 +20,8 @@ export const ADDITIONAL_FUNCTIONS = `Additional SDK functions:
- \`.onError(handler)\` — connects a node's error output to a handler node. Requires \`onError: 'continueErrorOutput'\` in the node config.
Example: \`httpNode.onError(errorHandler)\` (with \`config: { onError: 'continueErrorOutput' }\`)
- \`nodeJson(node, 'field.path')\` — creates an explicit expression reference to JSON data from a specific node. Use this instead of \`$json\` in AI Agent subnodes, fan-in nodes, or when reading further upstream data.
Example: \`sessionKey: nodeJson(telegramTrigger, 'message.chat.id')\`
- Additional subnode factories (all follow the same pattern as \`languageModel()\` and \`tool()\`):
\`memory()\`, \`outputParser()\`, \`embeddings()\`, \`vectorStore()\`, \`retriever()\`, \`documentLoader()\`, \`textSplitter()\``;

View File

@ -10,6 +10,7 @@ export const EXPRESSION_REFERENCE = `Available variables inside \`expr('{{ ... }
- \`$json\` — current item's JSON data from the immediate predecessor node
- \`$('NodeName').item.json\` — access any node's output by name
- \`nodeJson(node, 'field.path')\` — SDK helper that creates \`={{ $('NodeName').item.json.field.path }}\`
- \`$input.first()\` — first item from immediate predecessor
- \`$input.all()\` — all items from immediate predecessor
- \`$input.item\` — current item being processed
@ -35,4 +36,16 @@ Dynamic data from other nodes — \`$()\` MUST always be inside \`{{ }}\`, never
- WRONG: \`expr('{{ ' + JSON.stringify($('Source').all().map(i => i.json.name)) + ' }}')\`$() outside {{ }}
- CORRECT: \`expr('{{ $("Source").all().map(i => ({ option: i.json.name })) }}')\`$() inside {{ }}
- CORRECT: \`expr('{{ { "fields": [{ "values": $("Fetch Projects").all().map(i => ({ option: i.json.name })) }] } }}')\` — complex JSON inside {{ }}`;
- CORRECT: \`expr('{{ { "fields": [{ "values": $("Fetch Projects").all().map(i => ({ option: i.json.name })) }] } }}')\` — complex JSON inside {{ }}
When \`$json\` is unsafe - use \`nodeJson(node, 'path')\` or \`$('NodeName').item.json.path\` instead:
- AI Agent subnodes: memory, language model, parser, retriever, vector store, and tool subnodes do not have the same immediate predecessor context as a main-flow node.
WRONG: \`sessionKey: expr('{{ $json.chatId }}')\`
CORRECT: \`sessionKey: nodeJson(telegramTrigger, 'message.chat.id')\`
- Multi-branch fan-in: if a node receives data after IF/Switch/Merge-style branching, \`$json\` only means the current incoming item and may not contain the source field you need.
WRONG: \`expr('{{ $json.userId }}')\`
CORRECT: \`nodeJson(userLookup, 'user.id')\`
- Further-upstream data: if the value comes from any node other than the immediate main predecessor, reference that node explicitly.
WRONG: \`expr('{{ $json.email }}')\`
CORRECT: \`nodeJson(formTrigger, 'body.email')\``;

View File

@ -0,0 +1,16 @@
import { WORKFLOW_SDK_PATTERNS } from './workflow-patterns';
describe('WORKFLOW_SDK_PATTERNS', () => {
it('does not show empty parameter blocks for Set node examples', () => {
const setNodeExamples = [
...WORKFLOW_SDK_PATTERNS.matchAll(
/node\(\{\n\s+type: 'n8n-nodes-base\.set',[^\n]*[\s\S]*?\n\}\);/g,
),
];
expect(setNodeExamples.length).toBeGreaterThan(0);
expect(setNodeExamples.map((example) => example[0])).not.toContainEqual(
expect.stringContaining('parameters: {}'),
);
});
});

View File

@ -35,7 +35,18 @@ const fetchData = node({
const processData = node({
type: 'n8n-nodes-base.set',
version: 3.4,
config: { name: 'Process Data', parameters: {} }
config: {
name: 'Process Data',
parameters: {
mode: 'manual',
includeOtherFields: true,
assignments: {
assignments: [
{ id: 'processed-title', name: 'processedTitle', value: expr('{{ $json.title }}'), type: 'string' }
]
}
}
}
});
// 2. Compose workflow
@ -255,7 +266,18 @@ const fetchRecords = node({
const finalizeResults = node({
type: 'n8n-nodes-base.set',
version: 3.4,
config: { name: 'Finalize', parameters: {} }
config: {
name: 'Finalize',
parameters: {
mode: 'manual',
includeOtherFields: true,
assignments: {
assignments: [
{ id: 'processed-at', name: 'processedAt', value: expr('{{ $now.toISO() }}'), type: 'string' }
]
}
}
}
});
const processRecord = node({
@ -288,7 +310,18 @@ const webhookTrigger = trigger({
const processWebhook = node({
type: 'n8n-nodes-base.set',
version: 3.4,
config: { name: 'Process Webhook', parameters: {} }
config: {
name: 'Process Webhook',
parameters: {
mode: 'manual',
includeOtherFields: true,
assignments: {
assignments: [
{ id: 'source', name: 'source', value: 'webhook', type: 'string' }
]
}
}
}
});
const scheduleTrigger = trigger({
@ -300,7 +333,18 @@ const scheduleTrigger = trigger({
const processSchedule = node({
type: 'n8n-nodes-base.set',
version: 3.4,
config: { name: 'Process Schedule', parameters: {} }
config: {
name: 'Process Schedule',
parameters: {
mode: 'manual',
includeOtherFields: true,
assignments: {
assignments: [
{ id: 'source', name: 'source', value: 'schedule', type: 'string' }
]
}
}
}
});
export default workflow('id', 'name')
@ -333,7 +377,18 @@ const scheduleTrigger = trigger({
const processData = node({
type: 'n8n-nodes-base.set',
version: 3.4,
config: { name: 'Process Data', parameters: {} }
config: {
name: 'Process Data',
parameters: {
mode: 'manual',
includeOtherFields: true,
assignments: {
assignments: [
{ id: 'received-at', name: 'receivedAt', value: expr('{{ $now.toISO() }}'), type: 'string' }
]
}
}
}
});
const sendNotification = node({

View File

@ -20,6 +20,7 @@ describe('Default Plugins', () => {
expect(validatorIds).toContain('core:disconnected-node');
expect(validatorIds).toContain('core:agent');
expect(validatorIds).toContain('core:http-request');
expect(validatorIds).toContain('core:memory-session-key');
});
it('registerDefaultPlugins registers core composite handlers', () => {

View File

@ -20,6 +20,7 @@ import {
fromAiValidator,
httpRequestValidator,
maxNodesValidator,
memorySessionKeyValidator,
mergeNodeValidator,
missingTriggerValidator,
noNodesValidator,
@ -51,6 +52,7 @@ const coreValidators: ValidatorPlugin[] = [
httpRequestValidator,
toolNodeValidator,
fromAiValidator,
memorySessionKeyValidator,
// Node-type validators (medium priority)
setNodeValidator,

View File

@ -1,7 +1,13 @@
/**
* Test that the public API exports are accessible
*/
import { PluginRegistry, pluginRegistry, registerDefaultPlugins, workflow } from '../../index';
import {
PluginRegistry,
nodeJson,
pluginRegistry,
registerDefaultPlugins,
workflow,
} from '../../index';
import type {
ValidationIssue,
PluginContext,
@ -13,6 +19,13 @@ import type {
} from '../../index';
describe('Public API exports', () => {
describe('Expression helper exports', () => {
it('exports nodeJson function', () => {
expect(nodeJson).toBeDefined();
expect(typeof nodeJson).toBe('function');
});
});
describe('Registry exports', () => {
it('exports PluginRegistry class', () => {
expect(PluginRegistry).toBeDefined();

View File

@ -15,6 +15,7 @@ export { filterNodeValidator } from './filter-node-validator';
export { fromAiValidator } from './from-ai-validator';
export { httpRequestValidator } from './http-request-validator';
export { maxNodesValidator } from './max-nodes-validator';
export { memorySessionKeyValidator } from './memory-session-key-validator';
export { mergeNodeValidator } from './merge-node-validator';
export { missingTriggerValidator } from './missing-trigger-validator';
export { noNodesValidator } from './no-nodes-validator';

View File

@ -0,0 +1,173 @@
import { memorySessionKeyValidator } from './memory-session-key-validator';
import type { GraphNode, NodeInstance } from '../../../types/base';
import type { PluginContext } from '../types';
// Build a minimal NodeInstance stub for validator tests; an optional
// subnode type marks the node as an AI subnode (e.g. 'ai_memory').
function createMockNode(
	type: string,
	name: string,
	config: { parameters?: Record<string, unknown> } = {},
	subnodeType?: string,
): NodeInstance<string, string, unknown> {
	const stub: Record<string, unknown> = {
		type,
		name,
		version: '1',
		config: {
			parameters: config.parameters ?? {},
		},
	};
	if (subnodeType) {
		stub._subnodeType = subnodeType;
	}
	return stub as unknown as NodeInstance<string, string, unknown>;
}
// Wrap a node instance in a GraphNode with no connections.
function createGraphNode(node: NodeInstance<string, string, unknown>): GraphNode {
	const graphNode: GraphNode = {
		instance: node,
		connections: new Map(),
	};
	return graphNode;
}
// Minimal PluginContext stub: empty node map plus fixed workflow metadata.
function createMockPluginContext(): PluginContext {
	const ctx: PluginContext = {
		nodes: new Map(),
		workflowId: 'test-workflow',
		workflowName: 'Test Workflow',
		settings: {},
	};
	return ctx;
}
describe('memorySessionKeyValidator', () => {
describe('metadata', () => {
it('has correct id', () => {
expect(memorySessionKeyValidator.id).toBe('core:memory-session-key');
});
it('has correct name', () => {
expect(memorySessionKeyValidator.name).toBe('Memory Session Key Validator');
});
});
describe('validateNode', () => {
it('returns warning for an AI memory subnode custom session key using $json', () => {
const node = createMockNode(
'@n8n/n8n-nodes-langchain.memoryBufferWindow',
'Conversation Memory',
{
parameters: {
sessionIdType: 'customKey',
sessionKey: '={{ $json.chatId }}',
},
},
'ai_memory',
);
const ctx = createMockPluginContext();
const issues = memorySessionKeyValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toContainEqual(
expect.objectContaining({
code: 'UNSAFE_MEMORY_SESSION_KEY_EXPRESSION',
severity: 'error',
violationLevel: 'major',
nodeName: 'Conversation Memory',
parameterPath: 'sessionKey',
}),
);
});
it('returns warning for legacy memory sessionId parameters using $json', () => {
const node = createMockNode(
'@n8n/n8n-nodes-langchain.memoryMotorhead',
'Conversation Memory',
{
parameters: {
sessionIdType: 'customKey',
sessionId: '={{ $json.chatId }}',
},
},
'ai_memory',
);
const ctx = createMockPluginContext();
const issues = memorySessionKeyValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toContainEqual(
expect.objectContaining({
code: 'UNSAFE_MEMORY_SESSION_KEY_EXPRESSION',
parameterPath: 'sessionId',
}),
);
});
it('returns no warning for explicit node references', () => {
const node = createMockNode(
'@n8n/n8n-nodes-langchain.memoryBufferWindow',
'Conversation Memory',
{
parameters: {
sessionIdType: 'customKey',
sessionKey: "={{ $('Telegram Trigger').item.json.message.chat.id }}",
},
},
'ai_memory',
);
const ctx = createMockPluginContext();
const issues = memorySessionKeyValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toHaveLength(0);
});
it('returns no warning for Chat Trigger fromInput memory mode', () => {
const node = createMockNode(
'@n8n/n8n-nodes-langchain.memoryBufferWindow',
'Conversation Memory',
{
parameters: {
sessionIdType: 'fromInput',
sessionKey: '={{ $json.sessionId }}',
},
},
'ai_memory',
);
const ctx = createMockPluginContext();
const issues = memorySessionKeyValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toHaveLength(0);
});
it('returns no warning for non-memory subnodes using $json', () => {
const node = createMockNode(
'@n8n/n8n-nodes-langchain.lmChatOpenAi',
'OpenAI Chat Model',
{
parameters: {
model: 'gpt-5.4',
options: {
baseURL: '={{ $json.baseUrl }}',
},
},
},
'ai_languageModel',
);
const ctx = createMockPluginContext();
const issues = memorySessionKeyValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toHaveLength(0);
});
it('returns no warning for a regular agent text parameter using $json', () => {
const node = createMockNode('@n8n/n8n-nodes-langchain.agent', 'AI Agent', {
parameters: {
text: '={{ $json.chatInput }}',
},
});
const ctx = createMockPluginContext();
const issues = memorySessionKeyValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toHaveLength(0);
});
});
});

View File

@ -0,0 +1,83 @@
/**
* Memory Session Key Validator Plugin
*
* Validates that AI memory subnodes do not use ambiguous `$json` references
* for manually configured session keys.
*/
import type { GraphNode, NodeInstance } from '../../../types/base';
import type { ValidatorPlugin, ValidationIssue, PluginContext } from '../types';
const MEMORY_SUBNODE_TYPE = 'ai_memory';
const SESSION_KEY_PARAMETERS = ['sessionKey', 'sessionId'];
// Type guard: true when the node carries a string `_subnodeType` marker.
function hasSubnodeType(
	node: NodeInstance<string, string, unknown>,
): node is NodeInstance<string, string, unknown> & { readonly _subnodeType: string } {
	if (!('_subnodeType' in node)) {
		return false;
	}
	return typeof node._subnodeType === 'string';
}
// True only for subnodes tagged with the AI memory subnode type.
function isMemorySubnode(node: NodeInstance<string, string, unknown>): boolean {
	return hasSubnodeType(node) ? node._subnodeType === MEMORY_SUBNODE_TYPE : false;
}
// Narrow to a plain object (non-null, non-array).
function isRecord(value: unknown): value is Record<string, unknown> {
	if (value === null || Array.isArray(value)) {
		return false;
	}
	return typeof value === 'object';
}
// Chat Trigger's `fromInput` session mode supplies the session ID directly,
// so a custom session-key expression is not in play.
function usesFromInputSessionId(parameters: Record<string, unknown>): boolean {
	const { sessionIdType } = parameters;
	return sessionIdType === 'fromInput';
}
// An expression string that references `$json` is ambiguous in a memory
// subnode context; only expression-shaped strings ('=' prefix or '{{') count.
function isUnsafeSessionExpression(value: unknown): value is string {
	if (typeof value !== 'string') {
		return false;
	}
	if (!value.includes('$json')) {
		return false;
	}
	return value.startsWith('=') || value.includes('{{');
}
// Build the UNSAFE_MEMORY_SESSION_KEY_EXPRESSION issue for one parameter.
function createIssue(nodeName: string, parameterPath: string): ValidationIssue {
	const message = `'${nodeName}' parameter '${parameterPath}' uses $json in an AI memory subnode session key. Use an explicit node reference such as nodeJson(trigger, 'message.chat.id') or $('Trigger').item.json.message.chat.id.`;
	return {
		code: 'UNSAFE_MEMORY_SESSION_KEY_EXPRESSION',
		message,
		severity: 'error',
		violationLevel: 'major',
		nodeName,
		parameterPath,
	};
}
/**
 * Validator for AI memory subnode session key expressions.
 *
 * Flags `$json` in manually configured memory session keys
 * (`sessionKey`/`sessionId`), while leaving the Chat Trigger's `fromInput`
 * memory mode alone.
 */
export const memorySessionKeyValidator: ValidatorPlugin = {
	id: 'core:memory-session-key',
	name: 'Memory Session Key Validator',
	priority: 50,

	validateNode(
		node: NodeInstance<string, string, unknown>,
		_graphNode: GraphNode,
		_ctx: PluginContext,
	): ValidationIssue[] {
		// Only AI memory subnodes are in scope for this validator.
		if (!isMemorySubnode(node)) {
			return [];
		}
		const parameters = node.config?.parameters;
		// No parameters, or `fromInput` session mode: nothing to flag.
		if (!isRecord(parameters) || usesFromInputSessionId(parameters)) {
			return [];
		}
		const issues: ValidationIssue[] = [];
		for (const parameterName of SESSION_KEY_PARAMETERS) {
			if (isUnsafeSessionExpression(parameters[parameterName])) {
				issues.push(createIssue(node.name, parameterName));
			}
		}
		return issues;
	},
};

View File

@ -2,15 +2,17 @@ import { setNodeValidator } from './set-node-validator';
import type { GraphNode, NodeInstance } from '../../../types/base';
import type { PluginContext } from '../types';
// Helper to create a mock node instance
// Helper to create a mock node instance.
// Defaults to version 3.4 because assignment validation applies to Set v3.3+.
function createMockNode(
type: string,
config: { parameters?: Record<string, unknown> } = {},
version: string = '3.4',
): NodeInstance<string, string, unknown> {
return {
type,
name: 'Test Node',
version: '1',
version,
config: {
parameters: config.parameters ?? {},
},
@ -35,6 +37,15 @@ function createMockPluginContext(): PluginContext {
};
}
// Build a canonical Set v3.3+ assignment entry ({ id, name, value, type }).
function createAssignment(name: string, value: unknown, type: string) {
	const id = name + '-assignment';
	return { id, name, value, type };
}
describe('setNodeValidator', () => {
describe('metadata', () => {
it('has correct id', () => {
@ -55,7 +66,7 @@ describe('setNodeValidator', () => {
const node = createMockNode('n8n-nodes-base.set', {
parameters: {
assignments: {
assignments: [{ name: 'password', value: 'secret123', type: 'string' }],
assignments: [createAssignment('password', 'secret123', 'string')],
},
},
});
@ -75,7 +86,7 @@ describe('setNodeValidator', () => {
const node = createMockNode('n8n-nodes-base.set', {
parameters: {
assignments: {
assignments: [{ name: 'api_key', value: 'key123', type: 'string' }],
assignments: [createAssignment('api_key', 'key123', 'string')],
},
},
});
@ -95,7 +106,7 @@ describe('setNodeValidator', () => {
const node = createMockNode('n8n-nodes-base.set', {
parameters: {
assignments: {
assignments: [{ name: 'secret', value: 'mysecret', type: 'string' }],
assignments: [createAssignment('secret', 'mysecret', 'string')],
},
},
});
@ -115,7 +126,7 @@ describe('setNodeValidator', () => {
const node = createMockNode('n8n-nodes-base.set', {
parameters: {
assignments: {
assignments: [{ name: 'token', value: 'mytoken', type: 'string' }],
assignments: [createAssignment('token', 'mytoken', 'string')],
},
},
});
@ -136,8 +147,8 @@ describe('setNodeValidator', () => {
parameters: {
assignments: {
assignments: [
{ name: 'username', value: 'john', type: 'string' },
{ name: 'email', value: 'john@example.com', type: 'string' },
createAssignment('username', 'john', 'string'),
createAssignment('email', 'john@example.com', 'string'),
],
},
},
@ -169,13 +180,99 @@ describe('setNodeValidator', () => {
expect(issues).toHaveLength(0);
});
it.each(['1', '2'])('skips validation for legacy Set node version %s', (version) => {
const node = createMockNode(
'n8n-nodes-base.set',
{
parameters: {
mode: 'keepAllExistingFields',
assignments: {
assignments: [{ name: 'password', value: 'secret', type: 'string' }],
},
},
},
version,
);
const ctx = createMockPluginContext();
const issues = setNodeValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toHaveLength(0);
});
it.each(['3', '3.1', '3.2'])(
'returns SET_INVALID_MODE error for unsupported Set node mode on version %s',
(version) => {
const node = createMockNode(
'n8n-nodes-base.set',
{
parameters: {
mode: 'keepAllExistingFields',
assignments: {
assignments: [{ name: 'caption', value: '={{ $json.title }}', type: 'string' }],
},
},
},
version,
);
const ctx = createMockPluginContext();
const issues = setNodeValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toEqual([
expect.objectContaining({
code: 'SET_INVALID_MODE',
severity: 'error',
parameterPath: 'parameters.mode',
}),
]);
},
);
it.each(['3', '3.1', '3.2'])(
'skips assignment validation for Set node version %s',
(version) => {
const node = createMockNode(
'n8n-nodes-base.set',
{
parameters: {
mode: 'manual',
assignments: {
assignments: [{ name: 'password', value: 'secret', type: 'string' }],
},
},
},
version,
);
const ctx = createMockPluginContext();
const issues = setNodeValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toHaveLength(0);
},
);
it('allows raw mode because it is a valid Set node output mode', () => {
const node = createMockNode('n8n-nodes-base.set', {
parameters: {
mode: 'raw',
jsonOutput: '={{ { id: $json.id, title: $json.title } }}',
},
});
const ctx = createMockPluginContext();
const issues = setNodeValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toHaveLength(0);
});
it('returns warnings for multiple credential-like fields', () => {
const node = createMockNode('n8n-nodes-base.set', {
parameters: {
assignments: {
assignments: [
{ name: 'password', value: 'secret', type: 'string' },
{ name: 'api_key', value: 'key', type: 'string' },
createAssignment('password', 'secret', 'string'),
createAssignment('api_key', 'key', 'string'),
],
},
},
@ -191,7 +288,7 @@ describe('setNodeValidator', () => {
const node = createMockNode('n8n-nodes-base.set', {
parameters: {
assignments: {
assignments: [{ name: 'password', value: 'secret', type: 'string' }],
assignments: [createAssignment('password', 'secret', 'string')],
},
},
});
@ -202,5 +299,121 @@ describe('setNodeValidator', () => {
expect(issues[0]?.nodeName).toBe('My Set Node');
});
it('returns SET_INVALID_MODE error for unsupported Set node modes', () => {
const node = createMockNode('n8n-nodes-base.set', {
parameters: {
mode: 'keepAllExistingFields',
includeOtherFields: true,
assignments: {
assignments: [createAssignment('caption', '={{ $json.title }}', 'string')],
},
},
});
const ctx = createMockPluginContext();
const issues = setNodeValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toContainEqual(
expect.objectContaining({
code: 'SET_INVALID_MODE',
severity: 'error',
}),
);
});
it('returns SET_INVALID_ASSIGNMENT error when a manual assignment is missing an id', () => {
const node = createMockNode('n8n-nodes-base.set', {
parameters: {
mode: 'manual',
includeOtherFields: true,
assignments: {
assignments: [{ name: 'caption', value: '={{ $json.title }}', type: 'string' }],
},
},
});
const ctx = createMockPluginContext();
const issues = setNodeValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toContainEqual(
expect.objectContaining({
code: 'SET_INVALID_ASSIGNMENT',
severity: 'error',
parameterPath: 'parameters.assignments.assignments[0].id',
}),
);
});
it('returns SET_INVALID_ASSIGNMENT error when a manual assignment omits value', () => {
const node = createMockNode('n8n-nodes-base.set', {
parameters: {
mode: 'manual',
assignments: {
assignments: [{ id: 'caption-assignment', name: 'caption', type: 'string' }],
},
},
});
const ctx = createMockPluginContext();
const issues = setNodeValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toContainEqual(
expect.objectContaining({
code: 'SET_INVALID_ASSIGNMENT',
severity: 'error',
parameterPath: 'parameters.assignments.assignments[0].value',
}),
);
});
it('returns SET_INVALID_ASSIGNMENT error when a manual assignment has explicit undefined value', () => {
const node = createMockNode('n8n-nodes-base.set', {
parameters: {
mode: 'manual',
assignments: {
assignments: [
{
id: 'caption-assignment',
name: 'caption',
type: 'string',
value: undefined,
},
],
},
},
});
const ctx = createMockPluginContext();
const issues = setNodeValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toContainEqual(
expect.objectContaining({
code: 'SET_INVALID_ASSIGNMENT',
severity: 'error',
parameterPath: 'parameters.assignments.assignments[0].value',
}),
);
});
it('returns no issues for a canonical manual Set v3.4 assignment shape', () => {
const node = createMockNode('n8n-nodes-base.set', {
parameters: {
mode: 'manual',
includeOtherFields: true,
assignments: {
assignments: [
createAssignment('caption', '={{ $json.title }}', 'string'),
createAssignment('score', 12, 'number'),
],
},
},
});
const ctx = createMockPluginContext();
const issues = setNodeValidator.validateNode(node, createGraphNode(node), ctx);
expect(issues).toHaveLength(0);
});
});
});

View File

@ -1,10 +1,12 @@
/**
* Set Node Validator Plugin
*
* Validates Set nodes for security issues like credential-like field names.
* Validates Set nodes for contract issues and security issues like
* credential-like field names.
*/
import type { GraphNode, NodeInstance } from '../../../types/base';
import { parseVersion } from '../../string-utils';
import { isCredentialFieldName } from '../../validation-helpers';
import {
type ValidatorPlugin,
@ -15,10 +17,37 @@ import {
formatNodeRef,
} from '../types';
const SUPPORTED_MODES = new Set(['manual', 'raw']);
// Set node v3.x declares `mode: "manual" | "raw"`. v3.3 introduced the
// `assignments` collection (`{ id, name, value, type }`). v1/v2 use
// `parameters.values.*`; v3.0-3.2 use `parameters.fields.values[]`.
const MIN_MODE_VERSION = 3;
const MIN_ASSIGNMENT_VERSION = 3.3;
// Narrow to a plain (non-null, non-array) object.
function isRecord(value: unknown): value is Record<string, unknown> {
	return value !== null && typeof value === 'object' && !Array.isArray(value);
}
// True for strings with at least one non-whitespace character.
function isNonEmptyString(value: unknown): value is string {
	if (typeof value !== 'string') {
		return false;
	}
	return value.trim().length > 0;
}
// Render an arbitrary `mode` value for an error message: strings verbatim,
// primitives via JSON, collections as placeholders.
function formatMode(mode: unknown): string {
	switch (typeof mode) {
		case 'string':
			return mode;
		case 'number':
		case 'boolean':
			return JSON.stringify(mode);
		default:
			break;
	}
	if (mode === null) {
		return 'null';
	}
	return Array.isArray(mode) ? '[array]' : '[object]';
}
/**
* Validator for Set nodes.
*
* Checks for:
* - Invalid Set node mode values
* - Invalid assignment collection entries
* - Credential-like field names in assignments (password, api_key, secret, token, etc.)
*/
export const setNodeValidator: ValidatorPlugin = {
@ -33,9 +62,16 @@ export const setNodeValidator: ValidatorPlugin = {
ctx: PluginContext,
): ValidationIssue[] {
const issues: ValidationIssue[] = [];
const params = node.config?.parameters as Record<string, unknown> | undefined;
if (!params) {
const nodeVersion = parseVersion(node.version);
if (nodeVersion < MIN_MODE_VERSION) {
return issues;
}
const params = node.config?.parameters;
if (!isRecord(params)) {
return issues;
}
@ -47,16 +83,106 @@ export const setNodeValidator: ValidatorPlugin = {
const origForWarning = renamed ? originalName : undefined;
const nodeRef = formatNodeRef(displayName, origForWarning, node.type);
const assignments = params.assignments as
| { assignments?: Array<{ name?: string; value?: unknown; type?: string }> }
| undefined;
const mode = params.mode;
if (mode !== undefined && (!isNonEmptyString(mode) || !SUPPORTED_MODES.has(mode))) {
issues.push({
code: 'SET_INVALID_MODE',
message:
`${nodeRef} uses unsupported Set node mode "${formatMode(mode)}". ` +
'Use "manual" for field mapping or "raw" for JSON output. ' +
'To keep existing input fields while mapping fields, use mode: "manual" with includeOtherFields: true.',
severity: 'error',
violationLevel: 'major',
nodeName: displayName,
parameterPath: 'parameters.mode',
originalName: origForWarning,
});
}
if (!assignments?.assignments) {
if (nodeVersion < MIN_ASSIGNMENT_VERSION) {
return issues;
}
for (const assignment of assignments.assignments) {
if (assignment.name && isCredentialFieldName(assignment.name)) {
const assignments = params.assignments;
if (assignments === undefined) {
return issues;
}
if (!isRecord(assignments)) {
issues.push({
code: 'SET_INVALID_ASSIGNMENT',
message: `${nodeRef} has invalid assignments. Expected parameters.assignments to be an object containing assignments: [].`,
severity: 'error',
violationLevel: 'major',
nodeName: displayName,
parameterPath: 'parameters.assignments',
originalName: origForWarning,
});
return issues;
}
const assignmentItems = assignments.assignments;
if (assignmentItems === undefined) {
return issues;
}
if (!Array.isArray(assignmentItems)) {
issues.push({
code: 'SET_INVALID_ASSIGNMENT',
message: `${nodeRef} has invalid assignments. Expected parameters.assignments.assignments to be an array.`,
severity: 'error',
violationLevel: 'major',
nodeName: displayName,
parameterPath: 'parameters.assignments.assignments',
originalName: origForWarning,
});
return issues;
}
for (const [index, assignment] of assignmentItems.entries()) {
const parameterPath = `parameters.assignments.assignments[${index}]`;
if (!isRecord(assignment)) {
issues.push({
code: 'SET_INVALID_ASSIGNMENT',
message: `${nodeRef} has an invalid assignment at index ${index}. Expected an object with id, name, value, and type.`,
severity: 'error',
violationLevel: 'major',
nodeName: displayName,
parameterPath,
originalName: origForWarning,
});
continue;
}
for (const key of ['id', 'name', 'type']) {
if (!isNonEmptyString(assignment[key])) {
issues.push({
code: 'SET_INVALID_ASSIGNMENT',
message: `${nodeRef} assignment at index ${index} is missing a non-empty "${key}" field.`,
severity: 'error',
violationLevel: 'major',
nodeName: displayName,
parameterPath: `${parameterPath}.${key}`,
originalName: origForWarning,
});
}
}
if (assignment.value === undefined) {
issues.push({
code: 'SET_INVALID_ASSIGNMENT',
message: `${nodeRef} assignment at index ${index} is missing the "value" field.`,
severity: 'error',
violationLevel: 'major',
nodeName: displayName,
parameterPath: `${parameterPath}.value`,
originalName: origForWarning,
});
}
if (isNonEmptyString(assignment.name) && isCredentialFieldName(assignment.name)) {
issues.push({
code: 'SET_CREDENTIAL_FIELD',
message: `${nodeRef} has a field named "${assignment.name}" which appears to be storing credentials. Use n8n's credential system instead.`,

View File

@ -0,0 +1,39 @@
{
"nodes": [
{
"parameters": {},
"id": "11111111-1111-1111-1111-111111111111",
"name": "When clicking 'Test workflow'",
"type": "n8n-nodes-base.manualTrigger",
"typeVersion": 1,
"position": [0, 0]
},
{
"parameters": {
"mode": "keepAllExistingFields",
"includeOtherFields": true,
"assignments": {
"assignments": [
{
"id": "caption",
"name": "caption",
"value": "={{ $json.title }}",
"type": "string"
}
]
},
"options": {}
},
"id": "22222222-2222-2222-2222-222222222222",
"name": "Edit Fields",
"type": "n8n-nodes-base.set",
"typeVersion": 3.4,
"position": [220, 0]
}
],
"connections": {
"When clicking 'Test workflow'": {
"main": [[{ "node": "Edit Fields", "type": "main", "index": 0 }]]
}
}
}

View File

@ -0,0 +1,38 @@
{
"nodes": [
{
"parameters": {},
"id": "33333333-3333-3333-3333-333333333333",
"name": "When clicking 'Test workflow'",
"type": "n8n-nodes-base.manualTrigger",
"typeVersion": 1,
"position": [0, 0]
},
{
"parameters": {
"mode": "manual",
"includeOtherFields": true,
"assignments": {
"assignments": [
{
"name": "caption",
"value": "={{ $json.title }}",
"type": "string"
}
]
},
"options": {}
},
"id": "44444444-4444-4444-4444-444444444444",
"name": "Edit Fields",
"type": "n8n-nodes-base.set",
"typeVersion": 3.4,
"position": [220, 0]
}
],
"connections": {
"When clicking 'Test workflow'": {
"main": [[{ "node": "Edit Fields", "type": "main", "index": 0 }]]
}
}
}

View File

@ -87,6 +87,18 @@
"expectedValidationWarnings": [
{ "code": "INVALID_OUTPUT_FOR_MODE", "nodeName": "Retrieve Relevant Regulations" }
]
},
{
"id": 7,
"name": "Set node with invalid mode (keepAllExistingFields)",
"success": true,
"expectedBuilderErrors": [{ "code": "SET_INVALID_MODE", "nodeName": "Edit Fields" }]
},
{
"id": 8,
"name": "Set node v3.4 assignment missing id field",
"success": true,
"expectedBuilderErrors": [{ "code": "SET_INVALID_ASSIGNMENT", "nodeName": "Edit Fields" }]
}
]
}

View File

@ -682,8 +682,10 @@ describe('WorkflowExecuteAdditionalData', () => {
});
describe('getBase', () => {
const mockWebhookBaseUrl = 'webhook-base-url.com';
const mockWebhookBaseUrl = 'https://webhook.example.com/';
const mockInstanceBaseUrl = 'https://editor.example.com';
jest.spyOn(urlService, 'getWebhookBaseUrl').mockReturnValue(mockWebhookBaseUrl);
jest.spyOn(urlService, 'getInstanceBaseUrl').mockReturnValue(mockInstanceBaseUrl);
const globalConfig = mockInstance(GlobalConfig);
Container.set(GlobalConfig, globalConfig);
@ -706,7 +708,7 @@ describe('WorkflowExecuteAdditionalData', () => {
credentialsHelper,
executeWorkflow: expect.any(Function),
restApiUrl: `${mockWebhookBaseUrl}/rest/`,
instanceBaseUrl: mockWebhookBaseUrl,
instanceBaseUrl: `${mockInstanceBaseUrl}/`,
formWaitingBaseUrl: `${mockWebhookBaseUrl}/form-waiting/`,
webhookBaseUrl: `${mockWebhookBaseUrl}/webhook/`,
webhookWaitingBaseUrl: `${mockWebhookBaseUrl}/webhook-waiting/`,

View File

@ -78,6 +78,45 @@ describe('ProjectController', () => {
});
});
// Covers the /projects/sharing-candidates endpoint: service wiring and the
// response envelope shape.
describe('getSharingCandidates', () => {
it('calls service with query options and returns enriched { count, data }', async () => {
// Arrange: two shareable projects, enriched with role/scopes by the service.
const projects = [
{ id: 'p1', name: 'Project 1' },
{ id: 'p2', name: 'Peer personal project' },
];
const enriched = projects.map((p) => ({
...p,
role: 'global:member',
scopes: ['user:list'],
}));
(projectsService.getShareableProjectsAndCount as jest.Mock).mockResolvedValue([projects, 2]);
(projectsService.addUserScopes as jest.Mock).mockResolvedValue(enriched);
const res = makeRes();
const query = { skip: 0, take: 50, search: '' };
await controller.getSharingCandidates(req, res, query as any);
// Assert: query forwarded verbatim, scopes added, envelope returned.
expect(projectsService.getShareableProjectsAndCount).toHaveBeenCalledWith(req.user, query);
expect(projectsService.addUserScopes).toHaveBeenCalledWith(req.user, projects);
expect(res.json).toHaveBeenCalledWith({ count: 2, data: enriched });
});
it('always returns the { count, data } envelope (no bare-array path)', async () => {
(projectsService.getShareableProjectsAndCount as jest.Mock).mockResolvedValue([[], 0]);
(projectsService.addUserScopes as jest.Mock).mockResolvedValue([]);
const res = makeRes();
// Use the real DTO defaults to mirror what the route receives with no query params.
const parsed = ListProjectsQueryDto.safeParse({});
expect(parsed.success).toBe(true);
const query = parsed.data!;
await controller.getSharingCandidates(req, res, query);
expect(res.json).toHaveBeenCalledWith({ count: 0, data: [] });
});
});
it('emits team-project-updated with full members list on addProjectUsers', async () => {
// Arrange
const projectId = 'p1';

View File

@ -75,6 +75,26 @@ export class ProjectController {
return await this.projectsService.getProjectCounts();
}
// Returns the projects the requesting user may pick as share targets,
// including other users' personal projects, so the workflow / credential
// share dropdowns can surface peers. Gated on the `user:list` global scope
// (the same boundary `GET /rest/users` enforces) — restricted roles without
// that scope (e.g. chat-only users) cannot enumerate peer personal projects.
@Get('/sharing-candidates')
@GlobalScope('user:list')
async getSharingCandidates(
req: AuthenticatedRequest,
res: Response,
@Query payload: ListProjectsQueryDto,
) {
const [candidates, count] = await this.projectsService.getShareableProjectsAndCount(
req.user,
payload,
);
const data = await this.projectsService.addUserScopes(req.user, candidates);
return res.json({ count, data });
}
@Post('/')
@GlobalScope('project:create')
// Using admin as all plans that contain projects should allow admins at the very least

View File

@ -211,6 +211,25 @@ export class CredentialsHelper extends ICredentialsHelper {
return undefined;
}
/**
 * Runs a credential type's `preAuthentication` hook purely in memory — no
 * expirable-property guard and no persistence to the database. `requestOAuth2`
 * relies on this to transform `oauthTokenData` on every request (e.g.
 * extracting a claim from a decrypted JWE/JWT) without a DB write per call.
 *
 * Returns `undefined` when the credential type defines no hook or the hook
 * yields a nullish result.
 */
async runPreAuthentication(
helpers: IHttpRequestHelper,
credentials: ICredentialDataDecryptedObject,
typeName: string,
): Promise<ICredentialDataDecryptedObject | undefined> {
const credentialType = this.credentialTypes.getByName(typeName);
const hook = credentialType.preAuthentication;
if (typeof hook !== 'function') {
return undefined;
}
const transformed = await hook.call(helpers, credentials);
return (transformed as ICredentialDataDecryptedObject) ?? undefined;
}
/**
* Resolves the given value in case it is an expression
*/

View File

@ -120,6 +120,14 @@ export const eventNamesAudit = [
'n8n.audit.role-mapping.rule.updated',
'n8n.audit.role-mapping.rule.deleted',
'n8n.audit.role-mapping.rules.bulk-deleted',
'n8n.audit.cluster.version-mismatch.detected',
'n8n.audit.cluster.version-mismatch.resolved',
'n8n.audit.cluster.hostid-clash.detected',
'n8n.audit.cluster.hostid-clash.resolved',
'n8n.audit.cluster.split-brain.detected',
'n8n.audit.cluster.split-brain.resolved',
'n8n.audit.cluster.instance-joined',
'n8n.audit.cluster.instance-left',
] as const;
export type EventNamesWorkflowType = (typeof eventNamesWorkflow)[number];
@ -158,3 +166,6 @@ export type EventMessageTypes =
| EventMessageAiNode
| EventMessageQueue
| EventMessageRunner;
/** Type guard: narrows an event-bus message to a node-level event (eventName starting with `n8n.node.`). */
export const isNodeEventMessage = (message: EventMessageTypes): message is EventMessageNode =>
message.eventName.startsWith('n8n.node.');

View File

@ -36,7 +36,6 @@ export interface MessageWithCallback {
}
export interface MessageEventBusInitializeOptions {
skipRecoveryPass?: boolean;
workerId?: string;
webhookProcessorId?: string;
}
@ -69,7 +68,6 @@ export class MessageEventBus extends EventEmitter {
*
* Sets `isInitialized` to `true` once finished.
*/
// eslint-disable-next-line complexity
async initialize(options?: MessageEventBusInitializeOptions): Promise<void> {
if (this.isInitialized) {
return;
@ -95,97 +93,8 @@ export class MessageEventBus extends EventEmitter {
this.logger.warn('Could not initialize event writer');
}
if (options?.skipRecoveryPass) {
this.logger.debug('Skipping unsent event check');
} else {
// unsent event check:
// - find unsent messages in current event log(s)
// - cycle event logs and start the logging to a fresh file
// - retry sending events
this.logger.debug('Checking for unsent event messages');
const unsentAndUnfinished = await this.getUnsentAndUnfinishedExecutions();
this.logger.debug(
`Start logging into ${this.logWriter?.getLogFileName() ?? 'unknown filename'} `,
);
this.logWriter?.startLogging();
await this.send(unsentAndUnfinished.unsentMessages);
await this.performStartupRecovery();
let unfinishedExecutionIds = Object.keys(unsentAndUnfinished.unfinishedExecutions);
// if we are in queue mode, running jobs may still be running on a worker despite the main process
// crashing, so we can't just mark them as crashed
if (this.globalConfig.executions.mode !== 'queue') {
const dbUnfinishedExecutionIds = (
await this.executionRepository.find({
where: {
status: In(['running', 'unknown']),
},
select: ['id'],
})
).map((e) => e.id);
unfinishedExecutionIds = Array.from(
new Set<string>([...unfinishedExecutionIds, ...dbUnfinishedExecutionIds]),
);
}
if (unfinishedExecutionIds.length > 0) {
const activeWorkflows = await this.workflowRepository.find({
where: { activeVersionId: Not(IsNull()) },
select: ['id', 'name'],
});
if (activeWorkflows.length > 0) {
this.logger.info('Currently active workflows:');
for (const workflowData of activeWorkflows) {
this.logger.info(` - ${workflowData.name} (ID: ${workflowData.id})`);
}
}
const recoveryAlreadyAttempted = this.logWriter?.isRecoveryProcessRunning();
if (recoveryAlreadyAttempted || this.globalConfig.eventBus.crashRecoveryMode === 'simple') {
await this.executionRepository.markAsCrashed(unfinishedExecutionIds);
// if we end up here, it means that the previous recovery process did not finish
// a possible reason would be that recreating the workflow data itself caused e.g an OOM error
// in that case, we do not want to retry the recovery process, but rather mark the executions as crashed
if (recoveryAlreadyAttempted)
this.logger.warn('Skipped recovery process since it previously failed.');
} else {
// start actual recovery process and write recovery process flag file
this.logWriter?.startRecoveryProcess();
const recoveredIds: string[] = [];
const crashedWorkflowIds: Set<string> = new Set();
for (const executionId of unfinishedExecutionIds) {
const logMessages = unsentAndUnfinished.unfinishedExecutions[executionId];
const recoveredExecution = await this.recoveryService.recoverFromLogs(
executionId,
logMessages ?? [],
);
if (recoveredExecution) {
if (recoveredExecution.status === 'crashed') {
crashedWorkflowIds.add(recoveredExecution.workflowId);
}
recoveredIds.push(executionId);
}
}
if (recoveredIds.length > 0) {
this.logger.warn(`Found unfinished executions: ${recoveredIds.join(', ')}`);
this.logger.info(
'This could be due to a crash of an active workflow or a restart of n8n',
);
}
if (
this.globalConfig.executions.recovery.workflowDeactivationEnabled &&
crashedWorkflowIds.size > 0
) {
await this.recoveryService.autoDeactivateWorkflowsIfNeeded(crashedWorkflowIds);
}
}
// remove the recovery process flag file
this.logWriter?.endRecoveryProcess();
}
}
// if configured, run this test every n ms
if (this.globalConfig.eventBus.checkUnsentInterval > 0) {
if (this.pushIntervalTimer) {
@ -326,4 +235,95 @@ export class MessageEventBus extends EventEmitter {
async sendQueueEvent(options: EventMessageQueueOptions) {
await this.send(new EventMessageQueue(options));
}
/**
 * Startup crash-recovery pass: retries sending events left unsent in the
 * current event log(s), then reconciles executions that were still running
 * when the process last stopped — either marking them as crashed ("simple"
 * mode, or when a previous recovery attempt did not finish) or rebuilding
 * their run data from the logged node/workflow events.
 *
 * NOTE(review): ordering matters here — logging is cycled to a fresh file
 * before unsent messages are re-sent, and the recovery flag file brackets
 * the log-based recovery so a crash mid-recovery is detected on next boot.
 */
// eslint-disable-next-line complexity
private async performStartupRecovery() {
// unsent event check:
// - find unsent messages in current event log(s)
// - cycle event logs and start the logging to a fresh file
// - retry sending events
this.logger.debug('Checking for unsent event messages');
const unsentAndUnfinished = await this.getUnsentAndUnfinishedExecutions();
this.logger.debug(
`Start logging into ${this.logWriter?.getLogFileName() ?? 'unknown filename'} `,
);
this.logWriter?.startLogging();
await this.send(unsentAndUnfinished.unsentMessages);
let unfinishedExecutionIds = Object.keys(unsentAndUnfinished.unfinishedExecutions);
// if we are in queue mode, running jobs may still be running on a worker despite the main process
// crashing, so we can't just mark them as crashed
if (this.globalConfig.executions.mode !== 'queue') {
// Merge in executions the DB still considers running/unknown — the event
// log alone can miss executions whose messages were never written.
const dbUnfinishedExecutionIds = (
await this.executionRepository.find({
where: {
status: In(['running', 'unknown']),
},
select: ['id'],
})
).map((e) => e.id);
unfinishedExecutionIds = Array.from(
new Set<string>([...unfinishedExecutionIds, ...dbUnfinishedExecutionIds]),
);
}
if (unfinishedExecutionIds.length > 0) {
const activeWorkflows = await this.workflowRepository.find({
where: { activeVersionId: Not(IsNull()) },
select: ['id', 'name'],
});
if (activeWorkflows.length > 0) {
this.logger.info('Currently active workflows:');
for (const workflowData of activeWorkflows) {
this.logger.info(` - ${workflowData.name} (ID: ${workflowData.id})`);
}
}
const recoveryAlreadyAttempted = this.logWriter?.isRecoveryProcessRunning();
if (recoveryAlreadyAttempted || this.globalConfig.eventBus.crashRecoveryMode === 'simple') {
await this.executionRepository.markAsCrashed(unfinishedExecutionIds);
// if we end up here, it means that the previous recovery process did not finish
// a possible reason would be that recreating the workflow data itself caused e.g an OOM error
// in that case, we do not want to retry the recovery process, but rather mark the executions as crashed
if (recoveryAlreadyAttempted)
this.logger.warn('Skipped recovery process since it previously failed.');
} else {
// start actual recovery process and write recovery process flag file
this.logWriter?.startRecoveryProcess();
const recoveredIds: string[] = [];
const crashedWorkflowIds: Set<string> = new Set();
for (const executionId of unfinishedExecutionIds) {
const logMessages = unsentAndUnfinished.unfinishedExecutions[executionId];
const recoveredExecution = await this.recoveryService.recoverFromLogs(
executionId,
logMessages ?? [],
);
if (recoveredExecution) {
if (recoveredExecution.status === 'crashed') {
crashedWorkflowIds.add(recoveredExecution.workflowId);
}
recoveredIds.push(executionId);
}
}
if (recoveredIds.length > 0) {
this.logger.warn(`Found unfinished executions: ${recoveredIds.join(', ')}`);
this.logger.info(
'This could be due to a crash of an active workflow or a restart of n8n',
);
}
if (
this.globalConfig.executions.recovery.workflowDeactivationEnabled &&
crashedWorkflowIds.size > 0
) {
await this.recoveryService.autoDeactivateWorkflowsIfNeeded(crashedWorkflowIds);
}
}
// remove the recovery process flag file
this.logWriter?.endRecoveryProcess();
}
}
}

View File

@ -0,0 +1,129 @@
import { Logger } from '@n8n/backend-common';
import { mockInstance } from '@n8n/backend-test-utils';
import { GlobalConfig } from '@n8n/config';
import { Container } from '@n8n/di';
import { mock } from 'jest-mock-extended';
import { ErrorReporter } from 'n8n-core';
import type { INode, IRun, IWorkflowBase } from 'n8n-workflow';
import { createRunExecutionData, NodeOperationError } from 'n8n-workflow';
import { OwnershipService } from '@/services/ownership.service';
import { UrlService } from '@/services/url.service';
import { WorkflowExecutionService } from '@/workflows/workflow-execution.service';
import { executeErrorWorkflow } from '../execute-error-workflow';
describe('executeErrorWorkflow', () => {
// Register DI mocks so executeErrorWorkflow resolves these instead of the
// real services.
mockInstance(Logger);
mockInstance(ErrorReporter);
const globalConfig = mockInstance(GlobalConfig);
const urlService = mockInstance(UrlService);
const ownershipService = mockInstance(OwnershipService);
const workflowExecutionService = mockInstance(WorkflowExecutionService);
Container.set(GlobalConfig, globalConfig);
Container.set(UrlService, urlService);
Container.set(OwnershipService, ownershipService);
Container.set(WorkflowExecutionService, workflowExecutionService);
const mockNode = mock<INode>({
name: 'TestNode',
type: 'n8n-nodes-base.set',
typeVersion: 1,
position: [0, 0],
parameters: {},
});
beforeEach(() => {
jest.resetAllMocks();
// resetAllMocks clears the config mock, so re-seed the error trigger type.
globalConfig.nodes = mock<GlobalConfig['nodes']>({
errorTriggerType: 'n8n-nodes-base.errorTrigger',
});
});
describe('pastExecutionUrl', () => {
it('should use getInstanceBaseUrl for pastExecutionUrl', () => {
const mockInstanceBaseUrl = 'https://editor.example.com';
urlService.getInstanceBaseUrl.mockReturnValue(mockInstanceBaseUrl);
const workflowData = mock<IWorkflowBase>({
id: 'workflow-123',
name: 'Test Workflow',
settings: { errorWorkflow: 'error-workflow-456' },
nodes: [],
});
const testError = new NodeOperationError(mockNode, 'Test error');
const fullRunData: IRun = {
data: createRunExecutionData({
resultData: {
error: testError,
lastNodeExecuted: 'TestNode',
runData: {},
},
}),
mode: 'trigger',
startedAt: new Date(),
storedAt: 'db',
status: 'error',
};
const mockProject = { id: 'project-123' };
ownershipService.getWorkflowProjectCached.mockResolvedValue(mockProject as never);
workflowExecutionService.executeErrorWorkflow.mockResolvedValue(undefined);
executeErrorWorkflow(workflowData, fullRunData, 'trigger', 'execution-789');
expect(urlService.getInstanceBaseUrl).toHaveBeenCalled();
});
it('should construct correct pastExecutionUrl format with instanceBaseUrl', async () => {
const mockInstanceBaseUrl = 'https://editor.example.com';
urlService.getInstanceBaseUrl.mockReturnValue(mockInstanceBaseUrl);
const workflowData = mock<IWorkflowBase>({
id: 'workflow-123',
name: 'Test Workflow',
settings: { errorWorkflow: 'error-workflow-456' },
nodes: [],
});
const testError = new NodeOperationError(mockNode, 'Test error');
const fullRunData: IRun = {
data: createRunExecutionData({
resultData: {
error: testError,
lastNodeExecuted: 'TestNode',
runData: {},
},
}),
mode: 'trigger',
startedAt: new Date(),
storedAt: 'db',
status: 'error',
};
const mockProject = { id: 'project-123' };
ownershipService.getWorkflowProjectCached.mockResolvedValue(mockProject as never);
// Capture the error-workflow payload to inspect the generated URL.
let capturedWorkflowErrorData: unknown;
workflowExecutionService.executeErrorWorkflow.mockImplementation(
async (_errorWorkflow, workflowErrorData) => {
capturedWorkflowErrorData = workflowErrorData;
},
);
executeErrorWorkflow(workflowData, fullRunData, 'trigger', 'execution-789');
// Wait for async operations
await new Promise(process.nextTick);
expect(capturedWorkflowErrorData).toMatchObject({
execution: {
id: 'execution-789',
url: 'https://editor.example.com/workflow/workflow-123/executions/execution-789',
},
});
});
});
});

View File

@ -31,7 +31,7 @@ export function executeErrorWorkflow(
// Check if there was an error and if so if an errorWorkflow or a trigger is set
let pastExecutionUrl: string | undefined;
if (executionId !== undefined) {
pastExecutionUrl = `${Container.get(UrlService).getWebhookBaseUrl()}workflow/${
pastExecutionUrl = `${Container.get(UrlService).getInstanceBaseUrl()}/workflow/${
workflowData.id
}/executions/${executionId}`;
}

View File

@ -276,10 +276,55 @@ describe('ExecutionRecoveryService', () => {
*/
assert(amendedExecution);
expect(amendedExecution.stoppedAt).not.toBe(execution.stoppedAt);
expect(amendedExecution.data).toEqual({ resultData: { runData: {} } });
expect(amendedExecution.data).toEqual({ version: 1, resultData: { runData: {} } });
expect(amendedExecution.status).toBe('crashed');
});
// Regression for CAT-752: resultData.runData can be absent despite the type
// saying otherwise; recovery must still reconstruct per-node data from logs.
test('for running execution without `runData`, should reconstruct missing node data', async () => {
/**
 * Arrange
 */
const workflow = await createWorkflow(OOM_WORKFLOW);
const executionDataWithoutRunData = structuredClone(IN_PROGRESS_EXECUTION_DATA);
// @ts-expect-error CAT-752
delete executionDataWithoutRunData.resultData.runData;
const execution = await createExecution(
{
status: 'running',
data: stringify(executionDataWithoutRunData),
},
workflow,
);
const messages = setupMessages(execution.id, workflow.name);
/**
 * Act
 */
const amendedExecution = await executionRecoveryService.recoverFromLogs(
execution.id,
messages,
);
/**
 * Assert
 */
const resultData = amendedExecution?.data.resultData;
if (!resultData) fail('Expected `resultData` to be defined');
expect(resultData.error).toBeInstanceOf(WorkflowCrashedError);
expect(resultData.lastNodeExecuted).toBe('DebugHelper');
const runData = resultData.runData;
if (!runData) fail('Expected `runData` to be defined');
expect(runData['When clicking "Execute workflow"']?.at(0)?.executionStatus).toBe('success');
expect(runData.DebugHelper?.at(0)?.executionStatus).toBe('crashed');
});
test('for running execution, should update `status`, `stoppedAt` and `data` if last node did not finish', async () => {
/**
* Arrange

View File

@ -23,7 +23,7 @@ import { Push } from '@/push';
import { OwnershipService } from '@/services/ownership.service';
import { UserManagementMailer } from '@/user-management/email/user-management-mailer';
import type { EventMessageTypes } from '../eventbus/event-message-classes';
import { isNodeEventMessage, type EventMessageTypes } from '../eventbus/event-message-classes';
/**
* Service for recovering key properties in executions.
@ -128,9 +128,9 @@ export class ExecutionRecoveryService {
private async amend(executionId: string, messages: EventMessageTypes[]) {
if (messages.length === 0) return await this.amendWithoutLogs(executionId);
const { nodeMessages, workflowMessages } = this.toRelevantMessages(messages);
const { nodeMessagesByName, workflowMessages } = this.toRelevantMessages(messages);
if (nodeMessages.length === 0) return null;
if (Object.keys(nodeMessagesByName).length === 0) return null;
const execution = await this.executionRepository.findSingleExecution(executionId, {
includeData: true,
@ -149,11 +149,16 @@ export class ExecutionRecoveryService {
return null;
}
const runExecutionData = execution.data ?? { resultData: { runData: {} } };
const runExecutionData = execution.data ?? createEmptyRunExecutionData();
// CAT-752: runData can be missing even tho according to the type it shouldn't be.
// We initialize it to avoid referencing a property of undefined later on.
runExecutionData.resultData.runData ??= {};
let lastNodeRunTimestamp: DateTime | undefined;
for (const node of execution.workflowData.nodes) {
const nodeMessages = nodeMessagesByName[node.name] ?? [];
const nodeStartedMessage = nodeMessages.find(
(m) => m.payload.nodeName === node.name && m.eventName === 'n8n.node.started',
);
@ -217,19 +222,21 @@ export class ExecutionRecoveryService {
private toRelevantMessages(messages: EventMessageTypes[]) {
return messages.reduce<{
nodeMessages: EventMessageTypes[];
nodeMessagesByName: Record<string, EventMessageTypes[]>;
workflowMessages: EventMessageTypes[];
}>(
(acc, cur) => {
if (cur.eventName.startsWith('n8n.node.')) {
acc.nodeMessages.push(cur);
if (isNodeEventMessage(cur)) {
const nodeName = cur.payload.nodeName;
acc.nodeMessagesByName[nodeName] ??= [];
acc.nodeMessagesByName[nodeName].push(cur);
} else if (cur.eventName.startsWith('n8n.workflow.')) {
acc.workflowMessages.push(cur);
}
return acc;
},
{ nodeMessages: [], workflowMessages: [] },
{ nodeMessagesByName: {}, workflowMessages: [] },
);
}

View File

@ -1,11 +1,12 @@
import { mockInstance, mockLogger } from '@n8n/backend-test-utils';
import { InstanceSettings } from 'n8n-core';
import { EncryptionKeyProxy, InstanceSettings } from 'n8n-core';
import { KeyManagerService } from '../key-manager.service';
import { EncryptionBootstrapService } from '../encryption-bootstrap.service';
describe('EncryptionBootstrapService', () => {
const keyManager = mockInstance(KeyManagerService);
const encryptionKeyProxy = mockInstance(EncryptionKeyProxy);
beforeEach(() => {
jest.clearAllMocks();
@ -17,6 +18,7 @@ describe('EncryptionBootstrapService', () => {
new EncryptionBootstrapService(
keyManager,
mockInstance(InstanceSettings, { encryptionKey: 'test-instance-key' }),
encryptionKeyProxy,
mockLogger(),
);
@ -32,6 +34,12 @@ describe('EncryptionBootstrapService', () => {
expect(keyManager.bootstrapGcmKey).toHaveBeenCalled();
});
it('wires the key manager into the encryption key proxy', async () => {
await createService().run();
expect(encryptionKeyProxy.setProvider).toHaveBeenCalledWith(keyManager);
});
it('bootstraps CBC before GCM', async () => {
const order: string[] = [];
keyManager.bootstrapLegacyCbcKey.mockImplementation(async () => {

View File

@ -1,6 +1,6 @@
import { Logger } from '@n8n/backend-common';
import { Service } from '@n8n/di';
import { InstanceSettings } from 'n8n-core';
import { EncryptionKeyProxy, InstanceSettings } from 'n8n-core';
import { KeyManagerService } from './key-manager.service';
@ -9,6 +9,7 @@ export class EncryptionBootstrapService {
constructor(
private readonly keyManager: KeyManagerService,
private readonly instanceSettings: InstanceSettings,
private readonly encryptionKeyProxy: EncryptionKeyProxy,
private readonly logger: Logger,
) {
this.logger = this.logger.scoped('encryption-key-manager');
@ -17,6 +18,7 @@ export class EncryptionBootstrapService {
/**
 * Bootstraps encryption keys in a fixed order: first the legacy CBC key
 * (seeded from the instance settings encryption key), then the GCM key,
 * and finally registers the key manager as the encryption key proxy's
 * provider. The CBC-before-GCM ordering is relied upon by callers/tests.
 */
async run(): Promise<void> {
await this.keyManager.bootstrapLegacyCbcKey(this.instanceSettings.encryptionKey);
await this.keyManager.bootstrapGcmKey();
this.encryptionKeyProxy.setProvider(this.keyManager);
this.logger.debug('Encryption key bootstrap complete');
}
}

View File

@ -17,6 +17,7 @@ import {
type IRun,
type WorkflowExecuteMode,
} from 'n8n-workflow';
import assert from 'node:assert';
import type { TypeUnit } from '@/modules/insights/database/entities/insights-shared';
import { InsightsMetadataRepository } from '@/modules/insights/database/repositories/insights-metadata.repository';
@ -95,9 +96,7 @@ describe('workflowExecuteAfterHandler', () => {
// ASSERT
const metadata = await insightsMetadataRepository.findOneBy({ workflowId: workflow.id });
if (!metadata) {
return fail('expected metadata to exist');
}
assert(metadata, 'Expected metadata to exist');
expect(metadata).toMatchObject({
workflowId: workflow.id,
@ -218,9 +217,7 @@ describe('workflowExecuteAfterHandler', () => {
// ASSERT
const metadata = await insightsMetadataRepository.findOneBy({ workflowId: workflow.id });
if (!metadata) {
return fail('expected metadata to exist');
}
assert(metadata, 'Expected metadata to exist');
expect(metadata).toMatchObject({
workflowId: workflow.id,
@ -629,6 +626,76 @@ describe('workflowExecuteAfterHandler - flushEvents', () => {
}
});
// insights_raw.value is a PostgreSQL BIGINT: fractional values must be
// rounded before insert.
test('flushEvents rounds fractional time_saved_min for PostgreSQL BIGINT on insights_raw.value', async () => {
repoMocks.insertInsightsRaw.mockClear();
workflow.settings = {
timeSavedMode: 'dynamic',
};
const ctx = mock<WorkflowExecuteAfterContext>({
workflow,
runData: mock<IRun>({
mode: 'webhook',
status: 'success',
startedAt: startedAt.toJSDate(),
stoppedAt: stoppedAt.toJSDate(),
data: {
resultData: {
runData: {
timeSavedNode: [{ metadata: { timeSaved: { minutes: 5.4 } } }],
},
},
},
}),
});
await insightsCollectionService.handleWorkflowExecuteAfter(ctx);
await insightsCollectionService.flushEvents();
// 5.4 minutes must be persisted as integer 5.
expect(repoMocks.insertInsightsRaw).toHaveBeenCalledWith(
expect.arrayContaining([expect.objectContaining({ type: 'time_saved_min', value: 5 })]),
);
});
// Non-finite settings values (NaN/Infinity) must be normalized to 0, not
// passed through to the driver.
test.each<{ label: string; timeSavedPerExecution: number }>([
{ label: 'NaN', timeSavedPerExecution: Number.NaN },
{ label: 'Infinity', timeSavedPerExecution: Number.POSITIVE_INFINITY },
])(
'flushEvents normalizes time_saved_min to 0 when timeSavedPerExecution is $label (PostgreSQL BIGINT)',
async ({ timeSavedPerExecution }) => {
repoMocks.insertInsightsRaw.mockClear();
workflow.settings = {
timeSavedMode: 'fixed',
timeSavedPerExecution,
};
const ctx = mock<WorkflowExecuteAfterContext>({ workflow, runData });
await insightsCollectionService.handleWorkflowExecuteAfter(ctx);
await insightsCollectionService.flushEvents();
expect(repoMocks.insertInsightsRaw).toHaveBeenCalledWith(
expect.arrayContaining([expect.objectContaining({ type: 'time_saved_min', value: 0 })]),
);
},
);
test('flushEvents normalizes runtime_ms to 0 when runtime is NaN (PostgreSQL BIGINT)', async () => {
repoMocks.insertInsightsRaw.mockClear();
// Invalid Date makes the computed runtime NaN.
const badRuntimeRunData = mock<IRun>({
mode: 'trigger',
status: 'success',
startedAt: new Date(Number.NaN),
stoppedAt: stoppedAt.toJSDate(),
});
const ctx = mock<WorkflowExecuteAfterContext>({ workflow, runData: badRuntimeRunData });
await insightsCollectionService.handleWorkflowExecuteAfter(ctx);
await insightsCollectionService.flushEvents();
expect(repoMocks.insertInsightsRaw).toHaveBeenCalledWith(
expect.arrayContaining([expect.objectContaining({ type: 'runtime_ms', value: 0 })]),
);
});
test('waits for ongoing flush during shutdown', async () => {
// ARRANGE
const config = Container.get(InsightsConfig);

View File

@ -55,6 +55,17 @@ const MIN_RUNTIME = 0;
// PostgreSQL INTEGER max (signed 32-bit)
const MAX_RUNTIME = 2 ** 31 - 1;
/**
 * Coerces an insight value into an integer safe for the PostgreSQL BIGINT
 * `insights_raw.value` column: non-finite inputs (NaN, ±Infinity) become 0,
 * and everything else is rounded to the nearest integer, since values with
 * a fractional part are serialized as such and rejected by the driver.
 */
function integerValueForInsightsRaw(value: number): number {
	return Number.isFinite(value) ? Math.round(value) : 0;
}
type BufferedInsight = Pick<InsightsRaw, 'type' | 'value' | 'timestamp'> & {
workflowId: string;
workflowName: string;
@ -256,7 +267,7 @@ export class InsightsCollectionService {
}
insight.metaId = metadata.metaId;
insight.type = event.type;
insight.value = event.value;
insight.value = integerValueForInsightsRaw(event.value);
insight.timestamp = event.timestamp;
events.push(insight);

View File

@ -1919,3 +1919,121 @@ describe('resolveDataTableByIdOrName', () => {
expect(logger.warn).toHaveBeenCalledTimes(1);
});
});
// ---------------------------------------------------------------------------
// createExecutionAdapter run() forces save settings
// ---------------------------------------------------------------------------
// Builds an InstanceAiAdapterService with every constructor dependency stubbed,
// wires in the few mocks the run() path actually touches, and returns the
// execution adapter it exposes plus the captured workflow-runner mock.
// NOTE(review): the service takes 31 positional dependencies; unused slots are
// `{}` cast to the matching `ConstructorParameters` entry — slot order must
// track the real constructor signature.
function createRunAdapterForTests(workflow: Record<string, unknown>) {
	// Resolves the workflow the adapter is asked to run.
	const mockWorkflowFinderService = {
		findWorkflowForUser: jest.fn().mockResolvedValue(workflow),
	};
	// Records the execution request; tests assert on run.mock.calls.
	const mockWorkflowRunner = {
		run: jest.fn().mockResolvedValue('exec-1'),
	};
	// Reports that no execution is currently active.
	const mockActiveExecutions = {
		has: jest.fn().mockReturnValue(false),
	};
	// Lookup of a stored execution yields nothing.
	const mockExecutionRepository = {
		findSingleExecution: jest.fn().mockResolvedValue(undefined),
	};
	const mockUser = { id: 'user-1', role: { slug: 'global:member' } } as unknown as User;
	const service = new InstanceAiAdapterService(
		// Slot 0: logger stub.
		{ error: jest.fn(), scoped: jest.fn().mockReturnThis() } as unknown as ConstructorParameters<
			typeof InstanceAiAdapterService
		>[0],
		// Slot 1: config stub with AI parameter forwarding disabled.
		{ ai: { allowSendingParameterValues: false } } as unknown as ConstructorParameters<
			typeof InstanceAiAdapterService
		>[1],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[2],
		// Slot 3: workflow finder.
		mockWorkflowFinderService as unknown as ConstructorParameters<
			typeof InstanceAiAdapterService
		>[3],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[4],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[5],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[6],
		// Slot 7: execution repository.
		mockExecutionRepository as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[7],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[8],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[9],
		// Slot 10: active-executions tracker.
		mockActiveExecutions as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[10],
		// Slot 11: workflow runner under observation.
		mockWorkflowRunner as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[11],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[12],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[13],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[14],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[15],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[16],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[17],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[18],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[19],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[20],
		// Slot 21: source-control preferences — branch is writable so runs are allowed.
		{
			getPreferences: jest.fn().mockReturnValue({ branchReadOnly: false }),
		} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[21],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[22],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[23],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[24],
		// Slot 25: license stub — nothing licensed.
		{ isLicensed: jest.fn().mockReturnValue(false) } as unknown as ConstructorParameters<
			typeof InstanceAiAdapterService
		>[25],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[26],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[27],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[28],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[29],
		{} as unknown as ConstructorParameters<typeof InstanceAiAdapterService>[30],
	);
	const adapter = service.createContext(mockUser).executionService;
	return { adapter, mockWorkflowRunner };
}
// Verifies that AI-initiated runs always persist execution data, regardless of
// the workflow's own save settings, so the agent can read results back later.
describe('createExecutionAdapter run()', () => {
	beforeEach(() => {
		jest.clearAllMocks();
	});

	it('forces save settings so the agent can read the result back', async () => {
		const { adapter, mockWorkflowRunner } = createRunAdapterForTests({
			id: 'wf-1',
			nodes: [],
			settings: {
				saveManualExecutions: false,
				saveDataSuccessExecution: 'none',
				saveDataErrorExecution: 'none',
				executionOrder: 'v1',
			},
		});

		await adapter.run('wf-1');

		expect(mockWorkflowRunner.run).toHaveBeenCalledTimes(1);
		// First positional argument of the single run() invocation.
		const [executionRequest] = mockWorkflowRunner.run.mock.calls[0];
		expect(executionRequest.workflowData.settings).toMatchObject({
			executionOrder: 'v1',
			saveManualExecutions: true,
			saveDataSuccessExecution: 'all',
			saveDataErrorExecution: 'all',
		});
	});

	it('still applies overrides when the workflow has no settings', async () => {
		const { adapter, mockWorkflowRunner } = createRunAdapterForTests({
			id: 'wf-1',
			nodes: [],
		});

		await adapter.run('wf-1');

		const [executionRequest] = mockWorkflowRunner.run.mock.calls[0];
		expect(executionRequest.workflowData.settings).toEqual({
			saveManualExecutions: true,
			saveDataSuccessExecution: 'all',
			saveDataErrorExecution: 'all',
		});
	});
});

View File

@ -738,11 +738,24 @@ export class InstanceAiAdapterService {
? (nodes.find((n) => n.name === options.triggerNodeName) ?? findTriggerNode(nodes))
: findTriggerNode(nodes);
// Force-save AI-initiated executions so that follow-up
// `executions(list/get/debug)` calls can read the result, regardless of
// instance-wide or per-workflow save settings. Manual mode is gated by
// `saveManualExecutions`; trigger modes (webhook, chat, trigger) are
// gated by the success/error settings — override all three.
const runData: IWorkflowExecutionDataProcess = {
executionMode: triggerNode
? getExecutionModeForTrigger(triggerNode)
: ('manual' as WorkflowExecuteMode),
workflowData: workflow,
workflowData: {
...workflow,
settings: {
...workflow.settings,
saveManualExecutions: true,
saveDataSuccessExecution: 'all',
saveDataErrorExecution: 'all',
},
},
userId: user.id,
pushRef,
};

View File

@ -0,0 +1,146 @@
import type { InstanceRegistration } from '@n8n/api-types';
import type { ClusterCheckContext } from '@n8n/decorators';
import { HostIdClashCheck } from '../../checks/hostid-clash.check';
// Baseline registration record; individual tests override only the fields under test.
const makeInstance = (override: Partial<InstanceRegistration> = {}): InstanceRegistration => {
	const defaults: InstanceRegistration = {
		schemaVersion: 1 as const,
		instanceKey: 'key',
		hostId: 'host',
		instanceType: 'main',
		instanceRole: 'follower',
		version: '1.0.0',
		registeredAt: 0,
		lastSeen: 0,
	};
	return { ...defaults, ...override };
};

// Shorthand for the two fields this suite varies.
const inst = (instanceKey: string, hostId: string): InstanceRegistration => {
	return makeInstance({ instanceKey, hostId });
};

// Wraps instance lists in the context shape the check receives.
// The diff is irrelevant to this check and left empty.
const makeContext = (
	current: InstanceRegistration[],
	previous: InstanceRegistration[] = [],
): ClusterCheckContext => {
	const byKey = (instances: InstanceRegistration[]) =>
		new Map(instances.map((instance) => [instance.instanceKey, instance] as const));
	return {
		currentState: byKey(current),
		previousState: byKey(previous),
		diff: { added: [], removed: [], changed: [] },
	};
};
// Behavioral spec for HostIdClashCheck: warning/audit emission across
// consecutive reconciliation cycles (previousState → currentState).
describe('HostIdClashCheck', () => {
	const check = new HostIdClashCheck();

	it('has a stable name and displayName', () => {
		expect(check.checkDescription).toEqual({
			name: 'hostid-clash',
			displayName: 'Host ID clash',
		});
	});

	describe('state transitions', () => {
		// One scenario per previous→current transition the check distinguishes.
		type Scenario = {
			name: string;
			previous: InstanceRegistration[];
			current: InstanceRegistration[];
			expectWarning: boolean;
			// 'detected'/'resolved' = exactly one audit event; 'none' = no events at all.
			expectAudit: 'detected' | 'resolved' | 'none';
		};

		const scenarios: Scenario[] = [
			{
				name: 'unique hostIds: no warning or events',
				previous: [inst('a', 'host-1'), inst('b', 'host-2')],
				current: [inst('a', 'host-1'), inst('b', 'host-2')],
				expectWarning: false,
				expectAudit: 'none',
			},
			{
				name: 'newly detected clash: warning + detected audit',
				previous: [],
				current: [inst('a', 'shared'), inst('b', 'shared'), inst('c', 'unique')],
				expectWarning: true,
				expectAudit: 'detected',
			},
			{
				name: 'clash with changed set of hostIds: fresh audit',
				previous: [inst('a', 'host-a'), inst('b', 'host-a')],
				current: [
					inst('a', 'host-a'),
					inst('b', 'host-a'),
					inst('c', 'host-b'),
					inst('d', 'host-b'),
				],
				expectWarning: true,
				expectAudit: 'detected',
			},
			{
				name: 'ongoing identical clash: warning only, no audit (deduplicated)',
				previous: [inst('a', 'shared'), inst('b', 'shared')],
				current: [inst('a', 'shared'), inst('b', 'shared')],
				expectWarning: true,
				expectAudit: 'none',
			},
			{
				// Fingerprint is keyed on hostIds, not instance keys, so key rotation
				// does not re-trigger the audit.
				name: 'clash persists under same host with different instance keys: still deduplicated',
				previous: [inst('a', 'shared'), inst('b', 'shared')],
				current: [inst('c', 'shared'), inst('d', 'shared')],
				expectWarning: true,
				expectAudit: 'none',
			},
			{
				name: 'clash resolved: resolved audit, no warning',
				previous: [inst('a', 'shared'), inst('b', 'shared')],
				current: [inst('a', 'host-a'), inst('b', 'host-b')],
				expectWarning: false,
				expectAudit: 'resolved',
			},
		];

		it.each(scenarios)('$name', async (s) => {
			const result = await check.run(makeContext(s.current, s.previous));
			if (s.expectWarning) {
				expect(result.warnings).toHaveLength(1);
				expect(result.warnings?.[0].code).toBe('cluster.hostid-clash');
				expect(result.warnings?.[0].severity).toBe('warning');
			} else {
				expect(result.warnings).toBeUndefined();
			}
			if (s.expectAudit === 'detected') {
				expect(result.auditEvents).toEqual([
					expect.objectContaining({ eventName: 'n8n.audit.cluster.hostid-clash.detected' }),
				]);
			} else if (s.expectAudit === 'resolved') {
				expect(result.auditEvents).toEqual([
					{ eventName: 'n8n.audit.cluster.hostid-clash.resolved', payload: {} },
				]);
			} else {
				expect(result.auditEvents).toBeUndefined();
			}
			// This check never pushes UI notifications.
			expect(result.pushNotifications).toBeUndefined();
		});
	});

	it('reports all clashing hostIds in a deterministic order', async () => {
		const current = [
			inst('a', 'host-b'),
			inst('b', 'host-b'),
			inst('c', 'host-a'),
			inst('d', 'host-a'),
		];
		const result = await check.run(makeContext(current));
		// hostIds and instanceKeys are both sorted, regardless of registration order.
		expect(result.warnings?.[0].message).toBe(
			'Detected multiple instances sharing the same hostId: host-a, host-b',
		);
		expect(result.warnings?.[0].context).toEqual({
			clashing: [
				{ hostId: 'host-a', instanceKeys: ['c', 'd'] },
				{ hostId: 'host-b', instanceKeys: ['a', 'b'] },
			],
		});
	});
});

View File

@ -0,0 +1,116 @@
import type { InstanceRegistration } from '@n8n/api-types';
import type { ClusterCheckContext, ClusterStateDiff } from '@n8n/decorators';
import { LifecycleCheck } from '../../checks/lifecycle.check';
// Baseline registration record; individual tests override only the fields under test.
const makeInstance = (override: Partial<InstanceRegistration> = {}): InstanceRegistration => {
	const defaults: InstanceRegistration = {
		schemaVersion: 1 as const,
		instanceKey: 'key',
		hostId: 'host',
		instanceType: 'main',
		instanceRole: 'follower',
		version: '1.0.0',
		registeredAt: 0,
		lastSeen: 0,
	};
	return { ...defaults, ...override };
};

// Builds a context around a partial diff; this check only reads `diff`,
// so both state maps stay empty.
const makeContext = (diff: Partial<ClusterStateDiff>): ClusterCheckContext => {
	return {
		currentState: new Map(),
		previousState: new Map(),
		diff: {
			added: diff.added ?? [],
			removed: diff.removed ?? [],
			changed: diff.changed ?? [],
		},
	};
};
// Behavioral spec for LifecycleCheck: one audit event per membership change
// in the pre-computed diff, and nothing else (no warnings, no notifications).
describe('LifecycleCheck', () => {
	const check = new LifecycleCheck();

	it('has a stable name and displayName', () => {
		expect(check.checkDescription).toEqual({
			name: 'lifecycle',
			displayName: 'Cluster membership',
		});
	});

	// One scenario per diff shape the check must react to.
	type Scenario = {
		name: string;
		diff: Partial<ClusterStateDiff>;
		// Expected audit event names, in emission order (joined before left).
		expectedEvents: string[];
	};

	const scenarios: Scenario[] = [
		{
			name: 'no membership changes: empty result',
			diff: {},
			expectedEvents: [],
		},
		{
			name: 'one joined audit event per added instance',
			diff: {
				added: [makeInstance({ instanceKey: 'a' }), makeInstance({ instanceKey: 'b' })],
			},
			expectedEvents: ['n8n.audit.cluster.instance-joined', 'n8n.audit.cluster.instance-joined'],
		},
		{
			name: 'one left audit event per removed instance',
			diff: { removed: [makeInstance({ instanceKey: 'c' })] },
			expectedEvents: ['n8n.audit.cluster.instance-left'],
		},
		{
			name: 'joined and left together in the same cycle',
			diff: {
				added: [makeInstance({ instanceKey: 'a' })],
				removed: [makeInstance({ instanceKey: 'b' })],
			},
			expectedEvents: ['n8n.audit.cluster.instance-joined', 'n8n.audit.cluster.instance-left'],
		},
		{
			name: 'ignores the `changed` bucket (other checks cover role/version mutations)',
			diff: {
				changed: [
					{
						previous: makeInstance({ instanceKey: 'a', version: '1.0.0' }),
						current: makeInstance({ instanceKey: 'a', version: '1.1.0' }),
					},
				],
			},
			expectedEvents: [],
		},
	];

	it.each(scenarios)('$name', async (s) => {
		const result = await check.run(makeContext(s.diff));
		// This check never warns or pushes notifications.
		expect(result.warnings).toBeUndefined();
		expect(result.pushNotifications).toBeUndefined();
		if (s.expectedEvents.length === 0) {
			// An empty result object means "nothing to report".
			expect(result.auditEvents).toBeUndefined();
			return;
		}
		expect(result.auditEvents?.map((e) => e.eventName)).toEqual(s.expectedEvents);
	});

	it('includes membership details in the audit payload', async () => {
		const joined = makeInstance({
			instanceKey: 'a',
			hostId: 'host-1',
			instanceType: 'worker',
			instanceRole: 'follower',
			version: '1.2.0',
		});
		const result = await check.run(makeContext({ added: [joined] }));
		expect(result.auditEvents?.[0].payload).toEqual({
			instanceKey: 'a',
			hostId: 'host-1',
			instanceType: 'worker',
			instanceRole: 'follower',
			version: '1.2.0',
		});
	});
});

View File

@ -0,0 +1,136 @@
import type { InstanceRegistration } from '@n8n/api-types';
import type { ClusterCheckContext } from '@n8n/decorators';
import { SplitBrainCheck } from '../../checks/split-brain.check';
// Baseline registration record; individual tests override only the fields under test.
const makeInstance = (override: Partial<InstanceRegistration> = {}): InstanceRegistration => {
	const defaults: InstanceRegistration = {
		schemaVersion: 1 as const,
		instanceKey: 'key',
		hostId: 'host',
		instanceType: 'main',
		instanceRole: 'follower',
		version: '1.0.0',
		registeredAt: 0,
		lastSeen: 0,
	};
	return { ...defaults, ...override };
};

// Shorthand for the two fields this suite varies.
const inst = (
	instanceKey: string,
	role: InstanceRegistration['instanceRole'],
): InstanceRegistration => {
	return makeInstance({ instanceKey, instanceRole: role });
};

// Wraps instance lists in the context shape the check receives.
// The diff is irrelevant to this check and left empty.
const makeContext = (
	current: InstanceRegistration[],
	previous: InstanceRegistration[] = [],
): ClusterCheckContext => {
	const byKey = (instances: InstanceRegistration[]) =>
		new Map(instances.map((instance) => [instance.instanceKey, instance] as const));
	return {
		currentState: byKey(current),
		previousState: byKey(previous),
		diff: { added: [], removed: [], changed: [] },
	};
};
// Behavioral spec for SplitBrainCheck: warning/audit emission across
// consecutive reconciliation cycles (previousState → currentState).
describe('SplitBrainCheck', () => {
	const check = new SplitBrainCheck();

	it('has a stable name and displayName', () => {
		expect(check.checkDescription).toEqual({
			name: 'split-brain',
			displayName: 'Split-brain',
		});
	});

	describe('state transitions', () => {
		// One scenario per previous→current transition the check distinguishes.
		type Scenario = {
			name: string;
			previous: InstanceRegistration[];
			current: InstanceRegistration[];
			expectWarning: boolean;
			// 'detected'/'resolved' = exactly one audit event; 'none' = no events at all.
			expectAudit: 'detected' | 'resolved' | 'none';
		};

		const scenarios: Scenario[] = [
			{
				name: 'single leader cluster: no warning or events',
				previous: [inst('a', 'leader'), inst('b', 'follower')],
				current: [inst('a', 'leader'), inst('b', 'follower')],
				expectWarning: false,
				expectAudit: 'none',
			},
			{
				// Zero leaders is a legitimate transient state, not a split-brain.
				name: 'leaderless cluster (e.g. transition): no warning or events',
				previous: [inst('a', 'follower'), inst('b', 'follower')],
				current: [inst('a', 'follower'), inst('b', 'follower')],
				expectWarning: false,
				expectAudit: 'none',
			},
			{
				name: 'newly detected split-brain: error warning + detected audit',
				previous: [],
				current: [inst('a', 'leader'), inst('b', 'leader')],
				expectWarning: true,
				expectAudit: 'detected',
			},
			{
				name: 'split-brain with changed leader set: fresh audit',
				previous: [inst('a', 'leader'), inst('b', 'leader')],
				current: [inst('a', 'leader'), inst('c', 'leader')],
				expectWarning: true,
				expectAudit: 'detected',
			},
			{
				name: 'ongoing identical split-brain: warning only, no audit (deduplicated)',
				previous: [inst('a', 'leader'), inst('b', 'leader')],
				current: [inst('a', 'leader'), inst('b', 'leader')],
				expectWarning: true,
				expectAudit: 'none',
			},
			{
				name: 'split-brain resolved: resolved audit, no warning',
				previous: [inst('a', 'leader'), inst('b', 'leader')],
				current: [inst('a', 'leader'), inst('b', 'follower')],
				expectWarning: false,
				expectAudit: 'resolved',
			},
		];

		it.each(scenarios)('$name', async (s) => {
			const result = await check.run(makeContext(s.current, s.previous));
			if (s.expectWarning) {
				expect(result.warnings).toHaveLength(1);
				expect(result.warnings?.[0].code).toBe('cluster.split-brain');
				expect(result.warnings?.[0].severity).toBe('error');
			} else {
				expect(result.warnings).toBeUndefined();
			}
			if (s.expectAudit === 'detected') {
				expect(result.auditEvents).toEqual([
					expect.objectContaining({ eventName: 'n8n.audit.cluster.split-brain.detected' }),
				]);
			} else if (s.expectAudit === 'resolved') {
				expect(result.auditEvents).toEqual([
					{ eventName: 'n8n.audit.cluster.split-brain.resolved', payload: {} },
				]);
			} else {
				expect(result.auditEvents).toBeUndefined();
			}
			// This check never pushes UI notifications.
			expect(result.pushNotifications).toBeUndefined();
		});
	});

	it('reports all leaders (sorted by instanceKey) in warning context and audit payload', async () => {
		const current = [inst('z', 'leader'), inst('a', 'leader'), inst('m', 'follower')];
		const result = await check.run(makeContext(current));
		expect(result.warnings?.[0].message).toBe('Detected 2 instances claiming leader role: a, z');
		expect(result.auditEvents?.[0].payload).toEqual({
			leaders: [
				{ instanceKey: 'a', hostId: 'host', instanceType: 'main' },
				{ instanceKey: 'z', hostId: 'host', instanceType: 'main' },
			],
		});
	});
});
});

View File

@ -0,0 +1,144 @@
import type { InstanceRegistration } from '@n8n/api-types';
import type { ClusterCheckContext } from '@n8n/decorators';
import { VersionMismatchCheck } from '../../checks/version-mismatch.check';
// Baseline registration record; individual tests override only the fields under test.
const makeInstance = (override: Partial<InstanceRegistration> = {}): InstanceRegistration => {
	const defaults: InstanceRegistration = {
		schemaVersion: 1 as const,
		instanceKey: 'key',
		hostId: 'host',
		instanceType: 'main',
		instanceRole: 'follower',
		version: '1.0.0',
		registeredAt: 0,
		lastSeen: 0,
	};
	return { ...defaults, ...override };
};

// Builds one registration per version string, with distinct instance keys.
const versions = (...vs: string[]): InstanceRegistration[] => {
	return vs.map((version, index) => makeInstance({ instanceKey: `k${index}`, version }));
};

// Wraps instance lists in the context shape the check receives.
// The diff is irrelevant to this check and left empty.
const makeContext = (
	current: InstanceRegistration[],
	previous: InstanceRegistration[] = [],
): ClusterCheckContext => {
	const byKey = (instances: InstanceRegistration[]) =>
		new Map(instances.map((instance) => [instance.instanceKey, instance] as const));
	return {
		currentState: byKey(current),
		previousState: byKey(previous),
		diff: { added: [], removed: [], changed: [] },
	};
};
// Behavioral spec for VersionMismatchCheck: warning/audit emission across
// consecutive reconciliation cycles (previousState → currentState).
describe('VersionMismatchCheck', () => {
	const check = new VersionMismatchCheck();

	it('has a stable name and displayName', () => {
		expect(check.checkDescription).toEqual({
			name: 'version-mismatch',
			displayName: 'Version mismatch',
		});
	});

	describe('state transitions', () => {
		// One scenario per previous→current transition the check distinguishes.
		type Scenario = {
			name: string;
			previous: string[];
			current: string[];
			expectWarning: boolean;
			// 'detected'/'resolved' = exactly one audit event; 'none' = no events at all.
			expectAudit: 'detected' | 'resolved' | 'none';
		};

		const scenarios: Scenario[] = [
			{
				name: 'single version cluster: no warning or events',
				previous: ['1.0.0'],
				current: ['1.0.0'],
				expectWarning: false,
				expectAudit: 'none',
			},
			{
				name: 'newly detected mismatch: warning + detected audit',
				previous: [],
				current: ['1.0.0', '1.1.0'],
				expectWarning: true,
				expectAudit: 'detected',
			},
			{
				name: 'mismatch with changed version set: fresh audit',
				previous: ['1.0.0', '1.1.0'],
				current: ['1.0.0', '1.2.0'],
				expectWarning: true,
				expectAudit: 'detected',
			},
			{
				name: 'ongoing identical mismatch: warning only, no audit (deduplicated)',
				previous: ['1.0.0', '1.1.0'],
				current: ['1.0.0', '1.1.0'],
				expectWarning: true,
				expectAudit: 'none',
			},
			{
				name: 'mismatch resolved: resolved audit, no warning',
				previous: ['1.0.0', '1.1.0'],
				current: ['1.1.0', '1.1.0'],
				expectWarning: false,
				expectAudit: 'resolved',
			},
			{
				name: 'still resolved on next cycle: nothing emitted',
				previous: ['1.1.0', '1.1.0'],
				current: ['1.1.0', '1.1.0'],
				expectWarning: false,
				expectAudit: 'none',
			},
		];

		it.each(scenarios)('$name', async (s) => {
			const result = await check.run(makeContext(versions(...s.current), versions(...s.previous)));
			if (s.expectWarning) {
				expect(result.warnings).toHaveLength(1);
				expect(result.warnings?.[0].code).toBe('cluster.version-mismatch');
				expect(result.warnings?.[0].severity).toBe('error');
			} else {
				expect(result.warnings).toBeUndefined();
			}
			if (s.expectAudit === 'detected') {
				expect(result.auditEvents).toEqual([
					expect.objectContaining({
						eventName: 'n8n.audit.cluster.version-mismatch.detected',
					}),
				]);
			} else if (s.expectAudit === 'resolved') {
				expect(result.auditEvents).toEqual([
					{ eventName: 'n8n.audit.cluster.version-mismatch.resolved', payload: {} },
				]);
			} else {
				expect(result.auditEvents).toBeUndefined();
			}
			// This check never pushes UI notifications.
			expect(result.pushNotifications).toBeUndefined();
		});
	});

	it('treats the version set as order-independent for deduplication', async () => {
		// Same versions, different registration order — fingerprint must match.
		const previous = versions('1.1.0', '1.0.0');
		const current = versions('1.0.0', '1.1.0');
		const result = await check.run(makeContext(current, previous));
		expect(result.warnings).toHaveLength(1);
		expect(result.auditEvents).toBeUndefined();
	});

	it('reports the sorted list of versions in warning context and audit payload', async () => {
		// Duplicate versions collapse; output is sorted.
		const current = versions('1.1.0', '1.0.0', '1.0.0');
		const result = await check.run(makeContext(current));
		expect(result.warnings?.[0].context).toEqual({ versions: ['1.0.0', '1.1.0'] });
		expect(result.warnings?.[0].message).toBe(
			'Detected multiple n8n versions in the cluster: 1.0.0, 1.1.0',
		);
		expect(result.auditEvents?.[0].payload).toEqual({ versions: ['1.0.0', '1.1.0'] });
	});
});

View File

@ -0,0 +1,81 @@
import type { InstanceRegistration } from '@n8n/api-types';
import {
ClusterCheck,
type ClusterCheckContext,
type ClusterCheckResult,
type IClusterCheck,
} from '@n8n/decorators';
const CHECK_CODE = 'cluster.hostid-clash';
const AUDIT_DETECTED = 'n8n.audit.cluster.hostid-clash.detected';
const AUDIT_RESOLVED = 'n8n.audit.cluster.hostid-clash.resolved';

/**
 * Analyzes hostId collisions. `fingerprint` is a deterministic identity for the
 * current set of clashing hostIds, used to deduplicate `detected` audit events
 * across runs.
 *
 * Fingerprints on hostIds (not instance keys) because instance keys rotate on
 * restart; the operator-relevant signal is which *hosts* are misconfigured.
 */
function computeFingerprint(instances: Iterable<InstanceRegistration>): {
	hasClash: boolean;
	fingerprint: string;
	clashing: Array<{ hostId: string; instanceKeys: string[] }>;
} {
	// Group instance keys by hostId.
	const keysByHost = new Map<string, string[]>();
	for (const { hostId, instanceKey } of instances) {
		const bucket = keysByHost.get(hostId);
		if (bucket) {
			bucket.push(instanceKey);
		} else {
			keysByHost.set(hostId, [instanceKey]);
		}
	}

	// Keep only hosts claimed by more than one instance; sort keys and hosts
	// so the output (and hence the fingerprint) is deterministic.
	const clashing: Array<{ hostId: string; instanceKeys: string[] }> = [];
	for (const [hostId, keys] of keysByHost) {
		if (keys.length > 1) {
			clashing.push({ hostId, instanceKeys: [...keys].sort() });
		}
	}
	clashing.sort((a, b) => a.hostId.localeCompare(b.hostId));

	return {
		hasClash: clashing.length > 0,
		fingerprint: clashing.map((entry) => entry.hostId).join('|'),
		clashing,
	};
}
@ClusterCheck()
export class HostIdClashCheck implements IClusterCheck {
	checkDescription = {
		name: 'hostid-clash',
		displayName: 'Host ID clash',
	};

	/**
	 * Warns while multiple instances share a hostId; emits a `detected` audit
	 * only when the set of clashing hosts changes, and a one-shot `resolved`
	 * audit when a previously reported clash clears.
	 */
	async run(context: ClusterCheckContext): Promise<ClusterCheckResult> {
		const now = computeFingerprint(context.currentState.values());
		const before = computeFingerprint(context.previousState.values());

		if (!now.hasClash) {
			// Healthy: one-shot resolved audit if the previous cycle was clashing.
			return before.hasClash
				? { auditEvents: [{ eventName: AUDIT_RESOLVED, payload: {} }] }
				: {};
		}

		const hostList = now.clashing.map((entry) => entry.hostId).join(', ');
		const isNewFingerprint = now.fingerprint !== before.fingerprint;

		return {
			warnings: [
				{
					code: CHECK_CODE,
					message: `Detected multiple instances sharing the same hostId: ${hostList}`,
					severity: 'warning',
					context: { clashing: now.clashing },
				},
			],
			// Deduplicate: audit only when the clashing host set changed between cycles.
			...(isNewFingerprint
				? { auditEvents: [{ eventName: AUDIT_DETECTED, payload: { clashing: now.clashing } }] }
				: {}),
		};
	}
}

View File

@ -1 +1,4 @@
import './version-mismatch.check';
import './hostid-clash.check';
import './split-brain.check';
import './lifecycle.check';

View File

@ -0,0 +1,58 @@
import type { InstanceRegistration } from '@n8n/api-types';
import {
ClusterCheck,
type ClusterCheckAuditEvent,
type ClusterCheckContext,
type ClusterCheckResult,
type IClusterCheck,
} from '@n8n/decorators';
const AUDIT_JOINED = 'n8n.audit.cluster.instance-joined';
const AUDIT_LEFT = 'n8n.audit.cluster.instance-left';

// Projects the registration fields we expose on membership audit events.
function membershipPayload({
	instanceKey,
	hostId,
	instanceType,
	instanceRole,
	version,
}: InstanceRegistration): Record<string, unknown> {
	return { instanceKey, hostId, instanceType, instanceRole, version };
}
/**
 * Observability-only check that emits one audit event per instance that
 * joined or left the cluster since the previous reconciliation cycle.
 * Relies on `context.diff` which is pre-computed by the CheckService and
 * already only contains changes between cycles, so no explicit deduplication
 * is needed.
 */
@ClusterCheck()
export class LifecycleCheck implements IClusterCheck {
	checkDescription = {
		name: 'lifecycle',
		displayName: 'Cluster membership',
	};

	async run(context: ClusterCheckContext): Promise<ClusterCheckResult> {
		const { added, removed } = context.diff;

		// One event per membership change; `changed` entries are deliberately
		// ignored — other checks cover role/version mutations.
		const auditEvents: ClusterCheckAuditEvent[] = [
			...added.map((instance) => ({
				eventName: AUDIT_JOINED,
				payload: membershipPayload(instance),
			})),
			...removed.map((instance) => ({
				eventName: AUDIT_LEFT,
				payload: membershipPayload(instance),
			})),
		];

		// An empty result object signals "nothing to report".
		return auditEvents.length > 0 ? { auditEvents } : {};
	}
}

View File

@ -0,0 +1,76 @@
import type { InstanceRegistration } from '@n8n/api-types';
import {
ClusterCheck,
type ClusterCheckContext,
type ClusterCheckResult,
type IClusterCheck,
} from '@n8n/decorators';
const CHECK_CODE = 'cluster.split-brain';
const AUDIT_DETECTED = 'n8n.audit.cluster.split-brain.detected';
const AUDIT_RESOLVED = 'n8n.audit.cluster.split-brain.resolved';

/**
 * Analyzes leadership state. `fingerprint` is a deterministic identity for the
 * current leader set, used to deduplicate `detected` audit events across runs.
 */
function computeFingerprint(instances: Iterable<InstanceRegistration>): {
	splitBrain: boolean;
	fingerprint: string;
	leaders: Array<{ instanceKey: string; hostId: string; instanceType: string }>;
} {
	// Collect every instance claiming the leader role.
	const leaders: Array<{ instanceKey: string; hostId: string; instanceType: string }> = [];
	for (const instance of instances) {
		if (instance.instanceRole !== 'leader') continue;
		leaders.push({
			instanceKey: instance.instanceKey,
			hostId: instance.hostId,
			instanceType: instance.instanceType,
		});
	}
	// Sort so the fingerprint (and any reported list) is deterministic.
	leaders.sort((a, b) => a.instanceKey.localeCompare(b.instanceKey));

	const splitBrain = leaders.length > 1;
	// An empty fingerprint means "healthy" (zero or one leader).
	const fingerprint = splitBrain ? leaders.map((l) => l.instanceKey).join('|') : '';
	return { splitBrain, fingerprint, leaders };
}
@ClusterCheck()
export class SplitBrainCheck implements IClusterCheck {
	checkDescription = {
		name: 'split-brain',
		displayName: 'Split-brain',
	};

	/**
	 * Warns (severity error) while more than one instance claims leadership;
	 * audits `detected` only when the leader set changes, and `resolved` once
	 * when a previously reported split-brain clears.
	 */
	async run(context: ClusterCheckContext): Promise<ClusterCheckResult> {
		const now = computeFingerprint(context.currentState.values());
		const before = computeFingerprint(context.previousState.values());

		if (!now.splitBrain) {
			// Healthy: one-shot resolved audit if the previous cycle was split.
			return before.splitBrain
				? { auditEvents: [{ eventName: AUDIT_RESOLVED, payload: {} }] }
				: {};
		}

		const leaderList = now.leaders.map((leader) => leader.instanceKey).join(', ');
		const result: ClusterCheckResult = {
			warnings: [
				{
					code: CHECK_CODE,
					message: `Detected ${now.leaders.length} instances claiming leader role: ${leaderList}`,
					severity: 'error',
					context: { leaders: now.leaders },
				},
			],
		};

		// Deduplicate: audit only when the leader set differs from the previous cycle.
		if (now.fingerprint !== before.fingerprint) {
			result.auditEvents = [{ eventName: AUDIT_DETECTED, payload: { leaders: now.leaders } }];
		}
		return result;
	}
}

View File

@ -1,39 +1,59 @@
import type { InstanceRegistration } from '@n8n/api-types';
import {
ClusterCheck,
ClusterCheckContext,
ClusterCheckResult,
IClusterCheck,
type ClusterCheckContext,
type ClusterCheckResult,
type IClusterCheck,
} from '@n8n/decorators';
const CHECK_CODE = 'cluster.version-mismatch';
const AUDIT_DETECTED = 'n8n.audit.cluster.version-mismatch.detected';
const AUDIT_RESOLVED = 'n8n.audit.cluster.version-mismatch.resolved';
/**
* Returns the set of distinct versions running in the cluster as a
* deterministic fingerprint (sorted, pipe-joined). Returns an empty string
* when there is only a single version, indicating "no mismatch".
*/
function computeFingerprint(instances: Iterable<InstanceRegistration>): string {
const versions = [...new Set([...instances].map((i) => i.version))].sort();
return versions.length > 1 ? versions.join('|') : '';
}
@ClusterCheck()
export class VersionMismatchCheck implements IClusterCheck {
constructor() {}
checkDescription = {
name: 'version-mismatch',
displayName: 'Version mismatch',
};
async run(context: ClusterCheckContext): Promise<ClusterCheckResult> {
const allInstanceVersions = [...context.currentState.values()].map((i) => i.version);
const versions = [...new Set<string>(allInstanceVersions)];
const currentFingerprint = computeFingerprint(context.currentState.values());
const previousFingerprint = computeFingerprint(context.previousState.values());
if (versions.length <= 1) {
// Zero instances or a single version — no mismatch.
if (currentFingerprint === '') {
if (previousFingerprint !== '') {
return { auditEvents: [{ eventName: AUDIT_RESOLVED, payload: {} }] };
}
return {};
}
return {
const versions = currentFingerprint.split('|');
const result: ClusterCheckResult = {
warnings: [
{
code: 'cluster.version-mismatch',
message: `Detected multiple N8N versions in the cluster!: ${versions.join(', ')}`,
code: CHECK_CODE,
message: `Detected multiple n8n versions in the cluster: ${versions.join(', ')}`,
severity: 'error',
context: {
versions,
},
context: { versions },
},
],
};
if (currentFingerprint !== previousFingerprint) {
result.auditEvents = [{ eventName: AUDIT_DETECTED, payload: { versions } }];
}
return result;
}
}

View File

@ -17,7 +17,6 @@ import { mock } from 'jest-mock-extended';
import { v4 as uuid } from 'uuid';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { ForbiddenError } from '@/errors/response-errors/forbidden.error';
import { NotFoundError } from '@/errors/response-errors/not-found.error';
import { UrlService } from '@/services/url.service';
import { UserService } from '@/services/user.service';
@ -656,15 +655,12 @@ describe('UserService', () => {
});
describe('assertGetUsersAccess', () => {
it('should allow project admin to list all users', async () => {
it('should allow global member to list all users without project filter', async () => {
const member = Object.assign(new User(), { role: GLOBAL_MEMBER_ROLE });
projectService.getProjectIdsWithScope.mockResolvedValueOnce(['project-1']);
await expect(userService.assertGetUsersAccess(member)).resolves.toBeUndefined();
expect(projectService.getProjectIdsWithScope).toHaveBeenCalledWith(member, [
'project:update',
]);
expect(projectService.getProjectIdsWithScope).not.toHaveBeenCalled();
});
it('should allow non-admin members to list users by projectId', async () => {
@ -678,13 +674,6 @@ describe('UserService', () => {
]);
});
it('should throw ForbiddenError for member without project admin scope', async () => {
const member = Object.assign(new User(), { role: GLOBAL_MEMBER_ROLE });
projectService.getProjectIdsWithScope.mockResolvedValueOnce([]);
await expect(userService.assertGetUsersAccess(member)).rejects.toThrow(ForbiddenError);
});
it('should throw NotFoundError when filtering by unknown projectId', async () => {
const member = Object.assign(new User(), { role: GLOBAL_MEMBER_ROLE });
projectService.getProjectWithScope.mockResolvedValueOnce(null);

View File

@ -278,6 +278,20 @@ export class ProjectService {
return await this.projectRepository.getAccessibleProjectsAndCount(user.id, options);
}
// Returns the projects a caller can pick as share targets, including peer
// personal projects. Admins (project:read) still see everything; non-admin
// callers also see all personal projects so the share dropdown can surface
// other users. See `ProjectRepository.getShareableProjectsAndCount`.
async getShareableProjectsAndCount(
user: User,
options: ProjectListOptions,
): Promise<[Project[], number]> {
if (hasGlobalScope(user, 'project:read')) {
return await this.projectRepository.findAllProjectsAndCount(options);
}
return await this.projectRepository.getShareableProjectsAndCount(user.id, options);
}
async getPersonalProjectOwners(projectIds: string[]): Promise<ProjectRelation[]> {
return await this.projectRelationRepository.getPersonalProjectOwners(projectIds);
}

View File

@ -23,7 +23,6 @@ import type { IUserSettings } from 'n8n-workflow';
import { UserError } from 'n8n-workflow';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { ForbiddenError } from '@/errors/response-errors/forbidden.error';
import { InternalServerError } from '@/errors/response-errors/internal-server.error';
import { NotFoundError } from '@/errors/response-errors/not-found.error';
import { EventService } from '@/events/event.service';
@ -80,17 +79,6 @@ export class UserService {
}
return;
}
const isInstanceAdmin = ['global:owner', 'global:admin'].includes(user.role.slug);
if (isInstanceAdmin) {
return;
}
const hasProjectUpdateScope =
(await this.projectService.getProjectIdsWithScope(user, ['project:update'])).length > 0;
if (!hasProjectUpdateScope) {
throw new ForbiddenError(
'Listing all users is limited to instance administrators and project admins. Filter by project to list project members.',
);
}
}
async updateSettings(userId: string, newSettings: Partial<IUserSettings>) {

View File

@ -471,7 +471,9 @@ export async function getBase({
executionTimeoutTimestamp?: number;
workflowSettings?: IWorkflowSettings;
} = {}): Promise<IWorkflowExecuteAdditionalData> {
const urlBaseWebhook = Container.get(UrlService).getWebhookBaseUrl();
const urlService = Container.get(UrlService);
const urlBaseWebhook = urlService.getWebhookBaseUrl();
const instanceBaseUrl = urlService.getInstanceBaseUrl();
const globalConfig = Container.get(GlobalConfig);
@ -484,7 +486,7 @@ export async function getBase({
credentialsHelper: Container.get(CredentialsHelper),
executeWorkflow,
restApiUrl: urlBaseWebhook + globalConfig.endpoints.rest,
instanceBaseUrl: urlBaseWebhook,
instanceBaseUrl: `${instanceBaseUrl}/`,
formWaitingBaseUrl: urlBaseWebhook + globalConfig.endpoints.formWaiting,
webhookBaseUrl: urlBaseWebhook + globalConfig.endpoints.webhook,
webhookWaitingBaseUrl: urlBaseWebhook + globalConfig.endpoints.webhookWaiting,

View File

@ -34,7 +34,7 @@ import {
saveCredential,
shareCredentialWithProjects,
} from './shared/db/credentials';
import { createMember, createOwner, createUser } from './shared/db/users';
import { createChatUser, createMember, createOwner, createUser } from './shared/db/users';
import * as utils from './shared/utils/';
import { ActiveWorkflowManager } from '@/active-workflow-manager';
@ -143,6 +143,133 @@ describe('GET /projects/', () => {
});
});
// Integration coverage for the sharing-candidates listing endpoint, which is
// backed by ProjectService.getShareableProjectsAndCount: admins get all
// projects; non-admins get their accessible projects plus peer personal
// projects (but NOT peer-only team projects).
describe('GET /projects/sharing-candidates', () => {
test('member sees own personal project plus all peer personal projects', async () => {
const [member1, member2, member3] = await Promise.all([
createMember(),
createMember(),
createMember(),
]);
// teamProject1 includes member1; teamProject2 has no members.
const [teamProject1, teamProject2] = await Promise.all([
createTeamProject(undefined, member1),
createTeamProject(),
]);
const [personal1, personal2, personal3] = await Promise.all([
getPersonalProject(member1),
getPersonalProject(member2),
getPersonalProject(member3),
]);
const resp = await testServer
.authAgentFor(member1)
.get('/projects/sharing-candidates')
.query({ take: 50, skip: 0 });
expect(resp.status).toBe(200);
const respProjects = resp.body.data as Project[];
// Own + peer personal projects appear (3 personal projects total)
expect(respProjects.find((p) => p.id === personal1.id)).not.toBeUndefined();
expect(respProjects.find((p) => p.id === personal2.id)).not.toBeUndefined();
expect(respProjects.find((p) => p.id === personal3.id)).not.toBeUndefined();
// Team project the caller is a member of appears
expect(respProjects.find((p) => p.id === teamProject1.id)).not.toBeUndefined();
// Team project the caller is NOT a member of does not appear
expect(respProjects.find((p) => p.id === teamProject2.id)).toBeUndefined();
});
test('member does not see peer team projects they are not a member of', async () => {
const [member1, member2] = await Promise.all([createMember(), createMember()]);
// Team project whose only member is member2; member1 must not see it.
const peerOnlyTeam = await createTeamProject(undefined, member2);
const resp = await testServer
.authAgentFor(member1)
.get('/projects/sharing-candidates')
.query({ take: 50, skip: 0 });
expect(resp.status).toBe(200);
const respProjects = resp.body.data as Project[];
expect(respProjects.find((p) => p.id === peerOnlyTeam.id)).toBeUndefined();
});
test('search filter narrows results across personal and relation branches', async () => {
const [member1, peer] = await Promise.all([
createUser({ firstName: 'Alice', lastName: 'Anderson' }),
createUser({ firstName: 'Bob', lastName: 'Banana' }),
]);
const matchingTeam = await createTeamProject('Banana Republic', member1);
const nonMatchingTeam = await createTeamProject('Other Project', member1);
const resp = await testServer
.authAgentFor(member1)
.get('/projects/sharing-candidates')
.query({ take: 50, skip: 0, search: 'banana' });
expect(resp.status).toBe(200);
const respProjects = resp.body.data as Project[];
const peerPersonal = await getPersonalProject(peer);
// Matches by team name
expect(respProjects.find((p) => p.id === matchingTeam.id)).not.toBeUndefined();
// Matches peer personal project (name contains "Banana")
expect(respProjects.find((p) => p.id === peerPersonal.id)).not.toBeUndefined();
// Non-matching team does not appear
expect(respProjects.find((p) => p.id === nonMatchingTeam.id)).toBeUndefined();
});
test('type=team filter excludes peer personal projects', async () => {
const [member1, member2] = await Promise.all([createMember(), createMember()]);
const team = await createTeamProject(undefined, member1);
const peerPersonal = await getPersonalProject(member2);
const resp = await testServer
.authAgentFor(member1)
.get('/projects/sharing-candidates')
.query({ take: 50, skip: 0, type: 'team' });
expect(resp.status).toBe(200);
const respProjects = resp.body.data as Project[];
expect(respProjects.find((p) => p.id === team.id)).not.toBeUndefined();
expect(respProjects.find((p) => p.id === peerPersonal.id)).toBeUndefined();
});
test('owner sees all projects via the admin path', async () => {
const [owner, peer1, peer2] = await Promise.all([
createOwner(),
createMember(),
createMember(),
]);
const [team1, team2] = await Promise.all([createTeamProject(), createTeamProject()]);
const [ownerPersonal, peer1Personal, peer2Personal] = await Promise.all([
getPersonalProject(owner),
getPersonalProject(peer1),
getPersonalProject(peer2),
]);
const resp = await testServer
.authAgentFor(owner)
.get('/projects/sharing-candidates')
.query({ take: 50, skip: 0 });
expect(resp.status).toBe(200);
const respProjects = resp.body.data as Project[];
// All five projects accessible to the admin
for (const expected of [ownerPersonal, peer1Personal, peer2Personal, team1, team2]) {
expect(respProjects.find((p) => p.id === expected.id)).not.toBeUndefined();
}
});
test('caller without user:list global scope receives 403', async () => {
// Chat users lack the user:list global scope required by this endpoint.
const chatUser = await createChatUser();
const resp = await testServer
.authAgentFor(chatUser)
.get('/projects/sharing-candidates')
.query({ take: 50, skip: 0 });
expect(resp.status).toBe(403);
});
});
describe('Project members endpoints', () => {
test('POST /projects/:projectId/users adds a member and emits telemetry', async () => {
const owner = await createOwner();

View File

@ -48,11 +48,13 @@ describe('With license unlimited quota:users', () => {
await authOwnerAgent.get('/users').expect(401);
});
test('should forbid global user list for a member API key', async () => {
test('should allow global user list for a member API key with user:list scope', async () => {
const member = await createMemberWithApiKey();
await createUser();
await testServer.publicApiAgentFor(member).get('/users').expect(403);
const response = await testServer.publicApiAgentFor(member).get('/users').expect(200);
expect(response.body.data.length).toBe(2);
});
test('should allow member to list users of a project they belong to', async () => {

View File

@ -1,5 +1,21 @@
import 'reflect-metadata';
// Strip every proxy-related env var before any test code runs. Nock 14 is
// built on @mswjs/interceptors, which cannot intercept requests that axios
// routes through an HttpsProxyAgent; when no real proxy is reachable this
// surfaces as "No socket was returned". Clearing the variables keeps axios
// from creating a proxy agent for outbound requests in the first place.
const proxyEnvVarNames = [
'HTTP_PROXY',
'http_proxy',
'HTTPS_PROXY',
'https_proxy',
'ALL_PROXY',
'all_proxy',
'NO_PROXY',
'no_proxy',
];
proxyEnvVarNames.forEach((name) => {
delete process.env[name];
});
// Auto-mock heavyweight external integrations for the whole test run:
// Sentry error reporting, the n8n license SDK, and internal telemetry.
jest.mock('@sentry/node');
jest.mock('@n8n_io/license-sdk');
jest.mock('@/telemetry');

Some files were not shown because too many files have changed in this diff Show More